From 51c51ddf6d7cf652b32bb66fbb5abbc2592f3b41 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 3 Apr 2024 11:46:33 -0500 Subject: [PATCH 01/19] cleanup `handle_reclaims` (#552) get rid of `Option` on clean path --- accounts-db/src/accounts_db.rs | 59 +++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2049914710f6c7..0177ef6b151915 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2625,9 +2625,10 @@ impl AccountsDb { self.handle_reclaims( (!reclaim_vecs.is_empty()).then(|| reclaim_vecs.iter().flatten()), None, - Some((&self.clean_accounts_stats.purge_stats, &mut reclaim_result)), + Some(&mut reclaim_result), reset_accounts, &pubkeys_removed_from_accounts_index, + HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), ); measure.stop(); debug!("{} {}", clean_rooted, measure); @@ -3350,9 +3351,10 @@ impl AccountsDb { self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), None, - Some((&self.clean_accounts_stats.purge_stats, &mut reclaim_result)), + Some(&mut reclaim_result), reset_accounts, &pubkeys_removed_from_accounts_index, + HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), ); reclaims_time.stop(); @@ -3497,46 +3499,42 @@ impl AccountsDb { /// from store or slot shrinking, as those should only touch the slot they are /// currently storing to or shrinking. /// - /// * `purge_stats_and_reclaim_result` - Option containing `purge_stats` and `reclaim_result`. - /// `purge_stats`. `purge_stats` are stats used to track performance of purging dead slots. + /// * `reclaim_result` - Option containing `reclaim_result`. /// `reclaim_result` contains information about accounts that were removed from storage, /// does not include accounts that were removed from the cache. - /// If `purge_stats_and_reclaim_result.is_none()`, this implies there can be no dead slots - /// that happen as a result of this call, and the function will check that no slots are - /// cleaned up/removed via `process_dead_slots`. For instance, on store, no slots should - /// be cleaned up, but during the background clean accounts purges accounts from old rooted - /// slots, so outdated slots may be removed. /// /// * `reset_accounts` - Reset the append_vec store when the store is dead (count==0) /// From the clean and shrink paths it should be false since there may be an in-progress /// hash operation and the stores may hold accounts that need to be unref'ed. /// * `pubkeys_removed_from_accounts_index` - These keys have already been removed from the accounts index /// and should not be unref'd. If they exist in the accounts index, they are NEW. + /// * `handle_reclaims`. `purge_stats` are stats used to track performance of purging dead slots if + /// value is `ProcessDeadSlots`. + /// Otherwise, there can be no dead slots + /// that happen as a result of this call, and the function will check that no slots are + /// cleaned up/removed via `process_dead_slots`. For instance, on store, no slots should + /// be cleaned up, but during the background clean accounts purges accounts from old rooted + /// slots, so outdated slots may be removed. 
fn handle_reclaims<'a, I>( &'a self, reclaims: Option, expected_single_dead_slot: Option, - purge_stats_and_reclaim_result: Option<(&PurgeStats, &mut ReclaimResult)>, + reclaim_result: Option<&mut ReclaimResult>, reset_accounts: bool, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, + handle_reclaims: HandleReclaims<'a>, ) where I: Iterator, { if let Some(reclaims) = reclaims { - let (purge_stats, purged_account_slots, reclaimed_offsets) = if let Some(( - purge_stats, - (ref mut purged_account_slots, ref mut reclaimed_offsets), - )) = - purge_stats_and_reclaim_result - { - ( - Some(purge_stats), - Some(purged_account_slots), - Some(reclaimed_offsets), - ) - } else { - (None, None, None) - }; + let (purged_account_slots, reclaimed_offsets) = + if let Some((ref mut purged_account_slots, ref mut reclaimed_offsets)) = + reclaim_result + { + (Some(purged_account_slots), Some(reclaimed_offsets)) + } else { + (None, None) + }; let dead_slots = self.remove_dead_accounts( reclaims, @@ -3545,7 +3543,7 @@ impl AccountsDb { reset_accounts, ); - if let Some(purge_stats) = purge_stats { + if let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims { if let Some(expected_single_dead_slot) = expected_single_dead_slot { assert!(dead_slots.len() <= 1); if dead_slots.len() == 1 { @@ -5791,9 +5789,10 @@ impl AccountsDb { self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), expected_dead_slot, - Some((purge_stats, &mut ReclaimResult::default())), + Some(&mut ReclaimResult::default()), false, &pubkeys_removed_from_accounts_index, + HandleReclaims::ProcessDeadSlots(purge_stats), ); handle_reclaims_elapsed.stop(); purge_stats @@ -8497,6 +8496,8 @@ impl AccountsDb { None, reset_accounts, &HashSet::default(), + // this callsite does NOT process dead slots + HandleReclaims::DoNotProcessDeadSlots, ); handle_reclaims_time.stop(); handle_reclaims_elapsed = handle_reclaims_time.as_us(); @@ -9250,6 +9251,12 @@ pub enum CalcAccountsHashDataSource { Storages, } +#[derive(Debug, Copy, Clone)] +enum HandleReclaims<'a> { + ProcessDeadSlots(&'a PurgeStats), + DoNotProcessDeadSlots, +} + /// Which accounts hash calculation is being performed? 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum CalcAccountsHashKind { From 65a24d5140a8805bd9b837820d8049166d2f6f38 Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 3 Apr 2024 11:48:04 -0600 Subject: [PATCH 02/19] RPC sendTransaction: be more liberal in determining and setting last_valid_block_height for skip_preflight case (#483) * RPC sendTransaction: if skip_preflight, use processed-commitment Bank for last_valid_block_height and sanitization * Use nonce retry logic for skip_preflight transactions if blockhash was not found --- rpc/src/rpc.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 4c8bacfa953c7c..e6b4a28398a707 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -3647,8 +3647,11 @@ pub mod rpc_full { let (wire_transaction, unsanitized_tx) = decode_and_deserialize::(data, binary_encoding)?; - let preflight_commitment = - preflight_commitment.map(|commitment| CommitmentConfig { commitment }); + let preflight_commitment = if skip_preflight { + Some(CommitmentConfig::processed()) + } else { + preflight_commitment.map(|commitment| CommitmentConfig { commitment }) + }; let preflight_bank = &*meta.get_bank_with_config(RpcContextConfig { commitment: preflight_commitment, min_context_slot, @@ -3664,7 +3667,7 @@ pub mod rpc_full { let durable_nonce_info = transaction .get_durable_nonce() .map(|&pubkey| (pubkey, *transaction.message().recent_blockhash())); - if durable_nonce_info.is_some() { + if durable_nonce_info.is_some() || (skip_preflight && last_valid_block_height == 0) { // While it uses a defined constant, this last_valid_block_height value is chosen arbitrarily. // It provides a fallback timeout for durable-nonce transaction retries in case of // malicious packing of the retry queue. Durable-nonce transactions are otherwise From f610e7a06a878f3ef3d3f1d5d1ed0e0a9ab33bd7 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 3 Apr 2024 13:47:58 -0500 Subject: [PATCH 03/19] rework handle_reclaims ReclaimResult (#563) --- accounts-db/src/accounts_db.rs | 31 ++++++++----------------------- 1 file changed, 8 insertions(+), 23 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 0177ef6b151915..81f0b3a8fa42ad 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2621,11 +2621,9 @@ impl AccountsDb { // and those stores may be used for background hashing. let reset_accounts = false; - let mut reclaim_result = ReclaimResult::default(); - self.handle_reclaims( + let reclaim_result = self.handle_reclaims( (!reclaim_vecs.is_empty()).then(|| reclaim_vecs.iter().flatten()), None, - Some(&mut reclaim_result), reset_accounts, &pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), @@ -3347,11 +3345,9 @@ impl AccountsDb { // Don't reset from clean, since the pubkeys in those stores may need to be unref'ed // and those stores may be used for background hashing. let reset_accounts = false; - let mut reclaim_result = ReclaimResult::default(); self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), None, - Some(&mut reclaim_result), reset_accounts, &pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), @@ -3499,10 +3495,6 @@ impl AccountsDb { /// from store or slot shrinking, as those should only touch the slot they are /// currently storing to or shrinking. 
/// - /// * `reclaim_result` - Option containing `reclaim_result`. - /// `reclaim_result` contains information about accounts that were removed from storage, - /// does not include accounts that were removed from the cache. - /// /// * `reset_accounts` - Reset the append_vec store when the store is dead (count==0) /// From the clean and shrink paths it should be false since there may be an in-progress /// hash operation and the stores may hold accounts that need to be unref'ed. @@ -3519,27 +3511,21 @@ impl AccountsDb { &'a self, reclaims: Option, expected_single_dead_slot: Option, - reclaim_result: Option<&mut ReclaimResult>, reset_accounts: bool, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, handle_reclaims: HandleReclaims<'a>, - ) where + ) -> ReclaimResult + where I: Iterator, { + let mut reclaim_result = ReclaimResult::default(); if let Some(reclaims) = reclaims { - let (purged_account_slots, reclaimed_offsets) = - if let Some((ref mut purged_account_slots, ref mut reclaimed_offsets)) = - reclaim_result - { - (Some(purged_account_slots), Some(reclaimed_offsets)) - } else { - (None, None) - }; + let (ref mut purged_account_slots, ref mut reclaimed_offsets) = reclaim_result; let dead_slots = self.remove_dead_accounts( reclaims, expected_single_dead_slot, - reclaimed_offsets, + Some(reclaimed_offsets), reset_accounts, ); @@ -3553,7 +3539,7 @@ impl AccountsDb { self.process_dead_slots( &dead_slots, - purged_account_slots, + Some(purged_account_slots), purge_stats, pubkeys_removed_from_accounts_index, ); @@ -3561,6 +3547,7 @@ impl AccountsDb { assert!(dead_slots.is_empty()); } } + reclaim_result } /// During clean, some zero-lamport accounts that are marked for purge should *not* actually @@ -5789,7 +5776,6 @@ impl AccountsDb { self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), expected_dead_slot, - Some(&mut ReclaimResult::default()), false, &pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(purge_stats), @@ -8493,7 +8479,6 @@ impl AccountsDb { self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), expected_single_dead_slot, - None, reset_accounts, &HashSet::default(), // this callsite does NOT process dead slots From 854a5b492928deb0c029ef938638be88d748ef8e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 3 Apr 2024 14:56:02 -0500 Subject: [PATCH 04/19] stop once we need 10 storages (#549) --- accounts-db/src/ancient_append_vecs.rs | 31 ++++++++++++++++++-------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index a230ab5be4541e..f8717cce895b15 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -191,7 +191,12 @@ impl AncientSlotInfos { // combined ancient storages is less than the threshold, then // we've gone too far, so get rid of this entry and all after it. // Every storage after this one is larger. - if storages_remaining + ancient_storages_required < low_threshold { + // if we ever get to more than 10 required ancient storages, that is enough to stop for now. + // It will take a while to create that many. This should be a limit that only affects + // extreme testing environments. 
+ if storages_remaining + ancient_storages_required < low_threshold + || ancient_storages_required > 10 + { self.all_infos.truncate(i); break; } @@ -2156,8 +2161,9 @@ pub mod tests { let tuning = PackedAncientStorageTuning { percent_of_alive_shrunk_data: 100, max_ancient_slots: 0, - // irrelevant - ideal_storage_size: NonZeroU64::new(1).unwrap(), + // irrelevant for what this test is trying to test, but necessary to avoid minimums + ideal_storage_size: NonZeroU64::new(get_ancient_append_vec_capacity()) + .unwrap(), can_randomly_shrink, }; infos = db.collect_sort_filter_ancient_slots(vec![slot1], &tuning); @@ -2314,7 +2320,10 @@ pub mod tests { percent_of_alive_shrunk_data: 100, max_ancient_slots: 0, // irrelevant - ideal_storage_size: NonZeroU64::new(1).unwrap(), + ideal_storage_size: NonZeroU64::new( + get_ancient_append_vec_capacity(), + ) + .unwrap(), can_randomly_shrink, }; db.collect_sort_filter_ancient_slots(slot_vec.clone(), &tuning) @@ -2624,8 +2633,9 @@ pub mod tests { let tuning = PackedAncientStorageTuning { percent_of_alive_shrunk_data: 100, max_ancient_slots: 0, - // irrelevant - ideal_storage_size: NonZeroU64::new(1).unwrap(), + // irrelevant for what this test is trying to test, but necessary to avoid minimums + ideal_storage_size: NonZeroU64::new(get_ancient_append_vec_capacity()) + .unwrap(), can_randomly_shrink, }; // note this can sort infos.all_infos @@ -2633,7 +2643,7 @@ pub mod tests { } }; - assert_eq!(infos.all_infos.len(), 2); + assert_eq!(infos.all_infos.len(), 2, "{method:?}"); storages.iter().for_each(|storage| { assert!(infos .all_infos @@ -2829,8 +2839,11 @@ pub mod tests { percent_of_alive_shrunk_data, // 0 so that we combine everything with regard to the overall # of slots limit max_ancient_slots: 0, - // this is irrelevant since the limit is artificially 0 - ideal_storage_size: NonZeroU64::new(1).unwrap(), + // irrelevant for what this test is trying to test, but necessary to avoid minimums + ideal_storage_size: NonZeroU64::new( + get_ancient_append_vec_capacity(), + ) + .unwrap(), can_randomly_shrink: false, }; infos.filter_ancient_slots(&tuning); From 72c526b2bbc364f8344cfdff9ee5d6c604aa9fca Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 3 Apr 2024 14:01:00 -0600 Subject: [PATCH 05/19] Move calculation methods; partitioned epoch-rewards reorg, 4 of 5 (#529) * Add calculation sub-submodule * Move calculation methods into sub-submodule * Move unit tests into calculation sub-submodule --- runtime/src/bank.rs | 308 +-------- .../partitioned_epoch_rewards/calculation.rs | 639 ++++++++++++++++++ .../src/bank/partitioned_epoch_rewards/mod.rs | 1 + runtime/src/bank/tests.rs | 282 -------- 4 files changed, 642 insertions(+), 588 deletions(-) create mode 100644 runtime/src/bank/partitioned_epoch_rewards/calculation.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0be5848830dd15..9bbe143de22e70 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -42,13 +42,11 @@ use { builtins::{BuiltinPrototype, BUILTINS}, metrics::*, partitioned_epoch_rewards::{ - CalculateRewardsAndDistributeVoteRewardsResult, EpochRewardCalculateParamInfo, - EpochRewardStatus, PartitionedRewardsCalculation, RewardInterval, - StakeRewardCalculationPartitioned, StakeRewards, VoteRewardsAccounts, + EpochRewardCalculateParamInfo, EpochRewardStatus, PartitionedRewardsCalculation, + RewardInterval, StakeRewards, VoteRewardsAccounts, }, }, bank_forks::BankForks, - epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, 
installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, serde_snapshot::BankIncrementalSnapshotPersistence, @@ -1477,47 +1475,6 @@ impl Bank { ); } - /// Begin the process of calculating and distributing rewards. - /// This process can take multiple slots. - fn begin_partitioned_rewards( - &mut self, - reward_calc_tracer: Option, - thread_pool: &ThreadPool, - parent_epoch: Epoch, - parent_slot: Slot, - parent_block_height: u64, - rewards_metrics: &mut RewardsMetrics, - ) { - let CalculateRewardsAndDistributeVoteRewardsResult { - total_rewards, - distributed_rewards, - stake_rewards_by_partition, - } = self.calculate_rewards_and_distribute_vote_rewards( - parent_epoch, - reward_calc_tracer, - thread_pool, - rewards_metrics, - ); - - let slot = self.slot(); - let credit_start = self.block_height() + self.get_reward_calculation_num_blocks(); - - self.set_epoch_reward_status_active(stake_rewards_by_partition); - - // create EpochRewards sysvar that holds the balance of undistributed rewards with - // (total_rewards, distributed_rewards, credit_start), total capital will increase by (total_rewards - distributed_rewards) - self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_start); - - datapoint_info!( - "epoch-rewards-status-update", - ("start_slot", slot, i64), - ("start_block_height", self.block_height(), i64), - ("active", 1, i64), - ("parent_slot", parent_slot, i64), - ("parent_block_height", parent_block_height, i64), - ); - } - pub fn byte_limit_for_scans(&self) -> Option { self.rc .accounts @@ -2202,149 +2159,6 @@ impl Bank { } } - /// Calculate rewards from previous epoch to prepare for partitioned distribution. - fn calculate_rewards_for_partitioning( - &self, - prev_epoch: Epoch, - reward_calc_tracer: Option, - thread_pool: &ThreadPool, - metrics: &mut RewardsMetrics, - ) -> PartitionedRewardsCalculation { - let capitalization = self.capitalization(); - let PrevEpochInflationRewards { - validator_rewards, - prev_epoch_duration_in_years, - validator_rate, - foundation_rate, - } = self.calculate_previous_epoch_inflation_rewards(capitalization, prev_epoch); - - let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); - - let (vote_account_rewards, mut stake_rewards) = self - .calculate_validator_rewards( - prev_epoch, - validator_rewards, - reward_calc_tracer, - thread_pool, - metrics, - ) - .unwrap_or_default(); - - let num_partitions = self.get_reward_distribution_num_blocks(&stake_rewards.stake_rewards); - let parent_blockhash = self - .parent() - .expect("Partitioned rewards calculation must still have access to parent Bank.") - .last_blockhash(); - let stake_rewards_by_partition = hash_rewards_into_partitions( - std::mem::take(&mut stake_rewards.stake_rewards), - &parent_blockhash, - num_partitions as usize, - ); - - PartitionedRewardsCalculation { - vote_account_rewards, - stake_rewards_by_partition: StakeRewardCalculationPartitioned { - stake_rewards_by_partition, - total_stake_rewards_lamports: stake_rewards.total_stake_rewards_lamports, - }, - old_vote_balance_and_staked, - validator_rewards, - validator_rate, - foundation_rate, - prev_epoch_duration_in_years, - capitalization, - } - } - - // Calculate rewards from previous epoch and distribute vote rewards - fn calculate_rewards_and_distribute_vote_rewards( - &self, - prev_epoch: Epoch, - reward_calc_tracer: Option, - thread_pool: &ThreadPool, - metrics: &mut RewardsMetrics, - ) -> CalculateRewardsAndDistributeVoteRewardsResult { - let 
PartitionedRewardsCalculation { - vote_account_rewards, - stake_rewards_by_partition, - old_vote_balance_and_staked, - validator_rewards, - validator_rate, - foundation_rate, - prev_epoch_duration_in_years, - capitalization, - } = self.calculate_rewards_for_partitioning( - prev_epoch, - reward_calc_tracer, - thread_pool, - metrics, - ); - let vote_rewards = self.store_vote_accounts_partitioned(vote_account_rewards, metrics); - - // update reward history of JUST vote_rewards, stake_rewards is vec![] here - self.update_reward_history(vec![], vote_rewards); - - let StakeRewardCalculationPartitioned { - stake_rewards_by_partition, - total_stake_rewards_lamports, - } = stake_rewards_by_partition; - - // the remaining code mirrors `update_rewards_with_thread_pool()` - - let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); - - // This is for vote rewards only. - let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked; - self.assert_validator_rewards_paid(validator_rewards_paid); - - // verify that we didn't pay any more than we expected to - assert!(validator_rewards >= validator_rewards_paid + total_stake_rewards_lamports); - - info!( - "distributed vote rewards: {} out of {}, remaining {}", - validator_rewards_paid, validator_rewards, total_stake_rewards_lamports - ); - - let (num_stake_accounts, num_vote_accounts) = { - let stakes = self.stakes_cache.stakes(); - ( - stakes.stake_delegations().len(), - stakes.vote_accounts().len(), - ) - }; - self.capitalization - .fetch_add(validator_rewards_paid, Relaxed); - - let active_stake = if let Some(stake_history_entry) = - self.stakes_cache.stakes().history().get(prev_epoch) - { - stake_history_entry.effective - } else { - 0 - }; - - datapoint_info!( - "epoch_rewards", - ("slot", self.slot, i64), - ("epoch", prev_epoch, i64), - ("validator_rate", validator_rate, f64), - ("foundation_rate", foundation_rate, f64), - ("epoch_duration_in_years", prev_epoch_duration_in_years, f64), - ("validator_rewards", validator_rewards_paid, i64), - ("active_stake", active_stake, i64), - ("pre_capitalization", capitalization, i64), - ("post_capitalization", self.capitalization(), i64), - ("num_stake_accounts", num_stake_accounts, i64), - ("num_vote_accounts", num_vote_accounts, i64), - ); - - CalculateRewardsAndDistributeVoteRewardsResult { - total_rewards: validator_rewards_paid + total_stake_rewards_lamports, - distributed_rewards: validator_rewards_paid, - stake_rewards_by_partition, - } - } - fn assert_validator_rewards_paid(&self, validator_rewards_paid: u64) { assert_eq!( validator_rewards_paid, @@ -2609,36 +2423,6 @@ impl Bank { } } - /// Calculate epoch reward and return vote and stake rewards. 
- fn calculate_validator_rewards( - &self, - rewarded_epoch: Epoch, - rewards: u64, - reward_calc_tracer: Option, - thread_pool: &ThreadPool, - metrics: &mut RewardsMetrics, - ) -> Option<(VoteRewardsAccounts, StakeRewardCalculation)> { - let stakes = self.stakes_cache.stakes(); - let reward_calculate_param = self.get_epoch_reward_calculate_param_info(&stakes); - - self.calculate_reward_points_partitioned( - &reward_calculate_param, - rewards, - thread_pool, - metrics, - ) - .map(|point_value| { - self.calculate_stake_vote_rewards( - &reward_calculate_param, - rewarded_epoch, - point_value, - thread_pool, - reward_calc_tracer, - metrics, - ) - }) - } - /// Load, calculate and payout epoch rewards for stake and vote accounts fn pay_validator_rewards_with_thread_pool( &mut self, @@ -2810,68 +2594,6 @@ impl Bank { vote_with_stake_delegations_map } - /// Calculates epoch reward points from stake/vote accounts. - /// Returns reward lamports and points for the epoch or none if points == 0. - fn calculate_reward_points_partitioned( - &self, - reward_calculate_params: &EpochRewardCalculateParamInfo, - rewards: u64, - thread_pool: &ThreadPool, - metrics: &RewardsMetrics, - ) -> Option { - let EpochRewardCalculateParamInfo { - stake_history, - stake_delegations, - cached_vote_accounts, - } = reward_calculate_params; - - let solana_vote_program: Pubkey = solana_vote_program::id(); - - let get_vote_account = |vote_pubkey: &Pubkey| -> Option { - if let Some(vote_account) = cached_vote_accounts.get(vote_pubkey) { - return Some(vote_account.clone()); - } - // If accounts-db contains a valid vote account, then it should - // already have been cached in cached_vote_accounts; so the code - // below is only for sanity checking, and can be removed once - // the cache is deemed to be reliable. - let account = self.get_account_with_fixed_root(vote_pubkey)?; - VoteAccount::try_from(account).ok() - }; - - let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); - let (points, measure_us) = measure_us!(thread_pool.install(|| { - stake_delegations - .par_iter() - .map(|(_stake_pubkey, stake_account)| { - let delegation = stake_account.delegation(); - let vote_pubkey = delegation.voter_pubkey; - - let Some(vote_account) = get_vote_account(&vote_pubkey) else { - return 0; - }; - if vote_account.owner() != &solana_vote_program { - return 0; - } - let Ok(vote_state) = vote_account.vote_state() else { - return 0; - }; - - solana_stake_program::points::calculate_points( - stake_account.stake_state(), - vote_state, - stake_history, - new_warmup_cooldown_rate_epoch, - ) - .unwrap_or(0) - }) - .sum::() - })); - metrics.calculate_points_us.fetch_add(measure_us, Relaxed); - - (points > 0).then_some(PointValue { rewards, points }) - } - fn calculate_reward_points( &self, vote_with_stake_delegations_map: &VoteWithStakeDelegationsMap, @@ -3167,32 +2889,6 @@ impl Bank { .fetch_add(now.elapsed().as_micros() as u64, Relaxed); } - fn store_vote_accounts_partitioned( - &self, - vote_account_rewards: VoteRewardsAccounts, - metrics: &RewardsMetrics, - ) -> Vec<(Pubkey, RewardInfo)> { - let (_, measure_us) = measure_us!({ - // reformat data to make it not sparse. - // `StorableAccounts` does not efficiently handle sparse data. - // Not all entries in `vote_account_rewards.accounts_to_store` have a Some(account) to store. 
- let to_store = vote_account_rewards - .accounts_to_store - .iter() - .filter_map(|account| account.as_ref()) - .enumerate() - .map(|(i, account)| (&vote_account_rewards.rewards[i].0, account)) - .collect::>(); - self.store_accounts((self.slot(), &to_store[..])); - }); - - metrics - .store_vote_accounts_us - .fetch_add(measure_us, Relaxed); - - vote_account_rewards.rewards - } - fn store_vote_accounts( &self, vote_account_rewards: VoteRewards, diff --git a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs new file mode 100644 index 00000000000000..0921f97a2f0b39 --- /dev/null +++ b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs @@ -0,0 +1,639 @@ +use { + super::{ + Bank, CalculateRewardsAndDistributeVoteRewardsResult, EpochRewardCalculateParamInfo, + PartitionedRewardsCalculation, StakeRewardCalculationPartitioned, VoteRewardsAccounts, + }, + crate::{ + bank::{ + PrevEpochInflationRewards, RewardCalcTracer, RewardCalculationEvent, RewardsMetrics, + StakeRewardCalculation, VoteAccount, + }, + epoch_rewards_hasher::hash_rewards_into_partitions, + }, + log::info, + rayon::{ + iter::{IntoParallelRefIterator, ParallelIterator}, + ThreadPool, + }, + solana_measure::measure_us, + solana_sdk::{ + clock::{Epoch, Slot}, + pubkey::Pubkey, + reward_info::RewardInfo, + }, + solana_stake_program::points::PointValue, + std::sync::atomic::Ordering::Relaxed, +}; + +impl Bank { + /// Begin the process of calculating and distributing rewards. + /// This process can take multiple slots. + pub(in crate::bank) fn begin_partitioned_rewards( + &mut self, + reward_calc_tracer: Option, + thread_pool: &ThreadPool, + parent_epoch: Epoch, + parent_slot: Slot, + parent_block_height: u64, + rewards_metrics: &mut RewardsMetrics, + ) { + let CalculateRewardsAndDistributeVoteRewardsResult { + total_rewards, + distributed_rewards, + stake_rewards_by_partition, + } = self.calculate_rewards_and_distribute_vote_rewards( + parent_epoch, + reward_calc_tracer, + thread_pool, + rewards_metrics, + ); + + let slot = self.slot(); + let credit_start = self.block_height() + self.get_reward_calculation_num_blocks(); + + self.set_epoch_reward_status_active(stake_rewards_by_partition); + + // create EpochRewards sysvar that holds the balance of undistributed rewards with + // (total_rewards, distributed_rewards, credit_start), total capital will increase by (total_rewards - distributed_rewards) + self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_start); + + datapoint_info!( + "epoch-rewards-status-update", + ("start_slot", slot, i64), + ("start_block_height", self.block_height(), i64), + ("active", 1, i64), + ("parent_slot", parent_slot, i64), + ("parent_block_height", parent_block_height, i64), + ); + } + + // Calculate rewards from previous epoch and distribute vote rewards + fn calculate_rewards_and_distribute_vote_rewards( + &self, + prev_epoch: Epoch, + reward_calc_tracer: Option, + thread_pool: &ThreadPool, + metrics: &mut RewardsMetrics, + ) -> CalculateRewardsAndDistributeVoteRewardsResult { + let PartitionedRewardsCalculation { + vote_account_rewards, + stake_rewards_by_partition, + old_vote_balance_and_staked, + validator_rewards, + validator_rate, + foundation_rate, + prev_epoch_duration_in_years, + capitalization, + } = self.calculate_rewards_for_partitioning( + prev_epoch, + reward_calc_tracer, + thread_pool, + metrics, + ); + let vote_rewards = self.store_vote_accounts_partitioned(vote_account_rewards, metrics); + + // 
update reward history of JUST vote_rewards, stake_rewards is vec![] here + self.update_reward_history(vec![], vote_rewards); + + let StakeRewardCalculationPartitioned { + stake_rewards_by_partition, + total_stake_rewards_lamports, + } = stake_rewards_by_partition; + + // the remaining code mirrors `update_rewards_with_thread_pool()` + + let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); + + // This is for vote rewards only. + let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked; + self.assert_validator_rewards_paid(validator_rewards_paid); + + // verify that we didn't pay any more than we expected to + assert!(validator_rewards >= validator_rewards_paid + total_stake_rewards_lamports); + + info!( + "distributed vote rewards: {} out of {}, remaining {}", + validator_rewards_paid, validator_rewards, total_stake_rewards_lamports + ); + + let (num_stake_accounts, num_vote_accounts) = { + let stakes = self.stakes_cache.stakes(); + ( + stakes.stake_delegations().len(), + stakes.vote_accounts().len(), + ) + }; + self.capitalization + .fetch_add(validator_rewards_paid, Relaxed); + + let active_stake = if let Some(stake_history_entry) = + self.stakes_cache.stakes().history().get(prev_epoch) + { + stake_history_entry.effective + } else { + 0 + }; + + datapoint_info!( + "epoch_rewards", + ("slot", self.slot, i64), + ("epoch", prev_epoch, i64), + ("validator_rate", validator_rate, f64), + ("foundation_rate", foundation_rate, f64), + ("epoch_duration_in_years", prev_epoch_duration_in_years, f64), + ("validator_rewards", validator_rewards_paid, i64), + ("active_stake", active_stake, i64), + ("pre_capitalization", capitalization, i64), + ("post_capitalization", self.capitalization(), i64), + ("num_stake_accounts", num_stake_accounts, i64), + ("num_vote_accounts", num_vote_accounts, i64), + ); + + CalculateRewardsAndDistributeVoteRewardsResult { + total_rewards: validator_rewards_paid + total_stake_rewards_lamports, + distributed_rewards: validator_rewards_paid, + stake_rewards_by_partition, + } + } + + fn store_vote_accounts_partitioned( + &self, + vote_account_rewards: VoteRewardsAccounts, + metrics: &RewardsMetrics, + ) -> Vec<(Pubkey, RewardInfo)> { + let (_, measure_us) = measure_us!({ + // reformat data to make it not sparse. + // `StorableAccounts` does not efficiently handle sparse data. + // Not all entries in `vote_account_rewards.accounts_to_store` have a Some(account) to store. + let to_store = vote_account_rewards + .accounts_to_store + .iter() + .filter_map(|account| account.as_ref()) + .enumerate() + .map(|(i, account)| (&vote_account_rewards.rewards[i].0, account)) + .collect::>(); + self.store_accounts((self.slot(), &to_store[..])); + }); + + metrics + .store_vote_accounts_us + .fetch_add(measure_us, Relaxed); + + vote_account_rewards.rewards + } + + /// Calculate rewards from previous epoch to prepare for partitioned distribution. 
+ pub(in crate::bank) fn calculate_rewards_for_partitioning( + &self, + prev_epoch: Epoch, + reward_calc_tracer: Option, + thread_pool: &ThreadPool, + metrics: &mut RewardsMetrics, + ) -> PartitionedRewardsCalculation { + let capitalization = self.capitalization(); + let PrevEpochInflationRewards { + validator_rewards, + prev_epoch_duration_in_years, + validator_rate, + foundation_rate, + } = self.calculate_previous_epoch_inflation_rewards(capitalization, prev_epoch); + + let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); + + let (vote_account_rewards, mut stake_rewards) = self + .calculate_validator_rewards( + prev_epoch, + validator_rewards, + reward_calc_tracer, + thread_pool, + metrics, + ) + .unwrap_or_default(); + + let num_partitions = self.get_reward_distribution_num_blocks(&stake_rewards.stake_rewards); + let parent_blockhash = self + .parent() + .expect("Partitioned rewards calculation must still have access to parent Bank.") + .last_blockhash(); + let stake_rewards_by_partition = hash_rewards_into_partitions( + std::mem::take(&mut stake_rewards.stake_rewards), + &parent_blockhash, + num_partitions as usize, + ); + + PartitionedRewardsCalculation { + vote_account_rewards, + stake_rewards_by_partition: StakeRewardCalculationPartitioned { + stake_rewards_by_partition, + total_stake_rewards_lamports: stake_rewards.total_stake_rewards_lamports, + }, + old_vote_balance_and_staked, + validator_rewards, + validator_rate, + foundation_rate, + prev_epoch_duration_in_years, + capitalization, + } + } + + /// Calculate epoch reward and return vote and stake rewards. + fn calculate_validator_rewards( + &self, + rewarded_epoch: Epoch, + rewards: u64, + reward_calc_tracer: Option, + thread_pool: &ThreadPool, + metrics: &mut RewardsMetrics, + ) -> Option<(VoteRewardsAccounts, StakeRewardCalculation)> { + let stakes = self.stakes_cache.stakes(); + let reward_calculate_param = self.get_epoch_reward_calculate_param_info(&stakes); + + self.calculate_reward_points_partitioned( + &reward_calculate_param, + rewards, + thread_pool, + metrics, + ) + .map(|point_value| { + self.calculate_stake_vote_rewards( + &reward_calculate_param, + rewarded_epoch, + point_value, + thread_pool, + reward_calc_tracer, + metrics, + ) + }) + } + + /// Calculates epoch reward points from stake/vote accounts. + /// Returns reward lamports and points for the epoch or none if points == 0. + fn calculate_reward_points_partitioned( + &self, + reward_calculate_params: &EpochRewardCalculateParamInfo, + rewards: u64, + thread_pool: &ThreadPool, + metrics: &RewardsMetrics, + ) -> Option { + let EpochRewardCalculateParamInfo { + stake_history, + stake_delegations, + cached_vote_accounts, + } = reward_calculate_params; + + let solana_vote_program: Pubkey = solana_vote_program::id(); + + let get_vote_account = |vote_pubkey: &Pubkey| -> Option { + if let Some(vote_account) = cached_vote_accounts.get(vote_pubkey) { + return Some(vote_account.clone()); + } + // If accounts-db contains a valid vote account, then it should + // already have been cached in cached_vote_accounts; so the code + // below is only for sanity checking, and can be removed once + // the cache is deemed to be reliable. 
+ let account = self.get_account_with_fixed_root(vote_pubkey)?; + VoteAccount::try_from(account).ok() + }; + + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); + let (points, measure_us) = measure_us!(thread_pool.install(|| { + stake_delegations + .par_iter() + .map(|(_stake_pubkey, stake_account)| { + let delegation = stake_account.delegation(); + let vote_pubkey = delegation.voter_pubkey; + + let Some(vote_account) = get_vote_account(&vote_pubkey) else { + return 0; + }; + if vote_account.owner() != &solana_vote_program { + return 0; + } + let Ok(vote_state) = vote_account.vote_state() else { + return 0; + }; + + solana_stake_program::points::calculate_points( + stake_account.stake_state(), + vote_state, + stake_history, + new_warmup_cooldown_rate_epoch, + ) + .unwrap_or(0) + }) + .sum::() + })); + metrics.calculate_points_us.fetch_add(measure_us, Relaxed); + + (points > 0).then_some(PointValue { rewards, points }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + bank::{null_tracer, tests::create_genesis_config, VoteReward}, + genesis_utils::{ + create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, + }, + stake_account::StakeAccount, + stakes::Stakes, + }, + rayon::ThreadPoolBuilder, + solana_sdk::{ + account::{accounts_equal, ReadableAccount, WritableAccount}, + native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, + reward_type::RewardType, + signature::Signer, + stake::state::Delegation, + vote::state::{VoteStateVersions, MAX_LOCKOUT_HISTORY}, + }, + solana_vote_program::vote_state, + std::sync::RwLockReadGuard, + }; + + /// Helper function to create a bank that pays some rewards + fn create_reward_bank(expected_num_delegations: usize) -> (Bank, Vec, Vec) { + let validator_keypairs = (0..expected_num_delegations) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect::>(); + + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![2_000_000_000; expected_num_delegations], + ); + + let bank = Bank::new_for_tests(&genesis_config); + + // Fill bank_forks with banks with votes landing in the next slot + // Create enough banks such that vote account will root + for validator_vote_keypairs in &validator_keypairs { + let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); + let mut vote_account = bank.get_account(&vote_id).unwrap(); + // generate some rewards + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); + for i in 0..MAX_LOCKOUT_HISTORY + 42 { + if let Some(v) = vote_state.as_mut() { + vote_state::process_slot_vote_unchecked(v, i as u64) + } + let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + vote_state::to(&versioned, &mut vote_account).unwrap(); + match versioned { + VoteStateVersions::Current(v) => { + vote_state = Some(*v); + } + _ => panic!("Has to be of type Current"), + }; + } + bank.store_account_and_update_capitalization(&vote_id, &vote_account); + } + ( + bank, + validator_keypairs + .iter() + .map(|k| k.vote_keypair.pubkey()) + .collect(), + validator_keypairs + .iter() + .map(|k| k.stake_keypair.pubkey()) + .collect(), + ) + } + + #[test] + fn test_store_vote_accounts_partitioned() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); + + let expected_vote_rewards_num = 100; + + let vote_rewards = (0..expected_vote_rewards_num) + .map(|_| Some((Pubkey::new_unique(), VoteReward::new_random()))) + .collect::>(); + + let mut vote_rewards_account = VoteRewardsAccounts::default(); + vote_rewards.iter().for_each(|e| { + if let Some(p) = &e { + let info = RewardInfo { + reward_type: RewardType::Voting, + lamports: p.1.vote_rewards as i64, + post_balance: p.1.vote_rewards, + commission: Some(p.1.commission), + }; + vote_rewards_account.rewards.push((p.0, info)); + vote_rewards_account + .accounts_to_store + .push(e.as_ref().map(|p| p.1.vote_account.clone())); + } + }); + + let metrics = RewardsMetrics::default(); + + let stored_vote_accounts = + bank.store_vote_accounts_partitioned(vote_rewards_account, &metrics); + assert_eq!(expected_vote_rewards_num, stored_vote_accounts.len()); + + // load accounts to make sure they were stored correctly + vote_rewards.iter().for_each(|e| { + if let Some(p) = &e { + let (k, account) = (p.0, p.1.vote_account.clone()); + let loaded_account = bank.load_slow_with_fixed_root(&bank.ancestors, &k).unwrap(); + assert!(accounts_equal(&loaded_account.0, &account)); + } + }); + } + + #[test] + fn test_store_vote_accounts_partitioned_empty() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); + + let expected = 0; + let vote_rewards = VoteRewardsAccounts::default(); + let metrics = RewardsMetrics::default(); + + let stored_vote_accounts = bank.store_vote_accounts_partitioned(vote_rewards, &metrics); + assert_eq!(expected, stored_vote_accounts.len()); + } + + #[test] + /// Test rewards computation and partitioned rewards distribution at the epoch boundary + fn test_rewards_computation() { + solana_logger::setup(); + + let expected_num_delegations = 100; + let bank = create_reward_bank(expected_num_delegations).0; + + // Calculate rewards + let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); + let mut rewards_metrics = 
RewardsMetrics::default(); + let expected_rewards = 100_000_000_000; + + let calculated_rewards = bank.calculate_validator_rewards( + 1, + expected_rewards, + null_tracer(), + &thread_pool, + &mut rewards_metrics, + ); + + let vote_rewards = &calculated_rewards.as_ref().unwrap().0; + let stake_rewards = &calculated_rewards.as_ref().unwrap().1; + + let total_vote_rewards: u64 = vote_rewards + .rewards + .iter() + .map(|reward| reward.1.lamports) + .sum::() as u64; + + // assert that total rewards matches the sum of vote rewards and stake rewards + assert_eq!( + stake_rewards.total_stake_rewards_lamports + total_vote_rewards, + expected_rewards + ); + + // assert that number of stake rewards matches + assert_eq!(stake_rewards.stake_rewards.len(), expected_num_delegations); + } + + #[test] + fn test_rewards_point_calculation() { + solana_logger::setup(); + + let expected_num_delegations = 100; + let (bank, _, _) = create_reward_bank(expected_num_delegations); + + let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); + let rewards_metrics = RewardsMetrics::default(); + let expected_rewards = 100_000_000_000; + + let stakes: RwLockReadGuard>> = bank.stakes_cache.stakes(); + let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes); + + let point_value = bank.calculate_reward_points_partitioned( + &reward_calculate_param, + expected_rewards, + &thread_pool, + &rewards_metrics, + ); + + assert!(point_value.is_some()); + assert_eq!(point_value.as_ref().unwrap().rewards, expected_rewards); + assert_eq!(point_value.as_ref().unwrap().points, 8400000000000); + } + + #[test] + fn test_rewards_point_calculation_empty() { + solana_logger::setup(); + + // bank with no rewards to distribute + let (genesis_config, _mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let bank = Bank::new_for_tests(&genesis_config); + + let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); + let rewards_metrics: RewardsMetrics = RewardsMetrics::default(); + let expected_rewards = 100_000_000_000; + let stakes: RwLockReadGuard>> = bank.stakes_cache.stakes(); + let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes); + + let point_value = bank.calculate_reward_points_partitioned( + &reward_calculate_param, + expected_rewards, + &thread_pool, + &rewards_metrics, + ); + + assert!(point_value.is_none()); + } + + #[test] + fn test_calculate_stake_vote_rewards() { + solana_logger::setup(); + + let expected_num_delegations = 1; + let (bank, voters, stakers) = create_reward_bank(expected_num_delegations); + + let vote_pubkey = voters.first().unwrap(); + let mut vote_account = bank + .load_slow_with_fixed_root(&bank.ancestors, vote_pubkey) + .unwrap() + .0; + + let stake_pubkey = stakers.first().unwrap(); + let stake_account = bank + .load_slow_with_fixed_root(&bank.ancestors, stake_pubkey) + .unwrap() + .0; + + let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); + let mut rewards_metrics = RewardsMetrics::default(); + + let point_value = PointValue { + rewards: 100000, // lamports to split + points: 1000, // over these points + }; + let tracer = |_event: &RewardCalculationEvent| {}; + let reward_calc_tracer = Some(tracer); + let rewarded_epoch = bank.epoch(); + let stakes: RwLockReadGuard>> = bank.stakes_cache.stakes(); + let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes); + let (vote_rewards_accounts, stake_reward_calculation) = bank.calculate_stake_vote_rewards( + 
&reward_calculate_param, + rewarded_epoch, + point_value, + &thread_pool, + reward_calc_tracer, + &mut rewards_metrics, + ); + + assert_eq!( + vote_rewards_accounts.rewards.len(), + vote_rewards_accounts.accounts_to_store.len() + ); + assert_eq!(vote_rewards_accounts.rewards.len(), 1); + let rewards = &vote_rewards_accounts.rewards[0]; + let account = &vote_rewards_accounts.accounts_to_store[0]; + let vote_rewards = 0; + let commision = 0; + _ = vote_account.checked_add_lamports(vote_rewards); + assert_eq!( + account.as_ref().unwrap().lamports(), + vote_account.lamports() + ); + assert!(accounts_equal(account.as_ref().unwrap(), &vote_account)); + assert_eq!( + rewards.1, + RewardInfo { + reward_type: RewardType::Voting, + lamports: vote_rewards as i64, + post_balance: vote_account.lamports(), + commission: Some(commision), + } + ); + assert_eq!(&rewards.0, vote_pubkey); + + assert_eq!(stake_reward_calculation.stake_rewards.len(), 1); + assert_eq!( + &stake_reward_calculation.stake_rewards[0].stake_pubkey, + stake_pubkey + ); + + let original_stake_lamport = stake_account.lamports(); + let rewards = stake_reward_calculation.stake_rewards[0] + .stake_reward_info + .lamports; + let expected_reward_info = RewardInfo { + reward_type: RewardType::Staking, + lamports: rewards, + post_balance: original_stake_lamport + rewards as u64, + commission: Some(commision), + }; + assert_eq!( + stake_reward_calculation.stake_rewards[0].stake_reward_info, + expected_reward_info, + ); + } +} diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index 8248f0c51bb387..e50b5217676aa0 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -1,3 +1,4 @@ +mod calculation; mod distribution; mod sysvar; diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 1fb875be988751..05a0d662f9572e 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12126,105 +12126,6 @@ fn test_squash_timing_add_assign() { assert!(t0 == expected); } -/// Helper function to create a bank that pays some rewards -fn create_reward_bank(expected_num_delegations: usize) -> (Bank, Vec, Vec) { - let validator_keypairs = (0..expected_num_delegations) - .map(|_| ValidatorVoteKeypairs::new_rand()) - .collect::>(); - - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( - 1_000_000_000, - &validator_keypairs, - vec![2_000_000_000; expected_num_delegations], - ); - - let bank = Bank::new_for_tests(&genesis_config); - - // Fill bank_forks with banks with votes landing in the next slot - // Create enough banks such that vote account will root - for validator_vote_keypairs in &validator_keypairs { - let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); - let mut vote_account = bank.get_account(&vote_id).unwrap(); - // generate some rewards - let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); - for i in 0..MAX_LOCKOUT_HISTORY + 42 { - if let Some(v) = vote_state.as_mut() { - vote_state::process_slot_vote_unchecked(v, i as u64) - } - let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); - vote_state::to(&versioned, &mut vote_account).unwrap(); - match versioned { - VoteStateVersions::Current(v) => { - vote_state = Some(*v); - } - _ => panic!("Has to be of type Current"), - }; - } - bank.store_account_and_update_capitalization(&vote_id, &vote_account); - } - ( - bank, - validator_keypairs - .iter() - .map(|k| k.vote_keypair.pubkey()) - .collect(), - validator_keypairs - .iter() - .map(|k| k.stake_keypair.pubkey()) - .collect(), - ) -} - -#[test] -fn test_rewards_point_calculation() { - solana_logger::setup(); - - let expected_num_delegations = 100; - let (bank, _, _) = create_reward_bank(expected_num_delegations); - - let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let rewards_metrics = RewardsMetrics::default(); - let expected_rewards = 100_000_000_000; - - let stakes: RwLockReadGuard>> = bank.stakes_cache.stakes(); - let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes); - - let point_value = bank.calculate_reward_points_partitioned( - &reward_calculate_param, - expected_rewards, - &thread_pool, - &rewards_metrics, - ); - - assert!(point_value.is_some()); - assert_eq!(point_value.as_ref().unwrap().rewards, expected_rewards); - assert_eq!(point_value.as_ref().unwrap().points, 8400000000000); -} - -#[test] -fn test_rewards_point_calculation_empty() { - solana_logger::setup(); - - // bank with no rewards to distribute - let (genesis_config, _mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank = Bank::new_for_tests(&genesis_config); - - let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let rewards_metrics: RewardsMetrics = RewardsMetrics::default(); - let expected_rewards = 100_000_000_000; - let stakes: RwLockReadGuard>> = bank.stakes_cache.stakes(); - let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes); - - let point_value = bank.calculate_reward_points_partitioned( - &reward_calculate_param, - expected_rewards, - &thread_pool, - &rewards_metrics, - ); - - assert!(point_value.is_none()); -} - /// Test that reward partition range panics when passing out of range partition index #[test] #[should_panic(expected = "index out of bounds: the len is 10 but the index is 15")] @@ -12248,46 +12149,6 @@ fn test_get_stake_rewards_partition_range_panic() { let _range = &stake_rewards_bucket[15]; } -#[test] -/// Test rewards computation and partitioned rewards distribution at the epoch boundary -fn test_rewards_computation() { - solana_logger::setup(); - - let expected_num_delegations = 100; - let bank = create_reward_bank(expected_num_delegations).0; - - // Calculate rewards - let thread_pool = 
ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let mut rewards_metrics = RewardsMetrics::default(); - let expected_rewards = 100_000_000_000; - - let calculated_rewards = bank.calculate_validator_rewards( - 1, - expected_rewards, - null_tracer(), - &thread_pool, - &mut rewards_metrics, - ); - - let vote_rewards = &calculated_rewards.as_ref().unwrap().0; - let stake_rewards = &calculated_rewards.as_ref().unwrap().1; - - let total_vote_rewards: u64 = vote_rewards - .rewards - .iter() - .map(|reward| reward.1.lamports) - .sum::() as u64; - - // assert that total rewards matches the sum of vote rewards and stake rewards - assert_eq!( - stake_rewards.total_stake_rewards_lamports + total_vote_rewards, - expected_rewards - ); - - // assert that number of stake rewards matches - assert_eq!(stake_rewards.stake_rewards.len(), expected_num_delegations); -} - /// Test rewards computation and partitioned rewards distribution at the epoch boundary (one reward distribution block) #[test] fn test_rewards_computation_and_partitioned_distribution_one_block() { @@ -12566,61 +12427,6 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { } } -#[test] -fn test_store_vote_accounts_partitioned() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); - - let expected_vote_rewards_num = 100; - - let vote_rewards = (0..expected_vote_rewards_num) - .map(|_| Some((Pubkey::new_unique(), VoteReward::new_random()))) - .collect::>(); - - let mut vote_rewards_account = VoteRewardsAccounts::default(); - vote_rewards.iter().for_each(|e| { - if let Some(p) = &e { - let info = RewardInfo { - reward_type: RewardType::Voting, - lamports: p.1.vote_rewards as i64, - post_balance: p.1.vote_rewards, - commission: Some(p.1.commission), - }; - vote_rewards_account.rewards.push((p.0, info)); - vote_rewards_account - .accounts_to_store - .push(e.as_ref().map(|p| p.1.vote_account.clone())); - } - }); - - let metrics = RewardsMetrics::default(); - - let stored_vote_accounts = bank.store_vote_accounts_partitioned(vote_rewards_account, &metrics); - assert_eq!(expected_vote_rewards_num, stored_vote_accounts.len()); - - // load accounts to make sure they were stored correctly - vote_rewards.iter().for_each(|e| { - if let Some(p) = &e { - let (k, account) = (p.0, p.1.vote_account.clone()); - let loaded_account = bank.load_slow_with_fixed_root(&bank.ancestors, &k).unwrap(); - assert!(accounts_equal(&loaded_account.0, &account)); - } - }); -} - -#[test] -fn test_store_vote_accounts_partitioned_empty() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); - - let expected = 0; - let vote_rewards = VoteRewardsAccounts::default(); - let metrics = RewardsMetrics::default(); - - let stored_vote_accounts = bank.store_vote_accounts_partitioned(vote_rewards, &metrics); - assert_eq!(expected, stored_vote_accounts.len()); -} - #[test] fn test_system_instruction_allocate() { let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); @@ -12946,94 +12752,6 @@ fn test_calc_vote_accounts_to_store_normal() { } } -#[test] -fn test_calculate_stake_vote_rewards() { - solana_logger::setup(); - - let expected_num_delegations = 1; - let (bank, voters, stakers) = create_reward_bank(expected_num_delegations); - - let vote_pubkey = voters.first().unwrap(); - let mut vote_account = bank - 
.load_slow_with_fixed_root(&bank.ancestors, vote_pubkey) - .unwrap() - .0; - - let stake_pubkey = stakers.first().unwrap(); - let stake_account = bank - .load_slow_with_fixed_root(&bank.ancestors, stake_pubkey) - .unwrap() - .0; - - let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let mut rewards_metrics = RewardsMetrics::default(); - - let point_value = PointValue { - rewards: 100000, // lamports to split - points: 1000, // over these points - }; - let tracer = |_event: &RewardCalculationEvent| {}; - let reward_calc_tracer = Some(tracer); - let rewarded_epoch = bank.epoch(); - let stakes: RwLockReadGuard>> = bank.stakes_cache.stakes(); - let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes); - let (vote_rewards_accounts, stake_reward_calculation) = bank.calculate_stake_vote_rewards( - &reward_calculate_param, - rewarded_epoch, - point_value, - &thread_pool, - reward_calc_tracer, - &mut rewards_metrics, - ); - - assert_eq!( - vote_rewards_accounts.rewards.len(), - vote_rewards_accounts.accounts_to_store.len() - ); - assert_eq!(vote_rewards_accounts.rewards.len(), 1); - let rewards = &vote_rewards_accounts.rewards[0]; - let account = &vote_rewards_accounts.accounts_to_store[0]; - let vote_rewards = 0; - let commision = 0; - _ = vote_account.checked_add_lamports(vote_rewards); - assert_eq!( - account.as_ref().unwrap().lamports(), - vote_account.lamports() - ); - assert!(accounts_equal(account.as_ref().unwrap(), &vote_account)); - assert_eq!( - rewards.1, - RewardInfo { - reward_type: RewardType::Voting, - lamports: vote_rewards as i64, - post_balance: vote_account.lamports(), - commission: Some(commision), - } - ); - assert_eq!(&rewards.0, vote_pubkey); - - assert_eq!(stake_reward_calculation.stake_rewards.len(), 1); - assert_eq!( - &stake_reward_calculation.stake_rewards[0].stake_pubkey, - stake_pubkey - ); - - let original_stake_lamport = stake_account.lamports(); - let rewards = stake_reward_calculation.stake_rewards[0] - .stake_reward_info - .lamports; - let expected_reward_info = RewardInfo { - reward_type: RewardType::Staking, - lamports: rewards, - post_balance: original_stake_lamport + rewards as u64, - commission: Some(commision), - }; - assert_eq!( - stake_reward_calculation.stake_rewards[0].stake_reward_info, - expected_reward_info, - ); -} - #[test] fn test_register_hard_fork() { fn get_hard_forks(bank: &Bank) -> Vec { From f6e02e669d0b86e249c79d9efee36424adf0aa7f Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 3 Apr 2024 15:49:32 -0600 Subject: [PATCH 06/19] Bump h2 (#570) --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c92a68f9bfb0e..5210e12afff5b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2501,9 +2501,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3696f9ba5cb30d..b7f5890f442362 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2001,9 +2001,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", From f4307ad2d33317056114d847eb8b2243ad082e7e Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 3 Apr 2024 17:20:49 -0600 Subject: [PATCH 07/19] Move remaining bits; partitioned epoch-rewards reorg, 5 of 5 (#553) * Move epoch_rewards_hasher into submodule * Move unit test into epoch_rewards_hasher sub-submodule * Move integration-like tests into submodule * Move compare functionality into sub-submodule --- runtime/src/bank.rs | 97 +----- .../partitioned_epoch_rewards/calculation.rs | 12 +- .../bank/partitioned_epoch_rewards/compare.rs | 110 +++++++ .../partitioned_epoch_rewards/distribution.rs | 5 +- .../epoch_rewards_hasher.rs | 34 +- .../src/bank/partitioned_epoch_rewards/mod.rs | 302 ++++++++++++++++- runtime/src/bank/tests.rs | 306 +----------------- runtime/src/lib.rs | 1 - 8 files changed, 453 insertions(+), 414 deletions(-) create mode 100644 runtime/src/bank/partitioned_epoch_rewards/compare.rs rename runtime/src/{ => bank/partitioned_epoch_rewards}/epoch_rewards_hasher.rs (69%) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9bbe143de22e70..da92443ae4fda5 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -42,8 +42,8 @@ use { builtins::{BuiltinPrototype, BUILTINS}, metrics::*, partitioned_epoch_rewards::{ - EpochRewardCalculateParamInfo, EpochRewardStatus, PartitionedRewardsCalculation, - RewardInterval, StakeRewards, VoteRewardsAccounts, + EpochRewardCalculateParamInfo, EpochRewardStatus, RewardInterval, StakeRewards, + VoteRewardsAccounts, }, }, bank_forks::BankForks, @@ -2476,99 +2476,6 @@ impl Bank { } } - /// compare the vote and stake accounts between the normal rewards calculation code - /// and the partitioned rewards calculation code - /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code - /// This fn should have NO side effects. - /// This fn is only called in tests or with a debug cli arg prior to partitioned rewards feature activation. 
- fn compare_with_partitioned_rewards_results( - stake_rewards_expected: &[StakeReward], - vote_rewards_expected: &DashMap, - partitioned_rewards: PartitionedRewardsCalculation, - ) { - // put partitioned stake rewards in a hashmap - let mut stake_rewards: HashMap = HashMap::default(); - partitioned_rewards - .stake_rewards_by_partition - .stake_rewards_by_partition - .iter() - .flatten() - .for_each(|stake_reward| { - stake_rewards.insert(stake_reward.stake_pubkey, stake_reward); - }); - - // verify stake rewards match expected - stake_rewards_expected.iter().for_each(|stake_reward| { - let partitioned = stake_rewards.remove(&stake_reward.stake_pubkey).unwrap(); - assert_eq!(partitioned, stake_reward); - }); - assert!(stake_rewards.is_empty(), "{stake_rewards:?}"); - - let mut vote_rewards: HashMap = HashMap::default(); - partitioned_rewards - .vote_account_rewards - .accounts_to_store - .iter() - .enumerate() - .for_each(|(i, account)| { - if let Some(account) = account { - let reward = &partitioned_rewards.vote_account_rewards.rewards[i]; - vote_rewards.insert(reward.0, (reward.1, account.clone())); - } - }); - - // verify vote rewards match expected - vote_rewards_expected.iter().for_each(|entry| { - if entry.value().vote_needs_store { - let partitioned = vote_rewards.remove(entry.key()).unwrap(); - let mut to_store_partitioned = partitioned.1.clone(); - to_store_partitioned.set_lamports(partitioned.0.post_balance); - let mut to_store_normal = entry.value().vote_account.clone(); - _ = to_store_normal.checked_add_lamports(entry.value().vote_rewards); - assert_eq!(to_store_partitioned, to_store_normal, "{:?}", entry.key()); - } - }); - assert!(vote_rewards.is_empty(), "{vote_rewards:?}"); - info!( - "verified partitioned rewards calculation matching: {}, {}", - partitioned_rewards - .stake_rewards_by_partition - .stake_rewards_by_partition - .iter() - .map(|rewards| rewards.len()) - .sum::(), - partitioned_rewards - .vote_account_rewards - .accounts_to_store - .len() - ); - } - - /// compare the vote and stake accounts between the normal rewards calculation code - /// and the partitioned rewards calculation code - /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code - /// This fn should have NO side effects. 
- fn compare_with_partitioned_rewards( - &self, - stake_rewards_expected: &[StakeReward], - vote_rewards_expected: &DashMap, - rewarded_epoch: Epoch, - thread_pool: &ThreadPool, - reward_calc_tracer: Option, - ) { - let partitioned_rewards = self.calculate_rewards_for_partitioning( - rewarded_epoch, - reward_calc_tracer, - thread_pool, - &mut RewardsMetrics::default(), - ); - Self::compare_with_partitioned_rewards_results( - stake_rewards_expected, - vote_rewards_expected, - partitioned_rewards, - ); - } - fn load_vote_and_stake_accounts( &mut self, thread_pool: &ThreadPool, diff --git a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs index 0921f97a2f0b39..bda22852c6c165 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs @@ -1,14 +1,12 @@ use { super::{ - Bank, CalculateRewardsAndDistributeVoteRewardsResult, EpochRewardCalculateParamInfo, + epoch_rewards_hasher::hash_rewards_into_partitions, Bank, + CalculateRewardsAndDistributeVoteRewardsResult, EpochRewardCalculateParamInfo, PartitionedRewardsCalculation, StakeRewardCalculationPartitioned, VoteRewardsAccounts, }, - crate::{ - bank::{ - PrevEpochInflationRewards, RewardCalcTracer, RewardCalculationEvent, RewardsMetrics, - StakeRewardCalculation, VoteAccount, - }, - epoch_rewards_hasher::hash_rewards_into_partitions, + crate::bank::{ + PrevEpochInflationRewards, RewardCalcTracer, RewardCalculationEvent, RewardsMetrics, + StakeRewardCalculation, VoteAccount, }, log::info, rayon::{ diff --git a/runtime/src/bank/partitioned_epoch_rewards/compare.rs b/runtime/src/bank/partitioned_epoch_rewards/compare.rs new file mode 100644 index 00000000000000..4ff4d67fdcd023 --- /dev/null +++ b/runtime/src/bank/partitioned_epoch_rewards/compare.rs @@ -0,0 +1,110 @@ +use { + super::{Bank, PartitionedRewardsCalculation}, + crate::bank::{RewardCalcTracer, RewardsMetrics, VoteReward}, + dashmap::DashMap, + log::info, + rayon::ThreadPool, + solana_accounts_db::stake_rewards::StakeReward, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + clock::Epoch, + pubkey::Pubkey, + reward_info::RewardInfo, + }, + std::collections::HashMap, +}; + +impl Bank { + /// compare the vote and stake accounts between the normal rewards calculation code + /// and the partitioned rewards calculation code + /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code + /// This fn should have NO side effects. + pub(in crate::bank) fn compare_with_partitioned_rewards( + &self, + stake_rewards_expected: &[StakeReward], + vote_rewards_expected: &DashMap, + rewarded_epoch: Epoch, + thread_pool: &ThreadPool, + reward_calc_tracer: Option, + ) { + let partitioned_rewards = self.calculate_rewards_for_partitioning( + rewarded_epoch, + reward_calc_tracer, + thread_pool, + &mut RewardsMetrics::default(), + ); + Self::compare_with_partitioned_rewards_results( + stake_rewards_expected, + vote_rewards_expected, + partitioned_rewards, + ); + } + + /// compare the vote and stake accounts between the normal rewards calculation code + /// and the partitioned rewards calculation code + /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code + /// This fn should have NO side effects. + /// This fn is only called in tests or with a debug cli arg prior to partitioned rewards feature activation. 
+ fn compare_with_partitioned_rewards_results( + stake_rewards_expected: &[StakeReward], + vote_rewards_expected: &DashMap, + partitioned_rewards: PartitionedRewardsCalculation, + ) { + // put partitioned stake rewards in a hashmap + let mut stake_rewards: HashMap = HashMap::default(); + partitioned_rewards + .stake_rewards_by_partition + .stake_rewards_by_partition + .iter() + .flatten() + .for_each(|stake_reward| { + stake_rewards.insert(stake_reward.stake_pubkey, stake_reward); + }); + + // verify stake rewards match expected + stake_rewards_expected.iter().for_each(|stake_reward| { + let partitioned = stake_rewards.remove(&stake_reward.stake_pubkey).unwrap(); + assert_eq!(partitioned, stake_reward); + }); + assert!(stake_rewards.is_empty(), "{stake_rewards:?}"); + + let mut vote_rewards: HashMap = HashMap::default(); + partitioned_rewards + .vote_account_rewards + .accounts_to_store + .iter() + .enumerate() + .for_each(|(i, account)| { + if let Some(account) = account { + let reward = &partitioned_rewards.vote_account_rewards.rewards[i]; + vote_rewards.insert(reward.0, (reward.1, account.clone())); + } + }); + + // verify vote rewards match expected + vote_rewards_expected.iter().for_each(|entry| { + if entry.value().vote_needs_store { + let partitioned = vote_rewards.remove(entry.key()).unwrap(); + let mut to_store_partitioned = partitioned.1.clone(); + to_store_partitioned.set_lamports(partitioned.0.post_balance); + let mut to_store_normal = entry.value().vote_account.clone(); + _ = to_store_normal.checked_add_lamports(entry.value().vote_rewards); + assert_eq!(to_store_partitioned, to_store_normal, "{:?}", entry.key()); + } + }); + assert!(vote_rewards.is_empty(), "{vote_rewards:?}"); + info!( + "verified partitioned rewards calculation matching: {}, {}", + partitioned_rewards + .stake_rewards_by_partition + .stake_rewards_by_partition + .iter() + .map(|rewards| rewards.len()) + .sum::(), + partitioned_rewards + .vote_account_rewards + .accounts_to_store + .len() + ); + } +} diff --git a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs index 79c73ed5b90af7..6975119a8bb409 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs @@ -136,8 +136,9 @@ impl Bank { mod tests { use { super::*, - crate::{ - bank::tests::create_genesis_config, epoch_rewards_hasher::hash_rewards_into_partitions, + crate::bank::{ + partitioned_epoch_rewards::epoch_rewards_hasher::hash_rewards_into_partitions, + tests::create_genesis_config, }, rand::Rng, solana_sdk::{ diff --git a/runtime/src/epoch_rewards_hasher.rs b/runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs similarity index 69% rename from runtime/src/epoch_rewards_hasher.rs rename to runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs index ddf45a9095a3e8..4495a59dca9968 100644 --- a/runtime/src/epoch_rewards_hasher.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs @@ -3,7 +3,7 @@ use { solana_sdk::{epoch_rewards_hasher::EpochRewardsHasher, hash::Hash}, }; -pub(crate) fn hash_rewards_into_partitions( +pub(in crate::bank::partitioned_epoch_rewards) fn hash_rewards_into_partitions( stake_rewards: StakeRewards, parent_blockhash: &Hash, num_partitions: usize, @@ -25,7 +25,13 @@ pub(crate) fn hash_rewards_into_partitions( #[cfg(test)] mod tests { - use {super::*, solana_accounts_db::stake_rewards::StakeReward, std::collections::HashMap}; + use { + 
super::*, + crate::bank::{tests::create_genesis_config, Bank}, + solana_accounts_db::stake_rewards::StakeReward, + solana_sdk::{epoch_schedule::EpochSchedule, native_token::LAMPORTS_PER_SOL}, + std::collections::HashMap, + }; #[test] fn test_hash_rewards_into_partitions() { @@ -85,6 +91,30 @@ mod tests { } } + /// Test that reward partition range panics when passing out of range partition index + #[test] + #[should_panic(expected = "index out of bounds: the len is 10 but the index is 15")] + fn test_get_stake_rewards_partition_range_panic() { + let (mut genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); + let mut bank = Bank::new_for_tests(&genesis_config); + + // simulate 40K - 1 rewards, the expected num of credit blocks should be 10. + let expected_num = 40959; + let stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + let stake_rewards_bucket = + hash_rewards_into_partitions(stake_rewards, &Hash::new(&[1; 32]), 10); + + bank.set_epoch_reward_status_active(stake_rewards_bucket.clone()); + + // This call should panic, i.e. 15 is out of the num_credit_blocks + let _range = &stake_rewards_bucket[15]; + } + fn compare(a: &StakeRewards, b: &StakeRewards) { let mut a = a .iter() diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index e50b5217676aa0..3af20f0ddc190c 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -1,5 +1,7 @@ mod calculation; +mod compare; mod distribution; +mod epoch_rewards_hasher; mod sysvar; use { @@ -188,7 +190,13 @@ impl Bank { mod tests { use { super::*, - crate::bank::tests::create_genesis_config, + crate::{ + bank::tests::{create_genesis_config, new_bank_from_parent_with_bank_forks}, + genesis_utils::{ + create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, + }, + }, + assert_matches::assert_matches, solana_accounts_db::{ accounts_db::{ AccountShrinkThreshold, AccountsDbConfig, ACCOUNTS_DB_CONFIG_FOR_TESTING, @@ -197,7 +205,14 @@ mod tests { partitioned_rewards::TestPartitionedEpochRewards, }, solana_program_runtime::runtime_config::RuntimeConfig, - solana_sdk::{epoch_schedule::EpochSchedule, native_token::LAMPORTS_PER_SOL}, + solana_sdk::{ + epoch_schedule::EpochSchedule, + native_token::LAMPORTS_PER_SOL, + signature::Signer, + system_transaction, + vote::state::{VoteStateVersions, MAX_LOCKOUT_HISTORY}, + }, + solana_vote_program::{vote_state, vote_transaction}, }; impl Bank { @@ -352,4 +367,287 @@ mod tests { + bank.get_reward_calculation_num_blocks(), ); } + + #[test] + fn test_rewards_computation_and_partitioned_distribution_one_block() { + solana_logger::setup(); + + // setup the expected number of stake delegations + let expected_num_delegations = 100; + + let validator_keypairs = (0..expected_num_delegations) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect::>(); + + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![2_000_000_000; expected_num_delegations], + ); + + let bank0 = Bank::new_for_tests(&genesis_config); + let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); + assert_eq!(num_slots_in_epoch, 32); + + let mut previous_bank = Arc::new(Bank::new_from_parent( + Arc::new(bank0), + &Pubkey::default(), + 1, + )); + + // simulate block progress + for slot in 2..=num_slots_in_epoch + 2 { + let pre_cap = previous_bank.capitalization(); + let curr_bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), slot); + let post_cap = curr_bank.capitalization(); + + // Fill banks with banks with votes landing in the next slot + // Create enough banks such that vote account will root + for validator_vote_keypairs in validator_keypairs.iter() { + let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); + let mut vote_account = curr_bank.get_account(&vote_id).unwrap(); + // generate some rewards + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); + for i in 0..MAX_LOCKOUT_HISTORY + 42 { + if let Some(v) = vote_state.as_mut() { + vote_state::process_slot_vote_unchecked(v, i as u64) + } + let versioned = + VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + vote_state::to(&versioned, &mut vote_account).unwrap(); + match versioned { + VoteStateVersions::Current(v) => { + vote_state = Some(*v); + } + _ => panic!("Has to be of type Current"), + }; + } + curr_bank.store_account_and_update_capitalization(&vote_id, &vote_account); + } + + if slot == num_slots_in_epoch { + // This is the first block of epoch 1. Reward computation should happen in this block. + // assert reward compute status activated at epoch boundary + assert_matches!( + curr_bank.get_reward_interval(), + RewardInterval::InsideInterval + ); + + // cap should increase because of new epoch rewards + assert!(post_cap > pre_cap); + } else if slot == num_slots_in_epoch + 1 || slot == num_slots_in_epoch + 2 { + // 1. when curr_slot == num_slots_in_epoch + 1, the 2nd block of epoch 1, reward distribution should happen in this block. + // however, all stake rewards are paid at the this block therefore reward_status should have transitioned to inactive. And since + // rewards are transferred from epoch_rewards sysvar to stake accounts. The cap should stay the same. + // 2. when curr_slot == num_slots_in_epoch+2, the 3rd block of epoch 1. reward distribution should have already completed. Therefore, + // reward_status should stay inactive and cap should stay the same. + assert_matches!( + curr_bank.get_reward_interval(), + RewardInterval::OutsideInterval + ); + + assert_eq!(post_cap, pre_cap); + } else { + // slot is not in rewards, cap should not change + assert_eq!(post_cap, pre_cap); + } + previous_bank = Arc::new(curr_bank); + } + } + + /// Test rewards computation and partitioned rewards distribution at the epoch boundary (two reward distribution blocks) + #[test] + fn test_rewards_computation_and_partitioned_distribution_two_blocks() { + solana_logger::setup(); + + // Set up the expected number of stake delegations 100 + let expected_num_delegations = 100; + + let validator_keypairs = (0..expected_num_delegations) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect::>(); + + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![2_000_000_000; expected_num_delegations], + ); + genesis_config.epoch_schedule = EpochSchedule::custom(32, 32, false); + + // Config stake reward distribution to be 50 per block + // We will need two blocks for reward distribution. And we can assert that the expected bank + // capital changes before/during/after reward distribution. + let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); + accounts_db_config.test_partitioned_epoch_rewards = + TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { + reward_calculation_num_blocks: 1, + stake_account_stores_per_block: 50, + }; + + let bank0 = Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::new(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config), + None, + None, + Arc::default(), + ); + + let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); + assert_eq!(num_slots_in_epoch, 32); + + let mut previous_bank = Arc::new(Bank::new_from_parent( + Arc::new(bank0), + &Pubkey::default(), + 1, + )); + + // simulate block progress + for slot in 2..=num_slots_in_epoch + 3 { + let pre_cap = previous_bank.capitalization(); + let curr_bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), slot); + let post_cap = curr_bank.capitalization(); + + // Fill banks with banks with votes landing in the next slot + // Create enough banks such that vote account will root + for validator_vote_keypairs in validator_keypairs.iter() { + let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); + let mut vote_account = curr_bank.get_account(&vote_id).unwrap(); + // generate some rewards + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); + for i in 0..MAX_LOCKOUT_HISTORY + 42 { + if let Some(v) = vote_state.as_mut() { + vote_state::process_slot_vote_unchecked(v, i as u64) + } + let versioned = + VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + vote_state::to(&versioned, &mut vote_account).unwrap(); + match versioned { + VoteStateVersions::Current(v) => { + vote_state = Some(*v); + } + _ => panic!("Has to be of type Current"), + }; + } + curr_bank.store_account_and_update_capitalization(&vote_id, &vote_account); + } + + if slot == num_slots_in_epoch { + // This is the first block of epoch 1. Reward computation should happen in this block. + // assert reward compute status activated at epoch boundary + assert_matches!( + curr_bank.get_reward_interval(), + RewardInterval::InsideInterval + ); + + // cap should increase because of new epoch rewards + assert!(post_cap > pre_cap); + } else if slot == num_slots_in_epoch + 1 { + // When curr_slot == num_slots_in_epoch + 1, the 2nd block of epoch 1, reward distribution should happen in this block. + // however, since rewards are transferred from epoch_rewards sysvar to stake accounts. The cap should stay the same. + assert_matches!( + curr_bank.get_reward_interval(), + RewardInterval::InsideInterval + ); + + assert_eq!(post_cap, pre_cap); + } else if slot == num_slots_in_epoch + 2 || slot == num_slots_in_epoch + 3 { + // 1. when curr_slot == num_slots_in_epoch + 2, the 3nd block of epoch 1, reward distribution should happen in this block. + // however, all stake rewards are paid at the this block therefore reward_status should have transitioned to inactive. 
And since + // rewards are transferred from epoch_rewards sysvar to stake accounts. The cap should stay the same. + // 2. when curr_slot == num_slots_in_epoch+2, the 3rd block of epoch 1. reward distribution should have already completed. Therefore, + // reward_status should stay inactive and cap should stay the same. + assert_matches!( + curr_bank.get_reward_interval(), + RewardInterval::OutsideInterval + ); + + assert_eq!(post_cap, pre_cap); + } else { + // slot is not in rewards, cap should not change + assert_eq!(post_cap, pre_cap); + } + previous_bank = Arc::new(curr_bank); + } + } + + /// Test that program execution that involves stake accounts should fail during reward period. + /// Any programs, which result in stake account changes, will throw `ProgramExecutionTemporarilyRestricted` error when + /// in reward period. + #[test] + fn test_program_execution_restricted_for_stake_account_in_reward_period() { + use solana_sdk::transaction::TransactionError::ProgramExecutionTemporarilyRestricted; + + let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs = vec![&validator_vote_keypairs]; + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![1_000_000_000; 1], + ); + + let node_key = &validator_keypairs[0].node_keypair; + let stake_key = &validator_keypairs[0].stake_keypair; + + let (mut previous_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let num_slots_in_epoch = previous_bank.get_slots_in_epoch(previous_bank.epoch()); + assert_eq!(num_slots_in_epoch, 32); + + for slot in 1..=num_slots_in_epoch + 2 { + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + slot, + ); + + // Fill bank_forks with banks with votes landing in the next slot + // So that rewards will be paid out at the epoch boundary, i.e. slot = 32 + let vote = vote_transaction::new_vote_transaction( + vec![slot - 1], + previous_bank.hash(), + previous_bank.last_blockhash(), + &validator_vote_keypairs.node_keypair, + &validator_vote_keypairs.vote_keypair, + &validator_vote_keypairs.vote_keypair, + None, + ); + bank.process_transaction(&vote).unwrap(); + + // Insert a transfer transaction from node account to stake account + let tx = system_transaction::transfer( + node_key, + &stake_key.pubkey(), + 1, + bank.last_blockhash(), + ); + let r = bank.process_transaction(&tx); + + if slot == num_slots_in_epoch { + // When the bank is at the beginning of the new epoch, i.e. slot 32, + // ProgramExecutionTemporarilyRestricted should be thrown for the transfer transaction. + assert_eq!( + r, + Err(ProgramExecutionTemporarilyRestricted { account_index: 1 }) + ); + } else { + // When the bank is outside of reward interval, the transfer transaction should not be affected and will succeed. + assert!(r.is_ok()); + } + + // Push a dummy blockhash, so that the latest_blockhash() for the transfer transaction in each + // iteration are different. Otherwise, all those transactions will be the same, and will not be + // executed by the bank except the first one. 
+ bank.register_unique_recent_blockhash_for_test(); + previous_bank = bank; + } + } } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 05a0d662f9572e..31e189e07960a6 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -10,7 +10,6 @@ use { accounts_background_service::{PrunedBanksRequestHandler, SendDroppedBankCallback}, bank_client::BankClient, bank_forks::BankForks, - epoch_rewards_hasher::hash_rewards_into_partitions, genesis_utils::{ self, activate_all_features, activate_feature, bootstrap_validator_stake_lamports, create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, @@ -33,7 +32,6 @@ use { }, accounts_partition::{self, PartitionIndex, RentPayingAccountsByPartition}, ancestors::Ancestors, - partitioned_rewards::TestPartitionedEpochRewards, }, solana_inline_spl::token, solana_logger, @@ -113,7 +111,6 @@ use { vote_state::{ self, BlockTimestamp, Vote, VoteInit, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY, }, - vote_transaction, }, std::{ collections::{HashMap, HashSet}, @@ -159,7 +156,7 @@ impl VoteReward { } } -fn new_bank_from_parent_with_bank_forks( +pub(in crate::bank) fn new_bank_from_parent_with_bank_forks( bank_forks: &RwLock, parent: Arc, collector_id: &Pubkey, @@ -12126,307 +12123,6 @@ fn test_squash_timing_add_assign() { assert!(t0 == expected); } -/// Test that reward partition range panics when passing out of range partition index -#[test] -#[should_panic(expected = "index out of bounds: the len is 10 but the index is 15")] -fn test_get_stake_rewards_partition_range_panic() { - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); - let mut bank = Bank::new_for_tests(&genesis_config); - - // simulate 40K - 1 rewards, the expected num of credit blocks should be 10. - let expected_num = 40959; - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - let stake_rewards_bucket = - hash_rewards_into_partitions(stake_rewards, &Hash::new(&[1; 32]), 10); - - bank.set_epoch_reward_status_active(stake_rewards_bucket.clone()); - - // This call should panic, i.e. 15 is out of the num_credit_blocks - let _range = &stake_rewards_bucket[15]; -} - -/// Test rewards computation and partitioned rewards distribution at the epoch boundary (one reward distribution block) -#[test] -fn test_rewards_computation_and_partitioned_distribution_one_block() { - solana_logger::setup(); - - // setup the expected number of stake delegations - let expected_num_delegations = 100; - - let validator_keypairs = (0..expected_num_delegations) - .map(|_| ValidatorVoteKeypairs::new_rand()) - .collect::>(); - - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( - 1_000_000_000, - &validator_keypairs, - vec![2_000_000_000; expected_num_delegations], - ); - - let bank0 = Bank::new_for_tests(&genesis_config); - let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); - assert_eq!(num_slots_in_epoch, 32); - - let mut previous_bank = Arc::new(Bank::new_from_parent( - Arc::new(bank0), - &Pubkey::default(), - 1, - )); - - // simulate block progress - for slot in 2..=num_slots_in_epoch + 2 { - let pre_cap = previous_bank.capitalization(); - let curr_bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), slot); - let post_cap = curr_bank.capitalization(); - - // Fill banks with banks with votes landing in the next slot - // Create enough banks such that vote account will root - for validator_vote_keypairs in validator_keypairs.iter() { - let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); - let mut vote_account = curr_bank.get_account(&vote_id).unwrap(); - // generate some rewards - let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); - for i in 0..MAX_LOCKOUT_HISTORY + 42 { - if let Some(v) = vote_state.as_mut() { - vote_state::process_slot_vote_unchecked(v, i as u64) - } - let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); - vote_state::to(&versioned, &mut vote_account).unwrap(); - match versioned { - VoteStateVersions::Current(v) => { - vote_state = Some(*v); - } - _ => panic!("Has to be of type Current"), - }; - } - curr_bank.store_account_and_update_capitalization(&vote_id, &vote_account); - } - - if slot == num_slots_in_epoch { - // This is the first block of epoch 1. Reward computation should happen in this block. - // assert reward compute status activated at epoch boundary - assert_matches!( - curr_bank.get_reward_interval(), - RewardInterval::InsideInterval - ); - - // cap should increase because of new epoch rewards - assert!(post_cap > pre_cap); - } else if slot == num_slots_in_epoch + 1 || slot == num_slots_in_epoch + 2 { - // 1. when curr_slot == num_slots_in_epoch + 1, the 2nd block of epoch 1, reward distribution should happen in this block. - // however, all stake rewards are paid at the this block therefore reward_status should have transitioned to inactive. And since - // rewards are transferred from epoch_rewards sysvar to stake accounts. The cap should stay the same. - // 2. when curr_slot == num_slots_in_epoch+2, the 3rd block of epoch 1. reward distribution should have already completed. Therefore, - // reward_status should stay inactive and cap should stay the same. - assert_matches!( - curr_bank.get_reward_interval(), - RewardInterval::OutsideInterval - ); - - assert_eq!(post_cap, pre_cap); - } else { - // slot is not in rewards, cap should not change - assert_eq!(post_cap, pre_cap); - } - previous_bank = Arc::new(curr_bank); - } -} - -/// Test rewards computation and partitioned rewards distribution at the epoch boundary (two reward distribution blocks) -#[test] -fn test_rewards_computation_and_partitioned_distribution_two_blocks() { - solana_logger::setup(); - - // Set up the expected number of stake delegations 100 - let expected_num_delegations = 100; - - let validator_keypairs = (0..expected_num_delegations) - .map(|_| ValidatorVoteKeypairs::new_rand()) - .collect::>(); - - let GenesisConfigInfo { - mut genesis_config, .. 
- } = create_genesis_config_with_vote_accounts( - 1_000_000_000, - &validator_keypairs, - vec![2_000_000_000; expected_num_delegations], - ); - genesis_config.epoch_schedule = EpochSchedule::custom(32, 32, false); - - // Config stake reward distribution to be 50 per block - // We will need two blocks for reward distribution. And we can assert that the expected bank - // capital changes before/during/after reward distribution. - let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); - accounts_db_config.test_partitioned_epoch_rewards = - TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { - reward_calculation_num_blocks: 1, - stake_account_stores_per_block: 50, - }; - - let bank0 = Bank::new_with_paths( - &genesis_config, - Arc::new(RuntimeConfig::default()), - Vec::new(), - None, - None, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - false, - Some(accounts_db_config), - None, - None, - Arc::default(), - ); - - let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); - assert_eq!(num_slots_in_epoch, 32); - - let mut previous_bank = Arc::new(Bank::new_from_parent( - Arc::new(bank0), - &Pubkey::default(), - 1, - )); - - // simulate block progress - for slot in 2..=num_slots_in_epoch + 3 { - let pre_cap = previous_bank.capitalization(); - let curr_bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), slot); - let post_cap = curr_bank.capitalization(); - - // Fill banks with banks with votes landing in the next slot - // Create enough banks such that vote account will root - for validator_vote_keypairs in validator_keypairs.iter() { - let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); - let mut vote_account = curr_bank.get_account(&vote_id).unwrap(); - // generate some rewards - let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); - for i in 0..MAX_LOCKOUT_HISTORY + 42 { - if let Some(v) = vote_state.as_mut() { - vote_state::process_slot_vote_unchecked(v, i as u64) - } - let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); - vote_state::to(&versioned, &mut vote_account).unwrap(); - match versioned { - VoteStateVersions::Current(v) => { - vote_state = Some(*v); - } - _ => panic!("Has to be of type Current"), - }; - } - curr_bank.store_account_and_update_capitalization(&vote_id, &vote_account); - } - - if slot == num_slots_in_epoch { - // This is the first block of epoch 1. Reward computation should happen in this block. - // assert reward compute status activated at epoch boundary - assert_matches!( - curr_bank.get_reward_interval(), - RewardInterval::InsideInterval - ); - - // cap should increase because of new epoch rewards - assert!(post_cap > pre_cap); - } else if slot == num_slots_in_epoch + 1 { - // When curr_slot == num_slots_in_epoch + 1, the 2nd block of epoch 1, reward distribution should happen in this block. - // however, since rewards are transferred from epoch_rewards sysvar to stake accounts. The cap should stay the same. - assert_matches!( - curr_bank.get_reward_interval(), - RewardInterval::InsideInterval - ); - - assert_eq!(post_cap, pre_cap); - } else if slot == num_slots_in_epoch + 2 || slot == num_slots_in_epoch + 3 { - // 1. when curr_slot == num_slots_in_epoch + 2, the 3nd block of epoch 1, reward distribution should happen in this block. - // however, all stake rewards are paid at the this block therefore reward_status should have transitioned to inactive. 
And since - // rewards are transferred from epoch_rewards sysvar to stake accounts. The cap should stay the same. - // 2. when curr_slot == num_slots_in_epoch+2, the 3rd block of epoch 1. reward distribution should have already completed. Therefore, - // reward_status should stay inactive and cap should stay the same. - assert_matches!( - curr_bank.get_reward_interval(), - RewardInterval::OutsideInterval - ); - - assert_eq!(post_cap, pre_cap); - } else { - // slot is not in rewards, cap should not change - assert_eq!(post_cap, pre_cap); - } - previous_bank = Arc::new(curr_bank); - } -} - -/// Test that program execution that involves stake accounts should fail during reward period. -/// Any programs, which result in stake account changes, will throw `ProgramExecutionTemporarilyRestricted` error when -/// in reward period. -#[test] -fn test_program_execution_restricted_for_stake_account_in_reward_period() { - use solana_sdk::transaction::TransactionError::ProgramExecutionTemporarilyRestricted; - - let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand(); - let validator_keypairs = vec![&validator_vote_keypairs]; - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( - 1_000_000_000, - &validator_keypairs, - vec![1_000_000_000; 1], - ); - - let node_key = &validator_keypairs[0].node_keypair; - let stake_key = &validator_keypairs[0].stake_keypair; - - let (mut previous_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let num_slots_in_epoch = previous_bank.get_slots_in_epoch(previous_bank.epoch()); - assert_eq!(num_slots_in_epoch, 32); - - for slot in 1..=num_slots_in_epoch + 2 { - let bank = new_bank_from_parent_with_bank_forks( - bank_forks.as_ref(), - previous_bank.clone(), - &Pubkey::default(), - slot, - ); - - // Fill bank_forks with banks with votes landing in the next slot - // So that rewards will be paid out at the epoch boundary, i.e. slot = 32 - let vote = vote_transaction::new_vote_transaction( - vec![slot - 1], - previous_bank.hash(), - previous_bank.last_blockhash(), - &validator_vote_keypairs.node_keypair, - &validator_vote_keypairs.vote_keypair, - &validator_vote_keypairs.vote_keypair, - None, - ); - bank.process_transaction(&vote).unwrap(); - - // Insert a transfer transaction from node account to stake account - let tx = - system_transaction::transfer(node_key, &stake_key.pubkey(), 1, bank.last_blockhash()); - let r = bank.process_transaction(&tx); - - if slot == num_slots_in_epoch { - // When the bank is at the beginning of the new epoch, i.e. slot 32, - // ProgramExecutionTemporarilyRestricted should be thrown for the transfer transaction. - assert_eq!( - r, - Err(ProgramExecutionTemporarilyRestricted { account_index: 1 }) - ); - } else { - // When the bank is outside of reward interval, the transfer transaction should not be affected and will succeed. - assert!(r.is_ok()); - } - - // Push a dummy blockhash, so that the latest_blockhash() for the transfer transaction in each - // iteration are different. Otherwise, all those transactions will be the same, and will not be - // executed by the bank except the first one. 
- bank.register_unique_recent_blockhash_for_test(); - previous_bank = bank; - } -} - #[test] fn test_system_instruction_allocate() { let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 12eab54a41cf0e..9ba006fc93ee37 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -11,7 +11,6 @@ pub mod bank_forks; pub mod bank_utils; pub mod commitment; pub mod compute_budget_details; -mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; pub mod installed_scheduler_pool; From 527cad2b2233a96c7d5d0bf3b9f3439057d562c2 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 4 Apr 2024 10:05:13 -0500 Subject: [PATCH 08/19] don't try to ancient pack already large storages (#548) --- accounts-db/src/ancient_append_vecs.rs | 66 ++++++++++++++++++++------ 1 file changed, 52 insertions(+), 14 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index f8717cce895b15..a7991652e63b44 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -81,6 +81,7 @@ impl AncientSlotInfos { slot: Slot, storage: Arc, can_randomly_shrink: bool, + ideal_size: NonZeroU64, ) -> bool { let mut was_randomly_shrunk = false; let alive_bytes = storage.alive_bytes() as u64; @@ -107,6 +108,12 @@ impl AncientSlotInfos { // to reduce disk space used saturating_add_assign!(self.total_alive_bytes_shrink, alive_bytes); self.shrink_indexes.push(self.all_infos.len()); + } else { + let already_ideal_size = u64::from(ideal_size) * 80 / 100; + if alive_bytes > already_ideal_size { + // do not include this append vec at all. It is already ideal size and not a candidate for shrink. 
+ return was_randomly_shrunk; + } } self.all_infos.push(SlotInfo { slot, @@ -421,7 +428,11 @@ impl AccountsDb { slots: Vec, tuning: &PackedAncientStorageTuning, ) -> AncientSlotInfos { - let mut ancient_slot_infos = self.calc_ancient_slot_info(slots, tuning.can_randomly_shrink); + let mut ancient_slot_infos = self.calc_ancient_slot_info( + slots, + tuning.can_randomly_shrink, + tuning.ideal_storage_size, + ); ancient_slot_infos.filter_ancient_slots(tuning); ancient_slot_infos @@ -463,6 +474,7 @@ impl AccountsDb { &self, slots: Vec, can_randomly_shrink: bool, + ideal_size: NonZeroU64, ) -> AncientSlotInfos { let len = slots.len(); let mut infos = AncientSlotInfos { @@ -474,7 +486,7 @@ impl AccountsDb { for slot in &slots { if let Some(storage) = self.storage.get_slot_storage_entry(*slot) { - if infos.add(*slot, storage, can_randomly_shrink) { + if infos.add(*slot, storage, can_randomly_shrink, ideal_size) { randoms += 1; } } @@ -2152,10 +2164,19 @@ pub mod tests { match method { TestCollectInfo::Add => { // test lower level 'add' - infos.add(slot1, Arc::clone(&storage), can_randomly_shrink); + infos.add( + slot1, + Arc::clone(&storage), + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ); } TestCollectInfo::CalcAncientSlotInfo => { - infos = db.calc_ancient_slot_info(vec![slot1], can_randomly_shrink); + infos = db.calc_ancient_slot_info( + vec![slot1], + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ); } TestCollectInfo::CollectSortFilterInfo => { let tuning = PackedAncientStorageTuning { @@ -2169,7 +2190,7 @@ pub mod tests { infos = db.collect_sort_filter_ancient_slots(vec![slot1], &tuning); } } - assert_eq!(infos.all_infos.len(), 1); + assert_eq!(infos.all_infos.len(), 1, "{method:?}"); let should_shrink = data_size.is_none(); assert_storage_info(infos.all_infos.first().unwrap(), &storage, should_shrink); if should_shrink { @@ -2203,9 +2224,18 @@ pub mod tests { let mut infos = AncientSlotInfos::default(); let storage = db.storage.get_slot_storage_entry(slot1).unwrap(); if call_add { - infos.add(slot1, Arc::clone(&storage), can_randomly_shrink); + infos.add( + slot1, + Arc::clone(&storage), + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ); } else { - infos = db.calc_ancient_slot_info(vec![slot1], can_randomly_shrink); + infos = db.calc_ancient_slot_info( + vec![slot1], + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ); } assert!(infos.all_infos.is_empty()); assert!(infos.shrink_indexes.is_empty()); @@ -2231,7 +2261,11 @@ pub mod tests { .iter() .map(|storage| storage.alive_bytes() as u64) .sum::(); - let infos = db.calc_ancient_slot_info(slot_vec.clone(), can_randomly_shrink); + let infos = db.calc_ancient_slot_info( + slot_vec.clone(), + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ); if !alive { assert!(infos.all_infos.is_empty()); assert!(infos.shrink_indexes.is_empty()); @@ -2309,9 +2343,11 @@ pub mod tests { .sum::(); let infos = match method { - TestCollectInfo::CalcAncientSlotInfo => { - db.calc_ancient_slot_info(slot_vec.clone(), can_randomly_shrink) - } + TestCollectInfo::CalcAncientSlotInfo => db.calc_ancient_slot_info( + slot_vec.clone(), + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ), TestCollectInfo::Add => { continue; // unsupportable } @@ -2623,9 +2659,11 @@ pub mod tests { .map(|storage| storage.alive_bytes() as u64) .sum::(); let 
infos = match method { - TestCollectInfo::CalcAncientSlotInfo => { - db.calc_ancient_slot_info(slot_vec.clone(), can_randomly_shrink) - } + TestCollectInfo::CalcAncientSlotInfo => db.calc_ancient_slot_info( + slot_vec.clone(), + can_randomly_shrink, + NonZeroU64::new(get_ancient_append_vec_capacity()).unwrap(), + ), TestCollectInfo::Add => { continue; // unsupportable } From a088364eb07ad32a1cfbe9f826f73a7344fc766a Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 4 Apr 2024 10:14:53 -0500 Subject: [PATCH 09/19] Use std::num::Saturating over saturating_add_assign!() (#523) std::num::Saturating allows us to create integers that will override the standard arithmetic operators to use saturating math. This removes the need for a custom macro as well as reduces mental load as someone only needs to remember that they want saturating math once. This PR introduces std::num::Saturating integers to replace all use of saturating_add_assign!() in the accounts-db crate --- accounts-db/src/accounts_db.rs | 93 +++++++++++--------------- accounts-db/src/ancient_append_vecs.rs | 92 +++++++++++++------------ 2 files changed, 90 insertions(+), 95 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 81f0b3a8fa42ad..f1d0324128c148 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -89,7 +89,6 @@ use { hash::Hash, pubkey::Pubkey, rent_collector::RentCollector, - saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, }, @@ -100,6 +99,7 @@ use { fs, hash::{Hash as StdHash, Hasher as StdHasher}, io::Result as IoResult, + num::Saturating, ops::{Range, RangeBounds}, path::{Path, PathBuf}, sync::{ @@ -1742,21 +1742,21 @@ impl SplitAncientStorages { #[derive(Debug, Default)] struct FlushStats { - num_flushed: usize, - num_purged: usize, - total_size: u64, + num_flushed: Saturating, + num_purged: Saturating, + total_size: Saturating, store_accounts_timing: StoreAccountsTiming, - store_accounts_total_us: u64, + store_accounts_total_us: Saturating, } impl FlushStats { fn accumulate(&mut self, other: &Self) { - saturating_add_assign!(self.num_flushed, other.num_flushed); - saturating_add_assign!(self.num_purged, other.num_purged); - saturating_add_assign!(self.total_size, other.total_size); + self.num_flushed += other.num_flushed; + self.num_purged += other.num_purged; + self.total_size += other.total_size; self.store_accounts_timing .accumulate(&other.store_accounts_timing); - saturating_add_assign!(self.store_accounts_total_us, other.store_accounts_total_us); + self.store_accounts_total_us += other.store_accounts_total_us; } } @@ -1884,26 +1884,20 @@ pub(crate) struct ShrinkAncientStats { #[derive(Debug, Default)] pub(crate) struct ShrinkStatsSub { pub(crate) store_accounts_timing: StoreAccountsTiming, - pub(crate) rewrite_elapsed_us: u64, - pub(crate) create_and_insert_store_elapsed_us: u64, - pub(crate) unpackable_slots_count: usize, - pub(crate) newest_alive_packed_count: usize, + pub(crate) rewrite_elapsed_us: Saturating, + pub(crate) create_and_insert_store_elapsed_us: Saturating, + pub(crate) unpackable_slots_count: Saturating, + pub(crate) newest_alive_packed_count: Saturating, } impl ShrinkStatsSub { pub(crate) fn accumulate(&mut self, other: &Self) { self.store_accounts_timing .accumulate(&other.store_accounts_timing); - saturating_add_assign!(self.rewrite_elapsed_us, other.rewrite_elapsed_us); - saturating_add_assign!( - self.create_and_insert_store_elapsed_us, - other.create_and_insert_store_elapsed_us - ); 
- saturating_add_assign!(self.unpackable_slots_count, other.unpackable_slots_count); - saturating_add_assign!( - self.newest_alive_packed_count, - other.newest_alive_packed_count - ); + self.rewrite_elapsed_us += other.rewrite_elapsed_us; + self.create_and_insert_store_elapsed_us += other.create_and_insert_store_elapsed_us; + self.unpackable_slots_count += other.unpackable_slots_count; + self.newest_alive_packed_count += other.newest_alive_packed_count; } } #[derive(Debug, Default)] @@ -3979,7 +3973,7 @@ impl AccountsDb { let (shrink_in_progress, time_us) = measure_us!( self.get_store_for_shrink(slot, shrink_collect.alive_total_bytes as u64) ); - stats_sub.create_and_insert_store_elapsed_us = time_us; + stats_sub.create_and_insert_store_elapsed_us = Saturating(time_us); // here, we're writing back alive_accounts. That should be an atomic operation // without use of rather wide locks in this whole function, because we're @@ -3992,7 +3986,7 @@ impl AccountsDb { ); rewrite_elapsed.stop(); - stats_sub.rewrite_elapsed_us = rewrite_elapsed.as_us(); + stats_sub.rewrite_elapsed_us = Saturating(rewrite_elapsed.as_us()); // `store_accounts_frozen()` above may have purged accounts from some // other storage entries (the ones that were just overwritten by this @@ -4024,7 +4018,7 @@ impl AccountsDb { .fetch_add(1, Ordering::Relaxed); } shrink_stats.create_and_insert_store_elapsed.fetch_add( - stats_sub.create_and_insert_store_elapsed_us, + stats_sub.create_and_insert_store_elapsed_us.0, Ordering::Relaxed, ); shrink_stats.store_accounts_elapsed.fetch_add( @@ -4041,12 +4035,12 @@ impl AccountsDb { ); shrink_stats .rewrite_elapsed - .fetch_add(stats_sub.rewrite_elapsed_us, Ordering::Relaxed); + .fetch_add(stats_sub.rewrite_elapsed_us.0, Ordering::Relaxed); shrink_stats .unpackable_slots_count - .fetch_add(stats_sub.unpackable_slots_count as u64, Ordering::Relaxed); + .fetch_add(stats_sub.unpackable_slots_count.0 as u64, Ordering::Relaxed); shrink_stats.newest_alive_packed_count.fetch_add( - stats_sub.newest_alive_packed_count as u64, + stats_sub.newest_alive_packed_count.0 as u64, Ordering::Relaxed, ); } @@ -4454,7 +4448,8 @@ impl AccountsDb { let (mut shrink_in_progress, create_and_insert_store_elapsed_us) = measure_us!( current_ancient.create_if_necessary(slot, self, shrink_collect.alive_total_bytes) ); - stats_sub.create_and_insert_store_elapsed_us = create_and_insert_store_elapsed_us; + stats_sub.create_and_insert_store_elapsed_us = + Saturating(create_and_insert_store_elapsed_us); let available_bytes = current_ancient.accounts_file().accounts.remaining_bytes(); // split accounts in 'slot' into: // 'Primary', which can fit in 'current_ancient' @@ -4516,7 +4511,7 @@ impl AccountsDb { } assert_eq!(bytes_remaining_to_write, 0); rewrite_elapsed.stop(); - stats_sub.rewrite_elapsed_us = rewrite_elapsed.as_us(); + stats_sub.rewrite_elapsed_us = Saturating(rewrite_elapsed.as_us()); if slot != current_ancient.slot() { // all append vecs in this slot have been combined into an ancient append vec @@ -6116,9 +6111,9 @@ impl AccountsDb { }); datapoint_info!( "accounts_db-flush_accounts_cache_aggressively", - ("num_flushed", flush_stats.num_flushed, i64), - ("num_purged", flush_stats.num_purged, i64), - ("total_flush_size", flush_stats.total_size, i64), + ("num_flushed", flush_stats.num_flushed.0, i64), + ("num_purged", flush_stats.num_purged.0, i64), + ("total_flush_size", flush_stats.total_size.0, i64), ("total_cache_size", self.accounts_cache.size(), i64), ("total_frozen_slots", excess_slot_count, i64), 
("total_slots", self.accounts_cache.num_slots(), i64), @@ -6146,7 +6141,7 @@ impl AccountsDb { ("num_accounts_saved", num_accounts_saved, i64), ( "store_accounts_total_us", - flush_stats.store_accounts_total_us, + flush_stats.store_accounts_total_us.0, i64 ), ( @@ -6235,9 +6230,7 @@ impl AccountsDb { mut should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>, max_clean_root: Option, ) -> FlushStats { - let mut num_purged = 0; - let mut total_size = 0; - let mut num_flushed = 0; + let mut flush_stats = FlushStats::default(); let iter_items: Vec<_> = slot_cache.iter().collect(); let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new(); let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![]; @@ -6263,15 +6256,15 @@ impl AccountsDb { .unwrap_or(true); if should_flush { let hash = iter_item.value().hash(); - total_size += aligned_stored_size(account.data().len()) as u64; - num_flushed += 1; + flush_stats.total_size += aligned_stored_size(account.data().len()) as u64; + flush_stats.num_flushed += 1; Some(((key, account), hash)) } else { // If we don't flush, we have to remove the entry from the // index, since it's equivalent to purging purged_slot_pubkeys.insert((slot, *key)); pubkey_to_slot_set.push((*key, slot)); - num_purged += 1; + flush_stats.num_purged += 1; None } }) @@ -6288,13 +6281,12 @@ impl AccountsDb { &HashSet::default(), ); - let mut store_accounts_timing = StoreAccountsTiming::default(); - let mut store_accounts_total_us = 0; if !is_dead_slot { // This ensures that all updates are written to an AppendVec, before any // updates to the index happen, so anybody that sees a real entry in the index, // will be able to find the account in storage - let flushed_store = self.create_and_insert_store(slot, total_size, "flush_slot_cache"); + let flushed_store = + self.create_and_insert_store(slot, flush_stats.total_size.0, "flush_slot_cache"); let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self .store_accounts_frozen( (slot, &accounts[..]), @@ -6302,8 +6294,8 @@ impl AccountsDb { &flushed_store, StoreReclaims::Default, )); - store_accounts_timing = store_accounts_timing_inner; - store_accounts_total_us = store_accounts_total_inner_us; + flush_stats.store_accounts_timing = store_accounts_timing_inner; + flush_stats.store_accounts_total_us = Saturating(store_accounts_total_inner_us); // If the above sizing function is correct, just one AppendVec is enough to hold // all the data for the slot @@ -6315,13 +6307,8 @@ impl AccountsDb { // There is some racy condition for existing readers who just has read exactly while // flushing. 
That case is handled by retry_to_get_account_accessor() assert!(self.accounts_cache.remove_slot(slot).is_some()); - FlushStats { - num_flushed, - num_purged, - total_size, - store_accounts_timing, - store_accounts_total_us, - } + + flush_stats } /// flush all accounts in this slot diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index a7991652e63b44..09fe17a39e1078 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -20,10 +20,10 @@ use { rand::{thread_rng, Rng}, rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, solana_measure::measure_us, - solana_sdk::{account::ReadableAccount, clock::Slot, saturating_add_assign}, + solana_sdk::{account::ReadableAccount, clock::Slot}, std::{ collections::HashMap, - num::NonZeroU64, + num::{NonZeroU64, Saturating}, sync::{atomic::Ordering, Arc, Mutex}, }, }; @@ -68,9 +68,9 @@ struct AncientSlotInfos { /// subset of all_infos shrink_indexes: Vec, /// total alive bytes across contents of 'shrink_indexes' - total_alive_bytes_shrink: u64, + total_alive_bytes_shrink: Saturating, /// total alive bytes across all slots - total_alive_bytes: u64, + total_alive_bytes: Saturating, } impl AncientSlotInfos { @@ -106,7 +106,7 @@ impl AncientSlotInfos { if should_shrink { // alive ratio is too low, so prioritize combining this slot with others // to reduce disk space used - saturating_add_assign!(self.total_alive_bytes_shrink, alive_bytes); + self.total_alive_bytes_shrink += alive_bytes; self.shrink_indexes.push(self.all_infos.len()); } else { let already_ideal_size = u64::from(ideal_size) * 80 / 100; @@ -122,7 +122,7 @@ impl AncientSlotInfos { alive_bytes, should_shrink, }); - saturating_add_assign!(self.total_alive_bytes, alive_bytes); + self.total_alive_bytes += alive_bytes; } was_randomly_shrunk } @@ -150,20 +150,20 @@ impl AncientSlotInfos { /// clear 'should_shrink' for storages after a cutoff to limit how many storages we shrink fn clear_should_shrink_after_cutoff(&mut self, percent_of_alive_shrunk_data: u64) { - let mut bytes_to_shrink_due_to_ratio = 0; + let mut bytes_to_shrink_due_to_ratio = Saturating(0); // shrink enough slots to write 'percent_of_alive_shrunk_data'% of the total alive data // from slots that exceeded the shrink threshold. // The goal is to limit overall i/o in this pass while making progress. - let threshold_bytes = self.total_alive_bytes_shrink * percent_of_alive_shrunk_data / 100; + let threshold_bytes = self.total_alive_bytes_shrink.0 * percent_of_alive_shrunk_data / 100; for info_index in &self.shrink_indexes { let info = &mut self.all_infos[*info_index]; - if bytes_to_shrink_due_to_ratio >= threshold_bytes { + if bytes_to_shrink_due_to_ratio.0 >= threshold_bytes { // we exceeded the amount to shrink due to alive ratio, so don't shrink this one just due to 'should_shrink' // It MAY be shrunk based on total capacity still. // Mark it as false for 'should_shrink' so it gets evaluated solely based on # of files. 
info.should_shrink = false; } else { - saturating_add_assign!(bytes_to_shrink_due_to_ratio, info.alive_bytes); + bytes_to_shrink_due_to_ratio += info.alive_bytes; } } } @@ -187,11 +187,11 @@ impl AncientSlotInfos { // these indexes into 'all_infos' are useless once we truncate 'all_infos', so make sure they're cleared out to avoid any issues self.shrink_indexes.clear(); let total_storages = self.all_infos.len(); - let mut cumulative_bytes = 0u64; + let mut cumulative_bytes = Saturating(0u64); let low_threshold = max_storages * 50 / 100; for (i, info) in self.all_infos.iter().enumerate() { - saturating_add_assign!(cumulative_bytes, info.alive_bytes); - let ancient_storages_required = (cumulative_bytes / ideal_storage_size + 1) as usize; + cumulative_bytes += info.alive_bytes; + let ancient_storages_required = (cumulative_bytes.0 / ideal_storage_size + 1) as usize; let storages_remaining = total_storages - i - 1; // if the remaining uncombined storages and the # of resulting @@ -459,11 +459,11 @@ impl AccountsDb { write_ancient_accounts.metrics.accumulate(&ShrinkStatsSub { store_accounts_timing, - rewrite_elapsed_us, - create_and_insert_store_elapsed_us, - unpackable_slots_count: 0, - newest_alive_packed_count: 0, + rewrite_elapsed_us: Saturating(rewrite_elapsed_us), + create_and_insert_store_elapsed_us: Saturating(create_and_insert_store_elapsed_us), + ..ShrinkStatsSub::default() }); + write_ancient_accounts .shrinks_in_progress .insert(target_slot, shrink_in_progress); @@ -791,7 +791,7 @@ impl<'a> PackedAncientStorage<'a> { // starting at first entry in current_alive_accounts let mut partial_inner_index = 0; // 0 bytes written so far from the current set of accounts - let mut partial_bytes_written = 0; + let mut partial_bytes_written = Saturating(0); // pack a new storage each iteration of this outer loop loop { let mut bytes_total = 0usize; @@ -806,11 +806,11 @@ impl<'a> PackedAncientStorage<'a> { current_alive_accounts = accounts_to_combine.next(); // reset partial progress since we're starting over with a new set of alive accounts partial_inner_index = 0; - partial_bytes_written = 0; + partial_bytes_written = Saturating(0); continue; } let bytes_remaining_this_slot = - alive_accounts.bytes.saturating_sub(partial_bytes_written); + alive_accounts.bytes.saturating_sub(partial_bytes_written.0); let bytes_total_with_this_slot = bytes_total.saturating_add(bytes_remaining_this_slot); let mut partial_inner_index_max_exclusive; @@ -832,7 +832,7 @@ impl<'a> PackedAncientStorage<'a> { break; } // this account fits - saturating_add_assign!(partial_bytes_written, account_size); + partial_bytes_written += account_size; bytes_total = new_size; partial_inner_index_max_exclusive += 1; } @@ -2203,12 +2203,15 @@ pub mod tests { Vec::default() } ); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected as u64); - assert_eq!(infos.total_alive_bytes_shrink, alive_bytes_expected as u64); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected as u64); + assert_eq!( + infos.total_alive_bytes_shrink.0, + alive_bytes_expected as u64 + ); } else { assert!(infos.shrink_indexes.is_empty()); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected as u64); - assert_eq!(infos.total_alive_bytes_shrink, 0); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected as u64); + assert_eq!(infos.total_alive_bytes_shrink.0, 0); } } } @@ -2239,8 +2242,8 @@ pub mod tests { } assert!(infos.all_infos.is_empty()); assert!(infos.shrink_indexes.is_empty()); - assert_eq!(infos.total_alive_bytes, 0); - 
assert_eq!(infos.total_alive_bytes_shrink, 0); + assert_eq!(infos.total_alive_bytes.0, 0); + assert_eq!(infos.total_alive_bytes_shrink.0, 0); } } @@ -2269,8 +2272,8 @@ pub mod tests { if !alive { assert!(infos.all_infos.is_empty()); assert!(infos.shrink_indexes.is_empty()); - assert_eq!(infos.total_alive_bytes, 0); - assert_eq!(infos.total_alive_bytes_shrink, 0); + assert_eq!(infos.total_alive_bytes.0, 0); + assert_eq!(infos.total_alive_bytes_shrink.0, 0); } else { assert_eq!(infos.all_infos.len(), slots); let should_shrink = data_size.is_none(); @@ -2290,12 +2293,12 @@ pub mod tests { .map(|(i, _)| i) .collect::>() ); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected); - assert_eq!(infos.total_alive_bytes_shrink, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes_shrink.0, alive_bytes_expected); } else { assert!(infos.shrink_indexes.is_empty()); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected); - assert_eq!(infos.total_alive_bytes_shrink, 0); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes_shrink.0, 0); } } } @@ -2382,12 +2385,12 @@ pub mod tests { Vec::default() } ); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected); - assert_eq!(infos.total_alive_bytes_shrink, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes_shrink.0, alive_bytes_expected); } else { assert!(infos.shrink_indexes.is_empty()); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected); - assert_eq!(infos.total_alive_bytes_shrink, 0); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes_shrink.0, 0); } } } @@ -2702,8 +2705,8 @@ pub mod tests { .collect::>() } ); - assert_eq!(infos.total_alive_bytes, alive_bytes_expected); - assert_eq!(infos.total_alive_bytes_shrink, dead_bytes); + assert_eq!(infos.total_alive_bytes.0, alive_bytes_expected); + assert_eq!(infos.total_alive_bytes_shrink.0, dead_bytes); } } } @@ -2869,8 +2872,13 @@ pub mod tests { if swap { infos.all_infos = infos.all_infos.into_iter().rev().collect(); } - infos.total_alive_bytes_shrink = - infos.all_infos.iter().map(|info| info.alive_bytes).sum(); + infos.total_alive_bytes_shrink = Saturating( + infos + .all_infos + .iter() + .map(|info| info.alive_bytes) + .sum::(), + ); match method { TestShouldShrink::FilterAncientSlots => { let tuning = PackedAncientStorageTuning { @@ -2900,7 +2908,7 @@ pub mod tests { percent_of_alive_shrunk_data == 89 || percent_of_alive_shrunk_data == 90 } else { infos.all_infos[infos.shrink_indexes[0]].alive_bytes - >= infos.total_alive_bytes_shrink * percent_of_alive_shrunk_data + >= infos.total_alive_bytes_shrink.0 * percent_of_alive_shrunk_data / 100 }; if modify { From 2c11b7a0f990197c9d562780c9f080b6a0754a3f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 23:42:37 +0800 Subject: [PATCH 10/19] build(deps): bump thiserror from 1.0.57 to 1.0.58 (#573) * build(deps): bump thiserror from 1.0.57 to 1.0.58 Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.57 to 1.0.58. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.57...1.0.58) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5210e12afff5b1..88174cfc7d95b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8265,18 +8265,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index afc700ef015e7f..d73b16c8ec7dba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -412,7 +412,7 @@ tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.10.1" test-case = "3.3.1" -thiserror = "1.0.57" +thiserror = "1.0.58" tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b7f5890f442362..8e6592dc2ba684 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7162,18 +7162,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", From ccdfd9a4d25720c0b0ba46e06a4226202928dd68 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 4 Apr 2024 12:28:07 -0500 Subject: [PATCH 11/19] rework remove_dead_accounts.reclaimed_offsets (#571) --- accounts-db/src/accounts_db.rs | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f1d0324128c148..366b27130ba7b9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3514,14 +3514,9 @@ impl AccountsDb { { let mut reclaim_result = ReclaimResult::default(); if let Some(reclaims) = reclaims { - let (ref mut purged_account_slots, ref mut reclaimed_offsets) = reclaim_result; - - let dead_slots = self.remove_dead_accounts( - reclaims, - expected_single_dead_slot, - Some(reclaimed_offsets), - reset_accounts, - ); + let (dead_slots, reclaimed_offsets) = + self.remove_dead_accounts(reclaims, expected_single_dead_slot, reset_accounts); 
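// A rough standalone sketch of the reshaped calling convention in this hunk: reclaimed
// offsets are accumulated inside the function and returned alongside the dead slots,
// rather than being threaded through as a mutable out-parameter. Plain std collections
// stand in for SlotOffsets here, and the dead-slot bookkeeping is elided (the real code
// only marks a slot dead once its store's alive-account count reaches zero).
use std::collections::{HashMap, HashSet};

fn remove_dead_accounts_sketch(
    reclaims: &[(u64 /* slot */, usize /* offset */)],
) -> (HashSet<u64>, HashMap<u64, HashSet<usize>>) {
    let mut reclaimed_offsets: HashMap<u64, HashSet<usize>> = HashMap::new();
    for (slot, offset) in reclaims {
        // every reclaimed (slot, offset) pair is recorded under its slot
        reclaimed_offsets.entry(*slot).or_default().insert(*offset);
    }
    let dead_slots = HashSet::new(); // filled from store bookkeeping in the real function
    (dead_slots, reclaimed_offsets)
}
// Callers destructure the returned tuple and copy the offsets into their ReclaimResult,
// as `reclaim_result.1 = reclaimed_offsets;` does in the hunk below.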
+ reclaim_result.1 = reclaimed_offsets; if let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims { if let Some(expected_single_dead_slot) = expected_single_dead_slot { @@ -3533,7 +3528,7 @@ impl AccountsDb { self.process_dead_slots( &dead_slots, - Some(purged_account_slots), + Some(&mut reclaim_result.0), purge_stats, pubkeys_removed_from_accounts_index, ); @@ -7855,16 +7850,18 @@ impl AccountsDb { } } + /// returns (dead slots, reclaimed_offsets) fn remove_dead_accounts<'a, I>( &'a self, reclaims: I, expected_slot: Option, - mut reclaimed_offsets: Option<&mut SlotOffsets>, reset_accounts: bool, - ) -> IntSet + ) -> (IntSet, SlotOffsets) where I: Iterator, { + let mut reclaimed_offsets = SlotOffsets::default(); + assert!(self.storage.no_shrink_in_progress()); let mut dead_slots = IntSet::default(); @@ -7873,12 +7870,10 @@ impl AccountsDb { for (slot, account_info) in reclaims { // No cached accounts should make it here assert!(!account_info.is_cached()); - if let Some(ref mut reclaimed_offsets) = reclaimed_offsets { - reclaimed_offsets - .entry(*slot) - .or_default() - .insert(account_info.offset()); - } + reclaimed_offsets + .entry(*slot) + .or_default() + .insert(account_info.offset()); if let Some(expected_slot) = expected_slot { assert_eq!(*slot, expected_slot); } @@ -7936,7 +7931,7 @@ impl AccountsDb { true }); - dead_slots + (dead_slots, reclaimed_offsets) } /// pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index @@ -14002,7 +13997,7 @@ pub mod tests { [0]; assert_eq!(account_info.0, slot); let reclaims = [account_info]; - accounts_db.remove_dead_accounts(reclaims.iter(), None, None, true); + accounts_db.remove_dead_accounts(reclaims.iter(), None, true); let after_size = storage0.alive_bytes.load(Ordering::Acquire); assert_eq!(before_size, after_size + account.stored_size()); } From f975e92ebac09ed90e81d7670a4a6332494d32d5 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 4 Apr 2024 11:03:35 -0700 Subject: [PATCH 12/19] ff cleanup: reduce_stake_warmup_cooldown (#470) * ff cleanup: reduce_stake_warmup_cooldown * update instruction comments to indicate stake config is unused --- programs/stake/src/stake_instruction.rs | 184 +++++++----------------- sdk/program/src/stake/instruction.rs | 6 +- 2 files changed, 55 insertions(+), 135 deletions(-) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index b7ce459a541315..02196bfd647a35 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -1,11 +1,7 @@ use { - crate::{ - config, - stake_state::{ - authorize, authorize_with_seed, deactivate, deactivate_delinquent, delegate, - initialize, merge, new_warmup_cooldown_rate_epoch, redelegate, set_lockup, split, - withdraw, - }, + crate::stake_state::{ + authorize, authorize_with_seed, deactivate, deactivate_delinquent, delegate, initialize, + merge, new_warmup_cooldown_rate_epoch, redelegate, set_lockup, split, withdraw, }, log::*, solana_program_runtime::{ @@ -125,19 +121,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| )?; instruction_context.check_number_of_instruction_accounts(5)?; drop(me); - if !invoke_context - .feature_set - .is_active(&feature_set::reduce_stake_warmup_cooldown::id()) - { - // Post feature activation, remove both the feature gate code and the config completely in the interface - let config_account = - instruction_context.try_borrow_instruction_account(transaction_context, 4)?; - 
#[allow(deprecated)] - if !config::check_id(config_account.get_key()) { - return Err(InstructionError::InvalidArgument); - } - config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; - } delegate( invoke_context, transaction_context, @@ -341,19 +324,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| .is_active(&feature_set::stake_redelegate_instruction::id()) { instruction_context.check_number_of_instruction_accounts(3)?; - if !invoke_context - .feature_set - .is_active(&feature_set::reduce_stake_warmup_cooldown::id()) - { - // Post feature activation, remove both the feature gate code and the config completely in the interface - let config_account = instruction_context - .try_borrow_instruction_account(transaction_context, 3)?; - #[allow(deprecated)] - if !config::check_id(config_account.get_key()) { - return Err(InstructionError::InvalidArgument); - } - config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; - } redelegate( invoke_context, transaction_context, @@ -423,16 +393,9 @@ mod tests { Arc::new(FeatureSet::all_enabled()) } - /// With stake minimum delegation but 25% warmup/cooldown - fn feature_set_old_warmup_cooldown() -> Arc { - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&feature_set::reduce_stake_warmup_cooldown::id()); - Arc::new(feature_set) - } - - /// No stake minimum delegation and 25% warmup/cooldown - fn feature_set_old_warmup_cooldown_no_minimum_delegation() -> Arc { - let mut feature_set = feature_set_old_warmup_cooldown(); + /// No stake minimum delegation + fn feature_set_no_minimum_delegation() -> Arc { + let mut feature_set = feature_set_all_enabled(); Arc::get_mut(&mut feature_set) .unwrap() .deactivate(&feature_set::stake_raise_minimum_delegation_to_1_sol::id()); @@ -569,8 +532,7 @@ mod tests { active_stake } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_stake_process_instruction(feature_set: Arc) { process_instruction_as_one_arg( @@ -687,8 +649,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_spoofed_stake_accounts(feature_set: Arc) { process_instruction_as_one_arg( @@ -816,8 +777,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_stake_process_instruction_decode_bail(feature_set: Arc) { // these will not call stake_state, have bogus contents @@ -1085,8 +1045,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn 
test_stake_checked_instructions(feature_set: Arc) { let stake_address = Pubkey::new_unique(); @@ -1448,8 +1407,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_stake_initialize(feature_set: Arc) { let rent = Rent::default(); @@ -1554,8 +1512,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_authorize(feature_set: Arc) { let authority_address = solana_sdk::pubkey::new_rand(); @@ -1740,8 +1697,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_authorize_override(feature_set: Arc) { let authority_address = solana_sdk::pubkey::new_rand(); @@ -1860,8 +1816,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_authorize_with_seed(feature_set: Arc) { let authority_base_address = solana_sdk::pubkey::new_rand(); @@ -1978,8 +1933,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_authorize_delegated_stake(feature_set: Arc) { let authority_address = solana_sdk::pubkey::new_rand(); @@ -2176,8 +2130,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_stake_delegate(feature_set: Arc) { let mut vote_state = VoteState::default(); @@ -2424,8 +2377,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_redelegate_consider_balance_changes(feature_set: Arc) { let mut clock = Clock::default(); @@ -2631,8 +2583,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] 
#[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split(feature_set: Arc) { let stake_history = StakeHistory::default(); @@ -2765,8 +2716,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_withdraw_stake(feature_set: Arc) { let recipient_address = solana_sdk::pubkey::new_rand(); @@ -3060,8 +3010,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_withdraw_stake_before_warmup(feature_set: Arc) { let recipient_address = solana_sdk::pubkey::new_rand(); @@ -3195,8 +3144,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_withdraw_lockup(feature_set: Arc) { let recipient_address = solana_sdk::pubkey::new_rand(); @@ -3320,8 +3268,7 @@ mod tests { assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_withdraw_rent_exempt(feature_set: Arc) { let recipient_address = solana_sdk::pubkey::new_rand(); @@ -3417,8 +3364,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_deactivate(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -3548,8 +3494,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_set_lockup(feature_set: Arc) { let custodian_address = solana_sdk::pubkey::new_rand(); @@ -3838,8 +3783,7 @@ mod tests { /// Ensure that `initialize()` respects the minimum balance requirements /// - Assert 1: accounts with a balance equal-to the rent exemption initialize OK /// - Assert 2: accounts with a balance less-than the rent exemption do not initialize - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn 
test_initialize_minimum_balance(feature_set: Arc) { let rent = Rent::default(); @@ -3894,8 +3838,7 @@ mod tests { /// withdrawing below the minimum delegation, then re-delegating successfully (see /// `test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation()` for /// more information.) - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_delegate_minimum_stake_delegation(feature_set: Arc) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -3996,8 +3939,7 @@ mod tests { /// EQ | LT | Err /// LT | EQ | Err /// LT | LT | Err - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_minimum_stake_delegation(feature_set: Arc) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -4098,8 +4040,7 @@ mod tests { /// delegation is OK /// - Assert 2: splitting the full amount from an account that has less than the minimum /// delegation is not OK - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_full_amount_minimum_stake_delegation(feature_set: Arc) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -4193,8 +4134,7 @@ mod tests { /// Ensure that `split()` correctly handles prefunded destination accounts from /// initialized stakes. When a destination account already has funds, ensure /// the minimum split amount reduces accordingly. - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_initialized_split_destination_minimum_balance(feature_set: Arc) { let rent = Rent::default(); @@ -4288,8 +4228,7 @@ mod tests { /// Ensure that `split()` correctly handles prefunded destination accounts from staked stakes. /// When a destination account already has funds, ensure the minimum split amount reduces /// accordingly. 
- #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(), &[Ok(()), Ok(())]; "old_behavior")] - #[test_case(feature_set_old_warmup_cooldown(), &[ Err(StakeError::InsufficientDelegation.into()), Err(StakeError::InsufficientDelegation.into()) ] ; "new_behavior")] + #[test_case(feature_set_no_minimum_delegation(), &[Ok(()), Ok(())]; "old_behavior")] #[test_case(feature_set_all_enabled(), &[Err(StakeError::InsufficientDelegation.into()), Err(StakeError::InsufficientDelegation.into())]; "all_enabled")] fn test_staked_split_destination_minimum_balance( feature_set: Arc, @@ -4476,8 +4415,7 @@ mod tests { /// Ensure that `withdraw()` respects the minimum delegation requirements /// - Assert 1: withdrawing so remaining stake is equal-to the minimum is OK /// - Assert 2: withdrawing so remaining stake is less-than the minimum is not OK - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_withdraw_minimum_stake_delegation(feature_set: Arc) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -4760,8 +4698,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_source_uninitialized(feature_set: Arc) { let rent = Rent::default(); @@ -4861,8 +4798,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_split_not_uninitialized(feature_set: Arc) { let stake_lamports = 42; @@ -4913,8 +4849,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_more_than_staked(feature_set: Arc) { let rent = Rent::default(); @@ -4987,8 +4922,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_with_rent(feature_set: Arc) { let rent = Rent::default(); @@ -5120,8 +5054,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_to_account_with_rent_exempt_reserve(feature_set: Arc) { let rent = Rent::default(); @@ -5298,8 +5231,7 @@ mod tests { } } - 
#[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_from_larger_sized_account(feature_set: Arc) { let rent = Rent::default(); @@ -5471,8 +5403,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_from_smaller_sized_account(feature_set: Arc) { let rent = Rent::default(); @@ -5565,8 +5496,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_100_percent_of_source(feature_set: Arc) { let rent = Rent::default(); @@ -5685,8 +5615,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_100_percent_of_source_to_account_with_lamports(feature_set: Arc) { let rent = Rent::default(); @@ -5805,8 +5734,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split_rent_exemptness(feature_set: Arc) { let rent = Rent::default(); @@ -6171,8 +6099,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_merge(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -6307,8 +6234,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_merge_self_fails(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -6385,8 +6311,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_merge_incorrect_authorized_staker(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -6484,8 +6409,7 @@ mod tests 
{ } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_merge_invalid_account_data(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -6570,8 +6494,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_merge_fake_stake_source(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -6642,8 +6565,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_merge_active_stake(feature_set: Arc) { let stake_address = solana_sdk::pubkey::new_rand(); @@ -6921,8 +6843,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_stake_get_minimum_delegation(feature_set: Arc) { let stake_address = Pubkey::new_unique(); @@ -6961,8 +6882,7 @@ mod tests { // The GetMinimumDelegation instruction does not take any accounts; so when it was added, // `process_instruction()` needed to be updated to *not* need a stake account passed in, which // changes the error *ordering* conditions. 
- #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_stake_process_instruction_error_ordering(feature_set: Arc) { let rent = Rent::default(); @@ -7030,8 +6950,7 @@ mod tests { } } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_deactivate_delinquent(feature_set: Arc) { let reference_vote_address = Pubkey::new_unique(); @@ -7300,8 +7219,7 @@ mod tests { ); } - #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] - #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] + #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_redelegate(feature_set: Arc) { let feature_set = Arc::new(feature_set); diff --git a/sdk/program/src/stake/instruction.rs b/sdk/program/src/stake/instruction.rs index 9b964f0eee5f07..0bc80be6643de1 100644 --- a/sdk/program/src/stake/instruction.rs +++ b/sdk/program/src/stake/instruction.rs @@ -108,7 +108,7 @@ pub enum StakeInstruction { /// 1. `[]` Vote account to which this stake will be delegated /// 2. `[]` Clock sysvar /// 3. `[]` Stake history sysvar that carries stake warmup/cooldown history - /// 4. `[]` Address of config account that carries stake config + /// 4. `[]` Unused account, formerly the stake config /// 5. `[SIGNER]` Stake authority /// /// The entire balance of the staking account is staked. DelegateStake @@ -289,7 +289,7 @@ pub enum StakeInstruction { /// plus rent exempt minimum /// 1. `[WRITE]` Uninitialized stake account that will hold the redelegated stake /// 2. `[]` Vote account to which this stake will be re-delegated - /// 3. `[]` Address of config account that carries stake config + /// 3. `[]` Unused account, formerly the stake config /// 4. 
`[SIGNER]` Stake authority /// Redelegate, @@ -677,6 +677,7 @@ pub fn delegate_stake( AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), #[allow(deprecated)] + // For backwards compatibility we pass the stake config, although this account is unused AccountMeta::new_readonly(config::id(), false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; @@ -782,6 +783,7 @@ fn _redelegate( AccountMeta::new(*uninitialized_stake_pubkey, false), AccountMeta::new_readonly(*vote_pubkey, false), #[allow(deprecated)] + // For backwards compatibility we pass the stake config, although this account is unused AccountMeta::new_readonly(config::id(), false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; From bc81153d60e7d2f24d3d4933275421ff1d87eb40 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Thu, 4 Apr 2024 13:06:08 -0600 Subject: [PATCH 13/19] Add function to reward with full priority fee and burnt transaction fee (#566) * refactor shareble code into its own function; update and add tests * add function to reward with full prio fee and burnt transaction fee --- runtime/src/bank.rs | 4 +- runtime/src/bank/fee_distribution.rs | 314 ++++++++++++++++++++++----- 2 files changed, 265 insertions(+), 53 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index da92443ae4fda5..8da6210d5e740b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -268,8 +268,8 @@ impl AddAssign for SquashTiming { #[derive(AbiExample, Debug, Default, PartialEq)] pub(crate) struct CollectorFeeDetails { - pub transaction_fee: u64, - pub priority_fee: u64, + transaction_fee: u64, + priority_fee: u64, } impl CollectorFeeDetails { diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 5a53a1278881fa..c4326a8d440688 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,5 +1,6 @@ use { super::Bank, + crate::bank::CollectorFeeDetails, log::{debug, warn}, solana_sdk::{ account::{ReadableAccount, WritableAccount}, @@ -50,45 +51,79 @@ impl Bank { if collector_fees != 0 { let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees); if deposit > 0 { - let validate_fee_collector = self.validate_fee_collector_account(); - match self.deposit_fees( - &self.collector_id, - deposit, - DepositFeeOptions { - check_account_owner: validate_fee_collector, - check_rent_paying: validate_fee_collector, - }, - ) { - Ok(post_balance) => { - self.rewards.write().unwrap().push(( - self.collector_id, - RewardInfo { - reward_type: RewardType::Fee, - lamports: deposit as i64, - post_balance, - commission: None, - }, - )); - } - Err(err) => { - debug!( - "Burned {} lamport tx fee instead of sending to {} due to {}", - deposit, self.collector_id, err - ); - datapoint_warn!( - "bank-burned_fee", - ("slot", self.slot(), i64), - ("num_lamports", deposit, i64), - ("error", err.to_string(), String), - ); - burn += deposit; - } - } + self.deposit_or_burn_fee(deposit, &mut burn); } self.capitalization.fetch_sub(burn, Relaxed); } } + // NOTE: to replace `distribute_transaction_fees()`, it applies different burn/reward rate + // on different fees: + // transaction fee: same fee_rate_governor rule + // priority fee: 100% reward + // next PR will call it behind a feature gate + #[allow(dead_code)] + pub(super) fn distribute_transaction_fee_details(&self) { + let CollectorFeeDetails { + transaction_fee, + priority_fee, + } = 
*self.collector_fee_details.read().unwrap(); + + if transaction_fee.saturating_add(priority_fee) == 0 { + // nothing to distribute, exit early + return; + } + + let (mut deposit, mut burn) = if transaction_fee != 0 { + self.fee_rate_governor.burn(transaction_fee) + } else { + (0, 0) + }; + deposit = deposit.saturating_add(priority_fee); + + if deposit > 0 { + self.deposit_or_burn_fee(deposit, &mut burn); + } + self.capitalization.fetch_sub(burn, Relaxed); + } + + fn deposit_or_burn_fee(&self, deposit: u64, burn: &mut u64) { + let validate_fee_collector = self.validate_fee_collector_account(); + match self.deposit_fees( + &self.collector_id, + deposit, + DepositFeeOptions { + check_account_owner: validate_fee_collector, + check_rent_paying: validate_fee_collector, + }, + ) { + Ok(post_balance) => { + self.rewards.write().unwrap().push(( + self.collector_id, + RewardInfo { + reward_type: RewardType::Fee, + lamports: deposit as i64, + post_balance, + commission: None, + }, + )); + } + Err(err) => { + debug!( + "Burned {} lamport tx fee instead of sending to {} due to {}", + deposit, self.collector_id, err + ); + datapoint_warn!( + "bank-burned_fee", + ("slot", self.slot(), i64), + ("num_lamports", deposit, i64), + ("error", err.to_string(), String), + ); + *burn = burn.saturating_add(deposit); + } + } + } + // Deposits fees into a specified account and if successful, returns the new balance of that account fn deposit_fees( &self, @@ -298,10 +333,11 @@ pub mod tests { account::AccountSharedData, feature_set, native_token::sol_to_lamports, pubkey, rent::Rent, signature::Signer, }, + std::sync::RwLock, }; #[test] - fn test_distribute_transaction_fees() { + fn test_deposit_or_burn_fee() { #[derive(PartialEq)] enum Scenario { Normal, @@ -343,19 +379,16 @@ pub mod tests { let min_rent_exempt_balance = rent.minimum_balance(0); genesis.genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default let bank = Bank::new_for_tests(&genesis.genesis_config); - let transaction_fees = 100; - bank.collector_fees.fetch_add(transaction_fees, Relaxed); - assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); - let (expected_collected_fees, burn_amount) = - bank.fee_rate_governor.burn(transaction_fees); - assert!(burn_amount > 0); + + let deposit = 100; + let mut burn = 100; if test_case.scenario == Scenario::RentPaying { // ensure that account balance + collected fees will make it rent-paying let initial_balance = 100; let account = AccountSharedData::new(initial_balance, 0, &system_program::id()); bank.store_account(bank.collector_id(), &account); - assert!(initial_balance + transaction_fees < min_rent_exempt_balance); + assert!(initial_balance + deposit < min_rent_exempt_balance); } else if test_case.scenario == Scenario::InvalidOwner { // ensure that account owner is invalid and fee distribution will fail let account = @@ -367,17 +400,14 @@ pub mod tests { bank.store_account(bank.collector_id(), &account); } - let initial_capitalization = bank.capitalization(); + let initial_burn = burn; let initial_collector_id_balance = bank.get_balance(bank.collector_id()); - bank.distribute_transaction_fees(); + bank.deposit_or_burn_fee(deposit, &mut burn); let new_collector_id_balance = bank.get_balance(bank.collector_id()); if test_case.scenario != Scenario::Normal && !test_case.disable_checks { assert_eq!(initial_collector_id_balance, new_collector_id_balance); - assert_eq!( - initial_capitalization - transaction_fees, - bank.capitalization() - ); + 
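// A small arithmetic sketch of the policy in the NOTE above, using made-up numbers: the
// base transaction fee still goes through the fee_rate_governor burn split, while the
// priority fee is deposited to the collector in full. A burn_percent of 50 is only an
// illustrative value here.
#[test]
fn fee_split_sketch() {
    let burn_percent: u64 = 50;
    let transaction_fee: u64 = 100;
    let priority_fee: u64 = 200;

    let burn = transaction_fee * burn_percent / 100;        // 50 lamports burned
    let deposit = (transaction_fee - burn) + priority_fee;  // 50 + 200 = 250 to the collector

    assert_eq!(burn, 50);
    assert_eq!(deposit, 250);
}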
assert_eq!(initial_burn + deposit, burn); let locked_rewards = bank.rewards.read().unwrap(); assert!( locked_rewards.is_empty(), @@ -385,11 +415,11 @@ pub mod tests { ); } else { assert_eq!( - initial_collector_id_balance + expected_collected_fees, + initial_collector_id_balance + deposit, new_collector_id_balance ); - assert_eq!(initial_capitalization - burn_amount, bank.capitalization()); + assert_eq!(initial_burn, burn); let locked_rewards = bank.rewards.read().unwrap(); assert_eq!( @@ -400,7 +430,7 @@ pub mod tests { let reward_info = &locked_rewards[0]; assert_eq!( - reward_info.1.lamports, expected_collected_fees as i64, + reward_info.1.lamports, deposit as i64, "The reward amount should match the expected deposit" ); assert_eq!( @@ -412,6 +442,44 @@ pub mod tests { } } + #[test] + fn test_distribute_transaction_fees_normal() { + let genesis = create_genesis_config(0); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + let (expected_collected_fees, burn_amount) = bank.fee_rate_governor.burn(transaction_fees); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!( + initial_collector_id_balance + expected_collected_fees, + new_collector_id_balance + ); + assert_eq!(initial_capitalization - burn_amount, bank.capitalization()); + let locked_rewards = bank.rewards.read().unwrap(); + assert_eq!( + locked_rewards.len(), + 1, + "There should be one reward distributed" + ); + + let reward_info = &locked_rewards[0]; + assert_eq!( + reward_info.1.lamports, expected_collected_fees as i64, + "The reward amount should match the expected deposit" + ); + assert_eq!( + reward_info.1.reward_type, + RewardType::Fee, + "The reward type should be Fee" + ); + } + #[test] fn test_distribute_transaction_fees_zero() { let genesis = create_genesis_config(0); @@ -818,4 +886,148 @@ pub mod tests { } } } + + #[test] + fn test_distribute_transaction_fee_details_normal() { + let genesis = create_genesis_config(0); + let mut bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fee = 100; + let priority_fee = 200; + bank.collector_fee_details = RwLock::new(CollectorFeeDetails { + transaction_fee, + priority_fee, + }); + let (expected_deposit, expected_burn) = bank.fee_rate_governor.burn(transaction_fee); + let expected_rewards = expected_deposit + priority_fee; + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fee_details(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!( + initial_collector_id_balance + expected_rewards, + new_collector_id_balance + ); + assert_eq!( + initial_capitalization - expected_burn, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert_eq!( + locked_rewards.len(), + 1, + "There should be one reward distributed" + ); + + let reward_info = &locked_rewards[0]; + assert_eq!( + reward_info.1.lamports, expected_rewards as i64, + "The reward amount should match the expected deposit" + ); + assert_eq!( + reward_info.1.reward_type, + RewardType::Fee, + "The reward type should be Fee" + ); + } + + #[test] + fn 
test_distribute_transaction_fee_details_zero() { + let genesis = create_genesis_config(0); + let bank = Bank::new_for_tests(&genesis.genesis_config); + assert_eq!( + *bank.collector_fee_details.read().unwrap(), + CollectorFeeDetails::default() + ); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fee_details(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!(initial_capitalization, bank.capitalization()); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_distribute_transaction_fee_details_burn_all() { + let mut genesis = create_genesis_config(0); + genesis.genesis_config.fee_rate_governor.burn_percent = 100; + let mut bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fee = 100; + let priority_fee = 200; + bank.collector_fee_details = RwLock::new(CollectorFeeDetails { + transaction_fee, + priority_fee, + }); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fee_details(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!( + initial_collector_id_balance + priority_fee, + new_collector_id_balance + ); + assert_eq!( + initial_capitalization - transaction_fee, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert_eq!( + locked_rewards.len(), + 1, + "There should be one reward distributed" + ); + + let reward_info = &locked_rewards[0]; + assert_eq!( + reward_info.1.lamports, priority_fee as i64, + "The reward amount should match the expected deposit" + ); + assert_eq!( + reward_info.1.reward_type, + RewardType::Fee, + "The reward type should be Fee" + ); + } + + #[test] + fn test_distribute_transaction_fee_details_overflow_failure() { + let genesis = create_genesis_config(0); + let mut bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fee = 100; + let priority_fee = 200; + bank.collector_fee_details = RwLock::new(CollectorFeeDetails { + transaction_fee, + priority_fee, + }); + + // ensure that account balance will overflow and fee distribution will fail + let account = AccountSharedData::new(u64::MAX, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fee_details(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fee - priority_fee, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } } From 2b0391049d27997bf2c986bcef467a70b828a63e Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Thu, 4 Apr 2024 13:19:13 -0700 Subject: [PATCH 14/19] transaction performance tracking -- streamer stage (#257) * transaction performance tracking -- streamer stage --- Cargo.lock | 16 +++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 16 +++ sdk/src/packet.rs | 13 ++ 
streamer/Cargo.toml | 2 + streamer/src/nonblocking/quic.rs | 44 ++++++- streamer/src/quic.rs | 45 ++++++- transaction-metrics-tracker/Cargo.toml | 25 ++++ transaction-metrics-tracker/src/lib.rs | 157 +++++++++++++++++++++++++ 9 files changed, 317 insertions(+), 3 deletions(-) create mode 100644 transaction-metrics-tracker/Cargo.toml create mode 100644 transaction-metrics-tracker/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 88174cfc7d95b0..6d01f67b9181e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7288,9 +7288,11 @@ dependencies = [ "rand 0.8.5", "rustls", "solana-logger", + "solana-measure", "solana-metrics", "solana-perf", "solana-sdk", + "solana-transaction-metrics-tracker", "thiserror", "tokio", "x509-parser", @@ -7457,6 +7459,20 @@ dependencies = [ "solana-version", ] +[[package]] +name = "solana-transaction-metrics-tracker" +version = "2.0.0" +dependencies = [ + "Inflector", + "base64 0.22.0", + "bincode", + "lazy_static", + "log", + "rand 0.8.5", + "solana-perf", + "solana-sdk", +] + [[package]] name = "solana-transaction-status" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index d73b16c8ec7dba..0f85ef090cfbc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,6 +107,7 @@ members = [ "tokens", "tpu-client", "transaction-dos", + "transaction-metrics-tracker", "transaction-status", "turbine", "udp-client", @@ -380,6 +381,7 @@ solana-test-validator = { path = "test-validator", version = "=2.0.0" } solana-thin-client = { path = "thin-client", version = "=2.0.0" } solana-tpu-client = { path = "tpu-client", version = "=2.0.0", default-features = false } solana-transaction-status = { path = "transaction-status", version = "=2.0.0" } +solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=2.0.0" } solana-turbine = { path = "turbine", version = "=2.0.0" } solana-udp-client = { path = "udp-client", version = "=2.0.0" } solana-version = { path = "version", version = "=2.0.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8e6592dc2ba684..3d383111c3f97c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6333,9 +6333,11 @@ dependencies = [ "quinn-proto", "rand 0.8.5", "rustls", + "solana-measure", "solana-metrics", "solana-perf", "solana-sdk", + "solana-transaction-metrics-tracker", "thiserror", "tokio", "x509-parser", @@ -6437,6 +6439,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-transaction-metrics-tracker" +version = "2.0.0" +dependencies = [ + "Inflector", + "base64 0.22.0", + "bincode", + "lazy_static", + "log", + "rand 0.8.5", + "solana-perf", + "solana-sdk", +] + [[package]] name = "solana-transaction-status" version = "2.0.0" diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index faea9ab4753c67..8300b57218c696 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -33,6 +33,8 @@ bitflags! { /// the packet is built. /// This field can be removed when the above feature gate is adopted by mainnet-beta. 
const ROUND_COMPUTE_UNIT_PRICE = 0b0010_0000; + /// For tracking performance + const PERF_TRACK_PACKET = 0b0100_0000; } } @@ -228,6 +230,12 @@ impl Meta { self.flags.set(PacketFlags::TRACER_PACKET, is_tracer); } + #[inline] + pub fn set_track_performance(&mut self, is_performance_track: bool) { + self.flags + .set(PacketFlags::PERF_TRACK_PACKET, is_performance_track); + } + #[inline] pub fn set_simple_vote(&mut self, is_simple_vote: bool) { self.flags.set(PacketFlags::SIMPLE_VOTE_TX, is_simple_vote); @@ -261,6 +269,11 @@ impl Meta { self.flags.contains(PacketFlags::TRACER_PACKET) } + #[inline] + pub fn is_perf_track_packet(&self) -> bool { + self.flags.contains(PacketFlags::PERF_TRACK_PACKET) + } + #[inline] pub fn round_compute_unit_price(&self) -> bool { self.flags.contains(PacketFlags::ROUND_COMPUTE_UNIT_PRICE) diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 8e1eb12dff1d42..55d0030e734607 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -26,9 +26,11 @@ quinn = { workspace = true } quinn-proto = { workspace = true } rand = { workspace = true } rustls = { workspace = true, features = ["dangerous_configuration"] } +solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-perf = { workspace = true } solana-sdk = { workspace = true } +solana-transaction-metrics-tracker = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } x509-parser = { workspace = true } diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index c4969006288dbf..feb9bd2db65a3e 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -17,6 +17,7 @@ use { quinn::{Connecting, Connection, Endpoint, EndpointConfig, TokioRuntime, VarInt}, quinn_proto::VarIntBoundsExceeded, rand::{thread_rng, Rng}, + solana_measure::measure::Measure, solana_perf::packet::{PacketBatch, PACKETS_PER_BATCH}, solana_sdk::{ packet::{Meta, PACKET_DATA_SIZE}, @@ -27,9 +28,10 @@ use { QUIC_MIN_STAKED_CONCURRENT_STREAMS, QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO, QUIC_TOTAL_STAKED_CONCURRENT_STREAMS, QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO, }, - signature::Keypair, + signature::{Keypair, Signature}, timing, }, + solana_transaction_metrics_tracker::signature_if_should_track_packet, std::{ iter::repeat_with, net::{IpAddr, SocketAddr, UdpSocket}, @@ -94,6 +96,7 @@ struct PacketChunk { struct PacketAccumulator { pub meta: Meta, pub chunks: Vec, + pub start_time: Instant, } #[derive(Copy, Clone, Debug)] @@ -646,6 +649,7 @@ async fn packet_batch_sender( trace!("enter packet_batch_sender"); let mut batch_start_time = Instant::now(); loop { + let mut packet_perf_measure: Vec<([u8; 64], std::time::Instant)> = Vec::default(); let mut packet_batch = PacketBatch::with_capacity(PACKETS_PER_BATCH); let mut total_bytes: usize = 0; @@ -665,6 +669,8 @@ async fn packet_batch_sender( || (!packet_batch.is_empty() && elapsed >= coalesce) { let len = packet_batch.len(); + track_streamer_fetch_packet_performance(&mut packet_perf_measure, &stats); + if let Err(e) = packet_sender.send(packet_batch) { stats .total_packet_batch_send_err @@ -710,6 +716,14 @@ async fn packet_batch_sender( total_bytes += packet_batch[i].meta().size; + if let Some(signature) = signature_if_should_track_packet(&packet_batch[i]) + .ok() + .flatten() + { + packet_perf_measure.push((*signature, packet_accumulator.start_time)); + // we set the PERF_TRACK_PACKET on + packet_batch[i].meta_mut().set_track_performance(true); + } stats 
.total_chunks_processed_by_batcher .fetch_add(num_chunks, Ordering::Relaxed); @@ -718,6 +732,32 @@ async fn packet_batch_sender( } } +fn track_streamer_fetch_packet_performance( + packet_perf_measure: &mut [([u8; 64], Instant)], + stats: &Arc, +) { + if packet_perf_measure.is_empty() { + return; + } + let mut measure = Measure::start("track_perf"); + let mut process_sampled_packets_us_hist = stats.process_sampled_packets_us_hist.lock().unwrap(); + + for (signature, start_time) in packet_perf_measure.iter() { + let duration = Instant::now().duration_since(*start_time); + debug!( + "QUIC streamer fetch stage took {duration:?} for transaction {:?}", + Signature::from(*signature) + ); + process_sampled_packets_us_hist + .increment(duration.as_micros() as u64) + .unwrap(); + } + measure.stop(); + stats + .perf_track_overhead_us + .fetch_add(measure.as_us(), Ordering::Relaxed); +} + async fn handle_connection( connection: Connection, remote_addr: SocketAddr, @@ -872,6 +912,7 @@ async fn handle_chunk( *packet_accum = Some(PacketAccumulator { meta, chunks: Vec::new(), + start_time: Instant::now(), }); } @@ -1471,6 +1512,7 @@ pub mod test { offset, end_of_chunk: size, }], + start_time: Instant::now(), }; ptk_sender.send(packet_accum).await.unwrap(); } diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index a7a08c73f4833b..3b1b6b21adf468 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -16,8 +16,8 @@ use { std::{ net::UdpSocket, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, RwLock, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, Mutex, RwLock, }, thread, time::{Duration, SystemTime}, @@ -175,10 +175,19 @@ pub struct StreamStats { pub(crate) stream_load_ema: AtomicUsize, pub(crate) stream_load_ema_overflow: AtomicUsize, pub(crate) stream_load_capacity_overflow: AtomicUsize, + pub(crate) process_sampled_packets_us_hist: Mutex, + pub(crate) perf_track_overhead_us: AtomicU64, } impl StreamStats { pub fn report(&self, name: &'static str) { + let process_sampled_packets_us_hist = { + let mut metrics = self.process_sampled_packets_us_hist.lock().unwrap(); + let process_sampled_packets_us_hist = metrics.clone(); + metrics.clear(); + process_sampled_packets_us_hist + }; + datapoint_info!( name, ( @@ -425,6 +434,38 @@ impl StreamStats { self.stream_load_capacity_overflow.load(Ordering::Relaxed), i64 ), + ( + "process_sampled_packets_us_90pct", + process_sampled_packets_us_hist + .percentile(90.0) + .unwrap_or(0), + i64 + ), + ( + "process_sampled_packets_us_min", + process_sampled_packets_us_hist.minimum().unwrap_or(0), + i64 + ), + ( + "process_sampled_packets_us_max", + process_sampled_packets_us_hist.maximum().unwrap_or(0), + i64 + ), + ( + "process_sampled_packets_us_mean", + process_sampled_packets_us_hist.mean().unwrap_or(0), + i64 + ), + ( + "process_sampled_packets_count", + process_sampled_packets_us_hist.entries(), + i64 + ), + ( + "perf_track_overhead_us", + self.perf_track_overhead_us.swap(0, Ordering::Relaxed), + i64 + ), ); } } diff --git a/transaction-metrics-tracker/Cargo.toml b/transaction-metrics-tracker/Cargo.toml new file mode 100644 index 00000000000000..9bd82702a3ebb4 --- /dev/null +++ b/transaction-metrics-tracker/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "solana-transaction-metrics-tracker" +description = "Solana transaction metrics tracker" +documentation = "https://docs.rs/solana-transaction-metrics-tracker" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { 
workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+publish = false
+
+[dependencies]
+Inflector = { workspace = true }
+base64 = { workspace = true }
+bincode = { workspace = true }
+# Update this borsh dependency to the workspace version once
+lazy_static = { workspace = true }
+log = { workspace = true }
+rand = { workspace = true }
+solana-perf = { workspace = true }
+solana-sdk = { workspace = true }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/transaction-metrics-tracker/src/lib.rs b/transaction-metrics-tracker/src/lib.rs
new file mode 100644
index 00000000000000..2baec195de9b84
--- /dev/null
+++ b/transaction-metrics-tracker/src/lib.rs
@@ -0,0 +1,157 @@
+use {
+    lazy_static::lazy_static,
+    log::*,
+    rand::Rng,
+    solana_perf::sigverify::PacketError,
+    solana_sdk::{packet::Packet, short_vec::decode_shortu16_len, signature::SIGNATURE_BYTES},
+};
+
+// The mask is 12 bits long (1<<12 = 4096), so the probability of matching a given
+// transaction is 1/4096, assuming the portion being matched is random.
+lazy_static! {
+    static ref TXN_MASK: u16 = rand::thread_rng().gen_range(0..4096);
+}
+
+/// Check if a transaction, given its signature, matches the randomly selected mask.
+/// The signature bytes should be taken from a reference to a `Signature`.
+pub fn should_track_transaction(signature: &[u8; SIGNATURE_BYTES]) -> bool {
+    // We do not use the highest signature byte as it is not really random
+    let match_portion: u16 = u16::from_le_bytes([signature[61], signature[62]]) >> 4;
+    trace!("Matching txn: {match_portion:016b} {:016b}", *TXN_MASK);
+    *TXN_MASK == match_portion
+}
+
+/// Check if a transaction packet's signature matches the mask.
+/// This does a rudimentary verification to make sure the packet at least
+/// contains the signature data, and it returns a reference to the signature.
+pub fn signature_if_should_track_packet(
+    packet: &Packet,
+) -> Result<Option<&[u8; SIGNATURE_BYTES]>, PacketError> {
+    let signature = get_signature_from_packet(packet)?;
+    Ok(should_track_transaction(signature).then_some(signature))
+}
+
+/// Get the signature of the transaction packet.
+/// This does a rudimentary verification to make sure the packet at least
+/// contains the signature data, and it returns a reference to the signature.
+pub fn get_signature_from_packet(packet: &Packet) -> Result<&[u8; SIGNATURE_BYTES], PacketError> {
+    let (sig_len_untrusted, sig_start) = packet
+        .data(..)
+ .and_then(|bytes| decode_shortu16_len(bytes).ok()) + .ok_or(PacketError::InvalidShortVec)?; + + if sig_len_untrusted < 1 { + return Err(PacketError::InvalidSignatureLen); + } + + let signature = packet + .data(sig_start..sig_start.saturating_add(SIGNATURE_BYTES)) + .ok_or(PacketError::InvalidSignatureLen)?; + let signature = signature + .try_into() + .map_err(|_| PacketError::InvalidSignatureLen)?; + Ok(signature) +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + hash::Hash, + signature::{Keypair, Signature}, + system_transaction, + }, + }; + + #[test] + fn test_get_signature_from_packet() { + // Default invalid txn packet + let packet = Packet::default(); + let sig = get_signature_from_packet(&packet); + assert_eq!(sig, Err(PacketError::InvalidShortVec)); + + // Use a valid transaction, it should succeed + let tx = system_transaction::transfer( + &Keypair::new(), + &solana_sdk::pubkey::new_rand(), + 1, + Hash::new_unique(), + ); + let mut packet = Packet::from_data(None, tx).unwrap(); + + let sig = get_signature_from_packet(&packet); + assert!(sig.is_ok()); + + // Invalid signature length + packet.buffer_mut()[0] = 0x0; + let sig = get_signature_from_packet(&packet); + assert_eq!(sig, Err(PacketError::InvalidSignatureLen)); + } + + #[test] + fn test_should_track_transaction() { + let mut sig = [0x0; SIGNATURE_BYTES]; + let track = should_track_transaction(&sig); + assert!(!track); + + // Intentionally matching the randomly generated mask + // The lower four bits are ignored as only 12 highest bits from + // signature's 61 and 62 u8 are used for matching. + // We generate a random one + let mut rng = rand::thread_rng(); + let random_number: u8 = rng.gen_range(0..=15); + sig[61] = ((*TXN_MASK & 0xf_u16) << 4) as u8 | random_number; + sig[62] = (*TXN_MASK >> 4) as u8; + + let track = should_track_transaction(&sig); + assert!(track); + } + + #[test] + fn test_signature_if_should_track_packet() { + // Default invalid txn packet + let packet = Packet::default(); + let sig = signature_if_should_track_packet(&packet); + assert_eq!(sig, Err(PacketError::InvalidShortVec)); + + // Use a valid transaction which is not matched + let tx = system_transaction::transfer( + &Keypair::new(), + &solana_sdk::pubkey::new_rand(), + 1, + Hash::new_unique(), + ); + let packet = Packet::from_data(None, tx).unwrap(); + let sig = signature_if_should_track_packet(&packet); + assert_eq!(Ok(None), sig); + + // Now simulate a txn matching the signature mask + let mut tx = system_transaction::transfer( + &Keypair::new(), + &solana_sdk::pubkey::new_rand(), + 1, + Hash::new_unique(), + ); + let mut sig = [0x0; SIGNATURE_BYTES]; + sig[61] = ((*TXN_MASK & 0xf_u16) << 4) as u8; + sig[62] = (*TXN_MASK >> 4) as u8; + + let sig = Signature::from(sig); + tx.signatures[0] = sig; + let mut packet = Packet::from_data(None, tx).unwrap(); + let sig2 = signature_if_should_track_packet(&packet); + + match sig2 { + Ok(sig) => { + assert!(sig.is_some()); + } + Err(_) => panic!("Expected to get a matching signature!"), + } + + // Invalid signature length + packet.buffer_mut()[0] = 0x0; + let sig = signature_if_should_track_packet(&packet); + assert_eq!(sig, Err(PacketError::InvalidSignatureLen)); + } +} From dcc195e060f7be0b94c574015c9e8eb8cb103dc7 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 4 Apr 2024 16:13:00 -0500 Subject: [PATCH 15/19] when flushing write cache, ignore reclaims (#581) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 366b27130ba7b9..713b60f8b6794d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6287,7 +6287,7 @@ impl AccountsDb { (slot, &accounts[..]), Some(hashes), &flushed_store, - StoreReclaims::Default, + StoreReclaims::Ignore, )); flush_stats.store_accounts_timing = store_accounts_timing_inner; flush_stats.store_accounts_total_us = Saturating(store_accounts_total_inner_us); From 526979d589e26433c515a484ed020d584a4aff88 Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 4 Apr 2024 18:20:01 -0500 Subject: [PATCH 16/19] program-runtime: hoist `MessageProcessor` up to SVM (#586) --- Cargo.lock | 4 +++- program-runtime/Cargo.toml | 3 +-- program-runtime/src/lib.rs | 1 - programs/sbf/Cargo.lock | 2 +- svm/Cargo.toml | 3 +++ svm/src/lib.rs | 1 + {program-runtime => svm}/src/message_processor.rs | 10 ++++------ svm/src/transaction_processor.rs | 2 +- 8 files changed, 14 insertions(+), 12 deletions(-) rename {program-runtime => svm}/src/message_processor.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 6d01f67b9181e4..383740e77e7592 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6702,7 +6702,6 @@ dependencies = [ "enum-iterator", "itertools", "libc", - "libsecp256k1", "log", "num-derive", "num-traits", @@ -7304,9 +7303,12 @@ version = "2.0.0" dependencies = [ "bincode", "itertools", + "libsecp256k1", "log", "percentage", + "rand 0.8.5", "rustc_version 0.4.0", + "serde", "solana-bpf-loader-program", "solana-frozen-abi", "solana-frozen-abi-macro", diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index afec7352e1fb70..acf65778af5b72 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -21,7 +21,6 @@ num-derive = { workspace = true } num-traits = { workspace = true } percentage = { workspace = true } rand = { workspace = true } -serde = { workspace = true, features = ["derive", "rc"] } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } solana-measure = { workspace = true } @@ -32,7 +31,7 @@ thiserror = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -libsecp256k1 = { workspace = true } +serde = { workspace = true } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } test-case = { workspace = true } diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index 079f214fa236f0..ca44b44cd41aa9 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -15,7 +15,6 @@ pub mod compute_budget_processor; pub mod invoke_context; pub mod loaded_programs; pub mod log_collector; -pub mod message_processor; pub mod prioritization_fee; pub mod runtime_config; pub mod stable_log; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3d383111c3f97c..f6a62d9333b62f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5446,7 +5446,6 @@ dependencies = [ "percentage", "rand 0.8.5", "rustc_version", - "serde", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-measure", @@ -6351,6 +6350,7 @@ dependencies = [ "log", "percentage", "rustc_version", + "serde", "solana-bpf-loader-program", "solana-frozen-abi", "solana-frozen-abi-macro", diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 21da2f7105bd73..ba5286e984c114 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } itertools = { workspace = true } log = { workspace = true } percentage = { workspace = 
true } +serde = { workspace = true, features = ["derive", "rc"] } solana-bpf-loader-program = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } @@ -29,6 +30,8 @@ name = "solana_svm" [dev-dependencies] bincode = { workspace = true } +libsecp256k1 = { workspace = true } +rand = { workspace = true } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/svm/src/lib.rs b/svm/src/lib.rs index d0f679a15c448b..daa66a6fc6f774 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -4,6 +4,7 @@ pub mod account_loader; pub mod account_overrides; pub mod account_rent_state; +pub mod message_processor; pub mod transaction_account_state_info; pub mod transaction_error_metrics; pub mod transaction_processor; diff --git a/program-runtime/src/message_processor.rs b/svm/src/message_processor.rs similarity index 99% rename from program-runtime/src/message_processor.rs rename to svm/src/message_processor.rs index f3035820ac041c..6e788b9c0f86e0 100644 --- a/program-runtime/src/message_processor.rs +++ b/svm/src/message_processor.rs @@ -1,10 +1,10 @@ use { - crate::{ + serde::{Deserialize, Serialize}, + solana_measure::measure::Measure, + solana_program_runtime::{ invoke_context::InvokeContext, timings::{ExecuteDetailsTimings, ExecuteTimings}, }, - serde::{Deserialize, Serialize}, - solana_measure::measure::Measure, solana_sdk::{ account::WritableAccount, message::SanitizedMessage, @@ -34,7 +34,6 @@ impl MessageProcessor { /// For each instruction it calls the program entrypoint method and verifies that the result of /// the call does not violate the bank's accounting rules. /// The accounts are committed back to the bank only if every instruction succeeds. 
- #[allow(clippy::too_many_arguments)] pub fn process_message( message: &SanitizedMessage, program_indices: &[Vec], @@ -146,11 +145,10 @@ impl MessageProcessor { mod tests { use { super::*, - crate::{ + solana_program_runtime::{ compute_budget::ComputeBudget, declare_process_instruction, loaded_programs::{LoadedProgram, LoadedProgramsForTxBatch}, - message_processor::MessageProcessor, sysvar_cache::SysvarCache, }, solana_sdk::{ diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index e25c2ccfb24907..cc84347fa0d426 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -4,6 +4,7 @@ use { load_accounts, LoadedTransaction, TransactionCheckResult, TransactionLoadResult, }, account_overrides::AccountOverrides, + message_processor::MessageProcessor, transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ @@ -22,7 +23,6 @@ use { ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, - message_processor::MessageProcessor, runtime_config::RuntimeConfig, sysvar_cache::SysvarCache, timings::{ExecuteDetailsTimings, ExecuteTimingType, ExecuteTimings}, From 87fc227b51e37fa43876cab0ca432fe9b9c06350 Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 4 Apr 2024 18:20:20 -0500 Subject: [PATCH 17/19] Runtime: Core BPF Migration: Setup migration configurations (#525) * runtime: builtins: add `core_bpf_migration_config` to prototypes * runtime: builtins: set up test builtins * macro-ize builtin testing --- .../bank/builtins/core_bpf_migration/mod.rs | 4 +- runtime/src/bank/builtins/mod.rs | 359 ++++++++++++++++-- runtime/src/bank/builtins/prototypes.rs | 6 + 3 files changed, 330 insertions(+), 39 deletions(-) diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 261dc4f65b9183..6dbd4bc950a173 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -27,7 +27,7 @@ use { /// Identifies the type of built-in program targeted for Core BPF migration. /// The type of target determines whether the program should have a program /// account or not, which is checked before migration. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub(crate) enum CoreBpfMigrationTargetType { /// A standard (stateful) builtin program must have a program account. Builtin, @@ -36,7 +36,7 @@ pub(crate) enum CoreBpfMigrationTargetType { } /// Configuration for migrating a built-in program to Core BPF. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub(crate) struct CoreBpfMigrationConfig { /// The program ID of the source program to be used to replace the builtin. pub source_program_id: Pubkey, diff --git a/runtime/src/bank/builtins/mod.rs b/runtime/src/bank/builtins/mod.rs index d9f9f573144c7a..f4239e1b5657ab 100644 --- a/runtime/src/bank/builtins/mod.rs +++ b/runtime/src/bank/builtins/mod.rs @@ -4,76 +4,361 @@ pub mod prototypes; pub use prototypes::{BuiltinPrototype, StatelessBuiltinPrototype}; use solana_sdk::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, feature_set}; +macro_rules! testable_prototype { + ($prototype:ident { + core_bpf_migration_config: $core_bpf_migration_config:expr, + name: $name:ident, + $($field:ident : $value:expr),* $(,)? 
+ }) => { + $prototype { + core_bpf_migration_config: { + #[cfg(not(test))] + { + $core_bpf_migration_config + } + #[cfg(test)] + { + Some( test_only::$name::CONFIG ) + } + }, + name: stringify!($name), + $($field: $value),* + } + }; +} + pub static BUILTINS: &[BuiltinPrototype] = &[ - BuiltinPrototype { + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: system_program, enable_feature_id: None, program_id: solana_system_program::id(), - name: "system_program", entrypoint: solana_system_program::system_processor::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: vote_program, enable_feature_id: None, program_id: solana_vote_program::id(), - name: "vote_program", entrypoint: solana_vote_program::vote_processor::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: stake_program, enable_feature_id: None, program_id: solana_stake_program::id(), - name: "stake_program", entrypoint: solana_stake_program::stake_instruction::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: config_program, enable_feature_id: None, program_id: solana_config_program::id(), - name: "config_program", entrypoint: solana_config_program::config_processor::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: solana_bpf_loader_deprecated_program, enable_feature_id: None, program_id: bpf_loader_deprecated::id(), - name: "solana_bpf_loader_deprecated_program", entrypoint: solana_bpf_loader_program::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: solana_bpf_loader_program, enable_feature_id: None, program_id: bpf_loader::id(), - name: "solana_bpf_loader_program", entrypoint: solana_bpf_loader_program::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: solana_bpf_loader_upgradeable_program, enable_feature_id: None, program_id: bpf_loader_upgradeable::id(), - name: "solana_bpf_loader_upgradeable_program", entrypoint: solana_bpf_loader_program::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: compute_budget_program, enable_feature_id: None, program_id: solana_sdk::compute_budget::id(), - name: "compute_budget_program", entrypoint: solana_compute_budget_program::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: address_lookup_table_program, enable_feature_id: None, program_id: solana_sdk::address_lookup_table::program::id(), - name: "address_lookup_table_program", entrypoint: solana_address_lookup_table_program::processor::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: zk_token_proof_program, enable_feature_id: Some(feature_set::zk_token_sdk_enabled::id()), program_id: solana_zk_token_sdk::zk_token_proof_program::id(), - name: "zk_token_proof_program", entrypoint: solana_zk_token_proof_program::Entrypoint::vm, - }, - BuiltinPrototype { + }), + testable_prototype!(BuiltinPrototype { + core_bpf_migration_config: None, + name: loader_v4, enable_feature_id: 
Some(feature_set::enable_program_runtime_v2_and_loader_v4::id()), program_id: solana_sdk::loader_v4::id(), - name: "loader_v4", entrypoint: solana_loader_v4_program::Entrypoint::vm, - }, + }), ]; -pub static STATELESS_BUILTINS: &[StatelessBuiltinPrototype] = &[StatelessBuiltinPrototype { - program_id: solana_sdk::feature::id(), - name: "feature_gate_program", -}]; +pub static STATELESS_BUILTINS: &[StatelessBuiltinPrototype] = + &[testable_prototype!(StatelessBuiltinPrototype { + core_bpf_migration_config: None, + name: feature_gate_program, + program_id: solana_sdk::feature::id(), + })]; + +// This module contains a number of arbitrary addresses used for testing Core +// BPF migrations. +// Since the list of builtins is static, using `declare_id!` with constant +// values is arguably the least-overhead approach to injecting static addresses +// into the builtins list for both the feature ID and the source program ID. +// These arbitrary IDs can then be used to configure feature-activation runtime +// tests. +#[cfg(test)] +mod test_only { + use super::core_bpf_migration::{CoreBpfMigrationConfig, CoreBpfMigrationTargetType}; + pub mod system_program { + pub mod feature { + solana_sdk::declare_id!("AnjsdWg7LXFbjDdy78wncCJs9PyTdWpKkFmHAwQU1mQ6"); + } + pub mod source_program { + solana_sdk::declare_id!("EDEhzg1Jk79Wrk4mwpRa7txjgRxcE6igXwd6egFDVhuz"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_system_program", + }; + } + + pub mod vote_program { + pub mod feature { + solana_sdk::declare_id!("5wDLHMasPmtrcpfRZX67RVkBXBbSTQ9S4C8EJomD3yAk"); + } + pub mod source_program { + solana_sdk::declare_id!("6T9s4PTcHnpq2AVAqoCbJd4FuHsdD99MjSUEbS7qb1tT"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_vote_program", + }; + } + + pub mod stake_program { + pub mod feature { + solana_sdk::declare_id!("5gp5YKtNEirX45igBvp39bN6nEwhkNMRS7m2c63D1xPM"); + } + pub mod source_program { + solana_sdk::declare_id!("2a3XnUr4Xfxd8hBST8wd4D3Qbiu339XKessYsDwabCED"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_stake_program", + }; + } + + pub mod config_program { + pub mod feature { + solana_sdk::declare_id!("8Jve2vaMFhEgQ5JeB5HxekLx7hyjfRN2jLFawACqekNf"); + } + pub mod source_program { + solana_sdk::declare_id!("73ALcNtVqyM3q7XsvB2xkVECvggu4CcLX5J2XKmpjdBU"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_config_program", + }; + } + + pub mod solana_bpf_loader_deprecated_program { + pub mod feature { + solana_sdk::declare_id!("8gpakCv5Pk5PZGv9RUjzdkk2GVQPGx12cNRUDMQ3bP86"); + } + pub mod source_program { + solana_sdk::declare_id!("DveUYB5m9G3ce4zpV3fxg9pCNkvH1wDsyd8XberZ47JL"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = 
super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_bpf_loader_deprecated_program", + }; + } + + pub mod solana_bpf_loader_program { + pub mod feature { + solana_sdk::declare_id!("8yEdUm4SaP1yNq2MczEVdrM48SucvZCTDSqjcAKfYfL6"); + } + pub mod source_program { + solana_sdk::declare_id!("2EWMYGJPuGLW4TexLLEMeXP2BkB1PXEKBFb698yw6LhT"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_bpf_loader_program", + }; + } + + pub mod solana_bpf_loader_upgradeable_program { + pub mod feature { + solana_sdk::declare_id!("oPQbVjgoQ7SaQmzZiiHW4xqHbh4BJqqrFhxEJZiMiwY"); + } + pub mod source_program { + solana_sdk::declare_id!("6bTmA9iefD57GDoQ9wUjG8SeYkSpRw3EkKzxZCbhkavq"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_bpf_loader_upgradeable_program", + }; + } + + pub mod compute_budget_program { + pub mod feature { + solana_sdk::declare_id!("D39vUspVfhjPVD7EtMJZrA5j1TSMp4LXfb43nxumGdHT"); + } + pub mod source_program { + solana_sdk::declare_id!("KfX1oLpFC5CwmFeSgXrNcXaouKjFkPuSJ4UsKb3zKMX"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_compute_budget_program", + }; + } + + pub mod address_lookup_table_program { + pub mod feature { + solana_sdk::declare_id!("5G9xu4TnRShZpEhWyjAW2FnRNCwF85g5XKzSbQy4XpCq"); + } + pub mod source_program { + solana_sdk::declare_id!("DQshE9LTac8eXjZTi8ApeuZJYH67UxTMUxaEGstC6mqJ"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_address_lookup_table_program", + }; + } + + pub mod zk_token_proof_program { + pub mod feature { + solana_sdk::declare_id!("GfeFwUzKP9NmaP5u4VfnFgEvQoeQc2wPgnBFrUZhpib5"); + } + pub mod source_program { + solana_sdk::declare_id!("Ffe9gL8vXraBkiv3HqbLvBqY7i9V4qtZxjH83jYYDe1V"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_zk_token_proof_program", + }; + } + + pub mod loader_v4 { + pub mod feature { + solana_sdk::declare_id!("Cz5JthYp27KR3rwTCtVJhbRgwHCurbwcYX46D8setL22"); + } + pub mod source_program { + solana_sdk::declare_id!("EH45pKy1kzjifB93wEJi91js3S4HETdsteywR7ZCNPn5"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Builtin, + datapoint_name: "migrate_builtin_to_core_bpf_loader_v4_program", + }; + } + + pub mod 
feature_gate_program { + pub mod feature { + solana_sdk::declare_id!("8ZYAiJVVEon55wqGt2wZi5oVjxwK4cBUUfwMwHFvkqpB"); + } + pub mod source_program { + solana_sdk::declare_id!("CWioXdq2ctv8Z4XdhmzJzgpU5i97ZHZZJVSJUmndV3mk"); + } + pub const CONFIG: super::CoreBpfMigrationConfig = super::CoreBpfMigrationConfig { + source_program_id: source_program::id(), + feature_id: feature::id(), + migration_target: super::CoreBpfMigrationTargetType::Stateless, + datapoint_name: "migrate_stateless_to_core_bpf_feature_gate_program", + }; + } +} + +#[cfg(test)] +mod tests { + // Since a macro is used to initialize the test IDs from the `test_only` + // module, best to ensure the lists have the expected values within a test + // context. + #[test] + fn test_testable_prototypes() { + assert_eq!( + &super::BUILTINS[0].core_bpf_migration_config, + &Some(super::test_only::system_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[1].core_bpf_migration_config, + &Some(super::test_only::vote_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[2].core_bpf_migration_config, + &Some(super::test_only::stake_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[3].core_bpf_migration_config, + &Some(super::test_only::config_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[4].core_bpf_migration_config, + &Some(super::test_only::solana_bpf_loader_deprecated_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[5].core_bpf_migration_config, + &Some(super::test_only::solana_bpf_loader_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[6].core_bpf_migration_config, + &Some(super::test_only::solana_bpf_loader_upgradeable_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[7].core_bpf_migration_config, + &Some(super::test_only::compute_budget_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[8].core_bpf_migration_config, + &Some(super::test_only::address_lookup_table_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[9].core_bpf_migration_config, + &Some(super::test_only::zk_token_proof_program::CONFIG) + ); + assert_eq!( + &super::BUILTINS[10].core_bpf_migration_config, + &Some(super::test_only::loader_v4::CONFIG) + ); + assert_eq!( + &super::STATELESS_BUILTINS[0].core_bpf_migration_config, + &Some(super::test_only::feature_gate_program::CONFIG) + ); + } +} diff --git a/runtime/src/bank/builtins/prototypes.rs b/runtime/src/bank/builtins/prototypes.rs index 5d9ea505152dda..1915c657467383 100644 --- a/runtime/src/bank/builtins/prototypes.rs +++ b/runtime/src/bank/builtins/prototypes.rs @@ -1,9 +1,11 @@ use { + super::core_bpf_migration::CoreBpfMigrationConfig, solana_program_runtime::invoke_context::BuiltinFunctionWithContext, solana_sdk::pubkey::Pubkey, }; /// Transitions of built-in programs at epoch boundaries when features are activated. 
pub struct BuiltinPrototype { + pub(crate) core_bpf_migration_config: Option, pub enable_feature_id: Option, pub program_id: Pubkey, pub name: &'static str, @@ -16,6 +18,7 @@ impl std::fmt::Debug for BuiltinPrototype { builder.field("program_id", &self.program_id); builder.field("name", &self.name); builder.field("enable_feature_id", &self.enable_feature_id); + builder.field("core_bpf_migration_config", &self.core_bpf_migration_config); builder.finish() } } @@ -29,6 +32,7 @@ impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { Ok(()) }); Self { + core_bpf_migration_config: None, enable_feature_id: None, program_id: Pubkey::default(), name: "", @@ -41,8 +45,10 @@ impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { /// features are activated. /// These are built-in programs that don't actually exist, but their address /// is reserved. +#[allow(dead_code)] // Removed in later commit #[derive(Debug)] pub struct StatelessBuiltinPrototype { + pub(crate) core_bpf_migration_config: Option, pub program_id: Pubkey, pub name: &'static str, } From 855a0c1a92bd1a495ddff9dcb213473eaba2b980 Mon Sep 17 00:00:00 2001 From: abcalphabet Date: Thu, 4 Apr 2024 20:47:07 -0300 Subject: [PATCH 18/19] ElGamal: add From impls; deprecate from/to_bytes (#246) * ElGamal: add From impls; deprecate from/to_bytes * variable names --- zk-token-sdk/src/encryption/elgamal.rs | 128 ++++++++++++++++-- .../batched_grouped_ciphertext_validity.rs | 4 +- .../ciphertext_ciphertext_equality.rs | 4 +- .../ciphertext_commitment_equality.rs | 2 +- .../grouped_ciphertext_validity.rs | 4 +- .../src/instruction/pubkey_validity.rs | 2 +- zk-token-sdk/src/instruction/withdraw.rs | 2 +- zk-token-sdk/src/instruction/zero_balance.rs | 2 +- .../ciphertext_commitment_equality_proof.rs | 2 +- .../grouped_ciphertext_validity_proof.rs | 2 +- .../src/sigma_proofs/zero_balance_proof.rs | 2 +- .../src/zk_token_elgamal/pod/elgamal.rs | 4 +- 12 files changed, 130 insertions(+), 28 deletions(-) diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index 0bc9eb0511b9ea..5e94904e482f33 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -68,7 +68,7 @@ const ELGAMAL_PUBKEY_LEN: usize = RISTRETTO_POINT_LEN; const ELGAMAL_SECRET_KEY_LEN: usize = SCALAR_LEN; /// Byte length of an ElGamal keypair -const ELGAMAL_KEYPAIR_LEN: usize = ELGAMAL_PUBKEY_LEN + ELGAMAL_SECRET_KEY_LEN; +pub const ELGAMAL_KEYPAIR_LEN: usize = ELGAMAL_PUBKEY_LEN + ELGAMAL_SECRET_KEY_LEN; #[derive(Error, Clone, Debug, Eq, PartialEq)] pub enum ElGamalError { @@ -82,6 +82,10 @@ pub enum ElGamalError { CiphertextDeserialization, #[error("failed to deserialize public key")] PubkeyDeserialization, + #[error("failed to deserialize keypair")] + KeypairDeserialization, + #[error("failed to deserialize secret key")] + SecretKeyDeserialization, } /// Algorithm handle for the twisted ElGamal encryption scheme @@ -235,6 +239,8 @@ impl ElGamalKeypair { &self.secret } + #[deprecated(note = "please use `into()` instead")] + #[allow(deprecated)] pub fn to_bytes(&self) -> [u8; ELGAMAL_KEYPAIR_LEN] { let mut bytes = [0u8; ELGAMAL_KEYPAIR_LEN]; bytes[..ELGAMAL_PUBKEY_LEN].copy_from_slice(&self.public.to_bytes()); @@ -242,6 +248,8 @@ impl ElGamalKeypair { bytes } + #[deprecated(note = "please use `try_from()` instead")] + #[allow(deprecated)] pub fn from_bytes(bytes: &[u8]) -> Option { if bytes.len() != ELGAMAL_KEYPAIR_LEN { return None; @@ -256,7 +264,7 @@ impl ElGamalKeypair { /// 
Reads a JSON-encoded keypair from a `Reader` implementor pub fn read_json(reader: &mut R) -> Result> { let bytes: Vec = serde_json::from_reader(reader)?; - Self::from_bytes(&bytes).ok_or_else(|| { + Self::try_from(bytes.as_slice()).ok().ok_or_else(|| { std::io::Error::new(std::io::ErrorKind::Other, "Invalid ElGamalKeypair").into() }) } @@ -268,8 +276,8 @@ impl ElGamalKeypair { /// Writes to a `Write` implementer with JSON-encoding pub fn write_json(&self, writer: &mut W) -> Result> { - let bytes = self.to_bytes(); - let json = serde_json::to_string(&bytes.to_vec())?; + let json = + serde_json::to_string(&Into::<[u8; ELGAMAL_KEYPAIR_LEN]>::into(self).as_slice())?; writer.write_all(&json.clone().into_bytes())?; Ok(json) } @@ -293,6 +301,40 @@ impl EncodableKey for ElGamalKeypair { } } +impl TryFrom<&[u8]> for ElGamalKeypair { + type Error = ElGamalError; + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() != ELGAMAL_KEYPAIR_LEN { + return Err(ElGamalError::KeypairDeserialization); + } + + Ok(Self { + public: ElGamalPubkey::try_from(&bytes[..ELGAMAL_PUBKEY_LEN])?, + secret: ElGamalSecretKey::try_from(&bytes[ELGAMAL_PUBKEY_LEN..])?, + }) + } +} + +impl From for [u8; ELGAMAL_KEYPAIR_LEN] { + fn from(keypair: ElGamalKeypair) -> Self { + let mut bytes = [0u8; ELGAMAL_KEYPAIR_LEN]; + bytes[..ELGAMAL_PUBKEY_LEN] + .copy_from_slice(&Into::<[u8; ELGAMAL_PUBKEY_LEN]>::into(keypair.public)); + bytes[ELGAMAL_PUBKEY_LEN..].copy_from_slice(keypair.secret.as_bytes()); + bytes + } +} + +impl From<&ElGamalKeypair> for [u8; ELGAMAL_KEYPAIR_LEN] { + fn from(keypair: &ElGamalKeypair) -> Self { + let mut bytes = [0u8; ELGAMAL_KEYPAIR_LEN]; + bytes[..ELGAMAL_PUBKEY_LEN] + .copy_from_slice(&Into::<[u8; ELGAMAL_PUBKEY_LEN]>::into(keypair.public)); + bytes[ELGAMAL_PUBKEY_LEN..].copy_from_slice(keypair.secret.as_bytes()); + bytes + } +} + impl SeedDerivable for ElGamalKeypair { fn from_seed(seed: &[u8]) -> Result> { let secret = ElGamalSecretKey::from_seed(seed)?; @@ -343,10 +385,12 @@ impl ElGamalPubkey { &self.0 } + #[deprecated(note = "please use `into()` instead")] pub fn to_bytes(&self) -> [u8; ELGAMAL_PUBKEY_LEN] { self.0.compress().to_bytes() } + #[deprecated(note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option { if bytes.len() != ELGAMAL_PUBKEY_LEN { return None; @@ -384,13 +428,13 @@ impl ElGamalPubkey { impl EncodableKey for ElGamalPubkey { fn read(reader: &mut R) -> Result> { let bytes: Vec = serde_json::from_reader(reader)?; - Self::from_bytes(&bytes).ok_or_else(|| { + Self::try_from(bytes.as_slice()).ok().ok_or_else(|| { std::io::Error::new(std::io::ErrorKind::Other, "Invalid ElGamalPubkey").into() }) } fn write(&self, writer: &mut W) -> Result> { - let bytes = self.to_bytes(); + let bytes = Into::<[u8; ELGAMAL_PUBKEY_LEN]>::into(*self); let json = serde_json::to_string(&bytes.to_vec())?; writer.write_all(&json.clone().into_bytes())?; Ok(json) @@ -399,7 +443,38 @@ impl EncodableKey for ElGamalPubkey { impl fmt::Display for ElGamalPubkey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", BASE64_STANDARD.encode(self.to_bytes())) + write!( + f, + "{}", + BASE64_STANDARD.encode(Into::<[u8; ELGAMAL_PUBKEY_LEN]>::into(*self)) + ) + } +} + +impl TryFrom<&[u8]> for ElGamalPubkey { + type Error = ElGamalError; + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() != ELGAMAL_PUBKEY_LEN { + return Err(ElGamalError::PubkeyDeserialization); + } + + Ok(ElGamalPubkey( + CompressedRistretto::from_slice(bytes) + .decompress() + 
.ok_or(ElGamalError::PubkeyDeserialization)?, + )) + } +} + +impl From for [u8; ELGAMAL_PUBKEY_LEN] { + fn from(pubkey: ElGamalPubkey) -> Self { + pubkey.0.compress().to_bytes() + } +} + +impl From<&ElGamalPubkey> for [u8; ELGAMAL_PUBKEY_LEN] { + fn from(pubkey: &ElGamalPubkey) -> Self { + pubkey.0.compress().to_bytes() } } @@ -487,10 +562,12 @@ impl ElGamalSecretKey { self.0.as_bytes() } + #[deprecated(note = "please use `into()` instead")] pub fn to_bytes(&self) -> [u8; ELGAMAL_SECRET_KEY_LEN] { self.0.to_bytes() } + #[deprecated(note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option { match bytes.try_into() { Ok(bytes) => Scalar::from_canonical_bytes(bytes).map(ElGamalSecretKey), @@ -502,13 +579,13 @@ impl ElGamalSecretKey { impl EncodableKey for ElGamalSecretKey { fn read(reader: &mut R) -> Result> { let bytes: Vec = serde_json::from_reader(reader)?; - Self::from_bytes(&bytes).ok_or_else(|| { + Self::try_from(bytes.as_slice()).ok().ok_or_else(|| { std::io::Error::new(std::io::ErrorKind::Other, "Invalid ElGamalSecretKey").into() }) } fn write(&self, writer: &mut W) -> Result> { - let bytes = self.to_bytes(); + let bytes = Into::<[u8; ELGAMAL_SECRET_KEY_LEN]>::into(self); let json = serde_json::to_string(&bytes.to_vec())?; writer.write_all(&json.clone().into_bytes())?; Ok(json) @@ -546,6 +623,31 @@ impl From for ElGamalSecretKey { } } +impl TryFrom<&[u8]> for ElGamalSecretKey { + type Error = ElGamalError; + fn try_from(bytes: &[u8]) -> Result { + match bytes.try_into() { + Ok(bytes) => Ok(ElGamalSecretKey::from( + Scalar::from_canonical_bytes(bytes) + .ok_or(ElGamalError::SecretKeyDeserialization)?, + )), + _ => Err(ElGamalError::SecretKeyDeserialization), + } + } +} + +impl From for [u8; ELGAMAL_SECRET_KEY_LEN] { + fn from(secret_key: ElGamalSecretKey) -> Self { + secret_key.0.to_bytes() + } +} + +impl From<&ElGamalSecretKey> for [u8; ELGAMAL_SECRET_KEY_LEN] { + fn from(secret_key: &ElGamalSecretKey) -> Self { + secret_key.0.to_bytes() + } +} + impl Eq for ElGamalSecretKey {} impl PartialEq for ElGamalSecretKey { fn eq(&self, other: &Self) -> bool { @@ -954,10 +1056,10 @@ mod tests { assert!(Path::new(&outfile).exists()); assert_eq!( keypair_vec, - ElGamalKeypair::read_json_file(&outfile) - .unwrap() - .to_bytes() - .to_vec() + Into::<[u8; ELGAMAL_KEYPAIR_LEN]>::into( + ElGamalKeypair::read_json_file(&outfile).unwrap() + ) + .to_vec() ); #[cfg(unix)] diff --git a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs index 10a9d790804e30..0be760691f3ee6 100644 --- a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs +++ b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs @@ -70,8 +70,8 @@ impl BatchedGroupedCiphertext2HandlesValidityProofData { opening_lo: &PedersenOpening, opening_hi: &PedersenOpening, ) -> Result { - let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.to_bytes()); - let pod_auditor_pubkey = pod::ElGamalPubkey(auditor_pubkey.to_bytes()); + let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.into()); + let pod_auditor_pubkey = pod::ElGamalPubkey(auditor_pubkey.into()); let pod_grouped_ciphertext_lo = (*grouped_ciphertext_lo).into(); let pod_grouped_ciphertext_hi = (*grouped_ciphertext_hi).into(); diff --git a/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs b/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs index 5d5fa7e54f04c8..aa31db0a432f4e 100644 --- 
a/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs +++ b/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs @@ -66,8 +66,8 @@ impl CiphertextCiphertextEqualityProofData { destination_opening: &PedersenOpening, amount: u64, ) -> Result { - let pod_source_pubkey = pod::ElGamalPubkey(source_keypair.pubkey().to_bytes()); - let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.to_bytes()); + let pod_source_pubkey = pod::ElGamalPubkey(source_keypair.pubkey().into()); + let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.into()); let pod_source_ciphertext = pod::ElGamalCiphertext(source_ciphertext.to_bytes()); let pod_destination_ciphertext = pod::ElGamalCiphertext(destination_ciphertext.to_bytes()); diff --git a/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs b/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs index 4f4f49fab57c68..5cd2b3cbc5c670 100644 --- a/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs +++ b/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs @@ -62,7 +62,7 @@ impl CiphertextCommitmentEqualityProofData { amount: u64, ) -> Result { let context = CiphertextCommitmentEqualityProofContext { - pubkey: pod::ElGamalPubkey(keypair.pubkey().to_bytes()), + pubkey: pod::ElGamalPubkey(keypair.pubkey().into()), ciphertext: pod::ElGamalCiphertext(ciphertext.to_bytes()), commitment: pod::PedersenCommitment(commitment.to_bytes()), }; diff --git a/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs b/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs index bb13a0805ad670..a99a733c748928 100644 --- a/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs +++ b/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs @@ -63,8 +63,8 @@ impl GroupedCiphertext2HandlesValidityProofData { amount: u64, opening: &PedersenOpening, ) -> Result { - let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.to_bytes()); - let pod_auditor_pubkey = pod::ElGamalPubkey(auditor_pubkey.to_bytes()); + let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.into()); + let pod_auditor_pubkey = pod::ElGamalPubkey(auditor_pubkey.into()); let pod_grouped_ciphertext = (*grouped_ciphertext).into(); let context = GroupedCiphertext2HandlesValidityProofContext { diff --git a/zk-token-sdk/src/instruction/pubkey_validity.rs b/zk-token-sdk/src/instruction/pubkey_validity.rs index b0251fbf5631c5..3c264f0cdd31d3 100644 --- a/zk-token-sdk/src/instruction/pubkey_validity.rs +++ b/zk-token-sdk/src/instruction/pubkey_validity.rs @@ -50,7 +50,7 @@ pub struct PubkeyValidityProofContext { #[cfg(not(target_os = "solana"))] impl PubkeyValidityData { pub fn new(keypair: &ElGamalKeypair) -> Result { - let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().into()); let context = PubkeyValidityProofContext { pubkey: pod_pubkey }; diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 3dee9ffc6106f4..530b6de0d75532 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -69,7 +69,7 @@ impl WithdrawData { // current source balance let final_ciphertext = current_ciphertext - &ElGamal::encode(amount); - let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().into()); let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into(); let context = 
WithdrawProofContext { diff --git a/zk-token-sdk/src/instruction/zero_balance.rs b/zk-token-sdk/src/instruction/zero_balance.rs index d5a51bb3aa8ea0..7d52b80063176e 100644 --- a/zk-token-sdk/src/instruction/zero_balance.rs +++ b/zk-token-sdk/src/instruction/zero_balance.rs @@ -54,7 +54,7 @@ impl ZeroBalanceProofData { keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, ) -> Result { - let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().into()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); let context = ZeroBalanceProofContext { diff --git a/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs index 6451d46d1916f2..768b07b216cdbe 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs @@ -309,7 +309,7 @@ mod test { #[test] fn test_ciphertext_commitment_equality_proof_edge_cases() { // if ElGamal public key zero (public key is invalid), then the proof should always reject - let public = ElGamalPubkey::from_bytes(&[0u8; 32]).unwrap(); + let public = ElGamalPubkey::try_from([0u8; 32].as_slice()).unwrap(); let secret = ElGamalSecretKey::new_rand(); let elgamal_keypair = ElGamalKeypair::new_for_tests(public, secret); diff --git a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs index 9f1df14c316ef1..1c1a57997e4740 100644 --- a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs @@ -274,7 +274,7 @@ mod test { #[test] fn test_grouped_ciphertext_validity_proof_edge_cases() { // if destination public key zeroed, then the proof should always reject - let destination_pubkey = ElGamalPubkey::from_bytes(&[0u8; 32]).unwrap(); + let destination_pubkey = ElGamalPubkey::try_from([0u8; 32].as_slice()).unwrap(); let auditor_keypair = ElGamalKeypair::new_rand(); let auditor_pubkey = auditor_keypair.pubkey(); diff --git a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs index cab8759f4f2a87..3585978c76c1df 100644 --- a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs @@ -286,7 +286,7 @@ mod test { let mut prover_transcript = Transcript::new(b"test"); let mut verifier_transcript = Transcript::new(b"test"); - let public = ElGamalPubkey::from_bytes(&[0u8; 32]).unwrap(); + let public = ElGamalPubkey::try_from([0u8; 32].as_slice()).unwrap(); let ciphertext = public.encrypt(0_u64); let proof = ZeroBalanceProof::new(&source_keypair, &ciphertext, &mut prover_transcript); diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs b/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs index 251729c0856da2..2123d716a4675a 100644 --- a/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs +++ b/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs @@ -100,7 +100,7 @@ impl_from_str!( #[cfg(not(target_os = "solana"))] impl From for ElGamalPubkey { fn from(decoded_pubkey: decoded::ElGamalPubkey) -> Self { - Self(decoded_pubkey.to_bytes()) + Self(decoded_pubkey.into()) } } @@ -109,7 +109,7 @@ impl TryFrom for decoded::ElGamalPubkey { type Error = ElGamalError; fn try_from(pod_pubkey: ElGamalPubkey) -> Result { - 
Self::from_bytes(&pod_pubkey.0).ok_or(ElGamalError::PubkeyDeserialization) + Self::try_from(pod_pubkey.0.as_slice()) } } From 0b9c6379b35fa4eb45184a4f9b96fc8c38e8aaa5 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 5 Apr 2024 08:58:33 +0900 Subject: [PATCH 19/19] Introduce SchedulingStateMachine for unified scheduler (#129) * Introduce SchedulingStateMachine * Apply all typo fixes from code review Co-authored-by: Andrew Fitzgerald * Update word wrapping * Clarify Token::assume_exclusive_mutating_thread() * Use slice instead of &Vec<_> * Improve non-const explanation * Document consecutive readonly rescheduling opt. * Make test_gradual_locking terminate for miri * Avoid unnecessary Task::clone() * Rename: lock_{status,result} and no attempt_...() * Add safety comment for get_account_locks_unchecked * Reduce and comment about Page::blocked_tasks cap. * Document SchedulingStateMachine::schedule_task() * Add justification of closure in create_task * Use the From trait for PageUsage * Replace unneeded if-let with .expect() * Add helpful comments for peculiar crossbeam usage * Fix typo * Make bug-bounty-exempt statement more clear * Add test_enfoced_get_account_locks_verification * Fix typos... * Big rename: Page => UsageQueue * Document UsageQueueLoader * Various minor cleanings for beautifier diff * Ensure reinitialize() is maintained for new fields * Remove uneeded impl Send for TokenCell & doc upd. * Apply typo fixes from code review Co-authored-by: Andrew Fitzgerald * Merge similar tests into one * Remove test_debug * Remove assertions of task_index() * Fix UB in TokenCell * Make schedule_task doc comment simpler * Document deschedule_task * Simplify unlock_usage_queue() args * Add comment for try_unblock() -> None * Switch to Option for fewer assert!s * Add assert_matches!() to UsageQueue methods * Add panicking test case for ::reinitialize() * Use UsageFromTask * Rename: LockAttempt => LockContext * Move locking and unlocking methods to usage queue * Remove outdated comment... * Remove redundant fn: pop_unblocked_usage_from_task * Document the index of task * Clarifty comment a bit * Update .current_usage inside try_lock() * Use inspect_err to simplify code * fix ci... * Use ()... 
* Rename: schedule{,_next}_unblocked_task() * Rename: {try_lock,unlock}_{for_task,usage_queues} * Test solana-unified-scheduler-logic under miri * Test UB to illustrate limitation of TokenCell * Test UB of using multiple tokens at the same time --------- Co-authored-by: Andrew Fitzgerald --- Cargo.lock | 3 + ci/test-miri.sh | 11 +- programs/sbf/Cargo.lock | 3 + runtime/src/bank.rs | 3 + unified-scheduler-logic/Cargo.toml | 2 + unified-scheduler-logic/src/lib.rs | 1394 +++++++++++++++++++++++++++- unified-scheduler-pool/Cargo.toml | 1 + unified-scheduler-pool/src/lib.rs | 240 +++-- 8 files changed, 1583 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 383740e77e7592..1ba69acd0787f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7552,7 +7552,9 @@ dependencies = [ name = "solana-unified-scheduler-logic" version = "2.0.0" dependencies = [ + "assert_matches", "solana-sdk", + "static_assertions", ] [[package]] @@ -7561,6 +7563,7 @@ version = "2.0.0" dependencies = [ "assert_matches", "crossbeam-channel", + "dashmap", "derivative", "log", "solana-ledger", diff --git a/ci/test-miri.sh b/ci/test-miri.sh index 407d48c34106a2..3163ba500e2290 100755 --- a/ci/test-miri.sh +++ b/ci/test-miri.sh @@ -2,7 +2,16 @@ set -eo pipefail +source ci/_ source ci/rust-version.sh nightly # miri is very slow; so only run very few of selective tests! -cargo "+${rust_nightly}" miri test -p solana-program -- hash:: account_info:: +_ cargo "+${rust_nightly}" miri test -p solana-program -- hash:: account_info:: + +_ cargo "+${rust_nightly}" miri test -p solana-unified-scheduler-logic + +# run intentionally-#[ignored] ub triggering tests for each to make sure they fail +(! _ cargo "+${rust_nightly}" miri test -p solana-unified-scheduler-logic -- \ + --ignored --exact "utils::tests::test_ub_illegally_created_multiple_tokens") +(! _ cargo "+${rust_nightly}" miri test -p solana-unified-scheduler-logic -- \ + --ignored --exact "utils::tests::test_ub_illegally_shared_token_cell") diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f6a62d9333b62f..f3279c33612893 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6527,7 +6527,9 @@ dependencies = [ name = "solana-unified-scheduler-logic" version = "2.0.0" dependencies = [ + "assert_matches", "solana-sdk", + "static_assertions", ] [[package]] @@ -6536,6 +6538,7 @@ version = "2.0.0" dependencies = [ "assert_matches", "crossbeam-channel", + "dashmap", "derivative", "log", "solana-ledger", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8da6210d5e740b..1571ff11e154c1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3562,6 +3562,9 @@ impl Bank { transaction: &'a SanitizedTransaction, ) -> TransactionBatch<'_, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + // Note that switching this to .get_account_locks_unchecked() is unacceptable currently. + // The unified scheduler relies on the checks enforced here. + // See a comment in SchedulingStateMachine::create_task(). 
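            // (Among other things, `get_account_locks()` fails on duplicate account keys
            // and on exceeding the account lock limit, which is what the scheduling logic
            // relies on having already been validated for every task it receives.)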
        let lock_result = transaction
            .get_account_locks(tx_account_lock_limit)
            .map(|_| ());
diff --git a/unified-scheduler-logic/Cargo.toml b/unified-scheduler-logic/Cargo.toml
index b2e80c79c7a08f..b05cec41a7c862 100644
--- a/unified-scheduler-logic/Cargo.toml
+++ b/unified-scheduler-logic/Cargo.toml
@@ -10,4 +10,6 @@ license = { workspace = true }
 edition = { workspace = true }
 [dependencies]
+assert_matches = { workspace = true }
 solana-sdk = { workspace = true }
+static_assertions = { workspace = true }
diff --git a/unified-scheduler-logic/src/lib.rs b/unified-scheduler-logic/src/lib.rs
index 997c6c1745a7c9..2bae4f603582f5 100644
--- a/unified-scheduler-logic/src/lib.rs
+++ b/unified-scheduler-logic/src/lib.rs
@@ -1,15 +1,428 @@
-use solana_sdk::transaction::SanitizedTransaction;
+#![allow(rustdoc::private_intra_doc_links)]
+//! The task (transaction) scheduling code for the unified scheduler
+//!
+//! ### High-level API and design
+//!
+//! The most important type is [`SchedulingStateMachine`]. It takes new tasks (= transactions) and
+//! may return them back if runnable via
+//! [`::schedule_task()`](SchedulingStateMachine::schedule_task) while maintaining the account
+//! readonly/writable lock rules. Those returned runnable tasks are guaranteed to be safe to
+//! execute in parallel. Lastly, `SchedulingStateMachine` should be notified about the completion
+//! of the execution via [`::deschedule_task()`](SchedulingStateMachine::deschedule_task), so that
+//! conflicting tasks can be returned from
+//! [`::schedule_next_unblocked_task()`](SchedulingStateMachine::schedule_next_unblocked_task) as
+//! newly-unblocked runnable ones.
+//!
+//! The design principle of this crate (`solana-unified-scheduler-logic`) is simplicity for the
+//! separation of concern. It is interacted with by `solana-unified-scheduler-pool` only through a
+//! few of its public API methods. This crate doesn't know about banks, slots, solana-runtime,
+//! threads, or crossbeam-channel at all. Because of this, it's deterministic, easy to unit-test,
+//! and its perf footprint is well understood. It really focuses on its single job: sorting
+//! transactions in executable order.
+//!
+//! ### Algorithm
+//!
+//! The algorithm can be said to be based on per-address FIFO queues, which are updated both every
+//! time a new task comes in (= called _scheduling_) and every time a runnable (= _post-scheduling_)
+//! task is finished (= called _descheduling_).
+//!
+//! For the _non-conflicting scheduling_ case, the story is very simple; it just remembers that all
+//! of the accessed addresses are write-locked or read-locked, along with the number of active (=
+//! _currently-scheduled-and-not-descheduled-yet_) tasks. Correspondingly, descheduling does the
+//! opposite book-keeping process, regardless of whether a finished task has been conflicted or not.
+//!
+//! For the _conflicting scheduling_ case, it handles each of the **non-conflicting addresses**
+//! like the non-conflicting case above. As for **conflicting addresses**, each task is recorded in
+//! the respective FIFO queues attached to the (conflicting) addresses. Importantly, the number of
+//! conflicting addresses of the conflicting task is also remembered.
+//!
+//! The last missing piece is that the scheduler actually tries to reschedule previously blocked
+//! tasks while descheduling, in addition to the above-mentioned book-keeping processing. Namely,
+//! when a given address is ready for fresh locking as a result of descheduling a task (i.e. the write
lock is released or the read lock count has reached zero), it pops out the first element of the
+//! FIFO blocked-task queue of the address. Then, it immediately marks the address as relocked. It
+//! also decrements the number of conflicting addresses of the popped-out task. As the final step,
+//! if the number reaches zero, it means the task has fully finished locking all of its
+//! addresses and is directly routed to be runnable. Lastly, if the new first element of the
+//! blocked-task queue is trying to read-lock the address like the popped-out one, this
+//! rescheduling is repeated as an optimization to increase parallelism of task execution.
+//!
+//! Put differently, this algorithm tries to gradually lock all of the addresses of tasks at
+//! different timings while not deviating the execution order from the original task ingestion
+//! order. This implies there are no locking retries in general, which are the primary source of
+//! non-linear perf degradation.
+//!
+//! As a ballpark number from a synthesized micro benchmark on a typical CPU for `mainnet-beta`
+//! validators, it takes roughly 100ns to schedule and deschedule a transaction with 10 accounts,
+//! and 1us for a transaction with 100 accounts. Note that this entirely excludes crossbeam
+//! communication overhead. That said, it's not unrealistic to say the whole unified scheduler can
+//! attain 100k-1m tps overall, assuming those transaction executions aren't bottlenecked.
+//!
+//! ### Runtime performance characteristics and data structure arrangement
+//!
+//! Its algorithm is very fast for high throughput, real-time for low latency. The whole
+//! unified-scheduler architecture is designed from the ground up to support the fastest execution
+//! of this scheduling code. To that end, the unified scheduler pre-loads address-specific locking
+//! state data structures (called [`UsageQueue`]) for all of a transaction's accounts, in order to
+//! offload the job to other threads from the scheduler thread. This preloading is done inside
+//! [`create_task()`](SchedulingStateMachine::create_task). In this way, task scheduling
+//! computational complexity is basically reduced to several word-sized loads and stores in the
+//! scheduler thread (i.e. constant; no allocations nor syscalls), while being proportional to the
+//! number of addresses in a given transaction. Note that this statement holds true regardless
+//! of conflicts. This is because the preloading also pre-allocates some scratch-pad area
+//! ([`blocked_usages_from_tasks`](UsageQueueInner::blocked_usages_from_tasks)) to stash blocked
+//! ones. So, a conflict only incurs some additional fixed number of mem stores, within the error
+//! margin of the constant complexity. And the additional memory allocation for the scratchpad
+//! could be said to be amortized, if such an unusual event should occur.
+//!
+//! [`Arc`] is used to implement this preloading mechanism, because `UsageQueue`s are shared across
+//! tasks accessing the same account, and among threads due to the preloading. Also, interior
+//! mutability is needed. However, `SchedulingStateMachine` doesn't use conventional locks like
+//! RwLock. Leveraging the fact it's the only state-mutating exclusive thread, it instead uses
+//! `UnsafeCell`, which is sugar-coated by a tailored wrapper called [`TokenCell`]. `TokenCell`
+//! imposes an overly restrictive aliasing rule via the Rust type system to maintain memory safety.
+//! By localizing any synchronization to the message passing, the scheduling code itself attains
+//! maximally possible single-threaded execution without stalling cpu pipelines at all, only
+//! constrained by mem access latency, while efficiently utilizing the L1-L3 cpu cache filled with
+//! `UsageQueue`s.
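To make the scheduling flow described above concrete, here is a rough sketch of how two
conflicting tasks move through the state machine. The method names come from the module
documentation above; the `Option`-returning shapes and the task construction are assumptions
made for illustration, since the full signatures are not shown in this excerpt.

    // task_a and task_b both write-lock the same account, so they conflict.
    // The first is returned as immediately runnable; the second is buffered in
    // that account's FIFO usage queue.
    let runnable = state_machine.schedule_task(task_a);           // assumed: Some(task_a)
    let blocked = state_machine.schedule_task(task_b);            // assumed: None (blocked)

    // After task_a finishes executing, descheduling it releases the write lock,
    // pops task_b from the queue, and surfaces it as newly unblocked.
    state_machine.deschedule_task(&task_a);
    let unblocked = state_machine.schedule_next_unblocked_task(); // assumed: Some(task_b)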
+//! By localizing any synchronization to the message passing, the scheduling code itself attains
+//! maximally possible single-threaded execution without stalling CPU pipelines at all,
+//! constrained only by memory access latency, while efficiently utilizing the L1-L3 CPU cache
+//! filled with `UsageQueue`s.
+//!
+//! ### Buffer bloat insignificance
+//!
+//! The scheduler code itself doesn't care about the buffer bloat problem, which can occur in the
+//! unified scheduler, where a run of heavily linearized and blocked tasks could be severely
+//! hampered by a very large number of interleaved runnable tasks alongside. The reason is, again,
+//! separation of concerns. This is acceptable because the scheduling code itself isn't
+//! susceptible to the buffer bloat problem, as explained by the description and validated by the
+//! benchmark mentioned above. Thus, this should be solved elsewhere, specifically at the
+//! scheduler pool.
+use {
+    crate::utils::{ShortCounter, Token, TokenCell},
+    assert_matches::assert_matches,
+    solana_sdk::{pubkey::Pubkey, transaction::SanitizedTransaction},
+    static_assertions::const_assert_eq,
+    std::{collections::VecDeque, mem, sync::Arc},
+};

-pub struct Task {
+/// Internal utilities. Namely this contains [`ShortCounter`] and [`TokenCell`].
+mod utils {
+    use std::{
+        any::{self, TypeId},
+        cell::{RefCell, UnsafeCell},
+        collections::BTreeSet,
+        marker::PhantomData,
+        thread,
+    };
+
+    /// A really tiny counter to hide `.checked_{add,sub}` all over the place.
+    ///
+    /// It's the caller's responsibility to ensure this (backed by [`u32`]) never overflows.
+    #[derive(Debug, Clone, Copy)]
+    pub(super) struct ShortCounter(u32);
+
+    impl ShortCounter {
+        pub(super) fn zero() -> Self {
+            Self(0)
+        }
+
+        pub(super) fn one() -> Self {
+            Self(1)
+        }
+
+        pub(super) fn is_one(&self) -> bool {
+            self.0 == 1
+        }
+
+        pub(super) fn is_zero(&self) -> bool {
+            self.0 == 0
+        }
+
+        pub(super) fn current(&self) -> u32 {
+            self.0
+        }
+
+        #[must_use]
+        pub(super) fn increment(self) -> Self {
+            Self(self.0.checked_add(1).unwrap())
+        }
+
+        #[must_use]
+        pub(super) fn decrement(self) -> Self {
+            Self(self.0.checked_sub(1).unwrap())
+        }
+
+        pub(super) fn increment_self(&mut self) -> &mut Self {
+            *self = self.increment();
+            self
+        }
+
+        pub(super) fn decrement_self(&mut self) -> &mut Self {
+            *self = self.decrement();
+            self
+        }
+
+        pub(super) fn reset_to_zero(&mut self) -> &mut Self {
+            self.0 = 0;
+            self
+        }
+    }
+
+    /// A conditionally [`Send`]-able and [`Sync`]-able cell leveraging the scheduler's one-by-one
+    /// data access pattern with zero runtime synchronization cost.
+    ///
+    /// To comply with Rust's aliasing rules, these cells require a carefully-created [`Token`] to
+    /// be passed around to access the inner values. The token is a special-purpose phantom object
+    /// used to get rid of the inherent `unsafe`-ness of [`UnsafeCell`], which is internally used
+    /// for the interior mutability.
+    ///
+    /// The final objective of [`Token`] is to ensure there's only one mutable reference to the
+    /// [`TokenCell`] at most _at any given moment_. To that end, it's `unsafe` to create it,
+    /// shifting the responsibility of binding the only singleton instance to a particular thread
+    /// and not creating more than one, onto the API consumers. And its constructor is non-`const`,
+    /// and the type is `!Clone` (and `!Copy` as well), `!Default`, `!Send` and `!Sync` to make it
+    /// relatively hard to cross thread boundaries accidentally.
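+    ///
+    /// A minimal illustrative sketch (not code from this patch; `Vec<u8>` is an arbitrary payload
+    /// type) of how the token gates access to its cells:
+    ///
+    /// ```ignore
+    /// // Safety: this is the only token of this type ever created on this thread.
+    /// let mut token = unsafe { Token::<Vec<u8>>::assume_exclusive_mutating_thread() };
+    /// let cell = TokenCell::new(Vec::new());
+    /// cell.with_borrow_mut(&mut token, |v| v.push(42)); // &mut access serialized by &mut token
+    /// ```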
+ /// + /// In other words, the token semantically _owns_ all of its associated instances of + /// [`TokenCell`]s. And `&mut Token` is needed to access one of them as if the one is of + /// [`Token`]'s `*_mut()` getters. Thus, the Rust aliasing rule for `UnsafeCell` can + /// transitively be proven to be satisfied simply based on the usual borrow checking of the + /// `&mut` reference of [`Token`] itself via + /// [`::with_borrow_mut()`](TokenCell::with_borrow_mut). + /// + /// By extension, it's allowed to create _multiple_ tokens in a _single_ process as long as no + /// instance of [`TokenCell`] is shared by multiple instances of [`Token`]. + /// + /// Note that this is overly restrictive in that it's forbidden, yet, technically possible + /// to _have multiple mutable references to the inner values at the same time, if and only + /// if the respective cells aren't aliased to each other (i.e. different instances)_. This + /// artificial restriction is acceptable for its intended use by the unified scheduler's code + /// because its algorithm only needs to access each instance of [`TokenCell`]-ed data once at a + /// time. Finally, this restriction is traded off for restoration of Rust aliasing rule at zero + /// runtime cost. Without this token mechanism, there's no way to realize this. + #[derive(Debug, Default)] + pub(super) struct TokenCell(UnsafeCell); + + impl TokenCell { + /// Creates a new `TokenCell` with the `value` typed as `V`. + /// + /// Note that this isn't parametric over the its accompanied `Token`'s lifetime to avoid + /// complex handling of non-`'static` heaped data in general. Instead, it's manually + /// required to ensure this instance is accessed only via its associated Token for the + /// entire lifetime. + /// + /// This is intentionally left to be non-`const` to forbid unprotected sharing via static + /// variables among threads. + pub(super) fn new(value: V) -> Self { + Self(UnsafeCell::new(value)) + } + + /// Acquires a mutable reference inside a given closure, while borrowing the mutable + /// reference of the given token. + /// + /// In this way, any additional reborrow can never happen at the same time across all + /// instances of [`TokenCell`] conceptually owned by the instance of [`Token`] (a + /// particular thread), unless previous borrow is released. After the release, the used + /// singleton token should be free to be reused for reborrows. + /// + /// Note that lifetime of the acquired reference is still restricted to 'self, not + /// 'token, in order to avoid use-after-free undefined behaviors. + pub(super) fn with_borrow_mut( + &self, + _token: &mut Token, + f: impl FnOnce(&mut V) -> R, + ) -> R { + f(unsafe { &mut *self.0.get() }) + } + } + + // Safety: Once after a (`Send`-able) `TokenCell` is transferred to a thread from other + // threads, access to `TokenCell` is assumed to be only from the single thread by proper use of + // Token. Thereby, implementing `Sync` can be thought as safe and doing so is needed for the + // particular implementation pattern in the unified scheduler (multi-threaded off-loading). + // + // In other words, TokenCell is technically still `!Sync`. But there should be no + // legalized usage which depends on real `Sync` to avoid undefined behaviors. 
+ unsafe impl Sync for TokenCell {} + + /// A auxiliary zero-sized type to enforce aliasing rule to [`TokenCell`] via rust type system + /// + /// Token semantically owns a collection of `TokenCell` objects and governs the _unique_ + /// existence of mutable access over them by requiring the token itself to be mutably borrowed + /// to get a mutable reference to the internal value of `TokenCell`. + // *mut is used to make this type !Send and !Sync + pub(super) struct Token(PhantomData<*mut V>); + + impl Token { + /// Returns the token to acquire a mutable reference to the inner value of [TokenCell]. + /// + /// This is intentionally left to be non-`const` to forbid unprotected sharing via static + /// variables among threads. + /// + /// # Panics + /// + /// This function will `panic!()` if called multiple times with same type `V` from the same + /// thread to detect potential misuses. + /// + /// # Safety + /// + /// This method should be called exactly once for each thread at most to avoid undefined + /// behavior when used with [`Token`]. + #[must_use] + pub(super) unsafe fn assume_exclusive_mutating_thread() -> Self { + thread_local! { + static TOKENS: RefCell> = const { RefCell::new(BTreeSet::new()) }; + } + // TOKEN.with_borrow_mut can't panic because it's the only non-overlapping + // bound-to-local-variable borrow of the _thread local_ variable. + assert!( + TOKENS.with_borrow_mut(|tokens| tokens.insert(TypeId::of::())), + "{:?} is wrongly initialized twice on {:?}", + any::type_name::(), + thread::current() + ); + + Self(PhantomData) + } + } + + #[cfg(test)] + mod tests { + use { + super::{Token, TokenCell}, + std::{mem, sync::Arc, thread}, + }; + + #[test] + #[should_panic( + expected = "\"solana_unified_scheduler_logic::utils::Token\" is wrongly \ + initialized twice on Thread" + )] + fn test_second_creation_of_tokens_in_a_thread() { + unsafe { + let _ = Token::::assume_exclusive_mutating_thread(); + let _ = Token::::assume_exclusive_mutating_thread(); + } + } + + #[derive(Debug)] + struct FakeQueue { + v: Vec, + } + + // As documented above, it's illegal to create multiple tokens inside a single thread to + // acquire multiple mutable references to the same TokenCell at the same time. + #[test] + // Trigger (harmless) UB unless running under miri by conditionally #[ignore]-ing, + // confirming false-positive result to conversely show the merit of miri! + #[cfg_attr(miri, ignore)] + fn test_ub_illegally_created_multiple_tokens() { + // Unauthorized token minting! + let mut token1 = unsafe { mem::transmute(()) }; + let mut token2 = unsafe { mem::transmute(()) }; + + let queue = TokenCell::new(FakeQueue { + v: Vec::with_capacity(20), + }); + queue.with_borrow_mut(&mut token1, |queue_mut1| { + queue_mut1.v.push(1); + queue.with_borrow_mut(&mut token2, |queue_mut2| { + queue_mut2.v.push(2); + queue_mut1.v.push(3); + }); + queue_mut1.v.push(4); + }); + + // It's in ub already, so we can't assert reliably, so dbg!(...) just for fun + #[cfg(not(miri))] + dbg!(queue.0.into_inner()); + + // Return successfully to indicate an unexpected outcome, because this test should + // have aborted by now. + } + + // As documented above, it's illegal to share (= co-own) the same instance of TokenCell + // across threads. Unfortunately, we can't prevent this from happening with some + // type-safety magic to cause compile errors... So sanity-check here test fails due to a + // runtime error of the known UB, when run under miri. 
+ #[test] + // Trigger (harmless) UB unless running under miri by conditionally #[ignore]-ing, + // confirming false-positive result to conversely show the merit of miri! + #[cfg_attr(miri, ignore)] + fn test_ub_illegally_shared_token_cell() { + let queue1 = Arc::new(TokenCell::new(FakeQueue { + v: Vec::with_capacity(20), + })); + let queue2 = queue1.clone(); + #[cfg(not(miri))] + let queue3 = queue1.clone(); + + // Usually miri immediately detects the data race; but just repeat enough time to avoid + // being flaky + for _ in 0..10 { + let (queue1, queue2) = (queue1.clone(), queue2.clone()); + let thread1 = thread::spawn(move || { + let mut token = unsafe { Token::assume_exclusive_mutating_thread() }; + queue1.with_borrow_mut(&mut token, |queue| { + // this is UB + queue.v.push(3); + }); + }); + // Immediately spawn next thread without joining thread1 to ensure there's a data race + // definitely. Otherwise, joining here wouldn't cause UB. + let thread2 = thread::spawn(move || { + let mut token = unsafe { Token::assume_exclusive_mutating_thread() }; + queue2.with_borrow_mut(&mut token, |queue| { + // this is UB + queue.v.push(4); + }); + }); + + thread1.join().unwrap(); + thread2.join().unwrap(); + } + + // It's in ub already, so we can't assert reliably, so dbg!(...) just for fun + #[cfg(not(miri))] + { + drop((queue1, queue2)); + dbg!(Arc::into_inner(queue3).unwrap().0.into_inner()); + } + + // Return successfully to indicate an unexpected outcome, because this test should + // have aborted by now + } + } +} + +/// [`Result`] for locking a [usage_queue](UsageQueue) with particular +/// [current_usage](RequestedUsage). +type LockResult = Result<(), ()>; +const_assert_eq!(mem::size_of::(), 1); + +/// Something to be scheduled; usually a wrapper of [`SanitizedTransaction`]. +pub type Task = Arc; +const_assert_eq!(mem::size_of::(), 8); + +/// [`Token`] for [`UsageQueue`]. +type UsageQueueToken = Token; +const_assert_eq!(mem::size_of::(), 0); + +/// [`Token`] for [task](Task)'s [internal mutable data](`TaskInner::blocked_usage_count`). +type BlockedUsageCountToken = Token; +const_assert_eq!(mem::size_of::(), 0); + +/// Internal scheduling data about a particular task. +#[derive(Debug)] +pub struct TaskInner { transaction: SanitizedTransaction, + /// The index of a transaction in ledger entries; not used by SchedulingStateMachine by itself. + /// Carrying this along with the transaction is needed to properly record the execution result + /// of it. index: usize, + lock_contexts: Vec, + blocked_usage_count: TokenCell, } -impl Task { - pub fn create_task(transaction: SanitizedTransaction, index: usize) -> Self { - Task { transaction, index } - } - +impl TaskInner { pub fn task_index(&self) -> usize { self.index } @@ -17,4 +430,971 @@ impl Task { pub fn transaction(&self) -> &SanitizedTransaction { &self.transaction } + + fn lock_contexts(&self) -> &[LockContext] { + &self.lock_contexts + } + + fn set_blocked_usage_count(&self, token: &mut BlockedUsageCountToken, count: ShortCounter) { + self.blocked_usage_count + .with_borrow_mut(token, |usage_count| { + *usage_count = count; + }) + } + + #[must_use] + fn try_unblock(self: Task, token: &mut BlockedUsageCountToken) -> Option { + let did_unblock = self + .blocked_usage_count + .with_borrow_mut(token, |usage_count| usage_count.decrement_self().is_zero()); + did_unblock.then_some(self) + } +} + +/// [`Task`]'s per-address context to lock a [usage_queue](UsageQueue) with [certain kind of +/// request](RequestedUsage). 
+#[derive(Debug)] +struct LockContext { + usage_queue: UsageQueue, + requested_usage: RequestedUsage, +} +const_assert_eq!(mem::size_of::(), 16); + +impl LockContext { + fn new(usage_queue: UsageQueue, requested_usage: RequestedUsage) -> Self { + Self { + usage_queue, + requested_usage, + } + } + + fn with_usage_queue_mut( + &self, + usage_queue_token: &mut UsageQueueToken, + f: impl FnOnce(&mut UsageQueueInner) -> R, + ) -> R { + self.usage_queue.0.with_borrow_mut(usage_queue_token, f) + } +} + +/// Status about how the [`UsageQueue`] is used currently. +#[derive(Copy, Clone, Debug)] +enum Usage { + Readonly(ShortCounter), + Writable, +} +const_assert_eq!(mem::size_of::(), 8); + +impl From for Usage { + fn from(requested_usage: RequestedUsage) -> Self { + match requested_usage { + RequestedUsage::Readonly => Usage::Readonly(ShortCounter::one()), + RequestedUsage::Writable => Usage::Writable, + } + } +} + +/// Status about how a task is requesting to use a particular [`UsageQueue`]. +#[derive(Clone, Copy, Debug)] +enum RequestedUsage { + Readonly, + Writable, +} + +/// Internal scheduling data about a particular address. +/// +/// Specifically, it holds the current [`Usage`] (or no usage with [`Usage::Unused`]) and which +/// [`Task`]s are blocked to be executed after the current task is notified to be finished via +/// [`::deschedule_task`](`SchedulingStateMachine::deschedule_task`) +#[derive(Debug)] +struct UsageQueueInner { + current_usage: Option, + blocked_usages_from_tasks: VecDeque, +} + +type UsageFromTask = (RequestedUsage, Task); + +impl Default for UsageQueueInner { + fn default() -> Self { + Self { + current_usage: None, + // Capacity should be configurable to create with large capacity like 1024 inside the + // (multi-threaded) closures passed to create_task(). In this way, reallocs can be + // avoided happening in the scheduler thread. Also, this configurability is desired for + // unified-scheduler-logic's motto: separation of concerns (the pure logic should be + // sufficiently distanced from any some random knob's constants needed for messy + // reality for author's personal preference...). + // + // Note that large cap should be accompanied with proper scheduler cleaning after use, + // which should be handled by higher layers (i.e. scheduler pool). 
+ blocked_usages_from_tasks: VecDeque::with_capacity(128), + } + } +} + +impl UsageQueueInner { + fn try_lock(&mut self, requested_usage: RequestedUsage) -> LockResult { + match self.current_usage { + None => Some(Usage::from(requested_usage)), + Some(Usage::Readonly(count)) => match requested_usage { + RequestedUsage::Readonly => Some(Usage::Readonly(count.increment())), + RequestedUsage::Writable => None, + }, + Some(Usage::Writable) => None, + } + .inspect(|&new_usage| { + self.current_usage = Some(new_usage); + }) + .map(|_| ()) + .ok_or(()) + } + + #[must_use] + fn unlock(&mut self, requested_usage: RequestedUsage) -> Option { + let mut is_unused_now = false; + match &mut self.current_usage { + Some(Usage::Readonly(ref mut count)) => match requested_usage { + RequestedUsage::Readonly => { + if count.is_one() { + is_unused_now = true; + } else { + count.decrement_self(); + } + } + RequestedUsage::Writable => unreachable!(), + }, + Some(Usage::Writable) => match requested_usage { + RequestedUsage::Writable => { + is_unused_now = true; + } + RequestedUsage::Readonly => unreachable!(), + }, + None => unreachable!(), + } + + if is_unused_now { + self.current_usage = None; + self.blocked_usages_from_tasks.pop_front() + } else { + None + } + } + + fn push_blocked_usage_from_task(&mut self, usage_from_task: UsageFromTask) { + assert_matches!(self.current_usage, Some(_)); + self.blocked_usages_from_tasks.push_back(usage_from_task); + } + + #[must_use] + fn pop_unblocked_readonly_usage_from_task(&mut self) -> Option { + if matches!( + self.blocked_usages_from_tasks.front(), + Some((RequestedUsage::Readonly, _)) + ) { + assert_matches!(self.current_usage, Some(Usage::Readonly(_))); + self.blocked_usages_from_tasks.pop_front() + } else { + None + } + } + + fn has_no_blocked_usage(&self) -> bool { + self.blocked_usages_from_tasks.is_empty() + } +} + +const_assert_eq!(mem::size_of::>(), 40); + +/// Scheduler's internal data for each address ([`Pubkey`](`solana_sdk::pubkey::Pubkey`)). Very +/// opaque wrapper type; no methods just with [`::clone()`](Clone::clone) and +/// [`::default()`](Default::default). +#[derive(Debug, Clone, Default)] +pub struct UsageQueue(Arc>); +const_assert_eq!(mem::size_of::(), 8); + +/// A high-level `struct`, managing the overall scheduling of [tasks](Task), to be used by +/// `solana-unified-scheduler-pool`. +pub struct SchedulingStateMachine { + unblocked_task_queue: VecDeque, + active_task_count: ShortCounter, + handled_task_count: ShortCounter, + unblocked_task_count: ShortCounter, + total_task_count: ShortCounter, + count_token: BlockedUsageCountToken, + usage_queue_token: UsageQueueToken, +} +const_assert_eq!(mem::size_of::(), 48); + +impl SchedulingStateMachine { + pub fn has_no_active_task(&self) -> bool { + self.active_task_count.is_zero() + } + + pub fn has_unblocked_task(&self) -> bool { + !self.unblocked_task_queue.is_empty() + } + + pub fn unblocked_task_queue_count(&self) -> usize { + self.unblocked_task_queue.len() + } + + pub fn active_task_count(&self) -> u32 { + self.active_task_count.current() + } + + pub fn handled_task_count(&self) -> u32 { + self.handled_task_count.current() + } + + pub fn unblocked_task_count(&self) -> u32 { + self.unblocked_task_count.current() + } + + pub fn total_task_count(&self) -> u32 { + self.total_task_count.current() + } + + /// Schedules given `task`, returning it if successful. + /// + /// Returns `Some(task)` if it's immediately scheduled. 
Otherwise, returns `None`, + /// indicating the scheduled task is blocked currently. + /// + /// Note that this function takes ownership of the task to allow for future optimizations. + #[must_use] + pub fn schedule_task(&mut self, task: Task) -> Option { + self.total_task_count.increment_self(); + self.active_task_count.increment_self(); + self.try_lock_usage_queues(task) + } + + #[must_use] + pub fn schedule_next_unblocked_task(&mut self) -> Option { + self.unblocked_task_queue.pop_front().inspect(|_| { + self.unblocked_task_count.increment_self(); + }) + } + + /// Deschedules given scheduled `task`. + /// + /// This must be called exactly once for all scheduled tasks to uphold both + /// `SchedulingStateMachine` and `UsageQueue` internal state consistency at any given moment of + /// time. It's serious logic error to call this twice with the same task or none at all after + /// scheduling. Similarly, calling this with not scheduled task is also forbidden. + /// + /// Note that this function intentionally doesn't take ownership of the task to avoid dropping + /// tasks inside `SchedulingStateMachine` to provide an offloading-based optimization + /// opportunity for callers. + pub fn deschedule_task(&mut self, task: &Task) { + self.active_task_count.decrement_self(); + self.handled_task_count.increment_self(); + self.unlock_usage_queues(task); + } + + #[must_use] + fn try_lock_usage_queues(&mut self, task: Task) -> Option { + let mut blocked_usage_count = ShortCounter::zero(); + + for context in task.lock_contexts() { + context.with_usage_queue_mut(&mut self.usage_queue_token, |usage_queue| { + let lock_result = if usage_queue.has_no_blocked_usage() { + usage_queue.try_lock(context.requested_usage) + } else { + LockResult::Err(()) + }; + if let Err(()) = lock_result { + blocked_usage_count.increment_self(); + let usage_from_task = (context.requested_usage, task.clone()); + usage_queue.push_blocked_usage_from_task(usage_from_task); + } + }); + } + + // no blocked usage count means success + if blocked_usage_count.is_zero() { + Some(task) + } else { + task.set_blocked_usage_count(&mut self.count_token, blocked_usage_count); + None + } + } + + fn unlock_usage_queues(&mut self, task: &Task) { + for context in task.lock_contexts() { + context.with_usage_queue_mut(&mut self.usage_queue_token, |usage_queue| { + let mut unblocked_task_from_queue = usage_queue.unlock(context.requested_usage); + + while let Some((requested_usage, task_with_unblocked_queue)) = + unblocked_task_from_queue + { + // When `try_unblock()` returns `None` as a failure of unblocking this time, + // this means the task is still blocked by other active task's usages. So, + // don't push task into unblocked_task_queue yet. It can be assumed that every + // task will eventually succeed to be unblocked, and enter in this condition + // clause as long as `SchedulingStateMachine` is used correctly. 
+ if let Some(task) = task_with_unblocked_queue.try_unblock(&mut self.count_token) + { + self.unblocked_task_queue.push_back(task); + } + + match usage_queue.try_lock(requested_usage) { + LockResult::Ok(()) => { + // Try to further schedule blocked task for parallelism in the case of + // readonly usages + unblocked_task_from_queue = + if matches!(requested_usage, RequestedUsage::Readonly) { + usage_queue.pop_unblocked_readonly_usage_from_task() + } else { + None + }; + } + LockResult::Err(()) => panic!("should never fail in this context"), + } + } + }); + } + } + + /// Creates a new task with [`SanitizedTransaction`] with all of its corresponding + /// [`UsageQueue`]s preloaded. + /// + /// Closure (`usage_queue_loader`) is used to delegate the (possibly multi-threaded) + /// implementation of [`UsageQueue`] look-up by [`pubkey`](Pubkey) to callers. It's the + /// caller's responsibility to ensure the same instance is returned from the closure, given a + /// particular pubkey. + /// + /// Closure is used here to delegate the responsibility of primary ownership of `UsageQueue` + /// (and caching/pruning if any) to the caller. `SchedulingStateMachine` guarantees that all of + /// shared owndership of `UsageQueue`s are released and UsageQueue state is identical to just + /// after created, if `has_no_active_task()` is `true`. Also note that this is desired for + /// separation of concern. + pub fn create_task( + transaction: SanitizedTransaction, + index: usize, + usage_queue_loader: &mut impl FnMut(Pubkey) -> UsageQueue, + ) -> Task { + // Calling the _unchecked() version here is safe for faster operation, because + // `get_account_locks()` (the safe variant) is ensured to be called in + // DefaultTransactionHandler::handle() via Bank::prepare_unlocked_batch_from_single_tx(). + // + // The safe variant has additional account-locking related verifications, which is crucial. + // + // Currently the replaying stage is redundantly calling `get_account_locks()` when unified + // scheduler is enabled on the given transaction at the blockstore. This will be relaxed + // for optimization in the future. As for banking stage with unified scheduler, it will + // need to run .get_account_locks() at least once somewhere in the code path. In the + // distant future, this function `create_task()` should be adjusted so that both stages do + // the checks before calling this (say, with some ad-hoc type like + // `SanitizedTransactionWithCheckedAccountLocks`) or do the checks here, resulting in + // eliminating the redundant one in the replaying stage and in the handler. + let locks = transaction.get_account_locks_unchecked(); + + let writable_locks = locks + .writable + .iter() + .map(|address| (address, RequestedUsage::Writable)); + let readonly_locks = locks + .readonly + .iter() + .map(|address| (address, RequestedUsage::Readonly)); + + let lock_contexts = writable_locks + .chain(readonly_locks) + .map(|(address, requested_usage)| { + LockContext::new(usage_queue_loader(**address), requested_usage) + }) + .collect(); + + Task::new(TaskInner { + transaction, + index, + lock_contexts, + blocked_usage_count: TokenCell::new(ShortCounter::zero()), + }) + } + + /// Rewind the inactive state machine to be initialized + /// + /// This isn't called _reset_ to indicate this isn't safe to call this at any given moment. + /// This panics if the state machine hasn't properly been finished (i.e. there should be no + /// active task) to uphold invariants of [`UsageQueue`]s. 
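+    ///
+    /// An illustrative sketch of the intended call site (hypothetical caller code, not part of
+    /// this patch):
+    ///
+    /// ```ignore
+    /// // ...the session has drained: every scheduled task has been descheduled by now...
+    /// assert!(state_machine.has_no_active_task());
+    /// state_machine.reinitialize(); // safe to reuse for the next session (e.g. the next slot)
+    /// ```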
+ /// + /// This method is intended to reuse SchedulingStateMachine instance (to avoid its `unsafe` + /// [constructor](SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling) + /// as much as possible) and its (possibly cached) associated [`UsageQueue`]s for processing + /// other slots. + pub fn reinitialize(&mut self) { + assert!(self.has_no_active_task()); + assert_eq!(self.unblocked_task_queue.len(), 0); + // nice trick to ensure all fields are handled here if new one is added. + let Self { + unblocked_task_queue: _, + active_task_count, + handled_task_count, + unblocked_task_count, + total_task_count, + count_token: _, + usage_queue_token: _, + // don't add ".." here + } = self; + active_task_count.reset_to_zero(); + handled_task_count.reset_to_zero(); + unblocked_task_count.reset_to_zero(); + total_task_count.reset_to_zero(); + } + + /// Creates a new instance of [`SchedulingStateMachine`] with its `unsafe` fields created as + /// well, thus carrying over `unsafe`. + /// + /// # Safety + /// Call this exactly once for each thread. See [`TokenCell`] for details. + #[must_use] + pub unsafe fn exclusively_initialize_current_thread_for_scheduling() -> Self { + Self { + // It's very unlikely this is desired to be configurable, like + // `UsageQueueInner::blocked_usages_from_tasks`'s cap. + unblocked_task_queue: VecDeque::with_capacity(1024), + active_task_count: ShortCounter::zero(), + handled_task_count: ShortCounter::zero(), + unblocked_task_count: ShortCounter::zero(), + total_task_count: ShortCounter::zero(), + count_token: unsafe { BlockedUsageCountToken::assume_exclusive_mutating_thread() }, + usage_queue_token: unsafe { UsageQueueToken::assume_exclusive_mutating_thread() }, + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + instruction::{AccountMeta, Instruction}, + message::Message, + pubkey::Pubkey, + signature::Signer, + signer::keypair::Keypair, + transaction::{SanitizedTransaction, Transaction}, + }, + std::{cell::RefCell, collections::HashMap, rc::Rc}, + }; + + fn simplest_transaction() -> SanitizedTransaction { + let payer = Keypair::new(); + let message = Message::new(&[], Some(&payer.pubkey())); + let unsigned = Transaction::new_unsigned(message); + SanitizedTransaction::from_transaction_for_tests(unsigned) + } + + fn transaction_with_readonly_address(address: Pubkey) -> SanitizedTransaction { + let instruction = Instruction { + program_id: Pubkey::default(), + accounts: vec![AccountMeta::new_readonly(address, false)], + data: vec![], + }; + let message = Message::new(&[instruction], Some(&Pubkey::new_unique())); + let unsigned = Transaction::new_unsigned(message); + SanitizedTransaction::from_transaction_for_tests(unsigned) + } + + fn transaction_with_writable_address(address: Pubkey) -> SanitizedTransaction { + let instruction = Instruction { + program_id: Pubkey::default(), + accounts: vec![AccountMeta::new(address, false)], + data: vec![], + }; + let message = Message::new(&[instruction], Some(&Pubkey::new_unique())); + let unsigned = Transaction::new_unsigned(message); + SanitizedTransaction::from_transaction_for_tests(unsigned) + } + + fn create_address_loader( + usage_queues: Option>>>, + ) -> impl FnMut(Pubkey) -> UsageQueue { + let usage_queues = usage_queues.unwrap_or_default(); + move |address| { + usage_queues + .borrow_mut() + .entry(address) + .or_default() + .clone() + } + } + + #[test] + fn test_scheduling_state_machine_creation() { + let state_machine = unsafe { + 
SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_eq!(state_machine.active_task_count(), 0); + assert_eq!(state_machine.total_task_count(), 0); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_scheduling_state_machine_good_reinitialization() { + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + state_machine.total_task_count.increment_self(); + assert_eq!(state_machine.total_task_count(), 1); + state_machine.reinitialize(); + assert_eq!(state_machine.total_task_count(), 0); + } + + #[test] + #[should_panic(expected = "assertion failed: self.has_no_active_task()")] + fn test_scheduling_state_machine_bad_reinitialization() { + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + let address_loader = &mut create_address_loader(None); + let task = SchedulingStateMachine::create_task(simplest_transaction(), 3, address_loader); + state_machine.schedule_task(task).unwrap(); + state_machine.reinitialize(); + } + + #[test] + fn test_create_task() { + let sanitized = simplest_transaction(); + let task = SchedulingStateMachine::create_task(sanitized.clone(), 3, &mut |_| { + UsageQueue::default() + }); + assert_eq!(task.task_index(), 3); + assert_eq!(task.transaction(), &sanitized); + } + + #[test] + fn test_non_conflicting_task_related_counts() { + let sanitized = simplest_transaction(); + let address_loader = &mut create_address_loader(None); + let task = SchedulingStateMachine::create_task(sanitized.clone(), 3, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + let task = state_machine.schedule_task(task).unwrap(); + assert_eq!(state_machine.active_task_count(), 1); + assert_eq!(state_machine.total_task_count(), 1); + state_machine.deschedule_task(&task); + assert_eq!(state_machine.active_task_count(), 0); + assert_eq!(state_machine.total_task_count(), 1); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_conflicting_task_related_counts() { + let sanitized = simplest_transaction(); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader); + let task3 = SchedulingStateMachine::create_task(sanitized.clone(), 103, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!(state_machine.schedule_task(task2.clone()), None); + + state_machine.deschedule_task(&task1); + assert!(state_machine.has_unblocked_task()); + assert_eq!(state_machine.unblocked_task_queue_count(), 1); + + // unblocked_task_count() should be incremented + assert_eq!(state_machine.unblocked_task_count(), 0); + assert_eq!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(102) + ); + assert_eq!(state_machine.unblocked_task_count(), 1); + + // there's no blocked task anymore; calling schedule_next_unblocked_task should be noop and + // shouldn't increment the unblocked_task_count(). 
+ assert!(!state_machine.has_unblocked_task()); + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + assert_eq!(state_machine.unblocked_task_count(), 1); + + assert_eq!(state_machine.unblocked_task_queue_count(), 0); + state_machine.deschedule_task(&task2); + + assert_matches!( + state_machine + .schedule_task(task3.clone()) + .map(|task| task.task_index()), + Some(103) + ); + state_machine.deschedule_task(&task3); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_existing_blocking_task_then_newly_scheduled_task() { + let sanitized = simplest_transaction(); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized.clone(), 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized.clone(), 102, address_loader); + let task3 = SchedulingStateMachine::create_task(sanitized.clone(), 103, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!(state_machine.schedule_task(task2.clone()), None); + + assert_eq!(state_machine.unblocked_task_queue_count(), 0); + state_machine.deschedule_task(&task1); + assert_eq!(state_machine.unblocked_task_queue_count(), 1); + + // new task is arriving after task1 is already descheduled and task2 got unblocked + assert_matches!(state_machine.schedule_task(task3.clone()), None); + + assert_eq!(state_machine.unblocked_task_count(), 0); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(102) + ); + assert_eq!(state_machine.unblocked_task_count(), 1); + + state_machine.deschedule_task(&task2); + + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(103) + ); + assert_eq!(state_machine.unblocked_task_count(), 2); + + state_machine.deschedule_task(&task3); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_multiple_readonly_task_and_counts() { + let conflicting_address = Pubkey::new_unique(); + let sanitized1 = transaction_with_readonly_address(conflicting_address); + let sanitized2 = transaction_with_readonly_address(conflicting_address); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + // both of read-only tasks should be immediately runnable + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!( + state_machine + .schedule_task(task2.clone()) + .map(|t| t.task_index()), + Some(102) + ); + + assert_eq!(state_machine.active_task_count(), 2); + assert_eq!(state_machine.handled_task_count(), 0); + assert_eq!(state_machine.unblocked_task_queue_count(), 0); + state_machine.deschedule_task(&task1); + assert_eq!(state_machine.active_task_count(), 1); + assert_eq!(state_machine.handled_task_count(), 1); + assert_eq!(state_machine.unblocked_task_queue_count(), 0); + state_machine.deschedule_task(&task2); + assert_eq!(state_machine.active_task_count(), 0); + assert_eq!(state_machine.handled_task_count(), 2); + 
assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_all_blocking_readable_tasks_block_writable_task() { + let conflicting_address = Pubkey::new_unique(); + let sanitized1 = transaction_with_readonly_address(conflicting_address); + let sanitized2 = transaction_with_readonly_address(conflicting_address); + let sanitized3 = transaction_with_writable_address(conflicting_address); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader); + let task3 = SchedulingStateMachine::create_task(sanitized3, 103, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!( + state_machine + .schedule_task(task2.clone()) + .map(|t| t.task_index()), + Some(102) + ); + assert_matches!(state_machine.schedule_task(task3.clone()), None); + + assert_eq!(state_machine.active_task_count(), 3); + assert_eq!(state_machine.handled_task_count(), 0); + assert_eq!(state_machine.unblocked_task_queue_count(), 0); + state_machine.deschedule_task(&task1); + assert_eq!(state_machine.active_task_count(), 2); + assert_eq!(state_machine.handled_task_count(), 1); + assert_eq!(state_machine.unblocked_task_queue_count(), 0); + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + state_machine.deschedule_task(&task2); + assert_eq!(state_machine.active_task_count(), 1); + assert_eq!(state_machine.handled_task_count(), 2); + assert_eq!(state_machine.unblocked_task_queue_count(), 1); + // task3 is finally unblocked after all of readable tasks (task1 and task2) is finished. 
+ assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(103) + ); + state_machine.deschedule_task(&task3); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_readonly_then_writable_then_readonly_linearized() { + let conflicting_address = Pubkey::new_unique(); + let sanitized1 = transaction_with_readonly_address(conflicting_address); + let sanitized2 = transaction_with_writable_address(conflicting_address); + let sanitized3 = transaction_with_readonly_address(conflicting_address); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader); + let task3 = SchedulingStateMachine::create_task(sanitized3, 103, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!(state_machine.schedule_task(task2.clone()), None); + assert_matches!(state_machine.schedule_task(task3.clone()), None); + + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + state_machine.deschedule_task(&task1); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(102) + ); + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + state_machine.deschedule_task(&task2); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(103) + ); + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + state_machine.deschedule_task(&task3); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_readonly_then_writable() { + let conflicting_address = Pubkey::new_unique(); + let sanitized1 = transaction_with_readonly_address(conflicting_address); + let sanitized2 = transaction_with_writable_address(conflicting_address); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!(state_machine.schedule_task(task2.clone()), None); + + // descheduling read-locking task1 should equate to unblocking write-locking task2 + state_machine.deschedule_task(&task1); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(102) + ); + state_machine.deschedule_task(&task2); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_blocked_tasks_writable_2_readonly_then_writable() { + let conflicting_address = Pubkey::new_unique(); + let sanitized1 = transaction_with_writable_address(conflicting_address); + let sanitized2 = transaction_with_readonly_address(conflicting_address); + let sanitized3 = transaction_with_readonly_address(conflicting_address); + let sanitized4 = transaction_with_writable_address(conflicting_address); + let address_loader = &mut create_address_loader(None); + let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader); + let task2 = 
SchedulingStateMachine::create_task(sanitized2, 102, address_loader); + let task3 = SchedulingStateMachine::create_task(sanitized3, 103, address_loader); + let task4 = SchedulingStateMachine::create_task(sanitized4, 104, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!(state_machine.schedule_task(task2.clone()), None); + assert_matches!(state_machine.schedule_task(task3.clone()), None); + assert_matches!(state_machine.schedule_task(task4.clone()), None); + + state_machine.deschedule_task(&task1); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(102) + ); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(103) + ); + // the above deschedule_task(task1) call should only unblock task2 and task3 because these + // are read-locking. And shouldn't unblock task4 because it's write-locking + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + + state_machine.deschedule_task(&task2); + // still task4 is blocked... + assert_matches!(state_machine.schedule_next_unblocked_task(), None); + + state_machine.deschedule_task(&task3); + // finally task4 should be unblocked + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(104) + ); + state_machine.deschedule_task(&task4); + assert!(state_machine.has_no_active_task()); + } + + #[test] + fn test_gradual_locking() { + let conflicting_address = Pubkey::new_unique(); + let sanitized1 = transaction_with_writable_address(conflicting_address); + let sanitized2 = transaction_with_writable_address(conflicting_address); + let usage_queues = Rc::new(RefCell::new(HashMap::new())); + let address_loader = &mut create_address_loader(Some(usage_queues.clone())); + let task1 = SchedulingStateMachine::create_task(sanitized1, 101, address_loader); + let task2 = SchedulingStateMachine::create_task(sanitized2, 102, address_loader); + + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + assert_matches!( + state_machine + .schedule_task(task1.clone()) + .map(|t| t.task_index()), + Some(101) + ); + assert_matches!(state_machine.schedule_task(task2.clone()), None); + let usage_queues = usage_queues.borrow_mut(); + let usage_queue = usage_queues.get(&conflicting_address).unwrap(); + usage_queue + .0 + .with_borrow_mut(&mut state_machine.usage_queue_token, |usage_queue| { + assert_matches!(usage_queue.current_usage, Some(Usage::Writable)); + }); + // task2's fee payer should have been locked already even if task2 is blocked still via the + // above the schedule_task(task2) call + let fee_payer = task2.transaction().message().fee_payer(); + let usage_queue = usage_queues.get(fee_payer).unwrap(); + usage_queue + .0 + .with_borrow_mut(&mut state_machine.usage_queue_token, |usage_queue| { + assert_matches!(usage_queue.current_usage, Some(Usage::Writable)); + }); + state_machine.deschedule_task(&task1); + assert_matches!( + state_machine + .schedule_next_unblocked_task() + .map(|t| t.task_index()), + Some(102) + ); + state_machine.deschedule_task(&task2); + assert!(state_machine.has_no_active_task()); + } + + #[test] + #[should_panic(expected = "internal error: entered unreachable code")] + fn 
test_unreachable_unlock_conditions1() { + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + let usage_queue = UsageQueue::default(); + usage_queue + .0 + .with_borrow_mut(&mut state_machine.usage_queue_token, |usage_queue| { + let _ = usage_queue.unlock(RequestedUsage::Writable); + }); + } + + #[test] + #[should_panic(expected = "internal error: entered unreachable code")] + fn test_unreachable_unlock_conditions2() { + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + let usage_queue = UsageQueue::default(); + usage_queue + .0 + .with_borrow_mut(&mut state_machine.usage_queue_token, |usage_queue| { + usage_queue.current_usage = Some(Usage::Writable); + let _ = usage_queue.unlock(RequestedUsage::Readonly); + }); + } + + #[test] + #[should_panic(expected = "internal error: entered unreachable code")] + fn test_unreachable_unlock_conditions3() { + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; + let usage_queue = UsageQueue::default(); + usage_queue + .0 + .with_borrow_mut(&mut state_machine.usage_queue_token, |usage_queue| { + usage_queue.current_usage = Some(Usage::Readonly(ShortCounter::one())); + let _ = usage_queue.unlock(RequestedUsage::Writable); + }); + } } diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml index 7626215b1e1126..9bd668f2799ab0 100644 --- a/unified-scheduler-pool/Cargo.toml +++ b/unified-scheduler-pool/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] assert_matches = { workspace = true } crossbeam-channel = { workspace = true } +dashmap = { workspace = true } derivative = { workspace = true } log = { workspace = true } solana-ledger = { workspace = true } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 81a3506ea28480..eed740025bcad4 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -1,3 +1,8 @@ +//! NOTE: While the unified scheduler is fully functional and moderately performant even with +//! mainnet-beta, it has known resource-exhaustion related security issues for replaying +//! specially-crafted blocks produced by malicious leaders. Thus, this experimental and +//! nondefault functionality is exempt from the bug bounty program for now. +//! //! Transaction scheduling code. //! //! 
This crate implements 3 solana-runtime traits (`InstalledScheduler`, `UninstalledScheduler` and @@ -10,7 +15,8 @@ use { assert_matches::assert_matches, - crossbeam_channel::{select, unbounded, Receiver, SendError, Sender}, + crossbeam_channel::{never, select, unbounded, Receiver, RecvError, SendError, Sender}, + dashmap::DashMap, derivative::Derivative, log::*, solana_ledger::blockstore_processor::{ @@ -26,8 +32,11 @@ use { }, prioritization_fee_cache::PrioritizationFeeCache, }, - solana_sdk::transaction::{Result, SanitizedTransaction}, - solana_unified_scheduler_logic::Task, + solana_sdk::{ + pubkey::Pubkey, + transaction::{Result, SanitizedTransaction}, + }, + solana_unified_scheduler_logic::{SchedulingStateMachine, Task, UsageQueue}, solana_vote::vote_sender_types::ReplayVoteSender, std::{ fmt::Debug, @@ -90,10 +99,8 @@ where replay_vote_sender: Option, prioritization_fee_cache: Arc, ) -> Arc { - let handler_count = handler_count.unwrap_or(1); - // we're hard-coding the number of handler thread to 1, meaning this impl is currently - // single-threaded still. - assert_eq!(handler_count, 1); // replace this with assert!(handler_count >= 1) later + let handler_count = handler_count.unwrap_or(Self::default_handler_count()); + assert!(handler_count >= 1); Arc::new_cyclic(|weak_self| Self { scheduler_inners: Mutex::default(), @@ -386,13 +393,35 @@ mod chained_channel { } } +/// The primary owner of all [`UsageQueue`]s used for particular [`PooledScheduler`]. +/// +/// Currently, the simplest implementation. This grows memory usage in unbounded way. Cleaning will +/// be added later. This struct is here to be put outside `solana-unified-scheduler-logic` for the +/// crate's original intent (separation of logics from this crate). Some practical and mundane +/// pruning will be implemented in this type. +#[derive(Default, Debug)] +pub struct UsageQueueLoader { + usage_queues: DashMap, +} + +impl UsageQueueLoader { + pub fn load(&self, address: Pubkey) -> UsageQueue { + self.usage_queues.entry(address).or_default().clone() + } +} + +// (this is slow needing atomic mem reads. However, this can be turned into a lot faster +// optimizer-friendly version as shown in this crossbeam pr: +// https://github.com/crossbeam-rs/crossbeam/pull/1047) +fn disconnected() -> Receiver { + // drop the sender residing at .0, returning an always-disconnected receiver. + unbounded().1 +} + fn initialized_result_with_timings() -> ResultWithTimings { (Ok(()), ExecuteTimings::default()) } -// Currently, simplest possible implementation (i.e. single-threaded) -// this will be replaced with more proper implementation... -// not usable at all, especially for mainnet-beta #[derive(Debug)] pub struct PooledScheduler { inner: PooledSchedulerInner, @@ -402,6 +431,7 @@ pub struct PooledScheduler { #[derive(Debug)] pub struct PooledSchedulerInner, TH: TaskHandler> { thread_manager: ThreadManager, + usage_queue_loader: UsageQueueLoader, } // This type manages the OS threads for scheduling and executing transactions. The term @@ -427,6 +457,7 @@ impl PooledScheduler { Self::from_inner( PooledSchedulerInner:: { thread_manager: ThreadManager::new(pool), + usage_queue_loader: UsageQueueLoader::default(), }, initial_context, ) @@ -518,7 +549,6 @@ impl, TH: TaskHandler> ThreadManager { let new_task_receiver = self.new_task_receiver.clone(); let mut session_ending = false; - let mut active_task_count: usize = 0; // Now, this is the main loop for the scheduler thread, which is a special beast. 
// @@ -558,61 +588,99 @@ impl, TH: TaskHandler> ThreadManager { // cycles out of the scheduler thread. Thus, any kinds of unessential overhead sources // like syscalls, VDSO, and even memory (de)allocation should be avoided at all costs // by design or by means of offloading at the last resort. - move || loop { - let mut is_finished = false; - while !is_finished { - select! { - recv(finished_task_receiver) -> executed_task => { - let executed_task = executed_task.unwrap(); - - active_task_count = active_task_count.checked_sub(1).unwrap(); - let result_with_timings = result_with_timings.as_mut().unwrap(); - Self::accumulate_result_with_timings(result_with_timings, executed_task); - }, - recv(new_task_receiver) -> message => { - assert!(!session_ending); - - match message.unwrap() { - NewTaskPayload::Payload(task) => { - // so, we're NOT scheduling at all here; rather, just execute - // tx straight off. the inter-tx locking deps aren't needed to - // be resolved in the case of single-threaded FIFO like this. - runnable_task_sender - .send_payload(task) - .unwrap(); - active_task_count = active_task_count.checked_add(1).unwrap(); - } - NewTaskPayload::OpenSubchannel(context) => { - // signal about new SchedulingContext to handler threads - runnable_task_sender - .send_chained_channel(context, handler_count) - .unwrap(); - assert_matches!( - result_with_timings.replace(initialized_result_with_timings()), - None - ); - } - NewTaskPayload::CloseSubchannel => { - session_ending = true; - } - } - }, - }; + move || { + let (do_now, dont_now) = (&disconnected::<()>(), &never::<()>()); + let dummy_receiver = |trigger| { + if trigger { + do_now + } else { + dont_now + } + }; - // a really simplistic termination condition, which only works under the - // assumption of single handler thread... - is_finished = session_ending && active_task_count == 0; - } + let mut state_machine = unsafe { + SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() + }; - if session_ending { - session_result_sender - .send(Some( - result_with_timings - .take() - .unwrap_or_else(initialized_result_with_timings), - )) - .unwrap(); - session_ending = false; + loop { + let mut is_finished = false; + while !is_finished { + // ALL recv selectors are eager-evaluated ALWAYS by current crossbeam impl, + // which isn't great and is inconsistent with `if`s in the Rust's match + // arm. So, eagerly binding the result to a variable unconditionally here + // makes no perf. difference... + let dummy_unblocked_task_receiver = + dummy_receiver(state_machine.has_unblocked_task()); + + // (Assume this is biased; i.e. select_biased! in this crossbeam pr: + // https://github.com/rust-lang/futures-rs/pull/1976) + // + // There's something special called dummy_unblocked_task_receiver here. + // This odd pattern was needed to react to newly unblocked tasks from + // _not-crossbeam-channel_ event sources, precisely at the specified + // precedence among other selectors, while delegating the conrol flow to + // select_biased!. + // + // In this way, hot looping is avoided and overall control flow is much + // consistent. Note that unified scheduler will go + // into busy looping to seek lowest latency eventually. However, not now, + // to measure _actual_ cpu usage easily with the select approach. + select! 
{ + recv(finished_task_receiver) -> executed_task => { + let executed_task = executed_task.unwrap(); + + state_machine.deschedule_task(&executed_task.task); + let result_with_timings = result_with_timings.as_mut().unwrap(); + Self::accumulate_result_with_timings(result_with_timings, executed_task); + }, + recv(dummy_unblocked_task_receiver) -> dummy => { + assert_matches!(dummy, Err(RecvError)); + + let task = state_machine + .schedule_next_unblocked_task() + .expect("unblocked task"); + runnable_task_sender.send_payload(task).unwrap(); + }, + recv(new_task_receiver) -> message => { + assert!(!session_ending); + + match message.unwrap() { + NewTaskPayload::Payload(task) => { + if let Some(task) = state_machine.schedule_task(task) { + runnable_task_sender.send_payload(task).unwrap(); + } + } + NewTaskPayload::OpenSubchannel(context) => { + // signal about new SchedulingContext to handler threads + runnable_task_sender + .send_chained_channel(context, handler_count) + .unwrap(); + assert_matches!( + result_with_timings.replace(initialized_result_with_timings()), + None + ); + } + NewTaskPayload::CloseSubchannel => { + session_ending = true; + } + } + }, + }; + + is_finished = session_ending && state_machine.has_no_active_task(); + } + + if session_ending { + state_machine.reinitialize(); + session_result_sender + .send(Some( + result_with_timings + .take() + .unwrap_or_else(initialized_result_with_timings), + )) + .unwrap(); + session_ending = false; + } } } }; @@ -741,7 +809,9 @@ impl InstalledScheduler for PooledScheduler { } fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { - let task = Task::create_task(transaction.clone(), index); + let task = SchedulingStateMachine::create_task(transaction.clone(), index, &mut |pubkey| { + self.inner.usage_queue_loader.load(pubkey) + }); self.inner.thread_manager.send_task(task); } @@ -1020,7 +1090,7 @@ mod tests { .result, Ok(_) ); - scheduler.schedule_execution(&(good_tx_after_bad_tx, 0)); + scheduler.schedule_execution(&(good_tx_after_bad_tx, 1)); scheduler.pause_for_recent_blockhash(); // transaction_count should remain same as scheduler should be bailing out. // That's because we're testing the serialized failing execution case in this test. @@ -1244,4 +1314,42 @@ mod tests { 4 ); } + + // See comment in SchedulingStateMachine::create_task() for the justification of this test + #[test] + fn test_enfoced_get_account_locks_validation() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + ref mint_keypair, + .. + } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank = &setup_dummy_fork_graph(bank); + + let mut tx = system_transaction::transfer( + mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + ); + // mangle the transfer tx to try to lock fee_payer (= mint_keypair) address twice! + tx.message.account_keys.push(tx.message.account_keys[0]); + let tx = &SanitizedTransaction::from_transaction_for_tests(tx); + + // this internally should call SanitizedTransaction::get_account_locks(). 
+ let result = &mut Ok(()); + let timings = &mut ExecuteTimings::default(); + let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let handler_context = &HandlerContext { + log_messages_bytes_limit: None, + transaction_status_sender: None, + replay_vote_sender: None, + prioritization_fee_cache, + }; + + DefaultTaskHandler::handle(result, timings, bank, tx, 0, handler_context); + assert_matches!(result, Err(TransactionError::AccountLoadedTwice)); + } }
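To make the new scheduling flow concrete, here is a minimal single-threaded sketch of how the `SchedulingStateMachine` API added by this patch series is meant to be driven. This is illustrative caller code only, not part of the patch: `drive_single_session` and its `HashMap`-backed loader closure are hypothetical stand-ins for the scheduler thread and `UsageQueueLoader` above, and tasks are "executed" inline rather than sent to handler threads over crossbeam channels.

use {
    solana_sdk::{pubkey::Pubkey, transaction::SanitizedTransaction},
    solana_unified_scheduler_logic::{SchedulingStateMachine, UsageQueue},
    std::{cell::RefCell, collections::HashMap},
};

fn drive_single_session(transactions: Vec<SanitizedTransaction>) {
    // Hypothetical stand-in for UsageQueueLoader: one shared UsageQueue per address.
    let usage_queues = RefCell::new(HashMap::<Pubkey, UsageQueue>::new());
    let mut loader = |address: Pubkey| -> UsageQueue {
        usage_queues.borrow_mut().entry(address).or_default().clone()
    };

    // Safety: this function body is the only scheduling thread.
    let mut state_machine =
        unsafe { SchedulingStateMachine::exclusively_initialize_current_thread_for_scheduling() };

    let mut runnable = Vec::new();
    for (index, transaction) in transactions.into_iter().enumerate() {
        let task = SchedulingStateMachine::create_task(transaction, index, &mut loader);
        // Non-conflicting tasks come straight back; conflicting ones are buffered internally.
        if let Some(task) = state_machine.schedule_task(task) {
            runnable.push(task);
        }
    }

    // "Execute" each runnable task, then drain whatever its completion unblocked.
    while let Some(task) = runnable.pop() {
        state_machine.deschedule_task(&task);
        while let Some(unblocked) = state_machine.schedule_next_unblocked_task() {
            runnable.push(unblocked);
        }
    }

    assert!(state_machine.has_no_active_task());
    state_machine.reinitialize(); // ready to be reused for the next session
}

Every task handed back by `schedule_task()` or `schedule_next_unblocked_task()` is descheduled exactly once, which is what lets the final `has_no_active_task()` assertion and `reinitialize()` call succeed.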