From 1404860be3052d1304f2dbb44d7d6bdf61e9f700 Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Wed, 27 Jul 2022 14:19:02 -0600 Subject: [PATCH 001/192] Adjust `solana validators -n` header to be correctly aligned with the columns --- cli-output/src/cli_output.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 8a7c9dad6c320a..f45c5713e4af29 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -454,7 +454,7 @@ impl fmt::Display for CliValidators { "Credits", "Version", "Active Stake", - padding = padding + 1 + padding = padding + 2 )) .bold(); writeln!(f, "{}", header)?; From 48862c575a1f62f2d010d7568cbe0279b2e51f62 Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Tue, 28 Jun 2022 10:39:00 -0700 Subject: [PATCH 002/192] Add StakeInstruction::Redelegate --- programs/stake/src/stake_instruction.rs | 657 +++++++++++++++++++++++- programs/stake/src/stake_state.rs | 143 +++++- sdk/program/src/stake/instruction.rs | 87 ++++ sdk/src/feature_set.rs | 5 + transaction-status/src/parse_stake.rs | 13 + 5 files changed, 900 insertions(+), 5 deletions(-) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 3d10506212a2f9..a0de27299a14fc 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -3,7 +3,7 @@ use { config, stake_state::{ authorize, authorize_with_seed, deactivate, deactivate_delinquent, delegate, - initialize, merge, set_lockup, split, withdraw, + initialize, merge, redelegate, set_lockup, split, withdraw, }, }, log::*, @@ -177,6 +177,7 @@ pub fn process_instruction( let config = config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; drop(config_account); delegate( + invoke_context, transaction_context, instruction_context, 0, @@ -424,6 +425,36 @@ pub fn process_instruction( Err(InstructionError::InvalidInstructionData) } } + 
Ok(StakeInstruction::Redelegate) => { + let mut me = get_stake_account()?; + if invoke_context + .feature_set + .is_active(&feature_set::stake_redelegate_instruction::id()) + { + instruction_context.check_number_of_instruction_accounts(3)?; + let config_account = + instruction_context.try_borrow_instruction_account(transaction_context, 3)?; + if !config::check_id(config_account.get_key()) { + return Err(InstructionError::InvalidArgument); + } + let config = + config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; + drop(config_account); + + redelegate( + invoke_context, + transaction_context, + instruction_context, + &mut me, + 1, + 2, + &config, + &signers, + ) + } else { + Err(InstructionError::InvalidInstructionData) + } + } Err(err) => { if !invoke_context.feature_set.is_active( &feature_set::add_get_minimum_delegation_instruction_to_stake_program::id(), @@ -463,14 +494,19 @@ mod tests { set_lockup_checked, AuthorizeCheckedWithSeedArgs, AuthorizeWithSeedArgs, LockupArgs, StakeError, }, - state::{Authorized, Lockup, StakeAuthorize}, + state::{Authorized, Lockup, StakeActivationStatus, StakeAuthorize}, MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION, }, stake_history::{StakeHistory, StakeHistoryEntry}, system_program, sysvar, }, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, - std::{borrow::BorrowMut, collections::HashSet, str::FromStr, sync::Arc}, + std::{ + borrow::{Borrow, BorrowMut}, + collections::HashSet, + str::FromStr, + sync::Arc, + }, test_case::test_case, }; @@ -853,6 +889,16 @@ mod tests { ), Err(InstructionError::InvalidAccountOwner), ); + process_instruction_as_one_arg( + &feature_set, + &instruction::redelegate( + &spoofed_stake_state_pubkey(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + )[2], + Err(InstructionError::InvalidAccountOwner), + ); } #[test_case(feature_set_old_behavior(); "old_behavior")] @@ -6823,4 +6869,609 @@ mod tests { 
Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()), ); } + + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_redelegate(feature_set: FeatureSet) { + let feature_set = Arc::new(feature_set); + + let minimum_delegation = crate::get_minimum_delegation(&feature_set); + let rent = Rent::default(); + let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + + let mut sysvar_cache_override = SysvarCache::default(); + sysvar_cache_override.set_stake_history(stake_history.clone()); + sysvar_cache_override.set_rent(rent); + sysvar_cache_override.set_clock(Clock { + epoch: current_epoch, + ..Clock::default() + }); + + let authorized_staker = Pubkey::new_unique(); + let vote_address = Pubkey::new_unique(); + let new_vote_address = Pubkey::new_unique(); + let stake_address = Pubkey::new_unique(); + let uninitialized_stake_address = Pubkey::new_unique(); + + let prepare_stake_account = |activation_epoch, expected_stake_activation_status| { + let initial_stake_delegation = minimum_delegation + rent_exempt_reserve; + let initial_stake_state = StakeState::Stake( + Meta { + authorized: Authorized { + staker: authorized_staker, + withdrawer: Pubkey::new_unique(), + }, + rent_exempt_reserve, + ..Meta::default() + }, + new_stake( + initial_stake_delegation, + &vote_address, + &VoteState::default(), + activation_epoch, + &stake_config::Config::default(), + ), + ); + + if let Some(expected_stake_activation_status) = expected_stake_activation_status { + assert_eq!( + expected_stake_activation_status, + initial_stake_state + .delegation() + .unwrap() + .stake_activating_and_deactivating(current_epoch, Some(&stake_history)) + ); + } + + AccountSharedData::new_data_with_space( + rent_exempt_reserve + initial_stake_delegation, /* lamports */ + &initial_stake_state, + StakeState::size_of(), + &id(), + ) + 
.unwrap() + }; + + let new_vote_account = AccountSharedData::new_data_with_space( + 1, /* lamports */ + &VoteStateVersions::new_current(VoteState::default()), + VoteState::size_of(), + &solana_vote_program::id(), + ) + .unwrap(); + + let process_instruction_redelegate = + |stake_address: &Pubkey, + stake_account: &AccountSharedData, + authorized_staker: &Pubkey, + vote_address: &Pubkey, + vote_account: &AccountSharedData, + uninitialized_stake_address: &Pubkey, + uninitialized_stake_account: &AccountSharedData, + expected_result| { + process_instruction_with_overrides( + &serialize(&StakeInstruction::Redelegate).unwrap(), + vec![ + (*stake_address, stake_account.clone()), + ( + *uninitialized_stake_address, + uninitialized_stake_account.clone(), + ), + (*vote_address, vote_account.clone()), + ( + stake_config::id(), + config::create_account(0, &stake_config::Config::default()), + ), + (*authorized_staker, AccountSharedData::default()), + ], + vec![ + AccountMeta { + pubkey: *stake_address, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: *uninitialized_stake_address, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: *vote_address, + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: stake_config::id(), + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: *authorized_staker, + is_signer: true, + is_writable: false, + }, + ], + Some(&sysvar_cache_override), + Some(Arc::clone(&feature_set)), + expected_result, + ) + }; + + // + // Failure: incorrect authorized staker + // + let stake_account = prepare_stake_account(0 /*activation_epoch*/, None); + let uninitialized_stake_account = + AccountSharedData::new(0 /* lamports */, StakeState::size_of(), &id()); + + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &Pubkey::new_unique(), // <-- Incorrect authorized staker + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + 
&uninitialized_stake_account, + Err(InstructionError::MissingRequiredSignature), + ); + + // + // Success: normal case + // + let output_accounts = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Ok(()), + ); + + assert_eq!(output_accounts[0].lamports(), rent_exempt_reserve); + if let StakeState::Stake(meta, stake) = + output_accounts[0].borrow().deserialize_data().unwrap() + { + assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); + assert_eq!( + stake.delegation.stake, + minimum_delegation + rent_exempt_reserve + ); + assert_eq!(stake.delegation.activation_epoch, 0); + assert_eq!(stake.delegation.deactivation_epoch, current_epoch); + } else { + panic!("Invalid output_accounts[0] data"); + } + assert_eq!( + output_accounts[1].lamports(), + minimum_delegation + rent_exempt_reserve + ); + if let StakeState::Stake(meta, stake) = + output_accounts[1].borrow().deserialize_data().unwrap() + { + assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); + assert_eq!(stake.delegation.stake, minimum_delegation); + assert_eq!(stake.delegation.activation_epoch, current_epoch); + assert_eq!(stake.delegation.deactivation_epoch, u64::MAX); + } else { + panic!("Invalid output_accounts[1] data"); + } + + // + // Variations of rescinding the deactivation of `stake_account` + // + let deactivated_stake_accounts = [ + ( + // Failure: insufficient stake in `stake_account` to even delegate normally + { + let mut deactivated_stake_account = output_accounts[0].clone(); + deactivated_stake_account + .checked_add_lamports(minimum_delegation - 1) + .unwrap(); + deactivated_stake_account + }, + Err(StakeError::InsufficientDelegation.into()), + ), + ( + // Failure: `stake_account` holds the "virtual stake" that's cooling now, with the + // real stake now warming up in `uninitialized_stake_account` + { + let mut deactivated_stake_account = 
output_accounts[0].clone(); + deactivated_stake_account + .checked_add_lamports(minimum_delegation) + .unwrap(); + deactivated_stake_account + }, + Err(StakeError::TooSoonToRedelegate.into()), + ), + ( + // Success: `stake_account` has been replenished with additional lamports to + // fully realize its "virtual stake" + { + let mut deactivated_stake_account = output_accounts[0].clone(); + deactivated_stake_account + .checked_add_lamports(minimum_delegation + rent_exempt_reserve) + .unwrap(); + deactivated_stake_account + }, + Ok(()), + ), + ( + // Failure: `stake_account` has been replenished with 1 lamport less than what's + // necessary to fully realize its "virtual stake" + { + let mut deactivated_stake_account = output_accounts[0].clone(); + deactivated_stake_account + .checked_add_lamports(minimum_delegation + rent_exempt_reserve - 1) + .unwrap(); + deactivated_stake_account + }, + Err(StakeError::TooSoonToRedelegate.into()), + ), + ]; + for (deactivated_stake_account, expected_result) in deactivated_stake_accounts { + let _ = process_instruction_with_overrides( + &serialize(&StakeInstruction::DelegateStake).unwrap(), + vec![ + (stake_address, deactivated_stake_account), + (vote_address, new_vote_account.clone()), + ( + sysvar::clock::id(), + account::create_account_shared_data_for_test(&Clock::default()), + ), + ( + sysvar::stake_history::id(), + account::create_account_shared_data_for_test(&StakeHistory::default()), + ), + ( + stake_config::id(), + config::create_account(0, &stake_config::Config::default()), + ), + (authorized_staker, AccountSharedData::default()), + ], + vec![ + AccountMeta { + pubkey: stake_address, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: vote_address, + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: sysvar::clock::id(), + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: sysvar::stake_history::id(), + is_signer: false, + is_writable: false, + }, + AccountMeta { + 
pubkey: stake_config::id(), + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: authorized_staker, + is_signer: true, + is_writable: false, + }, + ], + Some(&sysvar_cache_override), + Some(Arc::clone(&feature_set)), + expected_result, + ); + } + + // + // Success: `uninitialized_stake_account` starts with 42 extra lamports + // + let uninitialized_stake_account_with_extra_lamports = + AccountSharedData::new(42 /* lamports */, StakeState::size_of(), &id()); + let output_accounts = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account_with_extra_lamports, + Ok(()), + ); + + assert_eq!(output_accounts[0].lamports(), rent_exempt_reserve); + assert_eq!( + output_accounts[1].lamports(), + minimum_delegation + rent_exempt_reserve + 42 + ); + if let StakeState::Stake(meta, stake) = + output_accounts[1].borrow().deserialize_data().unwrap() + { + assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); + assert_eq!(stake.delegation.stake, minimum_delegation + 42); + assert_eq!(stake.delegation.activation_epoch, current_epoch); + assert_eq!(stake.delegation.deactivation_epoch, u64::MAX); + } else { + panic!("Invalid output_accounts[1] data"); + } + + // + // Success: `stake_account` is over-allocated and holds a greater than required `rent_exempt_reserve` + // + let mut stake_account_over_allocated = + prepare_stake_account(0 /*activation_epoch:*/, None); + if let StakeState::Stake(mut meta, stake) = stake_account_over_allocated + .borrow_mut() + .deserialize_data() + .unwrap() + { + meta.rent_exempt_reserve += 42; + stake_account_over_allocated + .set_state(&StakeState::Stake(meta, stake)) + .unwrap(); + } + stake_account_over_allocated + .checked_add_lamports(42) + .unwrap(); + assert_eq!( + stake_account_over_allocated.lamports(), + (minimum_delegation + rent_exempt_reserve) + (rent_exempt_reserve + 42), + ); + 
assert_eq!(uninitialized_stake_account.lamports(), 0); + let output_accounts = process_instruction_redelegate( + &stake_address, + &stake_account_over_allocated, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Ok(()), + ); + + assert_eq!(output_accounts[0].lamports(), rent_exempt_reserve + 42); + if let StakeState::Stake(meta, _stake) = + output_accounts[0].borrow().deserialize_data().unwrap() + { + assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve + 42); + } else { + panic!("Invalid output_accounts[0] data"); + } + assert_eq!( + output_accounts[1].lamports(), + minimum_delegation + rent_exempt_reserve, + ); + if let StakeState::Stake(meta, stake) = + output_accounts[1].borrow().deserialize_data().unwrap() + { + assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); + assert_eq!(stake.delegation.stake, minimum_delegation); + } else { + panic!("Invalid output_accounts[1] data"); + } + + // + // Failure: `uninitialized_stake_account` with invalid program id + // + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &AccountSharedData::new( + 0, /* lamports */ + StakeState::size_of(), + &Pubkey::new_unique(), // <-- Invalid program id + ), + Err(InstructionError::IncorrectProgramId), + ); + + // + // Failure: `uninitialized_stake_account` with size too small + // + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &AccountSharedData::new(0 /* lamports */, StakeState::size_of() - 1, &id()), // <-- size too small + Err(InstructionError::InvalidAccountData), + ); + + // + // Failure: `uninitialized_stake_account` with size too large + // + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + 
&new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &AccountSharedData::new(0 /* lamports */, StakeState::size_of() + 1, &id()), // <-- size too large + Err(InstructionError::InvalidAccountData), + ); + + // + // Failure: `uninitialized_stake_account` with initialized stake account + // + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &stake_account.clone(), // <-- Initialized stake account + Err(InstructionError::AccountAlreadyInitialized), + ); + + // + // Failure: invalid `new_vote_account` + // + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &new_vote_address, + &uninitialized_stake_account.clone(), // <-- Invalid vote account + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(InstructionError::IncorrectProgramId), + ); + + // + // Failure: invalid `stake_account` + // + let _ = process_instruction_redelegate( + &stake_address, + &uninitialized_stake_account.clone(), // <-- Uninitialized stake account + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(InstructionError::InvalidAccountData), + ); + + // + // Failure: stake is inactive, activating or deactivating + // + let inactive_stake_account = prepare_stake_account( + current_epoch + 1, /*activation_epoch*/ + Some(StakeActivationStatus { + effective: 0, + activating: 0, + deactivating: 0, + }), + ); + let _ = process_instruction_redelegate( + &stake_address, + &inactive_stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(StakeError::RedelegateTransientOrInactiveStake.into()), + ); + + let activating_stake_account = prepare_stake_account( + current_epoch, /*activation_epoch*/ + Some(StakeActivationStatus { + 
effective: 0, + activating: minimum_delegation + rent_exempt_reserve, + deactivating: 0, + }), + ); + let _ = process_instruction_redelegate( + &stake_address, + &activating_stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(StakeError::RedelegateTransientOrInactiveStake.into()), + ); + + let mut deactivating_stake_account = + prepare_stake_account(0 /*activation_epoch:*/, None); + if let StakeState::Stake(meta, mut stake) = deactivating_stake_account + .borrow_mut() + .deserialize_data() + .unwrap() + { + stake.deactivate(current_epoch).unwrap(); + assert_eq!( + StakeActivationStatus { + effective: minimum_delegation + rent_exempt_reserve, + activating: 0, + deactivating: minimum_delegation + rent_exempt_reserve, + }, + stake + .delegation + .stake_activating_and_deactivating(current_epoch, Some(&stake_history)) + ); + + deactivating_stake_account + .set_state(&StakeState::Stake(meta, stake)) + .unwrap(); + } + let _ = process_instruction_redelegate( + &stake_address, + &deactivating_stake_account, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(StakeError::RedelegateTransientOrInactiveStake.into()), + ); + + // + // Failure: `stake_account` has insufficient stake + // (less than `minimum_delegation + rent_exempt_reserve`) + // + let mut stake_account_too_few_lamports = stake_account.clone(); + if let StakeState::Stake(meta, mut stake) = stake_account_too_few_lamports + .borrow_mut() + .deserialize_data() + .unwrap() + { + stake.delegation.stake -= 1; + assert_eq!( + stake.delegation.stake, + minimum_delegation + rent_exempt_reserve - 1 + ); + stake_account_too_few_lamports + .set_state(&StakeState::Stake(meta, stake)) + .unwrap(); + } else { + panic!("Invalid stake_account"); + } + stake_account_too_few_lamports + .checked_sub_lamports(1) + .unwrap(); + assert_eq!( + 
stake_account_too_few_lamports.lamports(), + minimum_delegation + 2 * rent_exempt_reserve - 1 + ); + + let _ = process_instruction_redelegate( + &stake_address, + &stake_account_too_few_lamports, + &authorized_staker, + &new_vote_address, + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(StakeError::InsufficientDelegation.into()), + ); + + // + // Failure: redelegate to same vote address + // + let _ = process_instruction_redelegate( + &stake_address, + &stake_account, + &authorized_staker, + &vote_address, // <-- Same vote address + &new_vote_account, + &uninitialized_stake_address, + &uninitialized_stake_account, + Err(StakeError::RedelegateToSameVoteAccount.into()), + ); + } } diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index ddd87cd3cdaa7d..a3d55e3d10ca5a 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -94,7 +94,8 @@ pub fn meta_from(account: &AccountSharedData) -> Option { from(account).and_then(|state: StakeState| state.meta()) } -fn redelegate( +fn redelegate_stake( + invoke_context: &InvokeContext, stake: &mut Stake, stake_lamports: u64, voter_pubkey: &Pubkey, @@ -105,11 +106,25 @@ fn redelegate( ) -> Result<(), StakeError> { // If stake is currently active: if stake.stake(clock.epoch, Some(stake_history)) != 0 { + let stake_lamports_ok = if invoke_context + .feature_set + .is_active(&feature_set::stake_redelegate_instruction::id()) + { + // When a stake account is redelegated, the delegated lamports from the source stake + // account are transferred to a new stake account. Do not permit the deactivation of + // the source stake account to be rescinded, by more generally requiring the delegation + // be configured with the expected amount of stake lamports before rescinding. 
+ stake_lamports >= stake.delegation.stake + } else { + true + }; + // If pubkey of new voter is the same as current, // and we are scheduled to start deactivating this epoch, // we rescind deactivation if stake.delegation.voter_pubkey == *voter_pubkey && clock.epoch == stake.delegation.deactivation_epoch + && stake_lamports_ok { stake.delegation.deactivation_epoch = std::u64::MAX; return Ok(()); @@ -556,7 +571,9 @@ pub fn authorize_with_seed( ) } +#[allow(clippy::too_many_arguments)] pub fn delegate( + invoke_context: &InvokeContext, transaction_context: &TransactionContext, instruction_context: &InstructionContext, stake_account_index: usize, @@ -596,7 +613,8 @@ pub fn delegate( meta.authorized.check(signers, StakeAuthorize::Staker)?; let ValidatedDelegatedInfo { stake_amount } = validate_delegated_amount(&stake_account, &meta, feature_set)?; - redelegate( + redelegate_stake( + invoke_context, &mut stake, stake_amount, &vote_pubkey, @@ -856,6 +874,127 @@ pub fn merge( Ok(()) } +pub fn redelegate( + invoke_context: &InvokeContext, + transaction_context: &TransactionContext, + instruction_context: &InstructionContext, + stake_account: &mut BorrowedAccount, + uninitialized_stake_account_index: usize, + vote_account_index: usize, + config: &Config, + signers: &HashSet, +) -> Result<(), InstructionError> { + let clock = invoke_context.get_sysvar_cache().get_clock()?; + + // ensure `uninitialized_stake_account_index` is in the uninitialized state + let mut uninitialized_stake_account = instruction_context + .try_borrow_instruction_account(transaction_context, uninitialized_stake_account_index)?; + if *uninitialized_stake_account.get_owner() != id() { + ic_msg!( + invoke_context, + "expected uninitialized stake account owner to be {}, not {}", + id(), + *uninitialized_stake_account.get_owner() + ); + return Err(InstructionError::IncorrectProgramId); + } + if uninitialized_stake_account.get_data().len() != StakeState::size_of() { + ic_msg!( + invoke_context, + "expected 
uninitialized stake account data len to be {}, not {}", + StakeState::size_of(), + uninitialized_stake_account.get_data().len() + ); + return Err(InstructionError::InvalidAccountData); + } + if !matches!( + uninitialized_stake_account.get_state()?, + StakeState::Uninitialized + ) { + ic_msg!( + invoke_context, + "expected uninitialized stake account to be uninitialized", + ); + return Err(InstructionError::AccountAlreadyInitialized); + } + + // validate the provided vote account + let vote_account = instruction_context + .try_borrow_instruction_account(transaction_context, vote_account_index)?; + if *vote_account.get_owner() != solana_vote_program::id() { + ic_msg!( + invoke_context, + "expected vote account owner to be {}, not {}", + solana_vote_program::id(), + *vote_account.get_owner() + ); + return Err(InstructionError::IncorrectProgramId); + } + let vote_pubkey = *vote_account.get_key(); + let vote_state = vote_account.get_state::()?; + + let (stake_meta, effective_stake) = + if let StakeState::Stake(meta, stake) = stake_account.get_state()? { + let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; + let status = stake + .delegation + .stake_activating_and_deactivating(clock.epoch, Some(&stake_history)); + if status.effective == 0 || status.activating != 0 || status.deactivating != 0 { + ic_msg!(invoke_context, "stake is not active"); + return Err(StakeError::RedelegateTransientOrInactiveStake.into()); + } + + // Deny redelegating to the same vote account. 
This is nonsensical and could be used to + // grief the global stake warm-up/cool-down rate + if stake.delegation.voter_pubkey == vote_pubkey { + ic_msg!( + invoke_context, + "redelegating to the same vote account not permitted" + ); + return Err(StakeError::RedelegateToSameVoteAccount.into()); + } + + (meta, status.effective) + } else { + ic_msg!(invoke_context, "invalid stake account data",); + return Err(InstructionError::InvalidAccountData); + }; + + // deactivate `stake_account` + // + // Note: This function also ensures `signers` contains the `StakeAuthorize::Staker` + deactivate(stake_account, &clock, signers)?; + + // transfer the effective stake to the uninitialized stake account + stake_account.checked_sub_lamports(effective_stake)?; + uninitialized_stake_account.checked_add_lamports(effective_stake)?; + + // initialize and schedule `uninitialized_stake_account` for activation + let sysvar_cache = invoke_context.get_sysvar_cache(); + let rent = sysvar_cache.get_rent()?; + let mut uninitialized_stake_meta = stake_meta; + uninitialized_stake_meta.rent_exempt_reserve = + rent.minimum_balance(uninitialized_stake_account.get_data().len()); + + let ValidatedDelegatedInfo { stake_amount } = validate_delegated_amount( + &uninitialized_stake_account, + &uninitialized_stake_meta, + &invoke_context.feature_set, + )?; + uninitialized_stake_account.set_state(&StakeState::Stake( + uninitialized_stake_meta, + new_stake( + stake_amount, + &vote_pubkey, + &vote_state.convert_to_current(), + clock.epoch, + config, + ), + ))?; + + Ok(()) +} + #[allow(clippy::too_many_arguments)] pub fn withdraw( transaction_context: &TransactionContext, diff --git a/sdk/program/src/stake/instruction.rs b/sdk/program/src/stake/instruction.rs index 9a7601a640807f..67d784c38d4a43 100644 --- a/sdk/program/src/stake/instruction.rs +++ b/sdk/program/src/stake/instruction.rs @@ -60,6 +60,12 @@ pub enum StakeError { #[error("delegation amount is less than the minimum")] InsufficientDelegation, + + 
#[error("stake account with transient or inactive stake cannot be redelegated")] + RedelegateTransientOrInactiveStake, + + #[error("stake redelegation to the same vote account is not permitted")] + RedelegateToSameVoteAccount, } impl DecodeError for StakeError { @@ -261,6 +267,28 @@ pub enum StakeInstruction { /// 2. `[]` Reference vote account that has voted at least once in the last /// `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` epochs DeactivateDelinquent, + + /// Redelegate activated stake to another vote account. + /// + /// Upon success: + /// * the balance of the delegated stake account will be reduced to the undelegated amount in + /// the account (rent exempt minimum and any additional lamports not part of the delegation), + /// and scheduled for deactivation. + /// * the provided uninitialized stake account will receive the original balance of the + /// delegated stake account, minus the rent exempt minimum, and scheduled for activation to + /// the provided vote account. Any existing lamports in the uninitialized stake account + /// will also be included in the re-delegation. + /// + /// # Account references + /// 0. `[WRITE]` Delegated stake account to be redelegated. The account must be fully + /// activated and carry a balance greater than or equal to the minimum delegation amount + /// plus rent exempt minimum + /// 1. `[WRITE]` Uninitialized stake account that will hold the redelegated stake + /// 2. `[]` Vote account to which this stake will be re-delegated + /// 3. `[]` Address of config account that carries stake config + /// 4. 
`[SIGNER]` Stake authority + /// + Redelegate, } #[derive(Default, Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] @@ -738,6 +766,65 @@ pub fn deactivate_delinquent_stake( Instruction::new_with_bincode(id(), &StakeInstruction::DeactivateDelinquent, account_metas) } +fn _redelegate( + stake_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + vote_pubkey: &Pubkey, + uninitialized_stake_pubkey: &Pubkey, +) -> Instruction { + let account_metas = vec![ + AccountMeta::new(*stake_pubkey, false), + AccountMeta::new(*uninitialized_stake_pubkey, false), + AccountMeta::new_readonly(*vote_pubkey, false), + AccountMeta::new_readonly(config::id(), false), + AccountMeta::new_readonly(*authorized_pubkey, true), + ]; + Instruction::new_with_bincode(id(), &StakeInstruction::Redelegate, account_metas) +} + +pub fn redelegate( + stake_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + vote_pubkey: &Pubkey, + uninitialized_stake_pubkey: &Pubkey, +) -> Vec { + vec![ + system_instruction::allocate(uninitialized_stake_pubkey, StakeState::size_of() as u64), + system_instruction::assign(uninitialized_stake_pubkey, &id()), + _redelegate( + stake_pubkey, + authorized_pubkey, + vote_pubkey, + uninitialized_stake_pubkey, + ), + ] +} + +pub fn redelegate_with_seed( + stake_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + vote_pubkey: &Pubkey, + uninitialized_stake_pubkey: &Pubkey, // derived using create_with_seed() + base: &Pubkey, // base + seed: &str, // seed +) -> Vec { + vec![ + system_instruction::allocate_with_seed( + uninitialized_stake_pubkey, + base, + seed, + StakeState::size_of() as u64, + &id(), + ), + _redelegate( + stake_pubkey, + authorized_pubkey, + vote_pubkey, + uninitialized_stake_pubkey, + ), + ] +} + #[cfg(test)] mod tests { use {super::*, crate::instruction::InstructionError}; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 6708fcf417d5f3..899120dada206e 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -287,6 +287,10 @@ pub mod 
stake_deactivate_delinquent_instruction { solana_sdk::declare_id!("437r62HoAdUb63amq3D7ENnBLDhHT2xY8eFkLJYVKK4x"); } +pub mod stake_redelegate_instruction { + solana_sdk::declare_id!("3EPmAX94PvVJCjMeFfRFvj4avqCPL8vv3TGsZQg7ydMx"); +} + pub mod vote_withdraw_authority_may_change_authorized_voter { solana_sdk::declare_id!("AVZS3ZsN4gi6Rkx2QUibYuSJG3S6QHib7xCYhG6vGJxU"); } @@ -586,6 +590,7 @@ lazy_static! { (nonce_must_be_advanceable::id(), "durable nonces must be advanceable"), (vote_authorize_with_seed::id(), "An instruction you can use to change a vote accounts authority when the current authority is a derived key #25860"), (cap_accounts_data_size_per_block::id(), "cap the accounts data size per block #25517"), + (stake_redelegate_instruction::id(), "enable the redelegate stake instruction #26294"), (preserve_rent_epoch_for_rent_exempt_accounts::id(), "preserve rent epoch for rent exempt accounts #26479"), (enable_bpf_loader_extend_program_data_ix::id(), "enable bpf upgradeable loader ExtendProgramData instruction #25234"), (enable_early_verification_of_account_modifications::id(), "enable early verification of account modifications #25899"), diff --git a/transaction-status/src/parse_stake.rs b/transaction-status/src/parse_stake.rs index f1f9ff86fa8438..a973643ed26181 100644 --- a/transaction-status/src/parse_stake.rs +++ b/transaction-status/src/parse_stake.rs @@ -284,6 +284,19 @@ pub fn parse_stake( }), }) } + StakeInstruction::Redelegate => { + check_num_stake_accounts(&instruction.accounts, 4)?; + Ok(ParsedInstructionEnum { + instruction_type: "redelegate".to_string(), + info: json!({ + "stakeAccount": account_keys[instruction.accounts[0] as usize].to_string(), + "newStakeAccount": account_keys[instruction.accounts[1] as usize].to_string(), + "voteAccount": account_keys[instruction.accounts[2] as usize].to_string(), + "stakeConfigAccount": account_keys[instruction.accounts[3] as usize].to_string(), + "stakeAuthority": account_keys[instruction.accounts[4] as 
usize].to_string(), + }), + }) + } } } From b660ac511dcb29c3455d9ae87cb5d841b9edb96f Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Wed, 29 Jun 2022 10:03:44 -0700 Subject: [PATCH 003/192] cli: Add stake redelegation support --- cli/src/cli.rs | 6 + cli/src/stake.rs | 140 +++++++++++++++++++++- cli/src/test_utils.rs | 18 ++- cli/tests/stake.rs | 265 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 420 insertions(+), 9 deletions(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index d4e580d7ed8416..d37d2ef87c110e 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -214,6 +214,7 @@ pub enum CliCommand { nonce_authority: SignerIndex, memo: Option, fee_payer: SignerIndex, + redelegation_stake_account_pubkey: Option, }, SplitStake { stake_account_pubkey: Pubkey, @@ -683,6 +684,9 @@ pub fn parse_command( ("delegate-stake", Some(matches)) => { parse_stake_delegate_stake(matches, default_signer, wallet_manager) } + ("redelegate-stake", Some(matches)) => { + parse_stake_delegate_stake(matches, default_signer, wallet_manager) + } ("withdraw-stake", Some(matches)) => { parse_stake_withdraw_stake(matches, default_signer, wallet_manager) } @@ -1136,6 +1140,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { nonce_authority, memo, fee_payer, + redelegation_stake_account_pubkey, } => process_delegate_stake( &rpc_client, config, @@ -1150,6 +1155,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *nonce_authority, memo.as_ref(), *fee_payer, + redelegation_stake_account_pubkey.as_ref(), ), CliCommand::SplitStake { stake_account_pubkey, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 769bfd94fb3da9..2ab885d35b596a 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -285,6 +285,51 @@ impl StakeSubCommands for App<'_, '_> { .arg(fee_payer_arg()) .arg(memo_arg()) ) + .subcommand( + SubCommand::with_name("redelegate-stake") + .about("Redelegate active stake to another vote account") + .arg( + Arg::with_name("force") + .long("force") 
+ .takes_value(false) + .hidden(true) // Don't document this argument to discourage its use + .help("Override vote account sanity checks (use carefully!)") + ) + .arg( + pubkey!(Arg::with_name("stake_account_pubkey") + .index(1) + .value_name("STAKE_ACCOUNT_ADDRESS") + .required(true), + "Existing delegated stake account that has been fully activated. \ + On success this stake account will be scheduled for deactivation and the rent-exempt balance \ + may be withdrawn once fully deactivated") + ) + .arg( + pubkey!(Arg::with_name("vote_account_pubkey") + .index(2) + .value_name("REDELEGATED_VOTE_ACCOUNT_ADDRESS") + .required(true), + "The vote account to which the stake will be redelegated") + ) + .arg( + Arg::with_name("redelegation_stake_account") + .index(3) + .value_name("REDELEGATION_STAKE_ACCOUNT") + .takes_value(true) + .required(true) + .validator(is_valid_signer) + .help("Stake account to create for the redelegation. \ + On success this stake account will be created and scheduled for activation with all \ + the stake in the existing stake account, exclusive of the rent-exempt balance retained \ + in the existing account") + ) + .arg(stake_authority_arg()) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) + .arg(memo_arg()) + ) + .subcommand( SubCommand::with_name("stake-authorize") .about("Authorize a new signing keypair for the given stake account") @@ -753,6 +798,8 @@ pub fn parse_stake_delegate_stake( pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap(); let vote_account_pubkey = pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap(); + let (redelegation_stake_account, redelegation_stake_account_pubkey) = + signer_of(matches, "redelegation_stake_account", wallet_manager)?; let force = matches.is_present("force"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); @@ -765,7 +812,7 @@ pub fn 
parse_stake_delegate_stake( signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; - let mut bulk_signers = vec![stake_authority, fee_payer]; + let mut bulk_signers = vec![stake_authority, fee_payer, redelegation_stake_account]; if nonce_account.is_some() { bulk_signers.push(nonce_authority); } @@ -785,6 +832,7 @@ pub fn parse_stake_delegate_stake( nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), + redelegation_stake_account_pubkey, }, signers: signer_info.signers, }) @@ -2414,11 +2462,28 @@ pub fn process_delegate_stake( nonce_authority: SignerIndex, memo: Option<&String>, fee_payer: SignerIndex, + redelegation_stake_account_pubkey: Option<&Pubkey>, ) -> ProcessResult { check_unique_pubkeys( (&config.signers[0].pubkey(), "cli keypair".to_string()), (stake_account_pubkey, "stake_account_pubkey".to_string()), )?; + if let Some(redelegation_stake_account_pubkey) = &redelegation_stake_account_pubkey { + check_unique_pubkeys( + (stake_account_pubkey, "stake_account_pubkey".to_string()), + ( + redelegation_stake_account_pubkey, + "redelegation_stake_account".to_string(), + ), + )?; + check_unique_pubkeys( + (&config.signers[0].pubkey(), "cli keypair".to_string()), + ( + redelegation_stake_account_pubkey, + "redelegation_stake_account".to_string(), + ), + )?; + } let stake_authority = config.signers[stake_authority]; if !sign_only { @@ -2471,12 +2536,22 @@ pub fn process_delegate_stake( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; - let ixs = vec![stake_instruction::delegate_stake( - stake_account_pubkey, - &stake_authority.pubkey(), - vote_account_pubkey, - )] + let ixs = if let Some(redelegation_stake_account_pubkey) = &redelegation_stake_account_pubkey { + stake_instruction::redelegate( + stake_account_pubkey, + &stake_authority.pubkey(), + 
vote_account_pubkey, + redelegation_stake_account_pubkey, + ) + } else { + vec![stake_instruction::delegate_stake( + stake_account_pubkey, + &stake_authority.pubkey(), + vote_account_pubkey, + )] + } .with_memo(memo); + let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -3867,6 +3942,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -3898,6 +3974,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -3931,6 +4008,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -3965,6 +4043,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -3994,6 +4073,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -4033,6 +4113,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 1, + redelegation_stake_account_pubkey: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -4081,6 +4162,7 @@ mod tests { nonce_authority: 2, memo: None, fee_payer: 1, + redelegation_stake_account_pubkey: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -4117,6 +4199,7 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 1, + redelegation_stake_account_pubkey: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -4125,6 +4208,51 @@ mod tests { } ); + // Test RedelegateStake Subcommand 
(minimal test due to the significant implementation + // overlap with DelegateStake) + let (redelegation_stake_account_keypair_file, mut redelegation_stake_account_tmp_file) = + make_tmp_file(); + let redelegation_stake_account_keypair = Keypair::new(); + write_keypair( + &redelegation_stake_account_keypair, + redelegation_stake_account_tmp_file.as_file_mut(), + ) + .unwrap(); + let redelegation_stake_account_pubkey = redelegation_stake_account_keypair.pubkey(); + + let test_redelegate_stake = test_commands.clone().get_matches_from(vec![ + "test", + "redelegate-stake", + &stake_account_string, + &vote_account_string, + &redelegation_stake_account_keypair_file, + ]); + assert_eq!( + parse_command(&test_redelegate_stake, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::DelegateStake { + stake_account_pubkey, + vote_account_pubkey, + stake_authority: 0, + force: false, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + redelegation_stake_account_pubkey: Some(redelegation_stake_account_pubkey), + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&redelegation_stake_account_keypair_file) + .unwrap() + .into() + ], + } + ); + // Test WithdrawStake Subcommand let test_withdraw_stake = test_commands.clone().get_matches_from(vec![ "test", diff --git a/cli/src/test_utils.rs b/cli/src/test_utils.rs index 082f92d789c3ff..287ebce27746c9 100644 --- a/cli/src/test_utils.rs +++ b/cli/src/test_utils.rs @@ -1,6 +1,9 @@ use { solana_client::rpc_client::RpcClient, - solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig}, + solana_sdk::{ + clock::{Epoch, DEFAULT_MS_PER_SLOT}, + commitment_config::CommitmentConfig, + }, std::{thread::sleep, time::Duration}, }; @@ -35,3 +38,16 @@ pub fn check_ready(rpc_client: &RpcClient) { 
sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); } } + +pub fn wait_for_next_epoch(rpc_client: &RpcClient) -> Epoch { + let current_epoch = rpc_client.get_epoch_info().unwrap().epoch; + println!("waiting for epoch {}", current_epoch + 1); + loop { + sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); + + let next_epoch = rpc_client.get_epoch_info().unwrap().epoch; + if next_epoch > current_epoch { + return next_epoch; + } + } +} diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index 2fda3af9b15651..e3e6bde9ccbcd4 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -6,21 +6,25 @@ use { cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}, spend_utils::SpendAmount, stake::StakeAuthorizationIndexed, - test_utils::check_ready, + test_utils::{check_ready, wait_for_next_epoch}, }, solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_client::{ blockhash_query::{self, BlockhashQuery}, nonce_utils, rpc_client::RpcClient, + rpc_response::{RpcStakeActivation, StakeActivationState}, }, solana_faucet::faucet::run_local_faucet, solana_sdk::{ account_utils::StateMut, commitment_config::CommitmentConfig, + epoch_schedule::EpochSchedule, fee::FeeStructure, + fee_calculator::FeeRateGovernor, nonce::State as NonceState, pubkey::Pubkey, + rent::Rent, signature::{keypair_from_seed, Keypair, Signer}, stake::{ self, @@ -29,9 +33,259 @@ use { }, }, solana_streamer::socket::SocketAddrSpace, - solana_test_validator::TestValidator, + solana_test_validator::{TestValidator, TestValidatorGenesis}, }; +#[test] +fn test_stake_redelegation() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let authorized_withdrawer = Keypair::new().pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + + let slots_per_epoch = 32; + let test_validator = TestValidatorGenesis::default() + .fee_rate_governor(FeeRateGovernor::new(0, 0)) + .rent(Rent { + lamports_per_byte_year: 1, + exemption_threshold: 1.0, + 
..Rent::default() + }) + .epoch_schedule(EpochSchedule::custom( + slots_per_epoch, + slots_per_epoch, + /* enable_warmup_epochs = */ false, + )) + .faucet_addr(Some(faucet_addr)) + .start_with_mint_address(mint_pubkey, SocketAddrSpace::Unspecified) + .expect("validator start failed"); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + let default_signer = Keypair::new(); + + let mut config = CliConfig::recent_for_tests(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&default_signer]; + + request_and_confirm_airdrop( + &rpc_client, + &config, + &config.signers[0].pubkey(), + 100_000_000_000, + ) + .unwrap(); + + // Create vote account + let vote_keypair = Keypair::new(); + config.signers = vec![&default_signer, &vote_keypair]; + config.command = CliCommand::CreateVoteAccount { + vote_account: 1, + seed: None, + identity_account: 0, + authorized_voter: None, + authorized_withdrawer, + commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config).unwrap(); + + // Create second vote account + let vote2_keypair = Keypair::new(); + config.signers = vec![&default_signer, &vote2_keypair]; + config.command = CliCommand::CreateVoteAccount { + vote_account: 1, + seed: None, + identity_account: 0, + authorized_voter: None, + authorized_withdrawer, + commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config).unwrap(); + + // Create stake account + let stake_keypair = Keypair::new(); + config.signers = vec![&default_signer, &stake_keypair]; + config.command = CliCommand::CreateStakeAccount { + stake_account: 1, + 
seed: None, + staker: None, + withdrawer: None, + withdrawer_signer: None, + lockup: Lockup::default(), + amount: SpendAmount::Some(50_000_000_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + from: 0, + }; + process_command(&config).unwrap(); + + // Delegate stake to `vote_keypair` + config.signers = vec![&default_signer]; + config.command = CliCommand::DelegateStake { + stake_account_pubkey: stake_keypair.pubkey(), + vote_account_pubkey: vote_keypair.pubkey(), + stake_authority: 0, + force: true, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + redelegation_stake_account_pubkey: None, + }; + process_command(&config).unwrap(); + + // wait for new epoch + wait_for_next_epoch(&rpc_client); + + // `stake_keypair` should now be delegated to `vote_keypair` and fully activated + let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); + let stake_state: StakeState = stake_account.state().unwrap(); + + let rent_exempt_reserve = match stake_state { + StakeState::Stake(meta, stake) => { + assert_eq!(stake.delegation.voter_pubkey, vote_keypair.pubkey()); + meta.rent_exempt_reserve + } + _ => panic!("Unexpected stake state!"), + }; + + assert_eq!( + rpc_client + .get_stake_activation(stake_keypair.pubkey(), None) + .unwrap(), + RpcStakeActivation { + state: StakeActivationState::Active, + active: 50_000_000_000 - rent_exempt_reserve, + inactive: 0 + } + ); + check_balance!(50_000_000_000, &rpc_client, &stake_keypair.pubkey()); + + let stake2_keypair = Keypair::new(); + + // Add an extra `rent_exempt_reserve` amount into `stake2_keypair` before redelegation to + // account for the `rent_exempt_reserve` balance that'll be pealed off the stake during the + // redelegation 
process + request_and_confirm_airdrop( + &rpc_client, + &config, + &stake2_keypair.pubkey(), + rent_exempt_reserve, + ) + .unwrap(); + + // wait for a new epoch to ensure the `Redelegate` happens as soon as possible in the epoch + // to reduce the risk of a race condition when checking the stake account correctly enters the + // deactivating state for the remainder of the current epoch + wait_for_next_epoch(&rpc_client); + + // Redelegate to `vote2_keypair` via `stake2_keypair + config.signers = vec![&default_signer, &stake2_keypair]; + config.command = CliCommand::DelegateStake { + stake_account_pubkey: stake_keypair.pubkey(), + vote_account_pubkey: vote2_keypair.pubkey(), + stake_authority: 0, + force: true, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + redelegation_stake_account_pubkey: Some(stake2_keypair.pubkey()), + }; + process_command(&config).unwrap(); + + // `stake_keypair` should now be deactivating + assert_eq!( + rpc_client + .get_stake_activation(stake_keypair.pubkey(), None) + .unwrap(), + RpcStakeActivation { + state: StakeActivationState::Deactivating, + active: 50_000_000_000 - rent_exempt_reserve, + inactive: 0, + } + ); + + // `stake_keypair2` should now be activating + assert_eq!( + rpc_client + .get_stake_activation(stake2_keypair.pubkey(), None) + .unwrap(), + RpcStakeActivation { + state: StakeActivationState::Activating, + active: 0, + inactive: 50_000_000_000 - rent_exempt_reserve, + } + ); + + // check that all the stake, save `rent_exempt_reserve`, have been moved from `stake_keypair` + // to `stake2_keypair` + check_balance!(rent_exempt_reserve, &rpc_client, &stake_keypair.pubkey()); + check_balance!(50_000_000_000, &rpc_client, &stake2_keypair.pubkey()); + + // wait for new epoch + wait_for_next_epoch(&rpc_client); + + // `stake_keypair` should now be deactivated + assert_eq!( + rpc_client + 
.get_stake_activation(stake_keypair.pubkey(), None) + .unwrap(), + RpcStakeActivation { + state: StakeActivationState::Inactive, + active: 0, + inactive: 0, + } + ); + + // `stake2_keypair` should now be delegated to `vote2_keypair` and fully activated + let stake2_account = rpc_client.get_account(&stake2_keypair.pubkey()).unwrap(); + let stake2_state: StakeState = stake2_account.state().unwrap(); + + match stake2_state { + StakeState::Stake(_meta, stake) => { + assert_eq!(stake.delegation.voter_pubkey, vote2_keypair.pubkey()); + } + _ => panic!("Unexpected stake2 state!"), + }; + + assert_eq!( + rpc_client + .get_stake_activation(stake2_keypair.pubkey(), None) + .unwrap(), + RpcStakeActivation { + state: StakeActivationState::Active, + active: 50_000_000_000 - rent_exempt_reserve, + inactive: 0 + } + ); +} + #[test] fn test_stake_delegation_force() { let mint_keypair = Keypair::new(); @@ -113,6 +367,7 @@ fn test_stake_delegation_force() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; process_command(&config).unwrap_err(); @@ -129,6 +384,7 @@ fn test_stake_delegation_force() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; process_command(&config).unwrap(); } @@ -205,6 +461,7 @@ fn test_seed_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; process_command(&config_validator).unwrap(); @@ -293,6 +550,7 @@ fn test_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; process_command(&config_validator).unwrap(); @@ -405,6 +663,7 @@ fn test_offline_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); @@ -426,6 +685,7 @@ fn 
test_offline_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; process_command(&config_payer).unwrap(); @@ -558,6 +818,7 @@ fn test_nonced_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, + redelegation_stake_account_pubkey: None, }; process_command(&config).unwrap(); From 2481ebb1501f1f02937b1b1ea925943254125af2 Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Wed, 27 Jul 2022 19:44:52 -0700 Subject: [PATCH 004/192] Add explicit comment about get_stake_account to StakeInstruction enum (#26824) Add explicit comment about get_stake_account --- programs/stake/src/stake_instruction.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index a0de27299a14fc..84cd9a4f965cd1 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -455,6 +455,20 @@ pub fn process_instruction( Err(InstructionError::InvalidInstructionData) } } + // In order to prevent consensus issues, any new StakeInstruction variant added before the + // `add_get_minimum_delegation_instruction_to_stake_program` is activated needs to check + // the validity of the stake account by calling the `get_stake_account()` method outside + // its own feature gate, as per the following pattern: + // ``` + // Ok(StakeInstruction::Variant) -> { + // let mut me = get_stake_account()?; + // if invoke_context + // .feature_set + // .is_active(&feature_set::stake_variant_feature::id()) { .. 
} + // } + // ``` + // TODO: Remove this comment when `add_get_minimum_delegation_instruction_to_stake_program` + // is cleaned up Err(err) => { if !invoke_context.feature_set.is_active( &feature_set::add_get_minimum_delegation_instruction_to_stake_program::id(), From 817f65bb503d72130bca8e7e2b5b31cb73980188 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 28 Jul 2022 09:46:34 -0500 Subject: [PATCH 005/192] add full_snapshot to hash config (#26811) --- core/src/accounts_hash_verifier.rs | 2 ++ runtime/src/accounts_background_service.rs | 1 + runtime/src/accounts_db.rs | 3 +++ runtime/src/accounts_hash.rs | 12 ++++++++++++ 4 files changed, 18 insertions(+) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 9ca5b88eb84287..ae8f0dbe780aae 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -142,6 +142,7 @@ impl AccountsHashVerifier { epoch_schedule: &accounts_package.epoch_schedule, rent_collector: &accounts_package.rent_collector, store_detailed_debug_info_on_failure: false, + full_snapshot: None, }, &sorted_storages, timings, @@ -164,6 +165,7 @@ impl AccountsHashVerifier { rent_collector: &accounts_package.rent_collector, // now that we've failed, store off the failing contents that produced a bad capitalization store_detailed_debug_info_on_failure: true, + full_snapshot: None, }, &sorted_storages, HashStats::default(), diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 5f5a429708b7c6..285f47c8cd3104 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -241,6 +241,7 @@ impl SnapshotRequestHandler { epoch_schedule: snapshot_root_bank.epoch_schedule(), rent_collector: snapshot_root_bank.rent_collector(), store_detailed_debug_info_on_failure: false, + full_snapshot: None, }, ).unwrap(); assert_eq!(previous_hash, this_hash); diff --git a/runtime/src/accounts_db.rs 
b/runtime/src/accounts_db.rs index 3e47395e2d888c..0b1947dd1bc5d7 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6742,6 +6742,7 @@ impl AccountsDb { epoch_schedule, rent_collector, store_detailed_debug_info_on_failure: false, + full_snapshot: None, }, expected_capitalization, ) @@ -7023,6 +7024,7 @@ impl AccountsDb { epoch_schedule, rent_collector, store_detailed_debug_info_on_failure: false, + full_snapshot: None, }, None, )?; @@ -11626,6 +11628,7 @@ pub mod tests { epoch_schedule: &EPOCH_SCHEDULE, rent_collector: &RENT_COLLECTOR, store_detailed_debug_info_on_failure: false, + full_snapshot: None, } } } diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs index a0ce69a5531b57..ee1b96776fc305 100644 --- a/runtime/src/accounts_hash.rs +++ b/runtime/src/accounts_hash.rs @@ -6,6 +6,7 @@ use { solana_sdk::{ hash::{Hash, Hasher}, pubkey::Pubkey, + slot_history::Slot, sysvar::epoch_schedule::EpochSchedule, }, std::{ @@ -27,6 +28,15 @@ pub struct PreviousPass { pub lamports: u64, } +#[derive(Debug)] +#[allow(dead_code)] +pub struct FullSnapshotAccountsHashInfo { + /// accounts hash over all accounts when the full snapshot was taken + hash: Hash, + /// slot where full snapshot was taken + slot: Slot, +} + /// parameters to calculate accounts hash #[derive(Debug)] pub struct CalcAccountsHashConfig<'a> { @@ -45,6 +55,8 @@ pub struct CalcAccountsHashConfig<'a> { pub rent_collector: &'a RentCollector, /// used for tracking down hash mismatches after the fact pub store_detailed_debug_info_on_failure: bool, + /// `Some` if this is an incremental snapshot which only hashes slots since the base full snapshot + pub full_snapshot: Option, } impl<'a> CalcAccountsHashConfig<'a> { From 013d045981905a9e03abc37711aa748f3f6050e1 Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Thu, 28 Jul 2022 13:44:02 -0400 Subject: [PATCH 006/192] [docs] added prettier config file (#26817) feat: added 
prettier config file --- docs/.prettierrc.json | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 docs/.prettierrc.json diff --git a/docs/.prettierrc.json b/docs/.prettierrc.json new file mode 100644 index 00000000000000..c4664cb741fbd9 --- /dev/null +++ b/docs/.prettierrc.json @@ -0,0 +1,6 @@ +{ + "trailingComma": "all", + "tabWidth": 2, + "semi": true, + "singleQuote": false +} From 467cb5def5f3f793b02b5596fe3906e8affd0445 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Thu, 28 Jul 2022 11:33:19 -0700 Subject: [PATCH 007/192] Concurrent slot replay (#26465) * Concurrent replay slots * Split out concurrent and single bank replay paths * Sub function processing of replay results for readability * Add feature switch for concurrent replay --- core/src/progress_map.rs | 8 +- core/src/replay_stage.rs | 432 ++++++++++++++++++++++++----- ledger/src/blockstore_processor.rs | 2 +- runtime/src/bank.rs | 5 + runtime/src/bank_forks.rs | 4 +- sdk/src/feature_set.rs | 5 + 6 files changed, 376 insertions(+), 80 deletions(-) diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index 823d3cbb2a70bd..fc069750508839 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -188,8 +188,8 @@ pub struct ForkProgress { pub is_dead: bool, pub fork_stats: ForkStats, pub propagated_stats: PropagatedStats, - pub replay_stats: ReplaySlotStats, - pub replay_progress: ConfirmationProgress, + pub replay_stats: Arc>, + pub replay_progress: Arc>, pub retransmit_info: RetransmitInfo, // Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only // count new blocks replayed since last restart, which won't include @@ -235,8 +235,8 @@ impl ForkProgress { Self { is_dead: false, fork_stats: ForkStats::default(), - replay_stats: ReplaySlotStats::default(), - replay_progress: ConfirmationProgress::new(last_entry), + replay_stats: Arc::new(RwLock::new(ReplaySlotStats::default())), + replay_progress: Arc::new(RwLock::new(ConfirmationProgress::new(last_entry))), 
num_blocks_on_fork, num_dropped_blocks_on_fork, propagated_stats: PropagatedStats { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 22a872265393c4..d1f408b1cdaf63 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1,4 +1,5 @@ //! The `replay_stage` replays transactions broadcast by the leader. + use { crate::{ ancestor_hashes_service::AncestorHashesReplayUpdateSender, @@ -18,7 +19,7 @@ use { fork_choice::{ForkChoice, SelectVoteAndResetForkResult}, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, - progress_map::{ForkProgress, ProgressMap, PropagatedStats}, + progress_map::{ForkProgress, ProgressMap, PropagatedStats, ReplaySlotStats}, repair_service::DuplicateSlotsResetReceiver, rewards_recorder_service::RewardsRecorderSender, tower_storage::{SavedTower, SavedTowerVersions, TowerStorage}, @@ -28,6 +29,8 @@ use { window_service::DuplicateSlotReceiver, }, crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, + lazy_static::lazy_static, + rayon::{prelude::*, ThreadPool}, solana_client::rpc_response::SlotUpdate, solana_entry::entry::VerifyRecyclers, solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, @@ -35,7 +38,9 @@ use { solana_ledger::{ block_error::BlockError, blockstore::Blockstore, - blockstore_processor::{self, BlockstoreProcessorError, TransactionStatusSender}, + blockstore_processor::{ + self, BlockstoreProcessorError, ConfirmationProgress, TransactionStatusSender, + }, leader_schedule_cache::LeaderScheduleCache, leader_schedule_utils::first_of_consecutive_leader_slots, }, @@ -70,7 +75,7 @@ use { collections::{HashMap, HashSet}, result, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, }, thread::{self, Builder, JoinHandle}, @@ -85,6 +90,17 @@ pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1; pub const DUPLICATE_THRESHOLD: f64 = 1.0 - 
SWITCH_FORK_THRESHOLD - DUPLICATE_LIVENESS_THRESHOLD; const MAX_VOTE_SIGNATURES: usize = 200; const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000; +// Expect this number to be small enough to minimize thread pool overhead while large enough +// to be able to replay all active forks at the same time in most cases. +const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4; + +lazy_static! { + static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() + .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) + .thread_name(|ix| format!("replay_{}", ix)) + .build() + .unwrap(); +} #[derive(PartialEq, Eq, Debug)] pub enum HeaviestForkFailures { @@ -113,6 +129,12 @@ impl Drop for Finalizer { } } +struct ReplaySlotFromBlockstore { + is_slot_dead: bool, + bank_slot: Slot, + replay_result: Option>, +} + struct LastVoteRefreshTime { last_refresh_time: Instant, last_print_time: Instant, @@ -174,7 +196,7 @@ pub struct ReplayTiming { generate_new_bank_forks_get_slots_since_us: u64, generate_new_bank_forks_loop_us: u64, generate_new_bank_forks_write_lock_us: u64, - replay_blockstore_us: u64, + replay_blockstore_us: u64, //< When processing forks concurrently, only captures the longest fork } impl ReplayTiming { #[allow(clippy::too_many_arguments)] @@ -1685,21 +1707,24 @@ impl ReplayStage { fn replay_blockstore_into_bank( bank: &Arc, blockstore: &Blockstore, - bank_progress: &mut ForkProgress, + replay_stats: &RwLock, + replay_progress: &RwLock, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: &ReplayVoteSender, verify_recyclers: &VerifyRecyclers, log_messages_bytes_limit: Option, ) -> result::Result { - let tx_count_before = bank_progress.replay_progress.num_txs; + let mut w_replay_stats = replay_stats.write().unwrap(); + let mut w_replay_progress = replay_progress.write().unwrap(); + let tx_count_before = w_replay_progress.num_txs; // All errors must lead to marking the slot as dead, otherwise, // the `check_slot_agrees_with_cluster()` called by 
`replay_active_banks()` // will break! blockstore_processor::confirm_slot( blockstore, bank, - &mut bank_progress.replay_stats, - &mut bank_progress.replay_progress, + &mut w_replay_stats, + &mut w_replay_progress, false, transaction_status_sender, Some(replay_vote_sender), @@ -1708,7 +1733,7 @@ impl ReplayStage { false, log_messages_bytes_limit, )?; - let tx_count_after = bank_progress.replay_progress.num_txs; + let tx_count_after = w_replay_progress.num_txs; let tx_count = tx_count_after - tx_count_before; Ok(tx_count) } @@ -2191,49 +2216,139 @@ impl ReplayStage { } #[allow(clippy::too_many_arguments)] - fn replay_active_banks( + fn replay_active_banks_concurrently( blockstore: &Blockstore, bank_forks: &RwLock, my_pubkey: &Pubkey, vote_account: &Pubkey, progress: &mut ProgressMap, transaction_status_sender: Option<&TransactionStatusSender>, - cache_block_meta_sender: Option<&CacheBlockMetaSender>, verify_recyclers: &VerifyRecyclers, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, replay_vote_sender: &ReplayVoteSender, - bank_notification_sender: &Option, - rewards_recorder_sender: &Option, - rpc_subscriptions: &Arc, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, - unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, - latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, - cluster_slots_update_sender: &ClusterSlotsUpdateSender, - cost_update_sender: &Sender, - duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, - ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, - block_metadata_notifier: Option, replay_timing: &mut ReplayTiming, log_messages_bytes_limit: Option, - ) -> bool { - let mut did_complete_bank = false; - let mut tx_count = 0; - let mut execute_timings = ExecuteTimings::default(); - let active_banks = 
bank_forks.read().unwrap().active_banks(); - trace!("active banks {:?}", active_banks); + active_bank_slots: &[Slot], + ) -> Vec { + // Make mutable shared structures thread safe. + let progress = RwLock::new(progress); + let longest_replay_time_us = AtomicU64::new(0); + + // Allow for concurrent replaying of slots from different forks. + let replay_result_vec: Vec = PAR_THREAD_POOL.install(|| { + active_bank_slots + .into_par_iter() + .map(|bank_slot| { + let bank_slot = *bank_slot; + let mut replay_result = ReplaySlotFromBlockstore { + is_slot_dead: false, + bank_slot, + replay_result: None, + }; + let my_pubkey = &my_pubkey.clone(); + trace!( + "Replay active bank: slot {}, thread_idx {}", + bank_slot, + PAR_THREAD_POOL.current_thread_index().unwrap_or_default() + ); + let mut progress_lock = progress.write().unwrap(); + if progress_lock + .get(&bank_slot) + .map(|p| p.is_dead) + .unwrap_or(false) + { + // If the fork was marked as dead, don't replay it + debug!("bank_slot {:?} is marked dead", bank_slot); + replay_result.is_slot_dead = true; + return replay_result; + } - for bank_slot in &active_banks { - // If the fork was marked as dead, don't replay it - if progress.get(bank_slot).map(|p| p.is_dead).unwrap_or(false) { - debug!("bank_slot {:?} is marked dead", *bank_slot); - continue; - } + let bank = &bank_forks.read().unwrap().get(bank_slot).unwrap(); + let parent_slot = bank.parent_slot(); + let (num_blocks_on_fork, num_dropped_blocks_on_fork) = { + let stats = progress_lock + .get(&parent_slot) + .expect("parent of active bank must exist in progress map"); + let num_blocks_on_fork = stats.num_blocks_on_fork + 1; + let new_dropped_blocks = bank.slot() - parent_slot - 1; + let num_dropped_blocks_on_fork = + stats.num_dropped_blocks_on_fork + new_dropped_blocks; + (num_blocks_on_fork, num_dropped_blocks_on_fork) + }; + let prev_leader_slot = progress_lock.get_bank_prev_leader_slot(bank); + + let bank_progress = 
progress_lock.entry(bank.slot()).or_insert_with(|| { + ForkProgress::new_from_bank( + bank, + my_pubkey, + &vote_account.clone(), + prev_leader_slot, + num_blocks_on_fork, + num_dropped_blocks_on_fork, + ) + }); + + let replay_stats = bank_progress.replay_stats.clone(); + let replay_progress = bank_progress.replay_progress.clone(); + drop(progress_lock); + + if bank.collector_id() != my_pubkey { + let mut replay_blockstore_time = + Measure::start("replay_blockstore_into_bank"); + let blockstore_result = Self::replay_blockstore_into_bank( + bank, + blockstore, + &replay_stats, + &replay_progress, + transaction_status_sender, + &replay_vote_sender.clone(), + &verify_recyclers.clone(), + log_messages_bytes_limit, + ); + replay_blockstore_time.stop(); + replay_result.replay_result = Some(blockstore_result); + longest_replay_time_us + .fetch_max(replay_blockstore_time.as_us(), Ordering::Relaxed); + } + replay_result + }) + .collect() + }); + // Accumulating time across all slots could inflate this number and make it seem like an + // overly large amount of time is being spent on blockstore compared to other activities. 
+ replay_timing.replay_blockstore_us += longest_replay_time_us.load(Ordering::Relaxed); - let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap(); + replay_result_vec + } + + #[allow(clippy::too_many_arguments)] + fn replay_active_bank( + blockstore: &Blockstore, + bank_forks: &RwLock, + my_pubkey: &Pubkey, + vote_account: &Pubkey, + progress: &mut ProgressMap, + transaction_status_sender: Option<&TransactionStatusSender>, + verify_recyclers: &VerifyRecyclers, + replay_vote_sender: &ReplayVoteSender, + replay_timing: &mut ReplayTiming, + log_messages_bytes_limit: Option, + bank_slot: Slot, + ) -> ReplaySlotFromBlockstore { + let mut replay_result = ReplaySlotFromBlockstore { + is_slot_dead: false, + bank_slot, + replay_result: None, + }; + let my_pubkey = &my_pubkey.clone(); + trace!("Replay active bank: slot {}", bank_slot); + if progress.get(&bank_slot).map(|p| p.is_dead).unwrap_or(false) { + // If the fork was marked as dead, don't replay it + debug!("bank_slot {:?} is marked dead", bank_slot); + replay_result.is_slot_dead = true; + } else { + let bank = &bank_forks.read().unwrap().get(bank_slot).unwrap(); let parent_slot = bank.parent_slot(); - let prev_leader_slot = progress.get_bank_prev_leader_slot(&bank); + let prev_leader_slot = progress.get_bank_prev_leader_slot(bank); let (num_blocks_on_fork, num_dropped_blocks_on_fork) = { let stats = progress .get(&parent_slot) @@ -2245,42 +2360,81 @@ impl ReplayStage { (num_blocks_on_fork, num_dropped_blocks_on_fork) }; - // Insert a progress entry even for slots this node is the leader for, so that - // 1) confirm_forks can report confirmation, 2) we can cache computations about - // this bank in `select_forks()` - let bank_progress = &mut progress.entry(bank.slot()).or_insert_with(|| { + let bank_progress = progress.entry(bank.slot()).or_insert_with(|| { ForkProgress::new_from_bank( - &bank, + bank, my_pubkey, - vote_account, + &vote_account.clone(), prev_leader_slot, num_blocks_on_fork, 
num_dropped_blocks_on_fork, ) }); + if bank.collector_id() != my_pubkey { - let root_slot = bank_forks.read().unwrap().root(); let mut replay_blockstore_time = Measure::start("replay_blockstore_into_bank"); - let replay_result = Self::replay_blockstore_into_bank( - &bank, + let blockstore_result = Self::replay_blockstore_into_bank( + bank, blockstore, - bank_progress, + &bank_progress.replay_stats, + &bank_progress.replay_progress, transaction_status_sender, - replay_vote_sender, - verify_recyclers, + &replay_vote_sender.clone(), + &verify_recyclers.clone(), log_messages_bytes_limit, ); replay_blockstore_time.stop(); + replay_result.replay_result = Some(blockstore_result); replay_timing.replay_blockstore_us += replay_blockstore_time.as_us(); + } + } + replay_result + } + + #[allow(clippy::too_many_arguments)] + fn process_replay_results( + blockstore: &Blockstore, + bank_forks: &RwLock, + progress: &mut ProgressMap, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, + heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + bank_notification_sender: &Option, + rewards_recorder_sender: &Option, + rpc_subscriptions: &Arc, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, + unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, + cluster_slots_update_sender: &ClusterSlotsUpdateSender, + cost_update_sender: &Sender, + duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, + ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, + block_metadata_notifier: Option, + replay_result_vec: &[ReplaySlotFromBlockstore], + ) -> bool { + // TODO: See if processing of blockstore replay results and bank completion can be made thread safe. 
+ let mut did_complete_bank = false; + let mut tx_count = 0; + let mut execute_timings = ExecuteTimings::default(); + for replay_result in replay_result_vec { + if replay_result.is_slot_dead { + continue; + } + + let bank_slot = replay_result.bank_slot; + let bank = &bank_forks.read().unwrap().get(bank_slot).unwrap(); + if let Some(replay_result) = &replay_result.replay_result { match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, Err(err) => { // Error means the slot needs to be marked as dead Self::mark_dead_slot( blockstore, - &bank, - root_slot, - &err, + bank, + bank_forks.read().unwrap().root(), + err, rpc_subscriptions, duplicate_slots_tracker, gossip_duplicate_confirmed_slots, @@ -2296,20 +2450,27 @@ impl ReplayStage { } } } - assert_eq!(*bank_slot, bank.slot()); + + assert_eq!(bank_slot, bank.slot()); if bank.is_complete() { let mut bank_complete_time = Measure::start("bank_complete_time"); - execute_timings.accumulate(&bank_progress.replay_stats.execute_timings); + let bank_progress = progress + .get_mut(&bank.slot()) + .expect("Bank fork progress entry missing for completed bank"); + + let replay_stats = bank_progress.replay_stats.clone(); + let r_replay_stats = replay_stats.read().unwrap(); + let replay_progress = bank_progress.replay_progress.clone(); + let r_replay_progress = replay_progress.read().unwrap(); debug!("bank {} is completed replay from blockstore, contribute to update cost with {:?}", - bank.slot(), - bank_progress.replay_stats.execute_timings - ); - + bank.slot(), + r_replay_stats.execute_timings + ); did_complete_bank = true; info!("bank frozen: {}", bank.slot()); - let _ = cluster_slots_update_sender.send(vec![*bank_slot]); + let _ = cluster_slots_update_sender.send(vec![bank_slot]); if let Some(transaction_status_sender) = transaction_status_sender { - transaction_status_sender.send_transaction_status_freeze_message(&bank); + transaction_status_sender.send_transaction_status_freeze_message(bank); } bank.freeze(); 
// report cost tracker stats @@ -2319,8 +2480,7 @@ impl ReplayStage { warn!("cost_update_sender failed sending bank stats: {:?}", err) }); - let bank_hash = bank.hash(); - assert_ne!(bank_hash, Hash::default()); + assert_ne!(bank.hash(), Hash::default()); // Needs to be updated before `check_slot_agrees_with_cluster()` so that // any updates in `check_slot_agrees_with_cluster()` on fork choice take // effect @@ -2353,7 +2513,7 @@ impl ReplayStage { .send(BankNotification::Frozen(bank.clone())) .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); } - blockstore_processor::cache_block_meta(&bank, cache_block_meta_sender); + blockstore_processor::cache_block_meta(bank, cache_block_meta_sender); let bank_hash = bank.hash(); if let Some(new_frozen_voters) = @@ -2368,7 +2528,7 @@ impl ReplayStage { ); } } - Self::record_rewards(&bank, rewards_recorder_sender); + Self::record_rewards(bank, rewards_recorder_sender); if let Some(ref block_metadata_notifier) = block_metadata_notifier { let block_metadata_notifier = block_metadata_notifier.read().unwrap(); block_metadata_notifier.notify_block_metadata( @@ -2381,12 +2541,13 @@ impl ReplayStage { } bank_complete_time.stop(); - bank_progress.replay_stats.report_stats( + r_replay_stats.report_stats( bank.slot(), - bank_progress.replay_progress.num_entries, - bank_progress.replay_progress.num_shreds, + r_replay_progress.num_entries, + r_replay_progress.num_shreds, bank_complete_time.as_us(), ); + execute_timings.accumulate(&r_replay_stats.execute_timings); } else { trace!( "bank {} not completed tick_height: {}, max_tick_height: {}", @@ -2397,7 +2558,7 @@ impl ReplayStage { } } - // send accumulated execute-timings to cost_update_service + // Send accumulated execute-timings to cost_update_service. 
if !execute_timings.details.per_program_timings.is_empty() { cost_update_sender .send(CostUpdate::ExecuteTiming { @@ -2405,11 +2566,129 @@ impl ReplayStage { }) .unwrap_or_else(|err| warn!("cost_update_sender failed: {:?}", err)); } - inc_new_counter_info!("replay_stage-replay_transactions", tx_count); did_complete_bank } + #[allow(clippy::too_many_arguments)] + fn replay_active_banks( + blockstore: &Blockstore, + bank_forks: &RwLock, + my_pubkey: &Pubkey, + vote_account: &Pubkey, + progress: &mut ProgressMap, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, + verify_recyclers: &VerifyRecyclers, + heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + replay_vote_sender: &ReplayVoteSender, + bank_notification_sender: &Option, + rewards_recorder_sender: &Option, + rpc_subscriptions: &Arc, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, + unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, + cluster_slots_update_sender: &ClusterSlotsUpdateSender, + cost_update_sender: &Sender, + duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, + ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, + block_metadata_notifier: Option, + replay_timing: &mut ReplayTiming, + log_messages_bytes_limit: Option, + ) -> bool { + let active_bank_slots = bank_forks.read().unwrap().active_bank_slots(); + let num_active_banks = active_bank_slots.len(); + warn!( + "{} active bank(s) to replay: {:?}", + num_active_banks, active_bank_slots + ); + if num_active_banks > 0 { + let replay_result_vec = if num_active_banks > 1 { + if bank_forks + .read() + .unwrap() + .get(active_bank_slots[0]) + .unwrap() + .concurrent_replay_of_forks() + { + 
Self::replay_active_banks_concurrently( + blockstore, + bank_forks, + my_pubkey, + vote_account, + progress, + transaction_status_sender, + verify_recyclers, + replay_vote_sender, + replay_timing, + log_messages_bytes_limit, + &active_bank_slots, + ) + } else { + active_bank_slots + .iter() + .map(|bank_slot| { + Self::replay_active_bank( + blockstore, + bank_forks, + my_pubkey, + vote_account, + progress, + transaction_status_sender, + verify_recyclers, + replay_vote_sender, + replay_timing, + log_messages_bytes_limit, + *bank_slot, + ) + }) + .collect() + } + } else { + vec![Self::replay_active_bank( + blockstore, + bank_forks, + my_pubkey, + vote_account, + progress, + transaction_status_sender, + verify_recyclers, + replay_vote_sender, + replay_timing, + log_messages_bytes_limit, + active_bank_slots[0], + )] + }; + + Self::process_replay_results( + blockstore, + bank_forks, + progress, + transaction_status_sender, + cache_block_meta_sender, + heaviest_subtree_fork_choice, + bank_notification_sender, + rewards_recorder_sender, + rpc_subscriptions, + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + epoch_slots_frozen_slots, + unfrozen_gossip_verified_vote_hashes, + latest_validator_votes_for_frozen_banks, + cluster_slots_update_sender, + cost_update_sender, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + block_metadata_notifier, + &replay_result_vec, + ) + } else { + false + } + } + #[allow(clippy::too_many_arguments)] pub fn compute_bank_stats( my_vote_pubkey: &Pubkey, @@ -2988,7 +3267,13 @@ impl ReplayStage { .get(*slot) .expect("bank in progress must exist in BankForks") .clone(); - let duration = prog.replay_stats.started.elapsed().as_millis(); + let duration = prog + .replay_stats + .read() + .unwrap() + .started + .elapsed() + .as_millis(); if bank.is_frozen() && tower.is_slot_confirmed(*slot, voted_stakes, total_stake) { info!("validator fork confirmed {} {}ms", *slot, duration); 
datapoint_info!("validator-confirmation", ("duration_ms", duration, i64)); @@ -3884,7 +4169,8 @@ pub(crate) mod tests { let res = ReplayStage::replay_blockstore_into_bank( &bank1, &blockstore, - bank1_progress, + &bank1_progress.replay_stats, + &bank1_progress.replay_progress, None, &replay_vote_sender, &VerifyRecyclers::default(), diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 9128efe6a7bef8..43208cf7068db6 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -894,7 +894,7 @@ pub fn process_blockstore_from_root( if bank_slots.len() > 1 { "s" } else { "" }, bank_slots.iter().map(|slot| slot.to_string()).join(", "), ); - assert!(bank_forks.active_banks().is_empty()); + assert!(bank_forks.active_bank_slots().is_empty()); } Ok(()) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4bc2b8bfe39c3e..c38eea8ce70ee2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7507,6 +7507,11 @@ impl Bank { .is_active(&feature_set::preserve_rent_epoch_for_rent_exempt_accounts::id()) } + pub fn concurrent_replay_of_forks(&self) -> bool { + self.feature_set + .is_active(&feature_set::concurrent_replay_of_forks::id()) + } + pub fn read_cost_tracker(&self) -> LockResult> { self.cost_tracker.read() } diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 54e448ae4090dc..6785b962aa51cf 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -107,7 +107,7 @@ impl BankForks { .collect() } - pub fn active_banks(&self) -> Vec { + pub fn active_bank_slots(&self) -> Vec { self.banks .iter() .filter(|(_, v)| !v.is_frozen()) @@ -635,7 +635,7 @@ mod tests { let mut bank_forks = BankForks::new(bank); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); bank_forks.insert(child_bank); - assert_eq!(bank_forks.active_banks(), vec![1]); + assert_eq!(bank_forks.active_bank_slots(), vec![1]); } #[test] diff --git a/sdk/src/feature_set.rs 
b/sdk/src/feature_set.rs index 899120dada206e..4486cdf5a86a50 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -484,6 +484,10 @@ pub mod compact_vote_state_updates { solana_sdk::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); } +pub mod concurrent_replay_of_forks { + solana_sdk::declare_id!("9F2Dcu8xkBPKxiiy65XKPZYdCG3VZDpjDTuSmeYLozJe"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -599,6 +603,7 @@ lazy_static! { (loosen_cpi_size_restriction::id(), "loosen cpi size restrictions #26641"), (use_default_units_in_fee_calculation::id(), "use default units per instruction in fee calculation #26785"), (compact_vote_state_updates::id(), "Compact vote state updates to lower block size"), + (concurrent_replay_of_forks::id(), "Allow slots from different forks to be replayed concurrently #26465"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 123f61ccad9bd1ebcc14e6c6778b4169c0f426f5 Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Thu, 28 Jul 2022 07:21:11 -0600 Subject: [PATCH 008/192] Correct DeactivateDelinquent/Redelegate account parsing --- transaction-status/src/parse_stake.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/transaction-status/src/parse_stake.rs b/transaction-status/src/parse_stake.rs index a973643ed26181..dfa3e9608a34a7 100644 --- a/transaction-status/src/parse_stake.rs +++ b/transaction-status/src/parse_stake.rs @@ -280,12 +280,12 @@ pub fn parse_stake( info: json!({ "stakeAccount": account_keys[instruction.accounts[0] as usize].to_string(), "voteAccount": account_keys[instruction.accounts[1] as usize].to_string(), - "referenceVoteAccount": account_keys[instruction.accounts[3] as usize].to_string(), + "referenceVoteAccount": account_keys[instruction.accounts[2] as usize].to_string(), }), }) } StakeInstruction::Redelegate => { - check_num_stake_accounts(&instruction.accounts, 4)?; + 
check_num_stake_accounts(&instruction.accounts, 5)?; Ok(ParsedInstructionEnum { instruction_type: "redelegate".to_string(), info: json!({ From c1f7d1a367effea9fe9158c130b185342e95efd6 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Thu, 28 Jul 2022 14:57:26 -0500 Subject: [PATCH 009/192] Add AppendVec::new_from_file_unchecked (#26795) new_from_file_unchecked --- runtime/src/append_vec.rs | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 1f5c6639b36845..4eae2ade86c175 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -321,15 +321,33 @@ impl AppendVec { } pub fn new_from_file>(path: P, current_len: usize) -> io::Result<(Self, usize)> { + let new = Self::new_from_file_unchecked(path, current_len)?; + + let (sanitized, num_accounts) = new.sanitize_layout_and_length(); + if !sanitized { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "incorrect layout/length/data", + )); + } + + Ok((new, num_accounts)) + } + + /// Creates an appendvec from file without performing sanitize checks or counting the number of accounts + pub fn new_from_file_unchecked>( + path: P, + current_len: usize, + ) -> io::Result { + let file_size = std::fs::metadata(&path)?.len(); + Self::sanitize_len_and_size(current_len, file_size as usize)?; + let data = OpenOptions::new() .read(true) .write(true) .create(false) .open(&path)?; - let file_size = std::fs::metadata(&path)?.len(); - AppendVec::sanitize_len_and_size(current_len, file_size as usize)?; - let map = unsafe { let result = MmapMut::map_mut(&data); if result.is_err() { @@ -339,24 +357,14 @@ impl AppendVec { result? 
}; - let new = AppendVec { + Ok(AppendVec { path: path.as_ref().to_path_buf(), map, append_lock: Mutex::new(()), current_len: AtomicUsize::new(current_len), file_size, remove_on_drop: true, - }; - - let (sanitized, num_accounts) = new.sanitize_layout_and_length(); - if !sanitized { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "incorrect layout/length/data", - )); - } - - Ok((new, num_accounts)) + }) } fn sanitize_layout_and_length(&self) -> (bool, usize) { From 9d31b1d2901718c2d5357d58d27b17c5d5eb82bd Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Thu, 28 Jul 2022 20:43:20 -0700 Subject: [PATCH 010/192] Parse ConfidentialTransaction extensions (#26820) * Bump spl-token-2022 to 0.4.2 * Revert Cargo.lock changes in #26746 and update spl solana crates to v1.10.33 * Parse ConfidentialTransfer extensions --- Cargo.lock | 96 +++++++++---------- account-decoder/Cargo.toml | 2 +- account-decoder/src/parse_token_extension.rs | 97 +++++++++++++++++++- client/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 96 +++++++++---------- rpc/Cargo.toml | 2 +- transaction-status/Cargo.toml | 2 +- 7 files changed, 183 insertions(+), 114 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4124d55e1f6044..a60548edd0a8e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3110,6 +3110,15 @@ dependencies = [ "crypto-mac", ] +[[package]] +name = "pbkdf2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "pbkdf2" version = "0.11.0" @@ -5185,35 +5194,23 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6d2bcb469e59d941e9d45702c91af940f6a8f4a5947f51bafe72844ed9b71a4" +checksum = "49a5d3280421bb53fc12bdba1eaa505153fb4f99a06b5609dae22192652ead3b" dependencies = [ - "ahash", - 
"blake3", - "block-buffer 0.9.0", "bs58", "bv", - "byteorder", - "cc", - "either", "generic-array 0.14.5", - "getrandom 0.1.16", - "hashbrown 0.11.2", "im", "lazy_static", "log", "memmap2", - "once_cell", - "rand_core 0.6.3", "rustc_version 0.4.0", "serde", "serde_bytes", "serde_derive", - "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.11.3", - "subtle", + "solana-frozen-abi-macro 1.10.33", "thiserror", ] @@ -5252,9 +5249,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1f8cd2f387d17ccfb2bd5dd097d9b3c1fa14acfa49e95a63ece15a4622b8d0" +checksum = "635c60ac96b1347af272c625465068b908aff919d19f29b5795a44310310494d" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -5567,9 +5564,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3faca9d9fe587dc5827e06acdcca072e5b744bef0488b98fb21b77932e642afb" +checksum = "b12cb6e6f1f9c9876d356c928b8c2ac532f6715e7cd2a1b4343d747bee3eca73" dependencies = [ "env_logger", "lazy_static", @@ -5738,9 +5735,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57df37154125f5e4ba0eaa0e5ea3cbc747a950480ddd89395036c4d3b77b66b" +checksum = "eeecf504cee2821b006871f70e7a1f54db15f914cedf259eaf5976fe606470f0" dependencies = [ "base64 0.13.0", "bincode", @@ -5751,38 +5748,31 @@ dependencies = [ "bs58", "bv", "bytemuck", - "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.2.3", + "getrandom 0.1.16", "itertools", "js-sys", "lazy_static", - "libc", "libsecp256k1", "log", - "memoffset", "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.7.3", - "rand_chacha 0.2.2", "rustc_version 0.4.0", "rustversion", "serde", 
"serde_bytes", "serde_derive", - "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.3", - "solana-frozen-abi-macro 1.11.3", - "solana-sdk-macro 1.11.3", + "solana-frozen-abi 1.10.33", + "solana-frozen-abi-macro 1.10.33", + "solana-sdk-macro 1.10.33", "thiserror", - "tiny-bip39", "wasm-bindgen", - "zeroize", ] [[package]] @@ -6053,9 +6043,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d17897e0ca6c36cf90dc4d58a8f6bb2e3af80afc74b1825e779ee087af5c4a" +checksum = "636f6c615aca6f75e22b6baceaf0ffed9d74367f9320b07ed57cd9b5ce2e4ff9" dependencies = [ "assert_matches", "base64 0.13.0", @@ -6080,7 +6070,7 @@ dependencies = [ "memmap2", "num-derive", "num-traits", - "pbkdf2 0.11.0", + "pbkdf2 0.10.1", "qstring", "rand 0.7.3", "rand_chacha 0.2.2", @@ -6092,11 +6082,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.3", - "solana-frozen-abi-macro 1.11.3", - "solana-logger 1.11.3", - "solana-program 1.11.3", - "solana-sdk-macro 1.11.3", + "solana-frozen-abi 1.10.33", + "solana-frozen-abi-macro 1.10.33", + "solana-logger 1.10.33", + "solana-program 1.10.33", + "solana-sdk-macro 1.10.33", "thiserror", "uriparse", "wasm-bindgen", @@ -6157,9 +6147,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "148d14ba16fed65d2426cd33fbe0661c4c8543721bfb5472f486a509cf01f8c2" +checksum = "2b8bcac4394644f21dc013e932a7df9f536fcecef3e5df43fe362b4ec532ce30" dependencies = [ "bs58", "proc-macro2 1.0.41", @@ -6567,9 +6557,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ca28bafde8d752654a1efbe0911cffc0f2f1518d3eefe094db94cbda8fc965" +checksum = 
"410ee53a26ac91098c289c983863535d4fbb6604b229ae1159503f48fa4fc90f" dependencies = [ "aes-gcm-siv", "arrayref", @@ -6588,8 +6578,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.11.3", - "solana-sdk 1.11.3", + "solana-program 1.10.33", + "solana-sdk 1.10.33", "subtle", "thiserror", "zeroize", @@ -6670,7 +6660,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b013067447a1396303ddfc294f36e3d260a32f8a16c501c295bcdc7de39b490" dependencies = [ "borsh", - "solana-program 1.11.3", + "solana-program 1.10.33", "spl-token", ] @@ -6680,7 +6670,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.11.3", + "solana-program 1.10.33", ] [[package]] @@ -6693,23 +6683,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.11.3", + "solana-program 1.10.33", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e684f055853cf11bdc9cd3301da1a3238bc95d25c9e563fd67533c2314900eab" +checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.11.3", - "solana-zk-token-sdk 1.11.3", + "solana-program 1.10.33", + "solana-zk-token-sdk 1.10.33", "spl-memo", "spl-token", "thiserror", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 076a9664cef5e1..2492767e281457 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -23,7 +23,7 @@ solana-config-program = { path = "../programs/config", version = "=1.11.5" } solana-sdk = { path = "../sdk", version = "=1.11.5" } solana-vote-program = { path = "../programs/vote", version = "=1.11.5" } spl-token = { version = "=3.3.0", features = 
["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.1", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } thiserror = "1.0" zstd = "0.11.2" diff --git a/account-decoder/src/parse_token_extension.rs b/account-decoder/src/parse_token_extension.rs index c9e60fb2024986..998250368b0f90 100644 --- a/account-decoder/src/parse_token_extension.rs +++ b/account-decoder/src/parse_token_extension.rs @@ -14,8 +14,8 @@ pub enum UiExtension { TransferFeeConfig(UiTransferFeeConfig), TransferFeeAmount(UiTransferFeeAmount), MintCloseAuthority(UiMintCloseAuthority), - ConfidentialTransferMint, // Implementation of extension state to come - ConfidentialTransferAccount, // Implementation of extension state to come + ConfidentialTransferMint(UiConfidentialTransferMint), + ConfidentialTransferAccount(UiConfidentialTransferAccount), DefaultAccountState(UiDefaultAccountState), ImmutableOwner, MemoTransfer(UiMemoTransfer), @@ -42,8 +42,14 @@ pub fn parse_extension( .get_extension::() .map(|&extension| UiExtension::MintCloseAuthority(extension.into())) .unwrap_or(UiExtension::UnparseableExtension), - ExtensionType::ConfidentialTransferMint => UiExtension::ConfidentialTransferMint, - ExtensionType::ConfidentialTransferAccount => UiExtension::ConfidentialTransferAccount, + ExtensionType::ConfidentialTransferMint => account + .get_extension::() + .map(|&extension| UiExtension::ConfidentialTransferMint(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), + ExtensionType::ConfidentialTransferAccount => account + .get_extension::() + .map(|&extension| UiExtension::ConfidentialTransferAccount(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), ExtensionType::DefaultAccountState => account .get_extension::() .map(|&extension| UiExtension::DefaultAccountState(extension.into())) @@ -197,3 +203,86 @@ impl From for UiInteres } } } + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = 
"camelCase")] +pub struct UiConfidentialTransferMint { + pub authority: String, + pub auto_approve_new_accounts: bool, + pub auditor_encryption_pubkey: String, + pub withdraw_withheld_authority_encryption_pubkey: String, + pub withheld_amount: String, +} + +impl From + for UiConfidentialTransferMint +{ + fn from( + confidential_transfer_mint: extension::confidential_transfer::ConfidentialTransferMint, + ) -> Self { + Self { + authority: confidential_transfer_mint.authority.to_string(), + auto_approve_new_accounts: confidential_transfer_mint.auto_approve_new_accounts.into(), + auditor_encryption_pubkey: format!( + "{}", + confidential_transfer_mint.auditor_encryption_pubkey + ), + withdraw_withheld_authority_encryption_pubkey: format!( + "{}", + confidential_transfer_mint.withdraw_withheld_authority_encryption_pubkey + ), + withheld_amount: format!("{}", confidential_transfer_mint.withheld_amount), + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiConfidentialTransferAccount { + pub approved: bool, + pub encryption_pubkey: String, + pub pending_balance_lo: String, + pub pending_balance_hi: String, + pub available_balance: String, + pub decryptable_available_balance: String, + pub allow_balance_credits: bool, + pub pending_balance_credit_counter: u64, + pub maximum_pending_balance_credit_counter: u64, + pub expected_pending_balance_credit_counter: u64, + pub actual_pending_balance_credit_counter: u64, + pub withheld_amount: String, +} + +impl From + for UiConfidentialTransferAccount +{ + fn from( + confidential_transfer_account: extension::confidential_transfer::ConfidentialTransferAccount, + ) -> Self { + Self { + approved: confidential_transfer_account.approved.into(), + encryption_pubkey: format!("{}", confidential_transfer_account.encryption_pubkey), + pending_balance_lo: format!("{}", confidential_transfer_account.pending_balance_lo), + pending_balance_hi: format!("{}", 
confidential_transfer_account.pending_balance_hi), + available_balance: format!("{}", confidential_transfer_account.available_balance), + decryptable_available_balance: format!( + "{}", + confidential_transfer_account.decryptable_available_balance + ), + allow_balance_credits: confidential_transfer_account.allow_balance_credits.into(), + pending_balance_credit_counter: confidential_transfer_account + .pending_balance_credit_counter + .into(), + maximum_pending_balance_credit_counter: confidential_transfer_account + .maximum_pending_balance_credit_counter + .into(), + expected_pending_balance_credit_counter: confidential_transfer_account + .expected_pending_balance_credit_counter + .into(), + actual_pending_balance_credit_counter: confidential_transfer_account + .actual_pending_balance_credit_counter + .into(), + withheld_amount: format!("{}", confidential_transfer_account.withheld_amount), + } + } +} diff --git a/client/Cargo.toml b/client/Cargo.toml index 2a3b555443a20f..2dfd68ab453f89 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -49,7 +49,7 @@ solana-streamer = { path = "../streamer", version = "=1.11.5" } solana-transaction-status = { path = "../transaction-status", version = "=1.11.5" } solana-version = { path = "../version", version = "=1.11.5" } solana-vote-program = { path = "../programs/vote", version = "=1.11.5" } -spl-token-2022 = { version = "=0.4.1", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } tokio-stream = "0.1.9" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 139d86710eedfe..67a00edfd4b2f0 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2838,6 +2838,15 @@ dependencies = [ "crypto-mac", ] +[[package]] +name = "pbkdf2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "pbkdf2" version = "0.11.0" @@ -4753,35 +4762,23 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6d2bcb469e59d941e9d45702c91af940f6a8f4a5947f51bafe72844ed9b71a4" +checksum = "49a5d3280421bb53fc12bdba1eaa505153fb4f99a06b5609dae22192652ead3b" dependencies = [ - "ahash", - "blake3", - "block-buffer 0.9.0", "bs58", "bv", - "byteorder 1.4.3", - "cc", - "either", "generic-array 0.14.5", - "getrandom 0.1.14", - "hashbrown 0.11.2", "im", "lazy_static", "log", "memmap2", - "once_cell", - "rand_core 0.6.3", "rustc_version", "serde", "serde_bytes", "serde_derive", - "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.11.3", - "subtle", + "solana-frozen-abi-macro 1.10.33", "thiserror", ] @@ -4819,9 +4816,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1f8cd2f387d17ccfb2bd5dd097d9b3c1fa14acfa49e95a63ece15a4622b8d0" +checksum = "635c60ac96b1347af272c625465068b908aff919d19f29b5795a44310310494d" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -4977,9 +4974,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3faca9d9fe587dc5827e06acdcca072e5b744bef0488b98fb21b77932e642afb" +checksum = "b12cb6e6f1f9c9876d356c928b8c2ac532f6715e7cd2a1b4343d747bee3eca73" dependencies = [ "env_logger", "lazy_static", @@ -5088,9 +5085,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57df37154125f5e4ba0eaa0e5ea3cbc747a950480ddd89395036c4d3b77b66b" 
+checksum = "eeecf504cee2821b006871f70e7a1f54db15f914cedf259eaf5976fe606470f0" dependencies = [ "base64 0.13.0", "bincode", @@ -5101,38 +5098,31 @@ dependencies = [ "bs58", "bv", "bytemuck", - "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.2.4", + "getrandom 0.1.14", "itertools", "js-sys", "lazy_static", - "libc", "libsecp256k1", "log", - "memoffset", "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.7.3", - "rand_chacha 0.2.2", "rustc_version", "rustversion", "serde", "serde_bytes", "serde_derive", - "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.3", - "solana-frozen-abi-macro 1.11.3", - "solana-sdk-macro 1.11.3", + "solana-frozen-abi 1.10.33", + "solana-frozen-abi-macro 1.10.33", + "solana-sdk-macro 1.10.33", "thiserror", - "tiny-bip39", "wasm-bindgen", - "zeroize", ] [[package]] @@ -5365,9 +5355,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d17897e0ca6c36cf90dc4d58a8f6bb2e3af80afc74b1825e779ee087af5c4a" +checksum = "636f6c615aca6f75e22b6baceaf0ffed9d74367f9320b07ed57cd9b5ce2e4ff9" dependencies = [ "assert_matches", "base64 0.13.0", @@ -5392,7 +5382,7 @@ dependencies = [ "memmap2", "num-derive", "num-traits", - "pbkdf2 0.11.0", + "pbkdf2 0.10.1", "qstring", "rand 0.7.3", "rand_chacha 0.2.2", @@ -5404,11 +5394,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.3", - "solana-frozen-abi-macro 1.11.3", - "solana-logger 1.11.3", - "solana-program 1.11.3", - "solana-sdk-macro 1.11.3", + "solana-frozen-abi 1.10.33", + "solana-frozen-abi-macro 1.10.33", + "solana-logger 1.10.33", + "solana-program 1.10.33", + "solana-sdk-macro 1.10.33", "thiserror", "uriparse", "wasm-bindgen", @@ -5465,9 +5455,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.11.3" +version = "1.10.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "148d14ba16fed65d2426cd33fbe0661c4c8543721bfb5472f486a509cf01f8c2" +checksum = "2b8bcac4394644f21dc013e932a7df9f536fcecef3e5df43fe362b4ec532ce30" dependencies = [ "bs58", "proc-macro2 1.0.41", @@ -5759,9 +5749,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.11.3" +version = "1.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ca28bafde8d752654a1efbe0911cffc0f2f1518d3eefe094db94cbda8fc965" +checksum = "410ee53a26ac91098c289c983863535d4fbb6604b229ae1159503f48fa4fc90f" dependencies = [ "aes-gcm-siv", "arrayref", @@ -5780,8 +5770,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.11.3", - "solana-sdk 1.11.3", + "solana-program 1.10.33", + "solana-sdk 1.10.33", "subtle", "thiserror", "zeroize", @@ -5862,7 +5852,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b013067447a1396303ddfc294f36e3d260a32f8a16c501c295bcdc7de39b490" dependencies = [ "borsh", - "solana-program 1.11.3", + "solana-program 1.10.33", "spl-token", ] @@ -5872,7 +5862,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.11.3", + "solana-program 1.10.33", ] [[package]] @@ -5885,23 +5875,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.11.3", + "solana-program 1.10.33", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e684f055853cf11bdc9cd3301da1a3238bc95d25c9e563fd67533c2314900eab" +checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.11.3", - "solana-zk-token-sdk 1.11.3", + "solana-program 
1.10.33", + "solana-zk-token-sdk 1.10.33", "spl-memo", "spl-token", "thiserror", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 05a46ca1f96183..6218aa124eda88 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -50,7 +50,7 @@ solana-transaction-status = { path = "../transaction-status", version = "=1.11.5 solana-version = { path = "../version", version = "=1.11.5" } solana-vote-program = { path = "../programs/vote", version = "=1.11.5" } spl-token = { version = "=3.3.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.1", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } stream-cancel = "0.8.1" thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index c9e5a724b7dd52..a337d6cbd5a56f 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -29,7 +29,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.11.5" } spl-associated-token-account = { version = "=1.0.5", features = ["no-entrypoint"] } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } spl-token = { version = "=3.3.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.1", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } thiserror = "1.0" [package.metadata.docs.rs] From fb922f613c1361faa5fe1bb56c985507c800434c Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 29 Jul 2022 08:44:24 -0700 Subject: [PATCH 011/192] Compute maximum parallel QUIC streams using client stake (#26802) * Compute maximum parallel QUIC streams using client stake * clippy fixes * Add unit test --- client/src/connection_cache.rs | 105 +++++++++++++++++++++++++- client/src/nonblocking/quic_client.rs | 19 +++-- core/src/tpu.rs | 4 +- core/src/validator.rs | 6 +- streamer/src/nonblocking/quic.rs | 56 ++++++-------- 5 files changed, 147 insertions(+), 43 
deletions(-) diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 2c5ae30f954430..f0628d3e32b9de 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -9,8 +9,14 @@ use { indexmap::map::{Entry, IndexMap}, rand::{thread_rng, Rng}, solana_measure::measure::Measure, - solana_sdk::{quic::QUIC_PORT_OFFSET, signature::Keypair, timing::AtomicInterval}, - solana_streamer::tls_certificates::new_self_signed_tls_certificate_chain, + solana_sdk::{ + pubkey::Pubkey, quic::QUIC_PORT_OFFSET, signature::Keypair, timing::AtomicInterval, + }, + solana_streamer::{ + nonblocking::quic::{compute_max_allowed_uni_streams, ConnectionPeerType}, + streamer::StakedNodes, + tls_certificates::new_self_signed_tls_certificate_chain, + }, std::{ error::Error, net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -228,6 +234,8 @@ pub struct ConnectionCache { tpu_udp_socket: Arc, client_certificate: Arc, use_quic: bool, + maybe_staked_nodes: Option>>, + maybe_client_pubkey: Option, } /// Models the pool of connections @@ -279,6 +287,15 @@ impl ConnectionCache { Ok(()) } + pub fn set_staked_nodes( + &mut self, + staked_nodes: &Arc>, + client_pubkey: &Pubkey, + ) { + self.maybe_staked_nodes = Some(staked_nodes.clone()); + self.maybe_client_pubkey = Some(*client_pubkey); + } + pub fn with_udp(connection_pool_size: usize) -> Self { // The minimum pool size is 1. 
let connection_pool_size = 1.max(connection_pool_size); @@ -303,6 +320,24 @@ impl ConnectionCache { } } + fn compute_max_parallel_chunks(&self) -> usize { + let (client_type, stake, total_stake) = + self.maybe_client_pubkey + .map_or((ConnectionPeerType::Unstaked, 0, 0), |pubkey| { + self.maybe_staked_nodes.as_ref().map_or( + (ConnectionPeerType::Unstaked, 0, 0), + |stakes| { + let rstakes = stakes.read().unwrap(); + rstakes.pubkey_stake_map.get(&pubkey).map_or( + (ConnectionPeerType::Unstaked, 0, rstakes.total_stake), + |stake| (ConnectionPeerType::Staked, *stake, rstakes.total_stake), + ) + }, + ) + }); + compute_max_allowed_uni_streams(client_type, stake, total_stake) + } + /// Create a lazy connection object under the exclusive lock of the cache map if there is not /// enough used connections in the connection pool for the specified address. /// Returns CreateConnectionResult. @@ -335,6 +370,7 @@ impl ConnectionCache { BaseTpuConnection::Quic(Arc::new(QuicClient::new( endpoint.as_ref().unwrap().clone(), *addr, + self.compute_max_parallel_chunks(), ))) }; @@ -534,6 +570,8 @@ impl Default for ConnectionCache { key: priv_key, }), use_quic: DEFAULT_TPU_USE_QUIC, + maybe_staked_nodes: None, + maybe_client_pubkey: None, } } } @@ -604,8 +642,18 @@ mod tests { }, rand::{Rng, SeedableRng}, rand_chacha::ChaChaRng, - solana_sdk::quic::QUIC_PORT_OFFSET, - std::net::{IpAddr, Ipv4Addr, SocketAddr}, + solana_sdk::{ + pubkey::Pubkey, + quic::{ + QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, QUIC_MIN_STAKED_CONCURRENT_STREAMS, + QUIC_PORT_OFFSET, + }, + }, + solana_streamer::streamer::StakedNodes, + std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + sync::{Arc, RwLock}, + }, }; fn get_addr(rng: &mut ChaChaRng) -> SocketAddr { @@ -661,6 +709,55 @@ mod tests { let _conn = map.get(&addr).expect("Address not found"); } + #[test] + fn test_connection_cache_max_parallel_chunks() { + solana_logger::setup(); + let mut connection_cache = ConnectionCache::default(); + assert_eq!( + 
connection_cache.compute_max_parallel_chunks(), + QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS + ); + + let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let pubkey = Pubkey::new_unique(); + connection_cache.set_staked_nodes(&staked_nodes, &pubkey); + assert_eq!( + connection_cache.compute_max_parallel_chunks(), + QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS + ); + + staked_nodes.write().unwrap().total_stake = 10000; + assert_eq!( + connection_cache.compute_max_parallel_chunks(), + QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS + ); + + staked_nodes + .write() + .unwrap() + .pubkey_stake_map + .insert(pubkey, 1); + assert_eq!( + connection_cache.compute_max_parallel_chunks(), + QUIC_MIN_STAKED_CONCURRENT_STREAMS + ); + + staked_nodes + .write() + .unwrap() + .pubkey_stake_map + .remove(&pubkey); + staked_nodes + .write() + .unwrap() + .pubkey_stake_map + .insert(pubkey, 1000); + assert_ne!( + connection_cache.compute_max_parallel_chunks(), + QUIC_MIN_STAKED_CONCURRENT_STREAMS + ); + } + // Test that we can get_connection with a connection cache configured for quic // on an address with a port that, if QUIC_PORT_OFFSET were added to it, it would overflow to // an invalid port. 
diff --git a/client/src/nonblocking/quic_client.rs b/client/src/nonblocking/quic_client.rs index 4f25d42126744e..153937203a7a66 100644 --- a/client/src/nonblocking/quic_client.rs +++ b/client/src/nonblocking/quic_client.rs @@ -263,15 +263,21 @@ pub struct QuicClient { connection: Arc>>, addr: SocketAddr, stats: Arc, + num_chunks: usize, } impl QuicClient { - pub fn new(endpoint: Arc, addr: SocketAddr) -> Self { + pub fn new( + endpoint: Arc, + addr: SocketAddr, + num_chunks: usize, + ) -> Self { Self { endpoint, connection: Arc::new(Mutex::new(None)), addr, stats: Arc::new(ClientStats::default()), + num_chunks, } } @@ -439,7 +445,7 @@ impl QuicClient { fn compute_chunk_length(num_buffers_to_chunk: usize, num_chunks: usize) -> usize { // The function is equivalent to checked div_ceil() - // Also, if num_chunks == 0 || num_buffers_per_chunk == 0, return 1 + // Also, if num_chunks == 0 || num_buffers_to_chunk == 0, return 1 num_buffers_to_chunk .checked_div(num_chunks) .map_or(1, |value| { @@ -483,8 +489,7 @@ impl QuicClient { // by just getting a reference to the NewConnection once let connection_ref: &NewConnection = &connection; - let chunk_len = - Self::compute_chunk_length(buffers.len() - 1, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS); + let chunk_len = Self::compute_chunk_length(buffers.len() - 1, self.num_chunks); let chunks = buffers[1..buffers.len()].iter().chunks(chunk_len); let futures: Vec<_> = chunks @@ -528,7 +533,11 @@ impl QuicTpuConnection { addr: SocketAddr, connection_stats: Arc, ) -> Self { - let client = Arc::new(QuicClient::new(endpoint, addr)); + let client = Arc::new(QuicClient::new( + endpoint, + addr, + QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, + )); Self::new_with_client(client, connection_stats) } diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 7f0be6319eac0d..cfd546869d6134 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -97,6 +97,7 @@ impl Tpu { keypair: &Keypair, log_messages_bytes_limit: Option, enable_quic_servers: bool, + 
staked_nodes: &Arc>, ) -> Self { let TpuSockets { transactions: transactions_sockets, @@ -124,7 +125,6 @@ impl Tpu { Some(bank_forks.read().unwrap().get_vote_only_mode_signal()), ); - let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let staked_nodes_updater_service = StakedNodesUpdaterService::new( exit.clone(), cluster_info.clone(), @@ -178,7 +178,7 @@ impl Tpu { forwarded_packet_sender, exit.clone(), MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes, + staked_nodes.clone(), MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), 0, // Prevent unstaked nodes from forwarding transactions stats, diff --git a/core/src/validator.rs b/core/src/validator.rs index 4bd725fb499183..ef0848ceca1773 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -98,7 +98,7 @@ use { timing::timestamp, }, solana_send_transaction_service::send_transaction_service, - solana_streamer::socket::SocketAddrSpace, + solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, solana_vote_program::vote_state::VoteState, std::{ collections::{HashMap, HashSet}, @@ -757,12 +757,15 @@ impl Validator { }; let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let connection_cache = match use_quic { true => { let mut connection_cache = ConnectionCache::new(tpu_connection_pool_size); connection_cache .update_client_certificate(&identity_keypair, node.info.gossip.ip()) .expect("Failed to update QUIC client certificates"); + connection_cache.set_staked_nodes(&staked_nodes, &identity_keypair.pubkey()); Arc::new(connection_cache) } false => Arc::new(ConnectionCache::with_udp(tpu_connection_pool_size)), @@ -1025,6 +1028,7 @@ impl Validator { &identity_keypair, config.runtime_config.log_messages_bytes_limit, config.enable_quic_servers, + &staked_nodes, ); datapoint_info!( diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index d103b6fba8b538..5f866d30e029ba 100644 --- 
a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -159,10 +159,10 @@ fn get_connection_stake( }) } -fn compute_max_allowed_uni_streams( +pub fn compute_max_allowed_uni_streams( peer_type: ConnectionPeerType, peer_stake: u64, - staked_nodes: Arc>, + total_stake: u64, ) -> usize { if peer_stake == 0 { // Treat stake = 0 as unstaked @@ -170,13 +170,11 @@ fn compute_max_allowed_uni_streams( } else { match peer_type { ConnectionPeerType::Staked => { - let staked_nodes = staked_nodes.read().unwrap(); - // No checked math for f64 type. So let's explicitly check for 0 here - if staked_nodes.total_stake == 0 { + if total_stake == 0 { QUIC_MIN_STAKED_CONCURRENT_STREAMS } else { - (((peer_stake as f64 / staked_nodes.total_stake as f64) + (((peer_stake as f64 / total_stake as f64) * QUIC_TOTAL_STAKED_CONCURRENT_STREAMS as f64) as usize) .max(QUIC_MIN_STAKED_CONCURRENT_STREAMS) @@ -264,17 +262,19 @@ async fn setup_connection( if let Some((mut connection_table_l, stake)) = table_and_stake { let table_type = connection_table_l.peer_type; - let max_uni_streams = VarInt::from_u64(compute_max_allowed_uni_streams( - table_type, - stake, - staked_nodes.clone(), - ) as u64); + let total_stake = staked_nodes.read().map_or(0, |stakes| stakes.total_stake); + drop(staked_nodes); + + let max_uni_streams = + VarInt::from_u64( + compute_max_allowed_uni_streams(table_type, stake, total_stake) as u64, + ); debug!( "Peer type: {:?}, stake {}, total stake {}, max streams {}", table_type, stake, - staked_nodes.read().unwrap().total_stake, + total_stake, max_uni_streams.unwrap().into_inner() ); @@ -558,7 +558,7 @@ impl Drop for ConnectionEntry { } #[derive(Copy, Clone, Debug)] -enum ConnectionPeerType { +pub enum ConnectionPeerType { Unstaked, Staked, } @@ -1406,58 +1406,52 @@ pub mod test { #[test] fn test_max_allowed_uni_streams() { - let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); assert_eq!( - 
compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 0, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 0, 0), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 10, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 10, 0), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 0, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 0, 0), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 10, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 10, 0), QUIC_MIN_STAKED_CONCURRENT_STREAMS ); - staked_nodes.write().unwrap().total_stake = 10000; assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 1000, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 1000, 10000), (QUIC_TOTAL_STAKED_CONCURRENT_STREAMS / (10_f64)) as usize ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 100, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 100, 10000), (QUIC_TOTAL_STAKED_CONCURRENT_STREAMS / (100_f64)) as usize ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 10, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 10, 10000), QUIC_MIN_STAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 1, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 1, 10000), QUIC_MIN_STAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 0, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Staked, 0, 10000), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS 
); assert_eq!( - compute_max_allowed_uni_streams( - ConnectionPeerType::Unstaked, - 1000, - staked_nodes.clone() - ), + compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 1000, 10000), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 1, staked_nodes.clone()), + compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 1, 10000), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); assert_eq!( - compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 0, staked_nodes), + compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 0, 10000), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); } From a0698d4cc3ba88510cdd2b869e9c7bc8448ab86a Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Fri, 29 Jul 2022 09:50:42 -0700 Subject: [PATCH 012/192] Make it clear that `minContextSlot` in `sendTransaction` affects _preflight_ checks (#26845) docs: make it clear that `minContextSlot` in `sendTransaction` affects _preflight_ checks --- docs/src/developing/clients/jsonrpc-api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/developing/clients/jsonrpc-api.md b/docs/src/developing/clients/jsonrpc-api.md index 4c5415c07bd360..24f8a794da3747 100644 --- a/docs/src/developing/clients/jsonrpc-api.md +++ b/docs/src/developing/clients/jsonrpc-api.md @@ -3399,7 +3399,7 @@ submission. - `encoding: ` - (optional) Encoding used for the transaction data. Either `"base58"` (_slow_, **DEPRECATED**), or `"base64"`. (default: `"base58"`). - `maxRetries: ` - (optional) Maximum number of times for the RPC node to retry sending the transaction to the leader. If this parameter not provided, the RPC node will retry the transaction until it is finalized or until the blockhash expires. - - (optional) `minContextSlot: ` - set the minimum slot that the request can be evaluated at. + - (optional) `minContextSlot: ` - set the minimum slot at which to perform preflight transaction checks. 
#### Results: From 69b5dd9b078eff4d611298724cc614cfdd82d80a Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 29 Jul 2022 12:10:22 -0500 Subject: [PATCH 013/192] cleanup ignore_eof_error (#26838) --- runtime/src/serde_snapshot/newer.rs | 8 ++------ sdk/src/deserialize_utils.rs | 8 ++++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 6c6f7b63c551f0..3dd73803cf3010 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -9,7 +9,7 @@ use { stakes::{serde_stakes_enum_compat, StakesEnum}, }, solana_measure::measure::Measure, - solana_sdk::stake::state::Delegation, + solana_sdk::{deserialize_utils::ignore_eof_error, stake::state::Delegation}, std::{cell::RefCell, collections::HashSet, sync::RwLock}, }; @@ -310,11 +310,7 @@ impl<'a> TypeContext<'a> for Context { deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?.into(); let accounts_db_fields = Self::deserialize_accounts_db_fields(stream)?; // Process extra fields - let lamports_per_signature: u64 = match deserialize_from(stream) { - Err(err) if err.to_string() == "io error: unexpected end of file" => Ok(0), - Err(err) if err.to_string() == "io error: failed to fill whole buffer" => Ok(0), - result => result, - }?; + let lamports_per_signature = ignore_eof_error(deserialize_from(&mut stream))?; bank_fields.fee_rate_governor = bank_fields .fee_rate_governor .clone_with_lamports_per_signature(lamports_per_signature); diff --git a/sdk/src/deserialize_utils.rs b/sdk/src/deserialize_utils.rs index fea63de9f7b61e..a5c098dc865f7d 100644 --- a/sdk/src/deserialize_utils.rs +++ b/sdk/src/deserialize_utils.rs @@ -10,6 +10,14 @@ where T: Deserialize<'de> + Default, { let result = T::deserialize(d); + ignore_eof_error::<'de, T, D::Error>(result) +} + +pub fn ignore_eof_error<'de, T, D>(result: Result) -> Result +where + T: Deserialize<'de> + Default, + D: 
std::fmt::Display, +{ match result { Err(err) if err.to_string() == "io error: unexpected end of file" => Ok(T::default()), Err(err) if err.to_string() == "io error: failed to fill whole buffer" => Ok(T::default()), From fbfcc3febfaa638acac169ab8b0fd1ac93f1b7ab Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 29 Jul 2022 12:56:04 -0500 Subject: [PATCH 014/192] Bugfix: VoteProcessingTiming reset both counters (#26843) --- core/src/cluster_info_vote_listener.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 12641262defdeb..ed94724824ff2d 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -194,7 +194,7 @@ const VOTE_PROCESSING_REPORT_INTERVAL_MS: u64 = 1_000; impl VoteProcessingTiming { fn reset(&mut self) { - self.gossip_slot_confirming_time_us = 0; + self.gossip_txn_processing_time_us = 0; self.gossip_slot_confirming_time_us = 0; } From 4d15e774eedf9fde3b55d743391ae79c18b87729 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 29 Jul 2022 13:58:05 -0500 Subject: [PATCH 015/192] insert perf improvement on bucket map (#26754) perf on bucket map --- bucket_map/src/bucket.rs | 95 +++++++++++++++++--------------- bucket_map/src/bucket_storage.rs | 14 ++--- 2 files changed, 56 insertions(+), 53 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index c0775ef21431ee..b5e6f581ce6298 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -142,38 +142,43 @@ impl Bucket { Self::bucket_find_entry(&self.index, key, self.random) } - fn find_entry_mut(&self, key: &Pubkey) -> Option<(&mut IndexEntry, u64)> { - Self::bucket_find_entry_mut(&self.index, key, self.random) - } - - fn bucket_find_entry_mut<'a>( - index: &'a BucketStorage, + fn find_entry_mut<'a>( + &'a self, key: &Pubkey, - random: u64, - ) -> Option<(&'a mut IndexEntry, u64)> { + ) -> Result<(bool, &'a mut 
IndexEntry, u64), BucketMapError> { + let ix = Self::bucket_index_ix(&self.index, key, self.random); + let mut first_free = None; let mut m = Measure::start("bucket_find_entry_mut"); - let ix = Self::bucket_index_ix(index, key, random); - for i in ix..ix + index.max_search() { - let ii = i % index.capacity(); - if index.is_free(ii) { + for i in ix..ix + self.index.max_search() { + let ii = i % self.index.capacity(); + if self.index.is_free(ii) { + if first_free.is_none() { + first_free = Some(ii); + } continue; } - let elem: &mut IndexEntry = index.get_mut(ii); + let elem: &mut IndexEntry = self.index.get_mut(ii); if elem.key == *key { m.stop(); - index - .stats + self.stats + .index .find_entry_mut_us .fetch_add(m.as_us(), Ordering::Relaxed); - return Some((elem, ii)); + return Ok((true, elem, ii)); } } m.stop(); - index - .stats + self.stats + .index .find_entry_mut_us .fetch_add(m.as_us(), Ordering::Relaxed); - None + match first_free { + Some(ii) => { + let elem: &mut IndexEntry = self.index.get_mut(ii); + Ok((false, elem, ii)) + } + None => Err(self.index_no_space()), + } } fn bucket_find_entry<'a>( @@ -231,25 +236,23 @@ impl Bucket { } pub fn addref(&mut self, key: &Pubkey) -> Option { - let (elem, _) = self.find_entry_mut(key)?; - elem.ref_count += 1; - Some(elem.ref_count) + if let Ok((found, elem, _)) = self.find_entry_mut(key) { + if found { + elem.ref_count += 1; + return Some(elem.ref_count); + } + } + None } pub fn unref(&mut self, key: &Pubkey) -> Option { - let (elem, _) = self.find_entry_mut(key)?; - elem.ref_count -= 1; - Some(elem.ref_count) - } - - fn create_key(&mut self, key: &Pubkey) -> Result { - Self::bucket_create_key( - &mut self.index, - key, - IndexEntry::key_uid(key), - self.random, - false, - ) + if let Ok((found, elem, _)) = self.find_entry_mut(key) { + if found { + elem.ref_count -= 1; + return Some(elem.ref_count); + } + } + None } pub fn read_value(&self, key: &Pubkey) -> Option<(&[T], RefCount)> { @@ -258,6 +261,10 @@ impl Bucket 
{ elem.read_value(self) } + fn index_no_space(&self) -> BucketMapError { + BucketMapError::IndexNoSpace(self.index.capacity_pow2) + } + pub fn try_write( &mut self, key: &Pubkey, @@ -269,15 +276,15 @@ impl Bucket { // fail early if the data bucket we need doesn't exist - we don't want the index entry partially allocated return Err(BucketMapError::DataNoSpace((best_fit_bucket, 0))); } - let index_entry = self.find_entry_mut(key); - let (elem, elem_ix) = match index_entry { - None => { - let ii = self.create_key(key)?; - let elem: &mut IndexEntry = self.index.get_mut(ii); - (elem, ii) - } - Some(res) => res, - }; + let (found, elem, elem_ix) = self.find_entry_mut(key)?; + if !found { + let is_resizing = false; + let elem_uid = IndexEntry::key_uid(key); + self.index.allocate(elem_ix, elem_uid, is_resizing).unwrap(); + // These fields will be overwritten after allocation by callers. + // Since this part of the mmapped file could have previously been used by someone else, there can be garbage here. + elem.init(key); + } elem.ref_count = ref_count; let elem_uid = self.index.uid_unchecked(elem_ix); let bucket_ix = elem.data_bucket_ix(); diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index 722a4d6a38e768..05463387b6e4a7 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -152,11 +152,12 @@ impl BucketStorage { } /// return ref to header of item 'ix' in mmapped file - fn header_mut_ptr(&mut self, ix: u64) -> &mut Header { + #[allow(clippy::mut_from_ref)] + fn header_mut_ptr(&self, ix: u64) -> &mut Header { let ix = (ix * self.cell_size) as usize; - let hdr_slice: &mut [u8] = &mut self.mmap[ix..ix + std::mem::size_of::
()]; + let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::
()]; unsafe { - let hdr = hdr_slice.as_mut_ptr() as *mut Header; + let hdr = hdr_slice.as_ptr() as *mut Header; hdr.as_mut().unwrap() } } @@ -181,12 +182,7 @@ impl BucketStorage { /// 'is_resizing' true if caller is resizing the index (so don't increment count) /// 'is_resizing' false if caller is adding an item to the index (so increment count) - pub fn allocate( - &mut self, - ix: u64, - uid: Uid, - is_resizing: bool, - ) -> Result<(), BucketStorageError> { + pub fn allocate(&self, ix: u64, uid: Uid, is_resizing: bool) -> Result<(), BucketStorageError> { assert!(ix < self.capacity(), "allocate: bad index size"); assert!(UID_UNLOCKED != uid, "allocate: bad uid"); let mut e = Err(BucketStorageError::AlreadyAllocated); From bda14e348a90fbcde347fdc591193b4e42b3aa58 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 29 Jul 2022 14:25:19 -0500 Subject: [PATCH 016/192] Feature-gate for incremental snapshots only hashing accounts in the incremental snapshot (#26809) Add feature module and pubkey for incremental snapshots hashing only accounts in the snapshot --- sdk/src/feature_set.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 4486cdf5a86a50..695eafb2bc8b01 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -488,6 +488,10 @@ pub mod concurrent_replay_of_forks { solana_sdk::declare_id!("9F2Dcu8xkBPKxiiy65XKPZYdCG3VZDpjDTuSmeYLozJe"); } +pub mod incremental_snapshot_only_incremental_hash_calculation { + solana_sdk::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -604,6 +608,7 @@ lazy_static! 
{ (use_default_units_in_fee_calculation::id(), "use default units per instruction in fee calculation #26785"), (compact_vote_state_updates::id(), "Compact vote state updates to lower block size"), (concurrent_replay_of_forks::id(), "Allow slots from different forks to be replayed concurrently #26465"), + (incremental_snapshot_only_incremental_hash_calculation::id(), "only hash accounts in incremental snapshot during incremental snapshot creation #26799"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 422cff69fdc6e9268020dd52db842c6a0b64a4cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Jul 2022 14:11:58 -0600 Subject: [PATCH 017/192] chore: bump cargo_metadata from 0.14.2 to 0.15.0 (#26803) Bumps [cargo_metadata](https://github.com/oli-obk/cargo_metadata) from 0.14.2 to 0.15.0. - [Release notes](https://github.com/oli-obk/cargo_metadata/releases) - [Changelog](https://github.com/oli-obk/cargo_metadata/blob/main/CHANGELOG.md) - [Commits](https://github.com/oli-obk/cargo_metadata/commits) --- updated-dependencies: - dependency-name: cargo_metadata dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- sdk/cargo-build-bpf/Cargo.toml | 2 +- sdk/cargo-build-sbf/Cargo.toml | 2 +- sdk/cargo-test-bpf/Cargo.toml | 2 +- sdk/cargo-test-sbf/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a60548edd0a8e1..5efa065750ae01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -652,9 +652,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52d74260d9bf6944e2208aa46841b4b8f0d7ffc0849a06837b2f510337f86b2b" +checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" dependencies = [ "serde", ] @@ -681,9 +681,9 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.14.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +checksum = "3abb7553d5b9b8421c6de7cb02606ff15e0c6eea7d8eadd75ef013fd636bec36" dependencies = [ "camino", "cargo-platform", diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml index cfab0746d21120..c21a756e71e23a 100644 --- a/sdk/cargo-build-bpf/Cargo.toml +++ b/sdk/cargo-build-bpf/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -cargo_metadata = "0.14.2" +cargo_metadata = "0.15.0" clap = { version = "3.1.5", features = ["cargo", "env"] } solana-sdk = { path = "..", version = "=1.11.5" } diff --git a/sdk/cargo-build-sbf/Cargo.toml b/sdk/cargo-build-sbf/Cargo.toml index b836bb14ac107a..52751c895b850c 100644 --- a/sdk/cargo-build-sbf/Cargo.toml +++ b/sdk/cargo-build-sbf/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] bzip2 = "0.4.3" -cargo_metadata = "0.14.2" +cargo_metadata = "0.15.0" clap = { version = "3.1.5", features = ["cargo", "env"] } log = { 
version = "0.4.14", features = ["std"] } regex = "1.5.6" diff --git a/sdk/cargo-test-bpf/Cargo.toml b/sdk/cargo-test-bpf/Cargo.toml index 8205619b95edfc..ee53c9fb8bf5ad 100644 --- a/sdk/cargo-test-bpf/Cargo.toml +++ b/sdk/cargo-test-bpf/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -cargo_metadata = "0.14.2" +cargo_metadata = "0.15.0" clap = { version = "3.1.5", features = ["cargo"] } [[bin]] diff --git a/sdk/cargo-test-sbf/Cargo.toml b/sdk/cargo-test-sbf/Cargo.toml index 512c7be4680ea4..d5711a8da0c4d8 100644 --- a/sdk/cargo-test-sbf/Cargo.toml +++ b/sdk/cargo-test-sbf/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -cargo_metadata = "0.14.2" +cargo_metadata = "0.15.0" clap = { version = "3.1.5", features = ["cargo"] } [[bin]] From 38cd29810f7a44cd80b6539343c6cbe4032ffb6f Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 29 Jul 2022 15:47:03 -0500 Subject: [PATCH 018/192] Fix ordering for AccountEntry ref_count (#26842) fix ordering for ref_count --- runtime/src/accounts_index.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 3a29216b4abb70..a2bd30d35d4bda 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -270,14 +270,14 @@ impl AccountMapEntryInner { } } pub fn ref_count(&self) -> RefCount { - self.ref_count.load(Ordering::Relaxed) + self.ref_count.load(Ordering::Acquire) } pub fn add_un_ref(&self, add: bool) { if add { - self.ref_count.fetch_add(1, Ordering::Relaxed); + self.ref_count.fetch_add(1, Ordering::Release); } else { - self.ref_count.fetch_sub(1, Ordering::Relaxed); + self.ref_count.fetch_sub(1, Ordering::Release); } self.set_dirty(true); } From c7462b7a52ba80ece798c30ea754bb9ac9aab73c Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 29 Jul 2022 15:54:56 -0500 Subject: [PATCH 019/192] ledger tool verify can store debug info on hash calc (#26837) --- 
ledger-tool/src/main.rs | 7 +++++++ ledger/src/blockstore_processor.rs | 3 +++ runtime/benches/accounts.rs | 1 + runtime/src/accounts.rs | 3 +++ runtime/src/accounts_db.rs | 5 ++++- runtime/src/bank.rs | 5 +++++ 6 files changed, 23 insertions(+), 1 deletion(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 45c351ff199ef1..a91e9888d6871c 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1160,6 +1160,10 @@ fn main() { .long("accounts-db-ancient-append-vecs") .help("AppendVecs that are older than an epoch are squashed together.") .hidden(true); + let halt_at_slot_store_hash_raw_data = Arg::with_name("halt_at_slot_store_hash_raw_data") + .long("halt-at-slot-store-hash-raw-data") + .help("After halting at slot, run an accounts hash calculation and store the raw hash data for debugging.") + .hidden(true); let verify_index_arg = Arg::with_name("verify_accounts_index") .long("verify-accounts-index") .takes_value(false) @@ -1512,6 +1516,7 @@ fn main() { .arg(&skip_rewrites_arg) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&ancient_append_vecs) + .arg(&halt_at_slot_store_hash_raw_data) .arg(&hard_forks_arg) .arg(&no_accounts_db_caching_arg) .arg(&accounts_db_test_hash_calculation_arg) @@ -2442,6 +2447,8 @@ fn main() { let process_options = ProcessOptions { new_hard_forks: hardforks_of(arg_matches, "hard_forks"), poh_verify: !arg_matches.is_present("skip_poh_verify"), + on_halt_store_hash_raw_data_for_debug: arg_matches + .is_present("halt_at_slot_store_hash_raw_data"), halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), debug_keys, accounts_db_caching_enabled: !arg_matches.is_present("no_accounts_db_caching"), diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 43208cf7068db6..7ac0e0db214e29 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -708,6 +708,7 @@ pub struct ProcessOptions { pub verify_index: bool, pub shrink_ratio: 
AccountShrinkThreshold, pub runtime_config: RuntimeConfig, + pub on_halt_store_hash_raw_data_for_debug: bool, } pub fn test_process_blockstore( @@ -1399,6 +1400,7 @@ fn load_frozen_forks( )?; let halt_at_slot = opts.halt_at_slot.unwrap_or(std::u64::MAX); + let on_halt_store_hash_raw_data_for_debug = opts.on_halt_store_hash_raw_data_for_debug; if bank_forks.read().unwrap().root() != halt_at_slot { while !pending_slots.is_empty() { timing.details.per_program_timings.clear(); @@ -1540,6 +1542,7 @@ fn load_frozen_forks( ignore_mismatch: true, require_rooted_bank: false, run_in_background: false, + store_hash_raw_data_for_debug: on_halt_store_hash_raw_data_for_debug, }); break; } diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 71c84e33b2f751..8fd7d00f959e9d 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -126,6 +126,7 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { &RentCollector::default(), false, false, + false, )) }); } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index b75854a659d1ff..95dc88f6249ffd 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -814,6 +814,7 @@ impl Accounts { /// Only called from startup or test code. 
#[must_use] + #[allow(clippy::too_many_arguments)] pub fn verify_bank_hash_and_lamports( &self, slot: Slot, @@ -824,6 +825,7 @@ impl Accounts { rent_collector: &RentCollector, can_cached_slot_be_unflushed: bool, ignore_mismatch: bool, + store_detailed_debug_info: bool, ) -> bool { if let Err(err) = self.accounts_db.verify_bank_hash_and_lamports_new( slot, @@ -834,6 +836,7 @@ impl Accounts { rent_collector, can_cached_slot_be_unflushed, ignore_mismatch, + store_detailed_debug_info, ) { warn!("verify_bank_hash failed: {:?}, slot: {}", err, slot); false diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 0b1947dd1bc5d7..300211e2d84ecd 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6990,10 +6990,12 @@ impl AccountsDb { rent_collector, can_cached_slot_be_unflushed, false, + false, ) } /// Only called from startup or test code. + #[allow(clippy::too_many_arguments)] pub fn verify_bank_hash_and_lamports_new( &self, slot: Slot, @@ -7004,6 +7006,7 @@ impl AccountsDb { rent_collector: &RentCollector, can_cached_slot_be_unflushed: bool, ignore_mismatch: bool, + store_hash_raw_data_for_debug: bool, ) -> Result<(), BankHashVerificationError> { use BankHashVerificationError::*; @@ -7023,7 +7026,7 @@ impl AccountsDb { use_write_cache: can_cached_slot_be_unflushed, epoch_schedule, rent_collector, - store_detailed_debug_info_on_failure: false, + store_detailed_debug_info_on_failure: store_hash_raw_data_for_debug, full_snapshot: None, }, None, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c38eea8ce70ee2..57fb7bbad944aa 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -178,6 +178,7 @@ pub struct VerifyBankHash { pub ignore_mismatch: bool, pub require_rooted_bank: bool, pub run_in_background: bool, + pub store_hash_raw_data_for_debug: bool, } #[derive(Debug, Default)] @@ -6903,6 +6904,7 @@ impl Bank { &rent_collector, config.can_cached_slot_be_unflushed, config.ignore_mismatch, + 
config.store_hash_raw_data_for_debug, ); accounts_ .accounts_db @@ -6923,6 +6925,7 @@ impl Bank { rent_collector, config.can_cached_slot_be_unflushed, config.ignore_mismatch, + config.store_hash_raw_data_for_debug, ); self.set_initial_accounts_hash_verification_completed(); result @@ -7162,6 +7165,7 @@ impl Bank { ignore_mismatch: false, require_rooted_bank: false, run_in_background: true, + store_hash_raw_data_for_debug: false, }); verify_time.stop(); (verify, verify_time.as_us()) @@ -10393,6 +10397,7 @@ pub(crate) mod tests { ignore_mismatch: false, require_rooted_bank: false, run_in_background: false, + store_hash_raw_data_for_debug: false, } } } From ddfa64dcd99ef0fbf59e8b4fbc89db71ad064dea Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Sat, 30 Jul 2022 14:22:00 +0200 Subject: [PATCH 020/192] add withdraw durable nonce (#26829) * add withdraw durable nonce * Use Pubkey instead of Keypair in bench-tps withdraw Before &Keypair was passed although it is not necessary because nonce doesn't sign withdraw account transactions anyways. 
--- bench-tps/src/bench.rs | 1 + bench-tps/src/send_batch.rs | 180 +++++++++++++++++++++++++----------- 2 files changed, 127 insertions(+), 54 deletions(-) diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 633168e149b5d1..1e19ef089ceca8 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -924,5 +924,6 @@ mod tests { rent ); } + withdraw_durable_nonce_accounts(client, &authority_keypairs, &nonce_keypairs) } } diff --git a/bench-tps/src/send_batch.rs b/bench-tps/src/send_batch.rs index 7487d17bcb1068..77a74034904b0d 100644 --- a/bench-tps/src/send_batch.rs +++ b/bench-tps/src/send_batch.rs @@ -109,17 +109,35 @@ pub fn generate_durable_nonce_accounts = authority_keypairs + let to_fund: Vec = authority_keypairs .iter() .zip(nonce_keypairs.iter()) + .map(|x| NonceCreateSigners(x.0, x.1)) .collect(); to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| { - NonceContainer::with_capacity(chunk.len()).create_accounts(&client, chunk, nonce_rent); + NonceCreateContainer::with_capacity(chunk.len()) + .create_accounts(&client, chunk, nonce_rent); }); nonce_keypairs } +pub fn withdraw_durable_nonce_accounts( + client: Arc, + authority_keypairs: &[Keypair], + nonce_keypairs: &[Keypair], +) { + let to_withdraw: Vec = authority_keypairs + .iter() + .zip(nonce_keypairs.iter()) + .map(|x| NonceWithdrawSigners(x.0, x.1.pubkey())) + .collect(); + + to_withdraw.chunks(FUND_CHUNK_LEN).for_each(|chunk| { + NonceWithdrawContainer::with_capacity(chunk.len()).withdraw_accounts(&client, chunk); + }); +} + const MAX_SPENDS_PER_TX: u64 = 4; // Size of the chunk of transactions @@ -144,6 +162,11 @@ fn verify_funding_transfer( /// Helper trait to encapsulate common logic for sending transactions batch /// trait SendBatchTransactions<'a, T: Sliceable + Send + Sync> { + fn make (T, Transaction) + Send + Sync>( + &mut self, + chunk: &[V], + create_transaction: F, + ); fn send_transactions(&mut self, client: &Arc, to_lamports: u64, log_progress: F) where C: 'static + 
BenchTpsClient + Send + Sync, @@ -170,6 +193,18 @@ impl<'a, T: Sliceable + Send + Sync> SendBatchTransactions<'a, T> for Vec<(T, Tr where ::Slice: Signers, { + fn make (T, Transaction) + Send + Sync>( + &mut self, + chunk: &[V], + create_transaction: F, + ) { + let mut make_txs = Measure::start("make_txs"); + let txs: Vec<(T, Transaction)> = chunk.par_iter().map(create_transaction).collect(); + make_txs.stop(); + debug!("make {} unsigned txs: {}us", txs.len(), make_txs.as_us()); + self.extend(txs); + } + fn send_transactions(&mut self, client: &Arc, to_lamports: u64, log_progress: F) where C: 'static + BenchTpsClient + Send + Sync, @@ -312,7 +347,6 @@ trait FundingTransactions<'a>: SendBatchTransactions<'a, FundingSigners<'a>> { to_fund: &FundingChunk<'a>, to_lamports: u64, ); - fn make(&mut self, to_fund: &FundingChunk<'a>); } impl<'a> FundingTransactions<'a> for FundingContainer<'a> { @@ -322,7 +356,11 @@ impl<'a> FundingTransactions<'a> for FundingContainer<'a> { to_fund: &FundingChunk<'a>, to_lamports: u64, ) { - self.make(to_fund); + self.make(to_fund, |(k, t)| -> (FundingSigners<'a>, Transaction) { + let instructions = system_instruction::transfer_many(&k.pubkey(), t); + let message = Message::new(&instructions, Some(&k.pubkey())); + (*k, Transaction::new_unsigned(message)) + }); let log_progress = |tries: usize, batch_len: usize| { info!( @@ -339,28 +377,15 @@ impl<'a> FundingTransactions<'a> for FundingContainer<'a> { }; self.send_transactions(client, to_lamports, log_progress); } - - fn make(&mut self, to_fund: &FundingChunk<'a>) { - let mut make_txs = Measure::start("make_txs"); - let to_fund_txs: FundingContainer<'a> = to_fund - .par_iter() - .map(|(k, t)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), t); - let message = Message::new(&instructions, Some(&k.pubkey())); - (*k, Transaction::new_unsigned(message)) - }) - .collect(); - make_txs.stop(); - debug!( - "make {} unsigned txs: {}us", - to_fund_txs.len(), - make_txs.as_us() - 
); - self.extend(to_fund_txs); - } } -impl<'a> Sliceable for (&'a Keypair, &'a Keypair) { +// Introduce a new structure to specify Sliceable implementations +// which uses both Keypairs to sign the transaction +struct NonceCreateSigners<'a>(&'a Keypair, &'a Keypair); +type NonceCreateChunk<'a> = [NonceCreateSigners<'a>]; +type NonceCreateContainer<'a> = Vec<(NonceCreateSigners<'a>, Transaction)>; + +impl<'a> Sliceable for NonceCreateSigners<'a> { type Slice = [&'a Keypair; 2]; fn as_slice(&self) -> Self::Slice { [self.0, self.1] @@ -370,28 +395,36 @@ impl<'a> Sliceable for (&'a Keypair, &'a Keypair) { } } -type NonceSigners<'a> = (&'a Keypair, &'a Keypair); -type NonceChunk<'a> = [NonceSigners<'a>]; -type NonceContainer<'a> = Vec<(NonceSigners<'a>, Transaction)>; - -trait CreateNonceTransactions<'a>: SendBatchTransactions<'a, (&'a Keypair, &'a Keypair)> { +trait NonceTransactions<'a>: SendBatchTransactions<'a, NonceCreateSigners<'a>> { fn create_accounts( &mut self, client: &Arc, - to_fund: &'a NonceChunk<'a>, + to_fund: &'a NonceCreateChunk<'a>, nonce_rent: u64, ); - fn make(&mut self, nonce_rent: u64, to_fund: &'a NonceChunk<'a>); } -impl<'a> CreateNonceTransactions<'a> for NonceContainer<'a> { +impl<'a> NonceTransactions<'a> for NonceCreateContainer<'a> { fn create_accounts( &mut self, client: &Arc, - to_fund: &'a NonceChunk<'a>, + to_fund: &'a NonceCreateChunk<'a>, nonce_rent: u64, ) { - self.make(nonce_rent, to_fund); + self.make(to_fund, |kp| -> (NonceCreateSigners<'a>, Transaction) { + let authority = kp.0; + let nonce: &Keypair = kp.1; + let instructions = system_instruction::create_nonce_account( + &authority.pubkey(), + &nonce.pubkey(), + &authority.pubkey(), + nonce_rent, + ); + ( + NonceCreateSigners(authority, nonce), + Transaction::new_with_payer(&instructions, Some(&authority.pubkey())), + ) + }); let log_progress = |tries: usize, batch_len: usize| { info!( @@ -402,30 +435,69 @@ impl<'a> CreateNonceTransactions<'a> for NonceContainer<'a> { }; 
self.send_transactions(client, nonce_rent, log_progress); } +} - fn make(&mut self, nonce_rent: u64, to_fund: &'a NonceChunk<'a>) { - let mut make_txs = Measure::start("make_txs"); - let to_fund_txs: NonceContainer = to_fund - .par_iter() - .map(|(authority, nonce)| { - let instructions = system_instruction::create_nonce_account( - &authority.pubkey(), - &nonce.pubkey(), - &authority.pubkey(), - nonce_rent, - ); +// Only Pubkey is required for nonce because it doesn't sign withdraw account transaction +struct NonceWithdrawSigners<'a>(&'a Keypair, Pubkey); +type NonceWithdrawChunk<'a> = [NonceWithdrawSigners<'a>]; +type NonceWithdrawContainer<'a> = Vec<(NonceWithdrawSigners<'a>, Transaction)>; + +impl<'a> Sliceable for NonceWithdrawSigners<'a> { + type Slice = [&'a Keypair; 1]; + fn as_slice(&self) -> Self::Slice { + [self.0] + } + fn get_pubkey(&self) -> Pubkey { + self.0.pubkey() + } +} + +trait NonceWithdrawTransactions<'a>: SendBatchTransactions<'a, NonceWithdrawSigners<'a>> { + fn withdraw_accounts( + &mut self, + client: &Arc, + to_withdraw: &'a NonceWithdrawChunk<'a>, + ); +} +impl<'a> NonceWithdrawTransactions<'a> for NonceWithdrawContainer<'a> { + fn withdraw_accounts( + &mut self, + client: &Arc, + to_withdraw: &'a NonceWithdrawChunk<'a>, + ) { + self.make( + to_withdraw, + |kp| -> (NonceWithdrawSigners<'a>, Transaction) { + let authority = kp.0; + let nonce_pubkey: Pubkey = kp.1; + let nonce_balance = client.get_balance(&nonce_pubkey).unwrap(); + let instructions = vec![ + system_instruction::withdraw_nonce_account( + &nonce_pubkey, + &authority.pubkey(), + &authority.pubkey(), + nonce_balance, + ); + 1 + ]; ( - (*authority, *nonce), + NonceWithdrawSigners(authority, nonce_pubkey), Transaction::new_with_payer(&instructions, Some(&authority.pubkey())), ) - }) - .collect(); - make_txs.stop(); - debug!( - "make {} unsigned txs: {}us", - to_fund_txs.len(), - make_txs.as_us() + }, ); - self.extend(to_fund_txs); + + let log_progress = |tries: usize, batch_len: 
usize| { + info!( + "@ {} {} accounts", + if tries == 0 { + "withdrawing" + } else { + " retrying" + }, + batch_len, + ); + }; + self.send_transactions(client, 0, log_progress); } } From 8db5a6a4f2d5e29b5745857af90c1c379227ce42 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Sat, 30 Jul 2022 08:11:38 -0700 Subject: [PATCH 021/192] Copy changes made to a generated header file to its source --- sdk/bpf/c/inc/sol/inc/cpi.inc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/sdk/bpf/c/inc/sol/inc/cpi.inc b/sdk/bpf/c/inc/sol/inc/cpi.inc index 6bd0e099a86f40..ce615e90b84f4d 100644 --- a/sdk/bpf/c/inc/sol/inc/cpi.inc +++ b/sdk/bpf/c/inc/sol/inc/cpi.inc @@ -11,6 +11,28 @@ extern "C" { #endif +/** + * Maximum CPI instruction data size. 10 KiB was chosen to ensure that CPI + * instructions are not more limited than transaction instructions if the size + * of transactions is doubled in the future. + */ +static const uint64_t MAX_CPI_INSTRUCTION_DATA_LEN = 10240; + +/** + * Maximum CPI instruction accounts. 255 was chosen to ensure that instruction + * accounts are always within the maximum instruction account limit for BPF + * program instructions. + */ +static const uint8_t MAX_CPI_INSTRUCTION_ACCOUNTS = 255; + +/** + * Maximum number of account info structs that can be used in a single CPI + * invocation. A limit on account info structs is effectively the same as + * limiting the number of unique accounts. 64 was chosen to match the max + * number of locked accounts per transaction (MAX_TX_ACCOUNT_LOCKS). 
+ */ +static const uint8_t MAX_CPI_ACCOUNT_INFOS = 64; + /** * Account Meta */ From 857be1e2370dc383f7bc03faeb1f1e00f8ab59ca Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Sun, 31 Jul 2022 15:48:51 -0700 Subject: [PATCH 022/192] sign repair requests (#26833) --- core/src/ancestor_hashes_service.rs | 37 +- core/src/repair_service.rs | 53 +- core/src/serve_repair.rs | 1077 +++++++++++++++++++++++---- core/src/shred_fetch_stage.rs | 36 +- core/src/tvu.rs | 1 + core/src/validator.rs | 5 +- dos/src/main.rs | 70 +- gossip/src/ping_pong.rs | 4 + ledger/src/shred/stats.rs | 4 + sdk/src/feature_set.rs | 5 + 10 files changed, 1093 insertions(+), 199 deletions(-) diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index 9804ba8be2be0c..f07960d9f775ba 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -21,6 +21,7 @@ use { solana_sdk::{ clock::{Slot, SLOT_MS}, pubkey::Pubkey, + signer::keypair::Keypair, timing::timestamp, }, solana_streamer::streamer::{self, PacketBatchReceiver, StreamerReceiveStats}, @@ -451,7 +452,10 @@ impl AncestorHashesService { ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver, retryable_slots_receiver: RetryableSlotsReceiver, ) -> JoinHandle<()> { - let serve_repair = ServeRepair::new(repair_info.cluster_info.clone()); + let serve_repair = ServeRepair::new( + repair_info.cluster_info.clone(), + repair_info.bank_forks.clone(), + ); let mut repair_stats = AncestorRepairRequestsStats::default(); let mut dead_slot_pool = HashSet::new(); @@ -540,6 +544,8 @@ impl AncestorHashesService { // Keep around the last second of requests in the throttler. 
request_throttle.retain(|request_time| *request_time > (timestamp() - 1000)); + let identity_keypair: &Keypair = &repair_info.cluster_info.keypair().clone(); + let number_of_allowed_requests = MAX_ANCESTOR_HASHES_SLOT_REQUESTS_PER_SECOND.saturating_sub(request_throttle.len()); @@ -563,6 +569,8 @@ impl AncestorHashesService { slot, repair_stats, outstanding_requests, + identity_keypair, + &root_bank, ) { request_throttle.push(timestamp()); repairable_dead_slot_pool.take(&slot).unwrap(); @@ -627,6 +635,7 @@ impl AncestorHashesService { /// Returns true if a request was successfully made and the status /// added to `ancestor_hashes_request_statuses` + #[allow(clippy::too_many_arguments)] fn initiate_ancestor_hashes_requests_for_duplicate_slot( ancestor_hashes_request_statuses: &DashMap, ancestor_hashes_request_socket: &UdpSocket, @@ -636,6 +645,8 @@ impl AncestorHashesService { duplicate_slot: Slot, repair_stats: &mut AncestorRepairRequestsStats, outstanding_requests: &RwLock, + identity_keypair: &Keypair, + root_bank: &Bank, ) -> bool { let sampled_validators = serve_repair.repair_request_ancestor_hashes_sample_peers( duplicate_slot, @@ -652,8 +663,13 @@ impl AncestorHashesService { .write() .unwrap() .add_request(AncestorHashesRepairType(duplicate_slot), timestamp()); - let request_bytes = - serve_repair.ancestor_repair_request_bytes(duplicate_slot, nonce); + let request_bytes = serve_repair.ancestor_repair_request_bytes( + identity_keypair, + root_bank, + pubkey, + duplicate_slot, + nonce, + ); if let Ok(request_bytes) = request_bytes { let _ = ancestor_hashes_request_socket.send_to(&request_bytes, socket_addr); } @@ -877,14 +893,17 @@ mod test { fn new(slot_to_query: Slot) -> Self { assert!(slot_to_query >= MAX_ANCESTOR_RESPONSES as Slot); + let vote_simulator = VoteSimulator::new(3); let responder_node = Node::new_localhost(); let cluster_info = ClusterInfo::new( responder_node.info.clone(), Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, ); - let 
responder_serve_repair = - Arc::new(RwLock::new(ServeRepair::new(Arc::new(cluster_info)))); + let responder_serve_repair = Arc::new(RwLock::new(ServeRepair::new( + Arc::new(cluster_info), + vote_simulator.bank_forks, + ))); // Set up thread to give us responses let ledger_path = get_tmp_ledger_path!(); @@ -968,7 +987,8 @@ mod test { Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); - let requester_serve_repair = ServeRepair::new(requester_cluster_info.clone()); + let requester_serve_repair = + ServeRepair::new(requester_cluster_info.clone(), bank_forks.clone()); let (duplicate_slots_reset_sender, _duplicate_slots_reset_receiver) = unbounded(); let repair_info = RepairInfo { bank_forks, @@ -1074,6 +1094,7 @@ mod test { } = ManageAncestorHashesState::new(vote_simulator.bank_forks); let RepairInfo { + bank_forks, cluster_info: requester_cluster_info, cluster_slots, repair_validators, @@ -1089,6 +1110,8 @@ mod test { dead_slot, &mut repair_stats, &outstanding_requests, + &requester_cluster_info.keypair(), + &bank_forks.read().unwrap().root_bank(), ); assert!(ancestor_hashes_request_statuses.is_empty()); @@ -1106,6 +1129,8 @@ mod test { dead_slot, &mut repair_stats, &outstanding_requests, + &requester_cluster_info.keypair(), + &bank_forks.read().unwrap().root_bank(), ); assert_eq!(ancestor_hashes_request_statuses.len(), 1); diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index b3e8d5b4798550..018824c7935e9e 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -17,7 +17,10 @@ use { solana_ledger::blockstore::{Blockstore, SlotMeta}, solana_measure::measure::Measure, solana_runtime::{bank_forks::BankForks, contains::Contains}, - solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, hash::Hash, pubkey::Pubkey}, + solana_sdk::{ + clock::Slot, epoch_schedule::EpochSchedule, hash::Hash, pubkey::Pubkey, + signer::keypair::Keypair, + }, solana_streamer::sendmmsg::{batch_send, SendPktsError}, std::{ collections::{HashMap, 
HashSet}, @@ -246,7 +249,10 @@ impl RepairService { outstanding_requests: &RwLock, ) { let mut repair_weight = RepairWeight::new(repair_info.bank_forks.read().unwrap().root()); - let serve_repair = ServeRepair::new(repair_info.cluster_info.clone()); + let serve_repair = ServeRepair::new( + repair_info.cluster_info.clone(), + repair_info.bank_forks.clone(), + ); let id = repair_info.cluster_info.id(); let mut repair_stats = RepairStats::default(); let mut repair_timing = RepairTiming::default(); @@ -265,8 +271,11 @@ impl RepairService { let mut get_votes_elapsed; let mut add_votes_elapsed; + let root_bank = repair_info.bank_forks.read().unwrap().root_bank(); + let sign_repair_requests_feature_epoch = + ServeRepair::sign_repair_requests_activated_epoch(&root_bank); + let repairs = { - let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone(); let new_root = root_bank.slot(); // Purge outdated slots from the weighting heuristic @@ -314,12 +323,24 @@ impl RepairService { repairs }; + let identity_keypair: &Keypair = &repair_info.cluster_info.keypair().clone(); + let mut build_repairs_batch_elapsed = Measure::start("build_repairs_batch_elapsed"); let batch: Vec<(Vec, SocketAddr)> = { let mut outstanding_requests = outstanding_requests.write().unwrap(); repairs .iter() .filter_map(|repair_request| { + let sign_repair_request = ServeRepair::should_sign_repair_request( + repair_request.slot(), + &root_bank, + sign_repair_requests_feature_epoch, + ); + let maybe_keypair = if sign_repair_request { + Some(identity_keypair) + } else { + None + }; let (to, req) = serve_repair .repair_request( &repair_info.cluster_slots, @@ -328,6 +349,7 @@ impl RepairService { &mut repair_stats, &repair_info.repair_validators, &mut outstanding_requests, + maybe_keypair, ) .ok()?; Some((req, to)) @@ -653,8 +675,13 @@ impl RepairService { repair_stats: &mut RepairStats, nonce: Nonce, ) -> Result<()> { - let req = - serve_repair.map_repair_request(repair_type, repair_pubkey, 
repair_stats, nonce)?; + let req = serve_repair.map_repair_request( + repair_type, + repair_pubkey, + repair_stats, + nonce, + None, + )?; repair_socket.send_to(&req, to)?; Ok(()) } @@ -722,9 +749,11 @@ mod test { blockstore::{ make_chaining_slot_entries, make_many_slot_entries, make_slot_entries, Blockstore, }, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, shred::max_ticks_per_n_shreds, }, + solana_runtime::bank::Bank, solana_sdk::signature::Keypair, solana_streamer::socket::SocketAddrSpace, std::collections::HashSet, @@ -1044,11 +1073,16 @@ mod test { #[test] pub fn test_generate_and_send_duplicate_repairs() { + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let cluster_slots = ClusterSlots::default(); - let serve_repair = - ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info))); + let serve_repair = ServeRepair::new( + Arc::new(new_test_cluster_info(Node::new_localhost().info)), + bank_forks, + ); let mut duplicate_slot_repair_statuses = HashMap::new(); let dead_slot = 9; let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -1127,12 +1161,15 @@ mod test { #[test] pub fn test_update_duplicate_slot_repair_addr() { + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let dummy_addr = Some(( Pubkey::default(), UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap(), )); let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info)); - let serve_repair = ServeRepair::new(cluster_info.clone()); + let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); let valid_repair_peer = Node::new_localhost().info; // Signal that this peer has confirmed the dead slot, and is thus diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 5271aff0fa00bb..9020d70b5a3d1b 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -16,25 +16,39 @@ use { solana_gossip::{ cluster_info::{ClusterInfo, ClusterInfoError}, contact_info::ContactInfo, + ping_pong::{self, PingCache, Pong}, weighted_shuffle::WeightedShuffle, }, solana_ledger::{ ancestor_iterator::{AncestorIterator, AncestorIteratorWithHash}, blockstore::Blockstore, - shred::{Nonce, Shred, SIZE_OF_NONCE}, + shred::{Nonce, Shred, ShredFetchStats, SIZE_OF_NONCE}, }, solana_metrics::inc_new_counter_debug, solana_perf::{ data_budget::DataBudget, - packet::{PacketBatch, PacketBatchRecycler}, + packet::{Packet, PacketBatch, PacketBatchRecycler}, }, + solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ - clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms, + clock::Slot, + feature_set::sign_repair_requests, + hash::{Hash, HASH_BYTES}, + packet::PACKET_DATA_SIZE, + pubkey::{Pubkey, PUBKEY_BYTES}, + signature::{Signable, Signature, Signer, SIGNATURE_BYTES}, + signer::keypair::Keypair, + stake_history::Epoch, + timing::{duration_as_ms, timestamp}, + }, + solana_streamer::{ + sendmmsg::{batch_send, SendPktsError}, + socket::SocketAddrSpace, + streamer::{PacketBatchReceiver, PacketBatchSender}, }, - solana_streamer::streamer::{PacketBatchReceiver, 
PacketBatchSender}, std::{ collections::HashSet, - net::SocketAddr, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -59,6 +73,14 @@ pub const MAX_ANCESTOR_BYTES_IN_PACKET: usize = 4 /*slot_hash length*/; pub const MAX_ANCESTOR_RESPONSES: usize = MAX_ANCESTOR_BYTES_IN_PACKET / std::mem::size_of::(); +/// Number of bytes in the randomly generated token sent with ping messages. +pub(crate) const REPAIR_PING_TOKEN_SIZE: usize = HASH_BYTES; +pub const REPAIR_PING_CACHE_CAPACITY: usize = 65536; +pub const REPAIR_PING_CACHE_TTL: Duration = Duration::from_secs(1280); +pub(crate) const REPAIR_RESPONSE_SERIALIZED_PING_BYTES: usize = + 4 /*enum discriminator*/ + PUBKEY_BYTES + REPAIR_PING_TOKEN_SIZE + SIGNATURE_BYTES; +const SIGNED_REPAIR_TIME_WINDOW: Duration = Duration::from_secs(60 * 10); // 10 min + #[cfg(test)] static_assertions::const_assert_eq!(MAX_ANCESTOR_RESPONSES, 30); @@ -143,35 +165,143 @@ impl RequestResponse for AncestorHashesRepairType { } #[derive(Default)] -pub struct ServeRepairStats { - pub total_requests: usize, - pub dropped_requests: usize, - pub total_dropped_response_packets: usize, - pub total_response_packets: usize, - pub total_response_bytes: usize, - pub processed: usize, - pub self_repair: usize, - pub window_index: usize, - pub highest_window_index: usize, - pub orphan: usize, - pub ancestor_hashes: usize, +struct ServeRepairStats { + total_requests: usize, + dropped_requests: usize, + total_dropped_response_packets: usize, + total_response_packets: usize, + total_response_bytes: usize, + processed: usize, + self_repair: usize, + window_index: usize, + highest_window_index: usize, + orphan: usize, + pong: usize, + ancestor_hashes: usize, + pings_required: usize, + err_time_skew: usize, + err_malformed: usize, + err_sig_verify: usize, + err_unsigned: usize, + err_id_mismatch: usize, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct RepairRequestHeader { + signature: Signature, + sender: Pubkey, + 
recipient: Pubkey, + timestamp: u64, + nonce: Nonce, } +impl RepairRequestHeader { + pub fn new(sender: Pubkey, recipient: Pubkey, timestamp: u64, nonce: Nonce) -> Self { + Self { + signature: Signature::default(), + sender, + recipient, + timestamp, + nonce, + } + } +} + +pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>; + /// Window protocol messages #[derive(Serialize, Deserialize, Debug)] pub enum RepairProtocol { - WindowIndex(ContactInfo, Slot, u64), - HighestWindowIndex(ContactInfo, Slot, u64), - Orphan(ContactInfo, Slot), - WindowIndexWithNonce(ContactInfo, Slot, u64, Nonce), - HighestWindowIndexWithNonce(ContactInfo, Slot, u64, Nonce), - OrphanWithNonce(ContactInfo, Slot, Nonce), - AncestorHashes(ContactInfo, Slot, Nonce), + LegacyWindowIndex(ContactInfo, Slot, u64), + LegacyHighestWindowIndex(ContactInfo, Slot, u64), + LegacyOrphan(ContactInfo, Slot), + LegacyWindowIndexWithNonce(ContactInfo, Slot, u64, Nonce), + LegacyHighestWindowIndexWithNonce(ContactInfo, Slot, u64, Nonce), + LegacyOrphanWithNonce(ContactInfo, Slot, Nonce), + LegacyAncestorHashes(ContactInfo, Slot, Nonce), + Pong(ping_pong::Pong), + WindowIndex { + header: RepairRequestHeader, + slot: Slot, + shred_index: u64, + }, + HighestWindowIndex { + header: RepairRequestHeader, + slot: Slot, + shred_index: u64, + }, + Orphan { + header: RepairRequestHeader, + slot: Slot, + }, + AncestorHashes { + header: RepairRequestHeader, + slot: Slot, + }, +} + +#[derive(Serialize, Deserialize, Debug)] +enum RepairResponse { + Ping(Ping), +} + +impl RepairProtocol { + fn sender(&self) -> &Pubkey { + match self { + Self::LegacyWindowIndex(ci, _, _) => &ci.id, + Self::LegacyHighestWindowIndex(ci, _, _) => &ci.id, + Self::LegacyOrphan(ci, _) => &ci.id, + Self::LegacyWindowIndexWithNonce(ci, _, _, _) => &ci.id, + Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => &ci.id, + Self::LegacyOrphanWithNonce(ci, _, _) => &ci.id, + Self::LegacyAncestorHashes(ci, _, _) => &ci.id, + 
Self::Pong(pong) => pong.from(), + Self::WindowIndex { header, .. } => &header.sender, + Self::HighestWindowIndex { header, .. } => &header.sender, + Self::Orphan { header, .. } => &header.sender, + Self::AncestorHashes { header, .. } => &header.sender, + } + } + + fn supports_signature(&self) -> bool { + match self { + Self::LegacyWindowIndex(_, _, _) + | Self::LegacyHighestWindowIndex(_, _, _) + | Self::LegacyOrphan(_, _) + | Self::LegacyWindowIndexWithNonce(_, _, _, _) + | Self::LegacyHighestWindowIndexWithNonce(_, _, _, _) + | Self::LegacyOrphanWithNonce(_, _, _) + | Self::LegacyAncestorHashes(_, _, _) => false, + Self::Pong(_) + | Self::WindowIndex { .. } + | Self::HighestWindowIndex { .. } + | Self::Orphan { .. } + | Self::AncestorHashes { .. } => true, + } + } + + fn requires_ping_check(&self) -> bool { + match self { + Self::LegacyWindowIndex(_, _, _) + | Self::LegacyHighestWindowIndex(_, _, _) + | Self::LegacyOrphan(_, _) + | Self::LegacyWindowIndexWithNonce(_, _, _, _) + | Self::LegacyHighestWindowIndexWithNonce(_, _, _, _) + | Self::LegacyOrphanWithNonce(_, _, _) + | Self::LegacyAncestorHashes(_, _, _) + | Self::Pong(_) + | Self::AncestorHashes { .. } => false, + Self::WindowIndex { .. } | Self::HighestWindowIndex { .. } | Self::Orphan { .. } => { + true + } + } + } } #[derive(Clone)] pub struct ServeRepair { cluster_info: Arc, + bank_forks: Arc>, } // Cache entry for repair peers for a slot. 
@@ -208,8 +338,11 @@ impl RepairPeers { } impl ServeRepair { - pub fn new(cluster_info: Arc) -> Self { - Self { cluster_info } + pub fn new(cluster_info: Arc, bank_forks: Arc>) -> Self { + Self { + cluster_info, + bank_forks, + } } fn my_info(&self) -> ContactInfo { @@ -220,47 +353,29 @@ impl ServeRepair { self.cluster_info.id() } - fn get_repair_sender(request: &RepairProtocol) -> &ContactInfo { - match request { - RepairProtocol::WindowIndex(ref from, _, _) => from, - RepairProtocol::HighestWindowIndex(ref from, _, _) => from, - RepairProtocol::Orphan(ref from, _) => from, - RepairProtocol::WindowIndexWithNonce(ref from, _, _, _) => from, - RepairProtocol::HighestWindowIndexWithNonce(ref from, _, _, _) => from, - RepairProtocol::OrphanWithNonce(ref from, _, _) => from, - RepairProtocol::AncestorHashes(ref from, _, _) => from, - } - } - fn handle_repair( - me: &Arc>, recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, request: RepairProtocol, stats: &mut ServeRepairStats, + ping_cache: &mut PingCache, ) -> Option { let now = Instant::now(); - - let my_id = me.read().unwrap().my_id(); - //TODO: verify `from` is signed - let from = Self::get_repair_sender(&request); - if from.id == my_id { - stats.self_repair += 1; - return None; - } - let (res, label) = { match &request { - RepairProtocol::WindowIndexWithNonce(_, slot, shred_index, nonce) => { + RepairProtocol::WindowIndex { + header: RepairRequestHeader { nonce, .. }, + slot, + shred_index, + } + | RepairProtocol::LegacyWindowIndexWithNonce(_, slot, shred_index, nonce) => { stats.window_index += 1; ( Self::run_window_request( recycler, - from, from_addr, blockstore, - &my_id, *slot, *shred_index, *nonce, @@ -268,7 +383,17 @@ impl ServeRepair { "WindowIndexWithNonce", ) } - RepairProtocol::HighestWindowIndexWithNonce(_, slot, highest_index, nonce) => { + RepairProtocol::HighestWindowIndex { + header: RepairRequestHeader { nonce, .. 
}, + slot, + shred_index: highest_index, + } + | RepairProtocol::LegacyHighestWindowIndexWithNonce( + _, + slot, + highest_index, + nonce, + ) => { stats.highest_window_index += 1; ( Self::run_highest_window_request( @@ -282,7 +407,11 @@ impl ServeRepair { "HighestWindowIndexWithNonce", ) } - RepairProtocol::OrphanWithNonce(_, slot, nonce) => { + RepairProtocol::Orphan { + header: RepairRequestHeader { nonce, .. }, + slot, + } + | RepairProtocol::LegacyOrphanWithNonce(_, slot, nonce) => { stats.orphan += 1; ( Self::run_orphan( @@ -296,18 +425,27 @@ impl ServeRepair { "OrphanWithNonce", ) } - RepairProtocol::AncestorHashes(_, slot, nonce) => { + RepairProtocol::AncestorHashes { + header: RepairRequestHeader { nonce, .. }, + slot, + } + | RepairProtocol::LegacyAncestorHashes(_, slot, nonce) => { stats.ancestor_hashes += 1; ( Self::run_ancestor_hashes(recycler, from_addr, blockstore, *slot, *nonce), "AncestorHashes", ) } - _ => (None, "Unsupported repair type"), + RepairProtocol::Pong(pong) => { + stats.pong += 1; + ping_cache.add(pong, *from_addr, Instant::now()); + (None, "Pong") + } + RepairProtocol::LegacyWindowIndex(_, _, _) + | RepairProtocol::LegacyHighestWindowIndex(_, _, _) + | RepairProtocol::LegacyOrphan(_, _) => (None, "Unsupported repair type"), } }; - - trace!("{}: received repair request: {:?}", my_id, request); Self::report_time_spent(label, &now.elapsed(), ""); res } @@ -319,9 +457,28 @@ impl ServeRepair { } } + pub(crate) fn sign_repair_requests_activated_epoch(root_bank: &Bank) -> Option { + root_bank + .feature_set + .activated_slot(&sign_repair_requests::id()) + .map(|slot| root_bank.epoch_schedule().get_epoch(slot)) + } + + pub(crate) fn should_sign_repair_request( + slot: Slot, + root_bank: &Bank, + sign_repairs_epoch: Option, + ) -> bool { + match sign_repairs_epoch { + None => false, + Some(feature_epoch) => feature_epoch < root_bank.epoch_schedule().get_epoch(slot), + } + } + /// Process messages from the network fn run_listen( obj: &Arc>, + 
ping_cache: &mut PingCache, recycler: &PacketBatchRecycler, blockstore: Option<&Arc>, requests_receiver: &PacketBatchReceiver, @@ -348,13 +505,16 @@ impl ServeRepair { stats.dropped_requests += dropped_requests; stats.total_requests += total_requests; + let root_bank = obj.read().unwrap().bank_forks.read().unwrap().root_bank(); for reqs in reqs_v { Self::handle_packets( obj, + ping_cache, recycler, blockstore, reqs, response_sender, + &root_bank, stats, data_budget, ); @@ -396,6 +556,13 @@ impl ServeRepair { stats.ancestor_hashes, i64 ), + ("pong", stats.pong, i64), + ("pings_required", stats.pings_required, i64), + ("err_time_skew", stats.err_time_skew, i64), + ("err_malformed", stats.err_malformed, i64), + ("err_sig_verify", stats.err_sig_verify, i64), + ("err_unsigned", stats.err_unsigned, i64), + ("err_id_mismatch", stats.err_id_mismatch, i64), ); *stats = ServeRepairStats::default(); @@ -412,6 +579,8 @@ impl ServeRepair { const MAX_BYTES_PER_SECOND: usize = 12_000_000; const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000; + let mut ping_cache = PingCache::new(REPAIR_PING_CACHE_TTL, REPAIR_PING_CACHE_CAPACITY); + let exit = exit.clone(); let recycler = PacketBatchRecycler::default(); Builder::new() @@ -423,6 +592,7 @@ impl ServeRepair { loop { let result = Self::run_listen( &me, + &mut ping_cache, &recycler, blockstore.as_ref(), &requests_receiver, @@ -447,77 +617,249 @@ impl ServeRepair { .unwrap() } + fn verify_signed_packet( + my_id: &Pubkey, + packet: &Packet, + request: &RepairProtocol, + stats: &mut ServeRepairStats, + ) -> bool { + match request { + RepairProtocol::LegacyWindowIndex(_, _, _) + | RepairProtocol::LegacyHighestWindowIndex(_, _, _) + | RepairProtocol::LegacyOrphan(_, _) + | RepairProtocol::LegacyWindowIndexWithNonce(_, _, _, _) + | RepairProtocol::LegacyHighestWindowIndexWithNonce(_, _, _, _) + | RepairProtocol::LegacyOrphanWithNonce(_, _, _) + | RepairProtocol::LegacyAncestorHashes(_, _, _) => { + 
debug_assert!(false); // expecting only signed request types + stats.err_unsigned += 1; + return false; + } + RepairProtocol::Pong(pong) => { + if !pong.verify() { + stats.err_sig_verify += 1; + return false; + } + } + RepairProtocol::WindowIndex { header, .. } + | RepairProtocol::HighestWindowIndex { header, .. } + | RepairProtocol::Orphan { header, .. } + | RepairProtocol::AncestorHashes { header, .. } => { + if &header.recipient != my_id { + stats.err_id_mismatch += 1; + return false; + } + let time_diff_ms = timestamp().abs_diff(header.timestamp); + if u128::from(time_diff_ms) > SIGNED_REPAIR_TIME_WINDOW.as_millis() { + stats.err_time_skew += 1; + return false; + } + let leading_buf = match packet.data(..4) { + Some(buf) => buf, + None => { + debug_assert!(false); // should have failed deserialize + stats.err_malformed += 1; + return false; + } + }; + let trailing_buf = match packet.data(4 + SIGNATURE_BYTES..) { + Some(buf) => buf, + None => { + debug_assert!(false); // should have failed deserialize + stats.err_malformed += 1; + return false; + } + }; + let from_id = request.sender(); + let signed_data = [leading_buf, trailing_buf].concat(); + if !header.signature.verify(from_id.as_ref(), &signed_data) { + stats.err_sig_verify += 1; + return false; + } + } + } + true + } + + fn check_ping_cache( + request: &RepairProtocol, + from_addr: &SocketAddr, + identity_keypair: &Keypair, + socket_addr_space: &SocketAddrSpace, + ping_cache: &mut PingCache, + pending_pings: &mut Vec<(SocketAddr, Ping)>, + stats: &mut ServeRepairStats, + ) -> bool { + if !ContactInfo::is_valid_address(from_addr, socket_addr_space) { + stats.err_malformed += 1; + return false; + } + let mut rng = rand::thread_rng(); + let mut pingf = move || Ping::new_rand(&mut rng, identity_keypair).ok(); + let (check, ping) = + ping_cache.check(Instant::now(), (*request.sender(), *from_addr), &mut pingf); + if let Some(ping) = ping { + pending_pings.push((*from_addr, ping)); + } + if !check { + 
stats.pings_required += 1; + } + check + } + + fn requires_signature_check( + request: &RepairProtocol, + root_bank: &Bank, + sign_repairs_epoch: Option, + ) -> bool { + match request { + RepairProtocol::LegacyWindowIndex(_, slot, _) + | RepairProtocol::LegacyHighestWindowIndex(_, slot, _) + | RepairProtocol::LegacyOrphan(_, slot) + | RepairProtocol::LegacyWindowIndexWithNonce(_, slot, _, _) + | RepairProtocol::LegacyHighestWindowIndexWithNonce(_, slot, _, _) + | RepairProtocol::LegacyOrphanWithNonce(_, slot, _) + | RepairProtocol::LegacyAncestorHashes(_, slot, _) + | RepairProtocol::WindowIndex { slot, .. } + | RepairProtocol::HighestWindowIndex { slot, .. } + | RepairProtocol::Orphan { slot, .. } + | RepairProtocol::AncestorHashes { slot, .. } => { + Self::should_sign_repair_request(*slot, root_bank, sign_repairs_epoch) + } + RepairProtocol::Pong(_) => true, + } + } + fn handle_packets( me: &Arc>, + ping_cache: &mut PingCache, recycler: &PacketBatchRecycler, blockstore: Option<&Arc>, packet_batch: PacketBatch, response_sender: &PacketBatchSender, + root_bank: &Bank, stats: &mut ServeRepairStats, data_budget: &DataBudget, ) { + let sign_repairs_epoch = Self::sign_repair_requests_activated_epoch(root_bank); + let (identity_keypair, socket_addr_space) = { + let me_r = me.read().unwrap(); + let keypair = me_r.cluster_info.keypair().clone(); + let socket_addr_space = *me_r.cluster_info.socket_addr_space(); + (keypair, socket_addr_space) + }; + let my_id = identity_keypair.pubkey(); + let mut pending_pings = Vec::default(); + // iter over the packets for (i, packet) in packet_batch.iter().enumerate() { - if let Ok(request) = packet.deserialize_slice(..) 
{ - stats.processed += 1; - let from_addr = packet.meta.socket_addr(); - let rsp = - match Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats) - { - None => continue, - Some(rsp) => rsp, - }; - let num_response_packets = rsp.len(); - let num_response_bytes = rsp.iter().map(|p| p.meta.size).sum(); - if data_budget.take(num_response_bytes) && response_sender.send(rsp).is_ok() { - stats.total_response_bytes += num_response_bytes; - stats.total_response_packets += num_response_packets; - } else { - stats.dropped_requests += packet_batch.len() - i; - stats.total_dropped_response_packets += num_response_packets; - break; + let request: RepairProtocol = match packet.deserialize_slice(..) { + Ok(request) => request, + Err(_) => { + stats.err_malformed += 1; + continue; } + }; + + if request.sender() == &my_id { + stats.self_repair += 1; + continue; } - } - } - fn window_index_request_bytes( - &self, - slot: Slot, - shred_index: u64, - nonce: Nonce, - ) -> Result> { - let req = RepairProtocol::WindowIndexWithNonce(self.my_info(), slot, shred_index, nonce); - let out = serialize(&req)?; - Ok(out) - } + let require_signature_check = + Self::requires_signature_check(&request, root_bank, sign_repairs_epoch); + if require_signature_check && !request.supports_signature() { + stats.err_unsigned += 1; + continue; + } + if request.supports_signature() + && !Self::verify_signed_packet(&my_id, packet, &request, stats) + { + continue; + } - fn window_highest_index_request_bytes( - &self, - slot: Slot, - shred_index: u64, - nonce: Nonce, - ) -> Result> { - let req = - RepairProtocol::HighestWindowIndexWithNonce(self.my_info(), slot, shred_index, nonce); - let out = serialize(&req)?; - Ok(out) - } + let from_addr = packet.meta.socket_addr(); + if request.requires_ping_check() + && !Self::check_ping_cache( + &request, + &from_addr, + &identity_keypair, + &socket_addr_space, + ping_cache, + &mut pending_pings, + stats, + ) + { + continue; + } + + stats.processed += 1; 
+ let rsp = match Self::handle_repair( + recycler, &from_addr, blockstore, request, stats, ping_cache, + ) { + None => continue, + Some(rsp) => rsp, + }; + let num_response_packets = rsp.len(); + let num_response_bytes = rsp.iter().map(|p| p.meta.size).sum(); + if data_budget.take(num_response_bytes) && response_sender.send(rsp).is_ok() { + stats.total_response_bytes += num_response_bytes; + stats.total_response_packets += num_response_packets; + } else { + stats.dropped_requests += packet_batch.len() - i; + stats.total_dropped_response_packets += num_response_packets; + break; + } + } - fn orphan_bytes(&self, slot: Slot, nonce: Nonce) -> Result> { - let req = RepairProtocol::OrphanWithNonce(self.my_info(), slot, nonce); - let out = serialize(&req)?; - Ok(out) + if !pending_pings.is_empty() { + let packets: Vec<_> = pending_pings + .into_iter() + .filter_map(|(sockaddr, ping)| { + let ping = RepairResponse::Ping(ping); + Packet::from_data(Some(&sockaddr), ping).ok() + }) + .collect(); + let batch = PacketBatch::new(packets); + let _ = response_sender.send(batch); + } } pub fn ancestor_repair_request_bytes( &self, + keypair: &Keypair, + root_bank: &Bank, + repair_peer_id: &Pubkey, request_slot: Slot, nonce: Nonce, ) -> Result> { - let repair_request = RepairProtocol::AncestorHashes(self.my_info(), request_slot, nonce); - let out = serialize(&repair_request)?; - Ok(out) + let sign_repairs_epoch = Self::sign_repair_requests_activated_epoch(root_bank); + let require_sig = + Self::should_sign_repair_request(request_slot, root_bank, sign_repairs_epoch); + + let (request_proto, maybe_keypair) = if require_sig { + let header = RepairRequestHeader { + signature: Signature::default(), + sender: self.my_id(), + recipient: *repair_peer_id, + timestamp: timestamp(), + nonce, + }; + ( + RepairProtocol::AncestorHashes { + header, + slot: request_slot, + }, + Some(keypair), + ) + } else { + ( + RepairProtocol::LegacyAncestorHashes(self.my_info(), request_slot, nonce), + None, + ) 
+ }; + + Self::repair_proto_to_bytes(&request_proto, maybe_keypair) } pub(crate) fn repair_request( @@ -528,6 +870,7 @@ impl ServeRepair { repair_stats: &mut RepairStats, repair_validators: &Option>, outstanding_requests: &mut OutstandingShredRepairs, + identity_keypair: Option<&Keypair>, ) -> Result<(SocketAddr, Vec)> { // find a peer that appears to be accepting replication and has the desired slot, as indicated // by a valid tvu port location @@ -544,13 +887,18 @@ impl ServeRepair { } }; let (peer, addr) = repair_peers.sample(&mut rand::thread_rng()); - let nonce = - outstanding_requests.add_request(repair_request, solana_sdk::timing::timestamp()); - let out = self.map_repair_request(&repair_request, &peer, repair_stats, nonce)?; + let nonce = outstanding_requests.add_request(repair_request, timestamp()); + let out = self.map_repair_request( + &repair_request, + &peer, + repair_stats, + nonce, + identity_keypair, + )?; Ok((addr, out)) } - pub fn repair_request_ancestor_hashes_sample_peers( + pub(crate) fn repair_request_ancestor_hashes_sample_peers( &self, slot: Slot, cluster_slots: &ClusterSlots, @@ -592,31 +940,134 @@ impl ServeRepair { Ok((repair_peers[n].id, repair_peers[n].serve_repair)) } - pub fn map_repair_request( + pub(crate) fn map_repair_request( &self, repair_request: &ShredRepairType, repair_peer_id: &Pubkey, repair_stats: &mut RepairStats, nonce: Nonce, + identity_keypair: Option<&Keypair>, ) -> Result> { - match repair_request { + let header = if identity_keypair.is_some() { + Some(RepairRequestHeader { + signature: Signature::default(), + sender: self.my_id(), + recipient: *repair_peer_id, + timestamp: timestamp(), + nonce, + }) + } else { + None + }; + let request_proto = match repair_request { ShredRepairType::Shred(slot, shred_index) => { repair_stats .shred .update(repair_peer_id, *slot, *shred_index); - Ok(self.window_index_request_bytes(*slot, *shred_index, nonce)?) 
+ if let Some(header) = header { + RepairProtocol::WindowIndex { + header, + slot: *slot, + shred_index: *shred_index, + } + } else { + RepairProtocol::LegacyWindowIndexWithNonce( + self.my_info(), + *slot, + *shred_index, + nonce, + ) + } } ShredRepairType::HighestShred(slot, shred_index) => { repair_stats .highest_shred .update(repair_peer_id, *slot, *shred_index); - Ok(self.window_highest_index_request_bytes(*slot, *shred_index, nonce)?) + if let Some(header) = header { + RepairProtocol::HighestWindowIndex { + header, + slot: *slot, + shred_index: *shred_index, + } + } else { + RepairProtocol::LegacyHighestWindowIndexWithNonce( + self.my_info(), + *slot, + *shred_index, + nonce, + ) + } } ShredRepairType::Orphan(slot) => { repair_stats.orphan.update(repair_peer_id, *slot, 0); - Ok(self.orphan_bytes(*slot, nonce)?) + if let Some(header) = header { + RepairProtocol::Orphan { + header, + slot: *slot, + } + } else { + RepairProtocol::LegacyOrphanWithNonce(self.my_info(), *slot, nonce) + } + } + }; + Self::repair_proto_to_bytes(&request_proto, identity_keypair) + } + + /// Distinguish and process `RepairResponse` ping packets ignoring other + /// packets in the batch. + pub(crate) fn handle_repair_response_pings( + repair_socket: &UdpSocket, + keypair: &Keypair, + packet_batch: &mut PacketBatch, + stats: &mut ShredFetchStats, + ) { + let mut pending_pongs = Vec::default(); + for packet in packet_batch.iter_mut() { + if packet.meta.size != REPAIR_RESPONSE_SERIALIZED_PING_BYTES { + continue; } + if let Ok(RepairResponse::Ping(ping)) = packet.deserialize_slice(..) 
{ + packet.meta.set_discard(true); + if !ping.verify() { + stats.ping_err_verify_count += 1; + continue; + } + stats.ping_count += 1; + if let Ok(pong) = Pong::new(&ping, keypair) { + let pong = RepairProtocol::Pong(pong); + if let Ok(pong_bytes) = serialize(&pong) { + let from_addr = packet.meta.socket_addr(); + pending_pongs.push((pong_bytes, from_addr)); + } + } + } + } + if !pending_pongs.is_empty() { + if let Err(SendPktsError::IoError(err, num_failed)) = + batch_send(repair_socket, &pending_pongs) + { + warn!( + "batch_send failed to send {}/{} packets. First error: {:?}", + num_failed, + pending_pongs.len(), + err + ); + } + } + } + + pub fn repair_proto_to_bytes( + request: &RepairProtocol, + keypair: Option<&Keypair>, + ) -> Result> { + let mut payload = serialize(&request)?; + if let Some(keypair) = keypair { + debug_assert!(request.supports_signature()); + let signable_data = [&payload[..4], &payload[4 + SIGNATURE_BYTES..]].concat(); + let signature = keypair.sign_message(&signable_data[..]); + payload[4..4 + SIGNATURE_BYTES].copy_from_slice(signature.as_ref()); } + Ok(payload) } fn repair_peers( @@ -642,10 +1093,8 @@ impl ServeRepair { fn run_window_request( recycler: &PacketBatchRecycler, - from: &ContactInfo, from_addr: &SocketAddr, blockstore: Option<&Arc>, - my_id: &Pubkey, slot: Slot, shred_index: u64, nonce: Nonce, @@ -671,14 +1120,6 @@ impl ServeRepair { } inc_new_counter_debug!("serve_repair-window-request-fail", 1); - trace!( - "{}: failed WindowIndex {} {} {}", - my_id, - from.id, - slot, - shred_index, - ); - None } @@ -797,14 +1238,363 @@ mod tests { solana_ledger::{ blockstore::make_many_slot_entries, blockstore_processor::fill_blockstore_slot_with_ticks, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, shred::{max_ticks_per_n_shreds, Shred, ShredFlags}, }, - solana_perf::packet::Packet, - solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, timing::timestamp}, + 
solana_perf::packet::{deserialize_from_with_limit, Packet}, + solana_runtime::bank::Bank, + solana_sdk::{ + feature_set::FeatureSet, hash::Hash, pubkey::Pubkey, signature::Keypair, + timing::timestamp, + }, solana_streamer::socket::SocketAddrSpace, + std::io::Cursor, }; + #[test] + fn test_serialized_ping_size() { + let mut rng = rand::thread_rng(); + let keypair = Keypair::new(); + let ping = Ping::new_rand(&mut rng, &keypair).unwrap(); + let ping = RepairResponse::Ping(ping); + let pkt = Packet::from_data(None, ping).unwrap(); + assert_eq!(pkt.meta.size, REPAIR_RESPONSE_SERIALIZED_PING_BYTES); + } + + #[test] + fn test_serialize_deserialize_signed_request() { + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); + let cluster_info = Arc::new(new_test_cluster_info(me)); + let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let keypair = cluster_info.keypair().clone(); + let repair_peer_id = solana_sdk::pubkey::new_rand(); + let repair_request = ShredRepairType::Orphan(123); + + let rsp = serve_repair + .map_repair_request( + &repair_request, + &repair_peer_id, + &mut RepairStats::default(), + 456, + Some(&keypair), + ) + .unwrap(); + + let mut cursor = Cursor::new(&rsp[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), rsp.len() as u64); + if let RepairProtocol::Orphan { header, slot } = deserialized_request { + assert_eq!(slot, 123); + assert_eq!(header.nonce, 456); + assert_eq!(&header.sender, &serve_repair.my_id()); + assert_eq!(&header.recipient, &repair_peer_id); + let signed_data = [&rsp[..4], &rsp[4 + SIGNATURE_BYTES..]].concat(); + assert!(header + .signature + .verify(keypair.pubkey().as_ref(), &signed_data)); + } else { + 
panic!("unexpected request type {:?}", &deserialized_request); + } + } + + #[test] + fn test_serialize_deserialize_ancestor_hashes_request() { + let slot = 50; + let nonce = 70; + let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); + let cluster_info = Arc::new(new_test_cluster_info(me)); + let repair_peer_id = solana_sdk::pubkey::new_rand(); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let keypair = cluster_info.keypair().clone(); + + let mut bank = Bank::new_for_tests(&genesis_config); + bank.feature_set = Arc::new(FeatureSet::all_enabled()); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks.clone()); + + let root_bank = bank_forks.read().unwrap().root_bank(); + let request_bytes = serve_repair + .ancestor_repair_request_bytes(&keypair, &root_bank, &repair_peer_id, slot, nonce) + .unwrap(); + let mut cursor = Cursor::new(&request_bytes[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), request_bytes.len() as u64); + if let RepairProtocol::AncestorHashes { + header, + slot: deserialized_slot, + } = deserialized_request + { + assert_eq!(deserialized_slot, slot); + assert_eq!(header.nonce, nonce); + assert_eq!(&header.sender, &serve_repair.my_id()); + assert_eq!(&header.recipient, &repair_peer_id); + let signed_data = [&request_bytes[..4], &request_bytes[4 + SIGNATURE_BYTES..]].concat(); + assert!(header + .signature + .verify(keypair.pubkey().as_ref(), &signed_data)); + } else { + panic!("unexpected request type {:?}", &deserialized_request); + } + + let mut bank = Bank::new_for_tests(&genesis_config); + let mut feature_set = FeatureSet::all_enabled(); + feature_set.deactivate(&sign_repair_requests::id()); + bank.feature_set = Arc::new(feature_set); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let serve_repair 
= ServeRepair::new(cluster_info, bank_forks.clone()); + + let root_bank = bank_forks.read().unwrap().root_bank(); + let request_bytes = serve_repair + .ancestor_repair_request_bytes(&keypair, &root_bank, &repair_peer_id, slot, nonce) + .unwrap(); + let mut cursor = Cursor::new(&request_bytes[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), request_bytes.len() as u64); + if let RepairProtocol::LegacyAncestorHashes(ci, deserialized_slot, deserialized_nonce) = + deserialized_request + { + assert_eq!(slot, deserialized_slot); + assert_eq!(nonce, deserialized_nonce); + assert_eq!(&serve_repair.my_id(), &ci.id); + } else { + panic!("unexpected request type {:?}", &deserialized_request); + } + } + + #[test] + fn test_map_requests_signed_unsigned() { + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); + let cluster_info = Arc::new(new_test_cluster_info(me)); + let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let keypair = cluster_info.keypair().clone(); + let repair_peer_id = solana_sdk::pubkey::new_rand(); + + let slot = 50; + let shred_index = 60; + let nonce = 70; + + let request = ShredRepairType::Shred(slot, shred_index); + let rsp = serve_repair + .map_repair_request( + &request, + &repair_peer_id, + &mut RepairStats::default(), + nonce, + Some(&keypair), + ) + .unwrap(); + + let mut cursor = Cursor::new(&rsp[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), rsp.len() as u64); + if let RepairProtocol::WindowIndex { + header, + slot: deserialized_slot, + shred_index: deserialized_shred_index, + } = deserialized_request + { + 
assert_eq!(deserialized_slot, slot); + assert_eq!(deserialized_shred_index, shred_index); + assert_eq!(header.nonce, nonce); + assert_eq!(&header.sender, &serve_repair.my_id()); + assert_eq!(&header.recipient, &repair_peer_id); + let signed_data = [&rsp[..4], &rsp[4 + SIGNATURE_BYTES..]].concat(); + assert!(header + .signature + .verify(keypair.pubkey().as_ref(), &signed_data)); + } else { + panic!("unexpected request type {:?}", &deserialized_request); + } + + let rsp = serve_repair + .map_repair_request( + &request, + &repair_peer_id, + &mut RepairStats::default(), + nonce, + None, + ) + .unwrap(); + + let mut cursor = Cursor::new(&rsp[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), rsp.len() as u64); + if let RepairProtocol::LegacyWindowIndexWithNonce( + ci, + deserialized_slot, + deserialized_shred_index, + deserialized_nonce, + ) = deserialized_request + { + assert_eq!(slot, deserialized_slot); + assert_eq!(shred_index, deserialized_shred_index); + assert_eq!(nonce, deserialized_nonce); + assert_eq!(&serve_repair.my_id(), &ci.id); + } else { + panic!("unexpected request type {:?}", &deserialized_request); + } + + let request = ShredRepairType::HighestShred(slot, shred_index); + let rsp = serve_repair + .map_repair_request( + &request, + &repair_peer_id, + &mut RepairStats::default(), + nonce, + Some(&keypair), + ) + .unwrap(); + + let mut cursor = Cursor::new(&rsp[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), rsp.len() as u64); + if let RepairProtocol::HighestWindowIndex { + header, + slot: deserialized_slot, + shred_index: deserialized_shred_index, + } = deserialized_request + { + assert_eq!(deserialized_slot, slot); + assert_eq!(deserialized_shred_index, shred_index); + assert_eq!(header.nonce, nonce); + assert_eq!(&header.sender, &serve_repair.my_id()); + 
assert_eq!(&header.recipient, &repair_peer_id); + let signed_data = [&rsp[..4], &rsp[4 + SIGNATURE_BYTES..]].concat(); + assert!(header + .signature + .verify(keypair.pubkey().as_ref(), &signed_data)); + } else { + panic!("unexpected request type {:?}", &deserialized_request); + } + + let rsp = serve_repair + .map_repair_request( + &request, + &repair_peer_id, + &mut RepairStats::default(), + nonce, + None, + ) + .unwrap(); + + let mut cursor = Cursor::new(&rsp[..]); + let deserialized_request: RepairProtocol = + deserialize_from_with_limit(&mut cursor).unwrap(); + assert_eq!(cursor.position(), rsp.len() as u64); + if let RepairProtocol::LegacyHighestWindowIndexWithNonce( + ci, + deserialized_slot, + deserialized_shred_index, + deserialized_nonce, + ) = deserialized_request + { + assert_eq!(slot, deserialized_slot); + assert_eq!(shred_index, deserialized_shred_index); + assert_eq!(nonce, deserialized_nonce); + assert_eq!(&serve_repair.my_id(), &ci.id); + } else { + panic!("unexpected request type {:?}", &deserialized_request); + } + } + + #[test] + fn test_verify_signed_packet() { + let keypair = Keypair::new(); + let other_keypair = Keypair::new(); + let my_id = Pubkey::new_unique(); + let other_id = Pubkey::new_unique(); + + fn sign_packet(packet: &mut Packet, keypair: &Keypair) { + let signable_data = [ + packet.data(..4).unwrap(), + packet.data(4 + SIGNATURE_BYTES..).unwrap(), + ] + .concat(); + let signature = keypair.sign_message(&signable_data[..]); + packet.buffer_mut()[4..4 + SIGNATURE_BYTES].copy_from_slice(signature.as_ref()); + } + + // well formed packet + let packet = { + let header = RepairRequestHeader::new(keypair.pubkey(), my_id, timestamp(), 678); + let slot = 239847; + let request = RepairProtocol::Orphan { header, slot }; + let mut packet = Packet::from_data(None, &request).unwrap(); + sign_packet(&mut packet, &keypair); + packet + }; + let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); + 
assert!(ServeRepair::verify_signed_packet( + &my_id, + &packet, + &request, + &mut ServeRepairStats::default(), + )); + + // recipient mismatch + let packet = { + let header = RepairRequestHeader::new(keypair.pubkey(), other_id, timestamp(), 678); + let slot = 239847; + let request = RepairProtocol::Orphan { header, slot }; + let mut packet = Packet::from_data(None, &request).unwrap(); + sign_packet(&mut packet, &keypair); + packet + }; + let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); + let mut stats = ServeRepairStats::default(); + assert!(!ServeRepair::verify_signed_packet( + &my_id, &packet, &request, &mut stats, + )); + assert_eq!(stats.err_id_mismatch, 1); + + // outside time window + let packet = { + let time_diff_ms = u64::try_from(SIGNED_REPAIR_TIME_WINDOW.as_millis() * 2).unwrap(); + let old_timestamp = timestamp().saturating_sub(time_diff_ms); + let header = RepairRequestHeader::new(keypair.pubkey(), my_id, old_timestamp, 678); + let slot = 239847; + let request = RepairProtocol::Orphan { header, slot }; + let mut packet = Packet::from_data(None, &request).unwrap(); + sign_packet(&mut packet, &keypair); + packet + }; + let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); + let mut stats = ServeRepairStats::default(); + assert!(!ServeRepair::verify_signed_packet( + &my_id, &packet, &request, &mut stats, + )); + assert_eq!(stats.err_time_skew, 1); + + // bad signature + let packet = { + let header = RepairRequestHeader::new(keypair.pubkey(), my_id, timestamp(), 678); + let slot = 239847; + let request = RepairProtocol::Orphan { header, slot }; + let mut packet = Packet::from_data(None, &request).unwrap(); + sign_packet(&mut packet, &other_keypair); + packet + }; + let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); + let mut stats = ServeRepairStats::default(); + assert!(!ServeRepair::verify_signed_packet( + &my_id, &packet, &request, &mut stats, + )); + assert_eq!(stats.err_sig_verify, 1); + } + 
#[test] fn test_run_highest_window_request() { run_highest_window_request(5, 3, 9); @@ -886,27 +1676,10 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let me = ContactInfo { - id: solana_sdk::pubkey::new_rand(), - gossip: socketaddr!("127.0.0.1:1234"), - tvu: socketaddr!("127.0.0.1:1235"), - tvu_forwards: socketaddr!("127.0.0.1:1236"), - repair: socketaddr!("127.0.0.1:1237"), - tpu: socketaddr!("127.0.0.1:1238"), - tpu_forwards: socketaddr!("127.0.0.1:1239"), - tpu_vote: socketaddr!("127.0.0.1:1240"), - rpc: socketaddr!("127.0.0.1:1241"), - rpc_pubsub: socketaddr!("127.0.0.1:1242"), - serve_repair: socketaddr!("127.0.0.1:1243"), - wallclock: 0, - shred_version: 0, - }; let rv = ServeRepair::run_window_request( &recycler, - &me, &socketaddr_any!(), Some(&blockstore), - &me.id, slot, 0, nonce, @@ -921,10 +1694,8 @@ mod tests { let index = 1; let rv = ServeRepair::run_window_request( &recycler, - &me, &socketaddr_any!(), Some(&blockstore), - &me.id, slot, index, nonce, @@ -956,10 +1727,13 @@ mod tests { #[test] fn window_index_request() { + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let cluster_slots = ClusterSlots::default(); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me)); - let serve_repair = ServeRepair::new(cluster_info.clone()); + let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); let mut outstanding_requests = OutstandingShredRepairs::default(); let rv = serve_repair.repair_request( &cluster_slots, @@ -968,6 +1742,7 @@ mod tests { &mut RepairStats::default(), &None, &mut outstanding_requests, + None, ); assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers))); @@ -996,6 +1771,7 @@ mod tests { &mut RepairStats::default(), &None, &mut outstanding_requests, + None, ) .unwrap(); assert_eq!(nxt.serve_repair, serve_repair_addr); @@ -1030,6 +1806,7 @@ mod tests { &mut RepairStats::default(), &None, &mut outstanding_requests, + None, ) .unwrap(); if rv.0 == serve_repair_addr { @@ -1265,6 +2042,9 @@ mod tests { #[test] fn test_repair_with_repair_validators() { + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let cluster_slots = ClusterSlots::default(); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me.clone())); @@ -1276,7 +2056,7 @@ mod tests { ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); cluster_info.insert_info(contact_info2.clone()); cluster_info.insert_info(contact_info3.clone()); - let serve_repair = ServeRepair::new(cluster_info); + let serve_repair = ServeRepair::new(cluster_info, bank_forks); // If: // 1) repair validator set doesn't exist in gossip @@ -1293,6 +2073,7 @@ mod tests { &mut RepairStats::default(), &known_validators, &mut OutstandingShredRepairs::default(), + None, ) .is_err()); } @@ -1310,6 +2091,7 @@ mod tests { &mut RepairStats::default(), &known_validators, &mut OutstandingShredRepairs::default(), + None, ) .is_ok()); @@ -1331,6 +2113,7 @@ mod tests { &mut RepairStats::default(), &None, &mut OutstandingShredRepairs::default(), + None, ) .is_ok()); } diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 78b5e15b95efa8..ae604e766f1957 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -1,9 +1,10 @@ //! The `shred_fetch_stage` pulls shreds from UDP sockets and sends it to a channel. 
use { - crate::packet_hasher::PacketHasher, + crate::{packet_hasher::PacketHasher, serve_repair::ServeRepair}, crossbeam_channel::{unbounded, Sender}, lru::LruCache, + solana_gossip::cluster_info::ClusterInfo, solana_ledger::shred::{should_discard_shred, ShredFetchStats}, solana_perf::packet::{Packet, PacketBatch, PacketBatchRecycler, PacketFlags}, solana_runtime::bank_forks::BankForks, @@ -33,10 +34,14 @@ impl ShredFetchStage { shred_version: u16, name: &'static str, flags: PacketFlags, + repair_context: Option<(&UdpSocket, &ClusterInfo)>, ) { const STATS_SUBMIT_CADENCE: Duration = Duration::from_secs(1); let mut shreds_received = LruCache::new(DEFAULT_LRU_SIZE); let mut last_updated = Instant::now(); + let mut keypair = repair_context + .as_ref() + .map(|(_, cluster_info)| cluster_info.keypair().clone()); // In the case of bank_forks=None, setup to accept any slot range let mut last_root = 0; @@ -59,8 +64,25 @@ impl ShredFetchStage { let root_bank = bank_forks_r.root_bank(); slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch()); } + keypair = repair_context + .as_ref() + .map(|(_, cluster_info)| cluster_info.keypair().clone()); } stats.shred_count += packet_batch.len(); + + if let Some((udp_socket, _)) = repair_context { + debug_assert_eq!(flags, PacketFlags::REPAIR); + debug_assert!(keypair.is_some()); + if let Some(ref keypair) = keypair { + ServeRepair::handle_repair_response_pings( + udp_socket, + keypair, + &mut packet_batch, + &mut stats, + ); + } + } + // Limit shreds to 2 epochs away. 
let max_slot = last_slot + 2 * slots_per_epoch; for packet in packet_batch.iter_mut() { @@ -94,6 +116,7 @@ impl ShredFetchStage { shred_version: u16, name: &'static str, flags: PacketFlags, + repair_context: Option<(Arc, Arc)>, ) -> (Vec>, JoinHandle<()>) { let (packet_sender, packet_receiver) = unbounded(); let streamers = sockets @@ -111,10 +134,12 @@ impl ShredFetchStage { ) }) .collect(); - let modifier_hdl = Builder::new() .name("solana-tvu-fetch-stage-packet-modifier".to_string()) .spawn(move || { + let repair_context = repair_context + .as_ref() + .map(|(socket, cluster_info)| (socket.as_ref(), cluster_info.as_ref())); Self::modify_packets( packet_receiver, sender, @@ -122,6 +147,7 @@ impl ShredFetchStage { shred_version, name, flags, + repair_context, ) }) .unwrap(); @@ -135,6 +161,7 @@ impl ShredFetchStage { sender: Sender, shred_version: u16, bank_forks: Arc>, + cluster_info: Arc, exit: &Arc, ) -> Self { let recycler = PacketBatchRecycler::warmed(100, 1024); @@ -148,6 +175,7 @@ impl ShredFetchStage { shred_version, "shred_fetch", PacketFlags::empty(), + None, // repair_context ); let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier( @@ -159,10 +187,11 @@ impl ShredFetchStage { shred_version, "shred_fetch_tvu_forwards", PacketFlags::FORWARDED, + None, // repair_context ); let (repair_receiver, repair_handler) = Self::packet_modifier( - vec![repair_socket], + vec![repair_socket.clone()], exit, sender, recycler, @@ -170,6 +199,7 @@ impl ShredFetchStage { shred_version, "shred_fetch_repair", PacketFlags::REPAIR, + Some((repair_socket, cluster_info)), ); tvu_threads.extend(tvu_forwards_threads.into_iter()); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 9004131c5626ab..990b943f7478f0 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -152,6 +152,7 @@ impl Tvu { fetch_sender, tvu_config.shred_version, bank_forks.clone(), + cluster_info.clone(), exit, ); diff --git a/core/src/validator.rs b/core/src/validator.rs index 
ef0848ceca1773..867d1e698f11f8 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -883,7 +883,10 @@ impl Validator { Some(stats_reporter_sender.clone()), &exit, ); - let serve_repair = Arc::new(RwLock::new(ServeRepair::new(cluster_info.clone()))); + let serve_repair = Arc::new(RwLock::new(ServeRepair::new( + cluster_info.clone(), + bank_forks.clone(), + ))); let serve_repair_service = ServeRepairService::new( &serve_repair, Some(blockstore.clone()), diff --git a/dos/src/main.rs b/dos/src/main.rs index 6fefd16af3536c..d77ac1d620cb6c 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -49,7 +49,7 @@ use { rpc_client::RpcClient, tpu_connection::TpuConnection, }, - solana_core::serve_repair::RepairProtocol, + solana_core::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair}, solana_dos::cli::*, solana_gossip::{ contact_info::ContactInfo, @@ -64,6 +64,7 @@ use { stake, system_instruction::{self, SystemInstruction}, system_program, + timing::timestamp, transaction::Transaction, }, solana_streamer::socket::SocketAddrSpace, @@ -81,13 +82,6 @@ fn compute_rate_per_second(count: usize) -> usize { (count * 1000) / SAMPLE_PERIOD_MS } -fn get_repair_contact(nodes: &[ContactInfo]) -> ContactInfo { - let source = thread_rng().gen_range(0, nodes.len()); - let mut contact = nodes[source].clone(); - contact.id = solana_sdk::pubkey::new_rand(); - contact -} - /// Provide functionality to generate several types of transactions: /// /// 1. 
Without blockhash @@ -241,11 +235,11 @@ fn get_target( nodes: &[ContactInfo], mode: Mode, entrypoint_addr: SocketAddr, -) -> Option { +) -> Option<(Pubkey, SocketAddr)> { let mut target = None; if nodes.is_empty() { // skip-gossip case - target = Some(entrypoint_addr); + target = Some((solana_sdk::pubkey::new_rand(), entrypoint_addr)); } else { info!("************ NODE ***********"); for node in nodes { @@ -257,13 +251,13 @@ fn get_target( if node.gossip == entrypoint_addr { info!("{}", node.gossip); target = match mode { - Mode::Gossip => Some(node.gossip), - Mode::Tvu => Some(node.tvu), - Mode::TvuForwards => Some(node.tvu_forwards), - Mode::Tpu => Some(node.tpu), - Mode::TpuForwards => Some(node.tpu_forwards), - Mode::Repair => Some(node.repair), - Mode::ServeRepair => Some(node.serve_repair), + Mode::Gossip => Some((node.id, node.gossip)), + Mode::Tvu => Some((node.id, node.tvu)), + Mode::TvuForwards => Some((node.id, node.tvu_forwards)), + Mode::Tpu => Some((node.id, node.tpu)), + Mode::TpuForwards => Some((node.id, node.tpu_forwards)), + Mode::Repair => Some((node.id, node.repair)), + Mode::ServeRepair => Some((node.id, node.serve_repair)), Mode::Rpc => None, }; break; @@ -500,39 +494,47 @@ fn run_dos( } else if params.data_type == DataType::Transaction && params.transaction_params.unique_transactions { - let target = target.expect("should have target"); - info!("Targeting {}", target); + let (_, target_addr) = target.expect("should have target"); + info!("Targeting {}", target_addr); run_dos_transactions( - target, + target_addr, iterations, client, params.transaction_params, params.tpu_use_quic, ); } else { - let target = target.expect("should have target"); - info!("Targeting {}", target); + let (target_id, target_addr) = target.expect("should have target"); + info!("Targeting {}", target_addr); let mut data = match params.data_type { DataType::RepairHighest => { let slot = 100; - let req = - RepairProtocol::WindowIndexWithNonce(get_repair_contact(nodes), 
slot, 0, 0); - bincode::serialize(&req).unwrap() + let keypair = Keypair::new(); + let header = RepairRequestHeader::new(keypair.pubkey(), target_id, timestamp(), 0); + let req = RepairProtocol::WindowIndex { + header, + slot, + shred_index: 0, + }; + ServeRepair::repair_proto_to_bytes(&req, Some(&keypair)).unwrap() } DataType::RepairShred => { let slot = 100; - let req = RepairProtocol::HighestWindowIndexWithNonce( - get_repair_contact(nodes), + let keypair = Keypair::new(); + let header = RepairRequestHeader::new(keypair.pubkey(), target_id, timestamp(), 0); + let req = RepairProtocol::HighestWindowIndex { + header, slot, - 0, - 0, - ); - bincode::serialize(&req).unwrap() + shred_index: 0, + }; + ServeRepair::repair_proto_to_bytes(&req, Some(&keypair)).unwrap() } DataType::RepairOrphan => { let slot = 100; - let req = RepairProtocol::OrphanWithNonce(get_repair_contact(nodes), slot, 0); - bincode::serialize(&req).unwrap() + let keypair = Keypair::new(); + let header = RepairRequestHeader::new(keypair.pubkey(), target_id, timestamp(), 0); + let req = RepairProtocol::Orphan { header, slot }; + ServeRepair::repair_proto_to_bytes(&req, Some(&keypair)).unwrap() } DataType::Random => { vec![0; params.data_size] @@ -574,7 +576,7 @@ fn run_dos( if params.data_type == DataType::Random { thread_rng().fill(&mut data[..]); } - let res = socket.send_to(&data, target); + let res = socket.send_to(&data, target_addr); if res.is_err() { error_count += 1; } diff --git a/gossip/src/ping_pong.rs b/gossip/src/ping_pong.rs index e115077b99dfd8..6c3a219cfdb81b 100644 --- a/gossip/src/ping_pong.rs +++ b/gossip/src/ping_pong.rs @@ -109,6 +109,10 @@ impl Pong { }; Ok(pong) } + + pub fn from(&self) -> &Pubkey { + &self.from + } } impl Sanitize for Pong { diff --git a/ledger/src/shred/stats.rs b/ledger/src/shred/stats.rs index 49d4bea5b34ebe..0c630e007e334d 100644 --- a/ledger/src/shred/stats.rs +++ b/ledger/src/shred/stats.rs @@ -30,6 +30,8 @@ pub struct ProcessShredsStats { pub struct 
ShredFetchStats { pub index_overrun: usize, pub shred_count: usize, + pub ping_count: usize, + pub ping_err_verify_count: usize, pub(crate) index_bad_deserialize: usize, pub(crate) index_out_of_bounds: usize, pub(crate) slot_bad_deserialize: usize, @@ -115,6 +117,8 @@ impl ShredFetchStats { name, ("index_overrun", self.index_overrun, i64), ("shred_count", self.shred_count, i64), + ("ping_count", self.ping_count, i64), + ("ping_err_verify_count", self.ping_err_verify_count, i64), ("slot_bad_deserialize", self.slot_bad_deserialize, i64), ("index_bad_deserialize", self.index_bad_deserialize, i64), ("index_out_of_bounds", self.index_out_of_bounds, i64), diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 695eafb2bc8b01..0d85d3e3ab161f 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -484,6 +484,10 @@ pub mod compact_vote_state_updates { solana_sdk::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); } +pub mod sign_repair_requests { + solana_sdk::declare_id!("sigrs6u1EWeHuoKFkY8RR7qcSsPmrAeBBPESyf5pnYe"); +} + pub mod concurrent_replay_of_forks { solana_sdk::declare_id!("9F2Dcu8xkBPKxiiy65XKPZYdCG3VZDpjDTuSmeYLozJe"); } @@ -607,6 +611,7 @@ lazy_static! 
{ (loosen_cpi_size_restriction::id(), "loosen cpi size restrictions #26641"), (use_default_units_in_fee_calculation::id(), "use default units per instruction in fee calculation #26785"), (compact_vote_state_updates::id(), "Compact vote state updates to lower block size"), + (sign_repair_requests::id(), "sign repair requests #26834"), (concurrent_replay_of_forks::id(), "Allow slots from different forks to be replayed concurrently #26465"), (incremental_snapshot_only_incremental_hash_calculation::id(), "only hash accounts in incremental snapshot during incremental snapshot creation #26799"), /*************** ADD NEW FEATURES HERE ***************/ From 2fc888d864517518de95b4e4a0a7dfc50eb9839f Mon Sep 17 00:00:00 2001 From: Richard Patel Date: Sat, 18 Jun 2022 10:11:11 +0100 Subject: [PATCH 023/192] rbpf-cli: capture log --- rbpf-cli/src/main.rs | 49 ++++++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 2b66e40868f381..3a136606b12422 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -304,24 +304,6 @@ native machine code before execting it in the virtual machine.", }; let duration = Instant::now() - start_time; - let output = Output { - result: format!("{:?}", result), - instruction_count: vm.get_total_instruction_count(), - execution_time: duration, - }; - match matches.value_of("output_format") { - Some("json") => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); - } - Some("json-compact") => { - println!("{}", serde_json::to_string(&output).unwrap()); - } - _ => { - println!("Program output:"); - println!("{:?}", output); - } - } - if matches.is_present("trace") { eprintln!("Trace is saved in trace.out"); let mut file = File::create("trace.out").unwrap(); @@ -339,6 +321,33 @@ native machine code before execting it in the virtual machine.", .visualize_graphically(&mut file, Some(&dynamic_analysis)) .unwrap(); } + + let 
instruction_count = vm.get_total_instruction_count(); + drop(vm); + + let output = Output { + result: format!("{:?}", result), + instruction_count, + execution_time: duration, + log: invoke_context + .get_log_collector() + .unwrap() + .borrow() + .get_recorded_content() + .to_vec(), + }; + match matches.value_of("output_format") { + Some("json") => { + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } + Some("json-compact") => { + println!("{}", serde_json::to_string(&output).unwrap()); + } + _ => { + println!("Program output:"); + println!("{:?}", output); + } + } } #[derive(Serialize)] @@ -346,6 +355,7 @@ struct Output { result: String, instruction_count: u64, execution_time: Duration, + log: Vec, } impl Debug for Output { @@ -353,6 +363,9 @@ impl Debug for Output { writeln!(f, "Result: {}", self.result)?; writeln!(f, "Instruction Count: {}", self.instruction_count)?; writeln!(f, "Execution time: {} us", self.execution_time.as_micros())?; + for line in &self.log { + writeln!(f, "{}", line)?; + } Ok(()) } } From ebabc53cee67bbba6c420ecd8756a2d3848c8f75 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sun, 31 Jul 2022 21:04:15 -0500 Subject: [PATCH 024/192] use Vec::drain instead of option/take (#26852) --- runtime/src/in_mem_accounts_index.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index 4d9042cfe7e376..89babf1dad3773 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -77,9 +77,9 @@ struct StartupInfo { /// result from scanning in-mem index during flush struct FlushScanResult { /// pubkeys whose age indicates they may be evicted now, pending further checks. 
- evictions_age_possible: Vec<(Pubkey, Option>)>, + evictions_age_possible: Vec<(Pubkey, AccountMapEntry)>, /// pubkeys chosen to evict based on random eviction - evictions_random: Vec<(Pubkey, Option>)>, + evictions_random: Vec<(Pubkey, AccountMapEntry)>, } impl InMemAccountsIndex { @@ -992,7 +992,7 @@ impl InMemAccountsIndex { } else { &mut evictions_age_possible } - .push((*k, Some(Arc::clone(v)))); + .push((*k, Arc::clone(v))); } } Self::update_time_stat(&self.stats().flush_scan_us, m); @@ -1104,8 +1104,7 @@ impl InMemAccountsIndex { (false, &mut evictions_age_possible), (true, &mut evictions_random), ] { - for (k, v) in check_for_eviction_and_dirty { - let v = v.take().unwrap(); + for (k, v) in check_for_eviction_and_dirty.drain(..) { let mut slot_list = None; if !is_random { let mut mse = Measure::start("flush_should_evict"); @@ -1120,7 +1119,7 @@ impl InMemAccountsIndex { mse.stop(); flush_should_evict_us += mse.as_us(); if evict_for_age { - evictions_age.push(*k); + evictions_age.push(k); } else { // not evicting, so don't write, even if dirty continue; @@ -1142,7 +1141,7 @@ impl InMemAccountsIndex { let slot_list = slot_list .take() .unwrap_or_else(|| v.slot_list.read().unwrap()); - disk.try_write(k, (&slot_list, v.ref_count())) + disk.try_write(&k, (&slot_list, v.ref_count())) }; match disk_resize { Ok(_) => { From 00ce8057881cff4181fb3d414a03fcd90b3be1ea Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Sun, 31 Jul 2022 21:12:59 -0700 Subject: [PATCH 025/192] chore: Update web3.js README to ask that contributions and issues regarding web3.js be filed against the monorepo and not the mirror --- web3.js/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/web3.js/README.md b/web3.js/README.md index 1b07c4b6cc32ba..313b15fc0ea41d 100644 --- a/web3.js/README.md +++ b/web3.js/README.md @@ -109,6 +109,10 @@ Each Github release features a tarball containing API documentation and a minified version of the module suitable for direct use in a browser 
environment (`