diff --git a/.github/workflows/manage-stale-issues-and-prs.yml b/.github/workflows/manage-stale-issues-and-prs.yml index a5bb8e34c1044d..13b6d8bd0449cf 100644 --- a/.github/workflows/manage-stale-issues-and-prs.yml +++ b/.github/workflows/manage-stale-issues-and-prs.yml @@ -40,4 +40,4 @@ jobs: # Time immemorial when in debug-only mode (ie. on pull requests). # `STALEBOT_START_DATE` otherwise. # You can use this as a killswitch by setting `STALEBOT_START_DATE` in the far future. - start-date: ${{ github.event_name == 'pull_request' && '1970-01-01T00:00:00Z' || secrets.STALEBOT_START_DATE }} # ISO 8601 or RFC 2822 + start-date: ${{ github.event_name == 'pull_request' && '1970-01-01T00:00:00Z' || secrets.STALEBOT_START_DATE_RFC_2822 }} # ISO 8601 or RFC 2822 diff --git a/Cargo.lock b/Cargo.lock index da776c285d8913..afaebf6099db71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5121,7 +5121,6 @@ version = "1.15.0" dependencies = [ "futures-util", "serde_json", - "serial_test", "solana-ledger", "solana-logger 1.15.0", "solana-measure", @@ -5141,6 +5140,7 @@ dependencies = [ "solana-version", "systemstat", "tokio", + "tungstenite", ] [[package]] diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index 04b24aa007a9a2..ff72bb7da2d066 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -10,7 +10,7 @@ if [[ -n $APPVEYOR ]]; then appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe export USERPROFILE="D:\\" - ./rustup-init -yv --default-toolchain $rust_stable --default-host x86_64-pc-windows-msvc + ./rustup-init -yv --default-toolchain "$rust_stable" --default-host x86_64-pc-windows-msvc export PATH="$PATH:/d/.cargo/bin" rustc -vV cargo -vV diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 5ba97cc783fd3b..1f2bfb98f0fa5f 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,7 +18,12 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.65.0 + # read rust version from rust-toolchain.toml file + base="$(dirname "${BASH_SOURCE[0]}")" + # pacify shellcheck: cannot follow dynamic path + # shellcheck disable=SC1090,SC1091 + source "$base/../scripts/read-cargo-variable.sh" + stable_version=$(readCargoVariable channel "$base/../rust-toolchain.toml") fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then diff --git a/cli/src/program.rs b/cli/src/program.rs index 6d9fd1ae1e6287..63e8d56e234d0b 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -95,6 +95,11 @@ pub enum ProgramCliCommand { upgrade_authority_index: Option, new_upgrade_authority: Option, }, + SetUpgradeAuthorityChecked { + program_pubkey: Pubkey, + upgrade_authority_index: SignerIndex, + new_upgrade_authority_index: SignerIndex, + }, Show { account_pubkey: Option, authority_pubkey: Pubkey, @@ -272,11 +277,13 @@ impl ProgramSubCommands for App<'_, '_> { .help("Upgrade authority [default: the default configured keypair]") ) .arg( - pubkey!(Arg::with_name("new_upgrade_authority") + Arg::with_name("new_upgrade_authority") .long("new-upgrade-authority") + .value_name("NEW_UPGRADE_AUTHORITY") .required_unless("final") - .value_name("NEW_UPGRADE_AUTHORITY"), - "Address of the new upgrade authority"), + .takes_value(true) + .help("New upgrade authority (keypair or pubkey). It is strongly recommended to pass in a keypair to prevent mistakes in setting the upgrade authority. You can opt out of this behavior by passing --skip-new-upgrade-authority-signer-check if you are really confident that you are setting the correct authority. 
Alternatively, If you wish to make the program immutable, you should ignore this arg and pass the --final flag." + ) ) .arg( Arg::with_name("final") @@ -284,6 +291,13 @@ impl ProgramSubCommands for App<'_, '_> { .conflicts_with("new_upgrade_authority") .help("The program will not be upgradeable") ) + .arg( + Arg::with_name("skip_new_upgrade_authority_signer_check") + .long("skip-new-upgrade-authority-signer-check") + .requires("new_upgrade_authority") + .takes_value(false) + .help("Set this flag if you don't want the new authority to sign the set-upgrade-authority transaction."), + ), ) .subcommand( SubCommand::with_name("show") @@ -560,28 +574,49 @@ pub fn parse_program_subcommand( let (upgrade_authority_signer, upgrade_authority_pubkey) = signer_of(matches, "upgrade_authority", wallet_manager)?; let program_pubkey = pubkey_of(matches, "program_id").unwrap(); - let new_upgrade_authority = if matches.is_present("final") { + let is_final = matches.is_present("final"); + let new_upgrade_authority = if is_final { None } else { pubkey_of_signer(matches, "new_upgrade_authority", wallet_manager)? }; - let signer_info = default_signer.generate_unique_signers( - vec![ - Some(default_signer.signer_from_path(matches, wallet_manager)?), - upgrade_authority_signer, - ], - matches, - wallet_manager, - )?; + let mut signers = vec![ + Some(default_signer.signer_from_path(matches, wallet_manager)?), + upgrade_authority_signer, + ]; - CliCommandInfo { - command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { - program_pubkey, - upgrade_authority_index: signer_info.index_of(upgrade_authority_pubkey), - new_upgrade_authority, - }), - signers: signer_info.signers, + if !is_final && !matches.is_present("skip_new_upgrade_authority_signer_check") { + let (new_upgrade_authority_signer, _) = + signer_of(matches, "new_upgrade_authority", wallet_manager)?; + signers.push(new_upgrade_authority_signer); + } + + let signer_info = + default_signer.generate_unique_signers(signers, matches, wallet_manager)?; + + if matches.is_present("skip_new_upgrade_authority_signer_check") || is_final { + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { + program_pubkey, + upgrade_authority_index: signer_info.index_of(upgrade_authority_pubkey), + new_upgrade_authority, + }), + signers: signer_info.signers, + } + } else { + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthorityChecked { + program_pubkey, + upgrade_authority_index: signer_info + .index_of(upgrade_authority_pubkey) + .expect("upgrade authority is missing from signers"), + new_upgrade_authority_index: signer_info + .index_of(new_upgrade_authority) + .expect("new upgrade authority is missing from signers"), + }), + signers: signer_info.signers, + } } } ("show", Some(matches)) => { @@ -731,6 +766,17 @@ pub fn process_program_subcommand( *upgrade_authority_index, *new_upgrade_authority, ), + ProgramCliCommand::SetUpgradeAuthorityChecked { + program_pubkey, + upgrade_authority_index, + new_upgrade_authority_index, + } => process_set_authority_checked( + &rpc_client, + config, + *program_pubkey, + *upgrade_authority_index, + *new_upgrade_authority_index, + ), ProgramCliCommand::Show { account_pubkey, authority_pubkey, @@ -1167,6 +1213,51 @@ fn process_set_authority( Ok(config.output_format.formatted_string(&authority)) } +fn process_set_authority_checked( + rpc_client: &RpcClient, + config: &CliConfig, + program_pubkey: Pubkey, + authority_index: SignerIndex, + new_authority_index: 
SignerIndex, +) -> ProcessResult { + let authority_signer = config.signers[authority_index]; + let new_authority_signer = config.signers[new_authority_index]; + + trace!("Set a new (checked) authority"); + let blockhash = rpc_client.get_latest_blockhash()?; + + let mut tx = Transaction::new_unsigned(Message::new( + &[bpf_loader_upgradeable::set_upgrade_authority_checked( + &program_pubkey, + &authority_signer.pubkey(), + &new_authority_signer.pubkey(), + )], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign( + &[config.signers[0], authority_signer, new_authority_signer], + blockhash, + )?; + rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ) + .map_err(|e| format!("Setting authority failed: {e}"))?; + + let authority = CliProgramAuthority { + authority: new_authority_signer.pubkey().to_string(), + account_type: CliProgramAccountType::Program, + }; + Ok(config.output_format.formatted_string(&authority)) +} + const ACCOUNT_TYPE_SIZE: usize = 4; const SLOT_SIZE: usize = size_of::(); const OPTION_SIZE: usize = 1; @@ -2622,6 +2713,7 @@ mod tests { &program_pubkey.to_string(), "--new-upgrade-authority", &new_authority_pubkey.to_string(), + "--skip-new-upgrade-authority-signer-check", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), @@ -2646,6 +2738,7 @@ mod tests { &program_pubkey.to_string(), "--new-upgrade-authority", &new_authority_pubkey_file, + "--skip-new-upgrade-authority-signer-check", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), @@ -2659,6 +2752,35 @@ mod tests { } ); + let program_pubkey = Pubkey::new_unique(); + let new_authority_pubkey = Keypair::new(); + let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); + write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap(); + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program", + "set-upgrade-authority", + &program_pubkey.to_string(), + "--new-upgrade-authority", + &new_authority_pubkey_file, + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthorityChecked { + program_pubkey, + upgrade_authority_index: 0, + new_upgrade_authority_index: 1, + }), + signers: vec![ + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&new_authority_pubkey_file) + .unwrap() + .into(), + ], + } + ); + let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Keypair::new(); let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 59863b33bc71d0..574ee68dff30b2 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -13,7 +13,6 @@ publish = false [dependencies] futures-util = "0.3.21" serde_json = "1.0.83" -serial_test = "0.9.0" solana-ledger = { path = "../ledger", version = "=1.15.0" } solana-measure = { path = "../measure", version = "=1.15.0" } solana-merkle-tree = { path = "../merkle-tree", version = "=1.15.0" } @@ -32,6 +31,7 @@ solana-transaction-status = { path = "../transaction-status", version = "=1.15.0 solana-version = { path = "../version", version = "=1.15.0" } systemstat = "0.2.0" tokio = { version = "~1.14.1", features = ["full"] } +tungstenite = { 
version = "0.17.2", features = ["rustls-tls-webpki-roots"] } [dev-dependencies] solana-logger = { path = "../logger", version = "=1.15.0" } diff --git a/client-test/tests/client.rs b/client-test/tests/client.rs index 7af15cb3f12b48..6e3dded7e1f2d9 100644 --- a/client-test/tests/client.rs +++ b/client-test/tests/client.rs @@ -1,7 +1,6 @@ use { futures_util::StreamExt, serde_json::{json, Value}, - serial_test::serial, solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}, solana_pubsub_client::{nonblocking, pubsub_client::PubsubClient}, solana_rpc::{ @@ -42,15 +41,25 @@ use { collections::HashSet, net::{IpAddr, SocketAddr}, sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU16, AtomicU64, Ordering}, Arc, RwLock, }, thread::sleep, time::{Duration, Instant}, }, systemstat::Ipv4Addr, + tungstenite::connect, }; +static NEXT_RPC_PUBSUB_PORT: AtomicU16 = AtomicU16::new(rpc_port::DEFAULT_RPC_PUBSUB_PORT); + +fn pubsub_addr() -> SocketAddr { + SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + NEXT_RPC_PUBSUB_PORT.fetch_add(1, Ordering::Relaxed), + ) +} + #[test] fn test_rpc_client() { solana_logger::setup(); @@ -84,7 +93,7 @@ fn test_rpc_client() { let now = Instant::now(); while now.elapsed().as_secs() <= 20 { let response = client - .confirm_transaction_with_commitment(&signature, CommitmentConfig::default()) + .confirm_transaction_with_commitment(&signature, CommitmentConfig::processed()) .unwrap(); if response.value { @@ -98,22 +107,24 @@ fn test_rpc_client() { assert!(confirmed_tx); assert_eq!( - client.get_balance(&bob_pubkey).unwrap(), + client + .get_balance_with_commitment(&bob_pubkey, CommitmentConfig::processed()) + .unwrap() + .value, sol_to_lamports(20.0) ); assert_eq!( - client.get_balance(&alice.pubkey()).unwrap(), + client + .get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::processed()) + .unwrap() + .value, original_alice_balance - sol_to_lamports(20.0) ); } #[test] -#[serial] fn test_account_subscription() { - let pubsub_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port::DEFAULT_RPC_PUBSUB_PORT, - ); + let pubsub_addr = pubsub_addr(); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { @@ -138,7 +149,9 @@ fn test_account_subscription() { )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); - std::thread::sleep(Duration::from_millis(400)); + + check_server_is_ready_or_panic(&pubsub_addr, 10, Duration::from_millis(300)); + let config = Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::finalized()), encoding: None, @@ -207,7 +220,6 @@ fn test_account_subscription() { } #[test] -#[serial] fn test_block_subscription() { // setup BankForks let exit = Arc::new(AtomicBool::new(false)); @@ -253,17 +265,14 @@ fn test_block_subscription() { Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )); - let pubsub_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port::DEFAULT_RPC_PUBSUB_PORT, - ); + let pubsub_addr = pubsub_addr(); let pub_cfg = PubSubConfig { enable_block_subscription: true, ..PubSubConfig::default() }; let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr); - std::thread::sleep(Duration::from_millis(400)); + check_server_is_ready_or_panic(&pubsub_addr, 10, Duration::from_millis(300)); // setup PubsubClient let (mut client, receiver) = PubsubClient::block_subscribe( @@ -316,12 
+325,8 @@ fn test_block_subscription() { } #[test] -#[serial] fn test_program_subscription() { - let pubsub_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port::DEFAULT_RPC_PUBSUB_PORT, - ); + let pubsub_addr = pubsub_addr(); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { @@ -346,7 +351,9 @@ fn test_program_subscription() { )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); - std::thread::sleep(Duration::from_millis(400)); + + check_server_is_ready_or_panic(&pubsub_addr, 10, Duration::from_millis(300)); + let config = Some(RpcProgramAccountsConfig { ..RpcProgramAccountsConfig::default() }); @@ -408,12 +415,8 @@ fn test_program_subscription() { } #[test] -#[serial] fn test_root_subscription() { - let pubsub_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port::DEFAULT_RPC_PUBSUB_PORT, - ); + let pubsub_addr = pubsub_addr(); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); @@ -432,7 +435,9 @@ fn test_root_subscription() { )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); - std::thread::sleep(Duration::from_millis(400)); + + check_server_is_ready_or_panic(&pubsub_addr, 10, Duration::from_millis(300)); + let (mut client, receiver) = PubsubClient::root_subscribe(&format!("ws://0.0.0.0:{}/", pubsub_addr.port())).unwrap(); @@ -461,12 +466,8 @@ fn test_root_subscription() { } #[test] -#[serial] fn test_slot_subscription() { - let pubsub_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port::DEFAULT_RPC_PUBSUB_PORT, - ); + let pubsub_addr = pubsub_addr(); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); @@ -483,7 +484,8 @@ fn test_slot_subscription() { )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); - std::thread::sleep(Duration::from_millis(400)); + + check_server_is_ready_or_panic(&pubsub_addr, 10, Duration::from_millis(300)); let (mut client, receiver) = PubsubClient::slot_subscribe(&format!("ws://0.0.0.0:{}/", pubsub_addr.port())).unwrap(); @@ -523,7 +525,6 @@ fn test_slot_subscription() { } #[tokio::test] -#[serial] async fn test_slot_subscription_async() { let sync_service = Arc::new(AtomicU64::new(0)); let sync_client = Arc::clone(&sync_service); @@ -533,10 +534,7 @@ async fn test_slot_subscription_async() { } } - let pubsub_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port::DEFAULT_RPC_PUBSUB_PORT, - ); + let pubsub_addr = pubsub_addr(); tokio::task::spawn_blocking(move || { let exit = Arc::new(AtomicBool::new(false)); @@ -555,7 +553,9 @@ async fn test_slot_subscription_async() { )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); - sleep(Duration::from_millis(100)); + + check_server_is_ready_or_panic(&pubsub_addr, 10, Duration::from_millis(100)); + sync_service.store(1, Ordering::Relaxed); wait_until(&sync_service, 2); @@ -604,3 +604,20 @@ async fn test_slot_subscription_async() { unsubscribe().await; } + +fn check_server_is_ready_or_panic(socket_addr: &SocketAddr, retry: u8, sleep_duration: Duration) { + loop { + if retry == 0 { + break; + } else { + retry.checked_sub(1).unwrap(); + } + + if connect(format!("ws://{socket_addr}")).is_ok() { + return; + } + sleep(sleep_duration); + } + + panic!("server hasn't been ready"); +} diff --git a/client/src/quic_client.rs b/client/src/quic_client.rs index 3bdd341ae5af6b..7c57fa3b330c25 100644 --- a/client/src/quic_client.rs +++ b/client/src/quic_client.rs @@ -29,7 +29,7 @@ impl TpuConnection for QuicTpuConnection { let _lock = ASYNC_TASK_SEMAPHORE.acquire(); let inner = self.inner.clone(); - let _ = RUNTIME + _ = RUNTIME .spawn(async move { send_wire_transaction_async(inner, wire_transaction).await }); Ok(()) } @@ -37,8 +37,7 @@ impl TpuConnection for QuicTpuConnection { fn send_wire_transaction_batch_async(&self, buffers: Vec>) -> TransportResult<()> { let _lock = ASYNC_TASK_SEMAPHORE.acquire(); let inner = self.inner.clone(); - let _ = - RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await }); + _ = RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await }); Ok(()) } } diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index a58874fa0b7fb6..2f30bedb116753 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -19,7 +19,7 @@ use { sorted_storages::SortedStorages, }, solana_sdk::{ - clock::{Slot, SLOT_MS}, + clock::{Slot, DEFAULT_MS_PER_SLOT}, hash::Hash, pubkey::Pubkey, }, @@ -51,7 +51,7 @@ impl AccountsHashVerifier { snapshot_config: SnapshotConfig, ) -> Self { // If there are no accounts packages to process, limit how often we re-check - const LOOP_LIMITER: Duration = Duration::from_millis(SLOT_MS); + const LOOP_LIMITER: Duration = Duration::from_millis(DEFAULT_MS_PER_SLOT); let exit = exit.clone(); let cluster_info = cluster_info.clone(); let t_accounts_hash_verifier = Builder::new() diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index 
6707171ec7c201..7a771bb9d7b841 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -22,7 +22,7 @@ use { }, solana_runtime::bank::Bank, solana_sdk::{ - clock::{Slot, SLOT_MS}, + clock::{Slot, DEFAULT_MS_PER_SLOT}, pubkey::Pubkey, signature::Signable, signer::keypair::Keypair, @@ -523,6 +523,7 @@ impl AncestorHashesService { let serve_repair = ServeRepair::new( repair_info.cluster_info.clone(), repair_info.bank_forks.clone(), + repair_info.repair_whitelist.clone(), ); let mut repair_stats = AncestorRepairRequestsStats::default(); @@ -553,7 +554,7 @@ impl AncestorHashesService { &mut request_throttle, ); - sleep(Duration::from_millis(SLOT_MS)); + sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); }) .unwrap() } @@ -969,8 +970,11 @@ mod test { Arc::new(keypair), SocketAddrSpace::Unspecified, ); - let responder_serve_repair = - ServeRepair::new(Arc::new(cluster_info), vote_simulator.bank_forks); + let responder_serve_repair = ServeRepair::new( + Arc::new(cluster_info), + vote_simulator.bank_forks, + Arc::>>::default(), // repair whitelist + ); // Set up thread to give us responses let ledger_path = get_tmp_ledger_path!(); @@ -1054,8 +1058,12 @@ mod test { Arc::new(keypair), SocketAddrSpace::Unspecified, )); - let requester_serve_repair = - ServeRepair::new(requester_cluster_info.clone(), bank_forks.clone()); + let repair_whitelist = Arc::new(RwLock::new(HashSet::default())); + let requester_serve_repair = ServeRepair::new( + requester_cluster_info.clone(), + bank_forks.clone(), + repair_whitelist.clone(), + ); let (duplicate_slots_reset_sender, _duplicate_slots_reset_receiver) = unbounded(); let repair_info = RepairInfo { bank_forks, @@ -1064,6 +1072,7 @@ mod test { epoch_schedule, duplicate_slots_reset_sender, repair_validators: None, + repair_whitelist, }; let (ancestor_hashes_replay_update_sender, ancestor_hashes_replay_update_receiver) = diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 0844e6e9bdaa17..df19f841709e9f 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -173,7 +173,10 @@ pub struct RepairInfo { pub cluster_slots: Arc, pub epoch_schedule: EpochSchedule, pub duplicate_slots_reset_sender: DuplicateSlotsResetSender, + // Validators from which repairs are requested pub repair_validators: Option>, + // Validators which should be given priority when serving + pub repair_whitelist: Arc>>, } pub struct RepairSlotRange { @@ -251,6 +254,7 @@ impl RepairService { let serve_repair = ServeRepair::new( repair_info.cluster_info.clone(), repair_info.bank_forks.clone(), + repair_info.repair_whitelist.clone(), ); let id = repair_info.cluster_info.id(); let mut repair_stats = RepairStats::default(); @@ -1084,7 +1088,11 @@ mod test { let cluster_slots = ClusterSlots::default(); let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info)); let identity_keypair = cluster_info.keypair().clone(); - let serve_repair = ServeRepair::new(cluster_info, bank_forks); + let serve_repair = ServeRepair::new( + cluster_info, + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); let mut duplicate_slot_repair_statuses = HashMap::new(); let dead_slot = 9; let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -1179,7 +1187,11 @@ mod test { UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap(), )); let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info)); - let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let serve_repair = 
ServeRepair::new( + cluster_info.clone(), + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); let valid_repair_peer = Node::new_localhost().info; // Signal that this peer has confirmed the dead slot, and is thus diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index ae90df167ef93a..998b1da253a0a0 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -159,6 +159,7 @@ struct ServeRepairStats { dropped_requests_outbound_bandwidth: usize, dropped_requests_load_shed: usize, dropped_requests_low_stake: usize, + whitelisted_requests: usize, total_dropped_response_packets: usize, total_response_packets: usize, total_response_bytes_staked: usize, @@ -281,6 +282,7 @@ impl RepairProtocol { pub struct ServeRepair { cluster_info: Arc, bank_forks: Arc>, + repair_whitelist: Arc>>, } // Cache entry for repair peers for a slot. @@ -316,11 +318,23 @@ impl RepairPeers { } } +struct RepairRequestWithMeta { + request: RepairProtocol, + from_addr: SocketAddr, + stake: u64, + whitelisted: bool, +} + impl ServeRepair { - pub fn new(cluster_info: Arc, bank_forks: Arc>) -> Self { + pub fn new( + cluster_info: Arc, + bank_forks: Arc>, + repair_whitelist: Arc>>, + ) -> Self { Self { cluster_info, bank_forks, + repair_whitelist, } } @@ -456,7 +470,11 @@ impl ServeRepair { let my_id = identity_keypair.pubkey(); let max_buffered_packets = if root_bank.cluster_type() != ClusterType::MainnetBeta { - 2 * MAX_REQUESTS_PER_ITERATION + if self.repair_whitelist.read().unwrap().len() > 0 { + 4 * MAX_REQUESTS_PER_ITERATION + } else { + 2 * MAX_REQUESTS_PER_ITERATION + } } else { MAX_REQUESTS_PER_ITERATION }; @@ -475,58 +493,74 @@ impl ServeRepair { stats.total_requests += total_requests; let decode_start = Instant::now(); - let mut decoded_reqs = Vec::default(); - for packet in reqs_v.iter().flatten() { - let request: RepairProtocol = match packet.deserialize_slice(..) { - Ok(request) => request, - Err(_) => { + let mut decoded_requests = Vec::default(); + let mut whitelisted_request_count: usize = 0; + { + let whitelist = self.repair_whitelist.read().unwrap(); + for packet in reqs_v.iter().flatten() { + let request: RepairProtocol = match packet.deserialize_slice(..) 
{ + Ok(request) => request, + Err(_) => { + stats.err_malformed += 1; + continue; + } + }; + + let from_addr = packet.meta().socket_addr(); + if !ContactInfo::is_valid_address(&from_addr, &socket_addr_space) { stats.err_malformed += 1; continue; } - }; - let from_addr = packet.meta().socket_addr(); - if !ContactInfo::is_valid_address(&from_addr, &socket_addr_space) { - stats.err_malformed += 1; - continue; - } + if request.supports_signature() { + // collect stats for signature verification + Self::verify_signed_packet(&my_id, packet, &request, stats); + } else { + stats.unsigned_requests += 1; + } - if request.supports_signature() { - // collect stats for signature verification - Self::verify_signed_packet(&my_id, packet, &request, stats); - } else { - stats.unsigned_requests += 1; - } + if request.sender() == &my_id { + stats.self_repair += 1; + continue; + } - if request.sender() == &my_id { - stats.self_repair += 1; - continue; - } + let stake = epoch_staked_nodes + .as_ref() + .and_then(|stakes| stakes.get(request.sender())) + .unwrap_or(&0); + if *stake == 0 { + stats.handle_requests_unstaked += 1; + } else { + stats.handle_requests_staked += 1; + } - let stake = epoch_staked_nodes - .as_ref() - .and_then(|stakes| stakes.get(request.sender())) - .unwrap_or(&0); - if *stake == 0 { - stats.handle_requests_unstaked += 1; - } else { - stats.handle_requests_staked += 1; + let whitelisted = whitelist.contains(request.sender()); + if whitelisted { + whitelisted_request_count += 1; + } + + decoded_requests.push(RepairRequestWithMeta { + request, + from_addr, + stake: *stake, + whitelisted, + }); } - decoded_reqs.push((request, from_addr, *stake)); } stats.decode_time_us += decode_start.elapsed().as_micros() as u64; + stats.whitelisted_requests += whitelisted_request_count.min(MAX_REQUESTS_PER_ITERATION); - if decoded_reqs.len() > MAX_REQUESTS_PER_ITERATION { - stats.dropped_requests_low_stake += decoded_reqs.len() - MAX_REQUESTS_PER_ITERATION; - decoded_reqs.sort_unstable_by_key(|(_, _, stake)| Reverse(*stake)); - decoded_reqs.truncate(MAX_REQUESTS_PER_ITERATION); + if decoded_requests.len() > MAX_REQUESTS_PER_ITERATION { + stats.dropped_requests_low_stake += decoded_requests.len() - MAX_REQUESTS_PER_ITERATION; + decoded_requests.sort_unstable_by_key(|r| Reverse((r.whitelisted, r.stake))); + decoded_requests.truncate(MAX_REQUESTS_PER_ITERATION); } self.handle_packets( ping_cache, recycler, blockstore, - decoded_reqs, + decoded_requests, response_sender, stats, data_budget, @@ -564,6 +598,7 @@ impl ServeRepair { stats.dropped_requests_low_stake, i64 ), + ("whitelisted_requests", stats.whitelisted_requests, i64), ( "total_dropped_response_packets", stats.total_dropped_response_packets, @@ -778,7 +813,7 @@ impl ServeRepair { ping_cache: &mut PingCache, recycler: &PacketBatchRecycler, blockstore: &Blockstore, - requests: Vec<(RepairProtocol, SocketAddr, /*stake*/ u64)>, + requests: Vec, response_sender: &PacketBatchSender, stats: &mut ServeRepairStats, data_budget: &DataBudget, @@ -787,7 +822,16 @@ impl ServeRepair { let mut pending_pings = Vec::default(); let requests_len = requests.len(); - for (i, (request, from_addr, stake)) in requests.into_iter().enumerate() { + for ( + i, + RepairRequestWithMeta { + request, + from_addr, + stake, + .. 
+ }, + ) in requests.into_iter().enumerate() + { if !matches!(&request, RepairProtocol::Pong(_)) { let (check, ping_pkt) = Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair); @@ -1246,7 +1290,11 @@ mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me)); - let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let serve_repair = ServeRepair::new( + cluster_info.clone(), + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); let keypair = cluster_info.keypair().clone(); let repair_peer_id = solana_sdk::pubkey::new_rand(); let repair_request = ShredRepairType::Orphan(123); @@ -1292,7 +1340,11 @@ mod tests { let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let serve_repair = ServeRepair::new(cluster_info, bank_forks); + let serve_repair = ServeRepair::new( + cluster_info, + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); let request_bytes = serve_repair .ancestor_repair_request_bytes(&keypair, &repair_peer_id, slot, nonce) @@ -1326,7 +1378,11 @@ mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me)); - let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let serve_repair = ServeRepair::new( + cluster_info.clone(), + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); let keypair = cluster_info.keypair().clone(); let repair_peer_id = solana_sdk::pubkey::new_rand(); @@ -1653,7 +1709,11 @@ mod tests { let cluster_slots = ClusterSlots::default(); let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me)); - let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let serve_repair = ServeRepair::new( + cluster_info.clone(), + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); let identity_keypair = cluster_info.keypair().clone(); let mut outstanding_requests = OutstandingShredRepairs::default(); let rv = serve_repair.repair_request( @@ -1984,7 +2044,11 @@ mod tests { cluster_info.insert_info(contact_info2.clone()); cluster_info.insert_info(contact_info3.clone()); let identity_keypair = cluster_info.keypair().clone(); - let serve_repair = ServeRepair::new(cluster_info, bank_forks); + let serve_repair = ServeRepair::new( + cluster_info, + bank_forks, + Arc::new(RwLock::new(HashSet::default())), + ); // If: // 1) repair validator set doesn't exist in gossip diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 8bdfcaa2499cb0..0e1d5b3021b7d9 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -80,7 +80,10 @@ pub struct TvuSockets { pub struct TvuConfig { pub max_ledger_shreds: Option, pub shred_version: u16, + // Validators from which repairs are requested pub repair_validators: Option>, + // Validators which should be given priority when serving repairs + pub repair_whitelist: Arc>>, pub wait_for_vote_to_start_leader: bool, pub replay_slots_concurrently: bool, } @@ -189,6 +192,7 @@ impl Tvu { epoch_schedule, duplicate_slots_reset_sender, repair_validators: tvu_config.repair_validators, + repair_whitelist: tvu_config.repair_whitelist, cluster_info: cluster_info.clone(), 
cluster_slots: cluster_slots.clone(), }; diff --git a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs index 532fd3da8c432a..631239b221d211 100644 --- a/core/src/unprocessed_transaction_storage.rs +++ b/core/src/unprocessed_transaction_storage.rs @@ -542,27 +542,14 @@ impl ThreadLocalUnprocessedPackets { ) } - fn filter_forwardable_packets_and_add_batches( - &mut self, - bank: Arc, - forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, - ) -> FilterForwardingResults { - self.filter_and_forward_with_account_limits( - bank, - forward_packet_batches_by_accounts, - UNPROCESSED_BUFFER_STEP_SIZE, - ) - } - /// Filter out packets that fail to sanitize, or are no longer valid (could be /// too old, a duplicate of something already processed). Doing this in batches to avoid /// checking bank's blockhash and status cache per transaction which could be bad for performance. /// Added valid and sanitized packets to forwarding queue. - fn filter_and_forward_with_account_limits( + fn filter_forwardable_packets_and_add_batches( &mut self, bank: Arc, forward_buffer: &mut ForwardPacketBatchesByAccounts, - batch_size: usize, ) -> FilterForwardingResults { let mut total_forwardable_tracer_packets: usize = 0; let mut total_tracer_packets_in_buffer: usize = 0; @@ -582,7 +569,7 @@ impl ThreadLocalUnprocessedPackets { new_priority_queue.extend( original_priority_queue .drain_desc() - .chunks(batch_size) + .chunks(UNPROCESSED_BUFFER_STEP_SIZE) .into_iter() .flat_map(|packets_to_process| { // Only process packets not yet forwarded diff --git a/core/src/validator.rs b/core/src/validator.rs index c1f6655aed8880..e1fbe7e4d74906 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -140,6 +140,7 @@ pub struct ValidatorConfig { pub new_hard_forks: Option>, pub known_validators: Option>, // None = trust all pub repair_validators: Option>, // None = repair from all + pub repair_whitelist: Arc>>, // Empty = repair with all pub gossip_validators: Option>, // None = gossip with all pub halt_on_known_validators_accounts_hash_mismatch: bool, pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection @@ -201,6 +202,7 @@ impl Default for ValidatorConfig { new_hard_forks: None, known_validators: None, repair_validators: None, + repair_whitelist: Arc::new(RwLock::new(HashSet::default())), gossip_validators: None, halt_on_known_validators_accounts_hash_mismatch: false, accounts_hash_fault_injection_slots: 0, @@ -870,7 +872,11 @@ impl Validator { Some(stats_reporter_sender.clone()), &exit, ); - let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks.clone()); + let serve_repair = ServeRepair::new( + cluster_info.clone(), + bank_forks.clone(), + config.repair_whitelist.clone(), + ); let serve_repair_service = ServeRepairService::new( serve_repair, blockstore.clone(), @@ -964,6 +970,7 @@ impl Validator { max_ledger_shreds: config.max_ledger_shreds, shred_version: node.info.shred_version, repair_validators: config.repair_validators.clone(), + repair_whitelist: config.repair_whitelist.clone(), wait_for_vote_to_start_leader, replay_slots_concurrently: config.replay_slots_concurrently, }, diff --git a/docs/src/developing/programming-model/runtime.md b/docs/src/developing/programming-model/runtime.md index de2dfd69d7b95d..89547cbd8a09ae 100644 --- a/docs/src/developing/programming-model/runtime.md +++ b/docs/src/developing/programming-model/runtime.md @@ -44,7 +44,7 @@ The policy is as follows: ## Balancing the balances Before and after each 
instruction, the sum of all account balances must stay the same. -E.g. if one account's balance is increased, another's must be decreased by the same ammount. +E.g. if one account's balance is increased, another's must be decreased by the same amount. Because the runtime can not see changes to accounts which were not passed to it, all accounts for which the balances were modified must be passed, even if they are not needed in the called instruction. diff --git a/docs/src/storage_rent_economics.md b/docs/src/storage_rent_economics.md index 8405c8906f3a29..4b65b250e140a7 100644 --- a/docs/src/storage_rent_economics.md +++ b/docs/src/storage_rent_economics.md @@ -2,16 +2,38 @@ title: Storage Rent Economics --- -Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory, account for the acute, transactional, costs of validating and adding that data to the ledger. Unaccounted in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only to validators but also to the broader network as active state grows so does data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent. +Each transaction that is submitted to the Solana ledger imposes costs. +Transaction fees paid by the submitter, and collected by a validator, in +theory, account for the acute, transactional, costs of validating and adding +that data to the ledger. Unaccounted in this process is the mid-term storage of +active ledger state, necessarily maintained by the rotating validator set. This +type of storage imposes costs not only to validators but also to the broader +network as active state grows so does data transmission and validation +overhead. To account for these costs, we describe here our preliminary design +and implementation of storage rent. Storage rent can be paid via one of two methods: Method 1: Set it and forget it -With this approach, accounts with two-years worth of rent deposits secured are exempt from network rent charges. By maintaining this minimum-balance, the broader network benefits from reduced liquidity and the account holder can rest assured that their `Account::data` will be retained for continual access/usage. +With this approach, accounts with two-years worth of rent deposits secured are +exempt from network rent charges. By maintaining this minimum-balance, the +broader network benefits from reduced liquidity and the account holder can rest +assured that their `Account::data` will be retained for continual access/usage. Method 2: Pay per byte -If an account has less than two-years worth of deposited rent the network charges rent on a per-epoch basis, in credit for the next epoch. This rent is deducted at a rate specified in genesis, in lamports per kilobyte-year. +If an account has less than two-years worth of deposited rent the network +charges rent on a per-epoch basis, in credit for the next epoch. This rent is +deducted at a rate specified in genesis, in lamports per kilobyte-year. -For information on the technical implementation details of this design, see the [Rent](implemented-proposals/rent.md) section. +For information on the technical implementation details of this design, see the +[Rent](implemented-proposals/rent.md) section. 
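As a rough illustration of the two-year exemption threshold described above, the sketch below (not part of this patch) queries a cluster for the rent-exempt minimum of a given account size. It assumes the `solana-client` crate's `RpcClient`; the RPC URL and the 165-byte data length are illustrative values only, not taken from this diff.

```rust
use solana_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Illustrative endpoint; substitute the RPC URL of the cluster you target.
    let client = RpcClient::new("https://api.devnet.solana.com".to_string());

    // Ask the cluster for the minimum balance (in lamports) that makes an
    // account holding `data_len` bytes rent exempt. 165 bytes is used here
    // purely as an example account size.
    let data_len = 165;
    let min_balance = client.get_minimum_balance_for_rent_exemption(data_len)?;
    println!("rent-exempt minimum for {data_len} bytes: {min_balance} lamports");
    Ok(())
}
```

Funding a new account with at least this many lamports keeps it exempt from the per-epoch rent charges described in Method 2.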
+ +**Note:** New accounts now **are required** to be initialized with enough +lamports to be rent exempt. Additionally, transactions that leave an account's +balance below the rent exempt minimum (and non-zero) will **fail**. This +essentially renders all accounts rent exempt. Rent-paying accounts that were +created before this requirement will continue paying rent until either (1) +their balance falls to zero, or (2) a transaction increases the account's +balance to be rent exempt. diff --git a/entry/src/poh.rs b/entry/src/poh.rs index 8a27a3ac1b2775..9716425b0c8293 100644 --- a/entry/src/poh.rs +++ b/entry/src/poh.rs @@ -10,7 +10,6 @@ pub struct Poh { num_hashes: u64, hashes_per_tick: u64, remaining_hashes: u64, - ticks_per_slot: u64, tick_number: u64, slot_start_time: Instant, } @@ -23,15 +22,10 @@ pub struct PohEntry { impl Poh { pub fn new(hash: Hash, hashes_per_tick: Option) -> Self { - Self::new_with_slot_info(hash, hashes_per_tick, 0, 0) + Self::new_with_slot_info(hash, hashes_per_tick, 0) } - pub fn new_with_slot_info( - hash: Hash, - hashes_per_tick: Option, - ticks_per_slot: u64, - tick_number: u64, - ) -> Self { + pub fn new_with_slot_info(hash: Hash, hashes_per_tick: Option, tick_number: u64) -> Self { let hashes_per_tick = hashes_per_tick.unwrap_or(std::u64::MAX); assert!(hashes_per_tick > 1); let now = Instant::now(); @@ -40,7 +34,6 @@ impl Poh { num_hashes: 0, hashes_per_tick, remaining_hashes: hashes_per_tick, - ticks_per_slot, tick_number, slot_start_time: now, } @@ -49,7 +42,7 @@ impl Poh { pub fn reset(&mut self, hash: Hash, hashes_per_tick: Option) { // retains ticks_per_slot: this cannot change without restarting the validator let tick_number = 0; - *self = Poh::new_with_slot_info(hash, hashes_per_tick, self.ticks_per_slot, tick_number); + *self = Poh::new_with_slot_info(hash, hashes_per_tick, tick_number); } pub fn target_poh_time(&self, target_ns_per_tick: u64) -> Instant { diff --git a/explorer/src/components/account/TokenAccountSection.tsx b/explorer/src/components/account/TokenAccountSection.tsx index 3aa7bd68473d50..dd187399324224 100644 --- a/explorer/src/components/account/TokenAccountSection.tsx +++ b/explorer/src/components/account/TokenAccountSection.tsx @@ -209,7 +209,7 @@ function FungibleTokenMintAccountCard({ {normalizeTokenAmount(info.supply, info.decimals).toLocaleString( "en-US", { - minimumFractionDigits: info.decimals, + maximumFractionDigits: 20, } )} diff --git a/explorer/src/components/account/UnknownAccountCard.tsx b/explorer/src/components/account/UnknownAccountCard.tsx index 188fafad4bf9d8..6e39bb3dfa8ee7 100644 --- a/explorer/src/components/account/UnknownAccountCard.tsx +++ b/explorer/src/components/account/UnknownAccountCard.tsx @@ -34,7 +34,11 @@ export function UnknownAccountCard({ account }: { account: Account }) { Balance (SOL) - + {account.lamports === 0 ? 
( + "Account does not exist" + ) : ( + + )} diff --git a/explorer/src/pages/AccountDetailsPage.tsx b/explorer/src/pages/AccountDetailsPage.tsx index a37ee60be1e96a..1428124d16ab25 100644 --- a/explorer/src/pages/AccountDetailsPage.tsx +++ b/explorer/src/pages/AccountDetailsPage.tsx @@ -35,7 +35,7 @@ import { useFetchAccountInfo, useMintAccountInfo, } from "providers/accounts"; -import { useFlaggedAccounts } from "providers/accounts/flagged-accounts"; +import FLAGGED_ACCOUNTS_WARNING from "providers/accounts/flagged-accounts"; import isMetaplexNFT from "providers/accounts/utils/isMetaplexNFT"; import { useAnchorProgram } from "providers/anchor"; import { CacheEntry, FetchStatus } from "providers/cache"; @@ -295,7 +295,6 @@ function DetailsSections({ const fetchAccount = useFetchAccountInfo(); const address = pubkey.toBase58(); const location = useLocation(); - const { flaggedAccounts } = useFlaggedAccounts(); if (!info || info.status === FetchStatus.Fetching) { return ; @@ -329,12 +328,7 @@ function DetailsSections({ return ( <> - {flaggedAccounts.has(address) && ( -
- Warning! This account has been flagged by the community as a scam - account. Please be cautious sending SOL to this account. -
- )} + {FLAGGED_ACCOUNTS_WARNING[address] ?? null} - + {cluster.cluster === Cluster.Custom ? : null} ); } diff --git a/explorer/src/providers/accounts/flagged-accounts.tsx b/explorer/src/providers/accounts/flagged-accounts.tsx index ff8ca6fa66532e..b9fcf05bc48ec3 100644 --- a/explorer/src/providers/accounts/flagged-accounts.tsx +++ b/explorer/src/providers/accounts/flagged-accounts.tsx @@ -1,47 +1,52 @@ import React from "react"; -import { fetch } from "cross-fetch"; - -const FLAGGED_REGISTRY = - "https://solana-labs.github.io/solana-flagged-accounts/flagged.txt"; - -type FlaggedMap = Map; -type ProviderProps = { children: React.ReactNode }; - -const FlaggedContext = React.createContext(new Map()); - -export function FlaggedAccountsProvider({ children }: ProviderProps) { - const [flaggedAccounts, setFlaggedAccounts] = React.useState( - new Map() - ); - - React.useEffect(() => { - fetch(FLAGGED_REGISTRY) - .then((res) => { - return res.text(); - }) - .then((body: string) => { - const flaggedAccounts = new Map(); - body - .split("\n") - .forEach((account) => flaggedAccounts.set(account, true)); - setFlaggedAccounts(flaggedAccounts); - }); - }, []); - - return ( - - {children} - - ); -} - -export function useFlaggedAccounts() { - const flaggedAccounts = React.useContext(FlaggedContext); - if (!flaggedAccounts) { - throw new Error( - `useFlaggedAccounts must be used within a AccountsProvider` - ); - } - - return { flaggedAccounts }; +import { Link } from "react-router-dom"; + +type FlaggedMap = Record; + +type IncidentId = "ftx-hack-november-2022" | "known-scam"; +type IncidentDescription = React.ReactElement; + +const FLAGGED_ACCOUNTS: Record = { + GACpXND1SSfTSQMmqGuFvGwXB3jGEYBDRGNzmLfTYwSP: "known-scam", + "9tAViia54YAaL9gv92hBu8K4QGRBKbytCQ9TYsJ6F6or": "known-scam", + // Serum Swap + "22Y43yTVxuUkoRKdm9thyRhQ3SdgQS7c7kB6UNCiaczD": "ftx-hack-november-2022", + // Serum Dex V3 + "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin": "ftx-hack-november-2022", + // Serum Dex V2 + EUqojwWA2rd19FZrzeBncJsm38Jm1hEhE3zsmX3bRc2o: "ftx-hack-november-2022", + // Serum Dex V1 + BJ3jrUzddfuSrZHXSCxMUUQsjKEyLmuuyZebkcaFp2fg: "ftx-hack-november-2022", +}; + +const INCIDENTS: Record = { + "known-scam": ( + <> +
+ Warning! This account has been flagged by the community as a scam + account. Please be cautious sending SOL to this account. +
+ + ), + "ftx-hack-november-2022": ( + <> +
+ Warning! This program's upgrade key may have been compromised by the FTX + hack. Please migrate to the community fork:{" "} + + https://github.com/openbook-dex/program + +
+ + ), +} as const; + +const FLAGGED_ACCOUNTS_WARNING: FlaggedMap = {}; +for (const [account, incidentId] of Object.entries(FLAGGED_ACCOUNTS)) { + FLAGGED_ACCOUNTS_WARNING[account] = INCIDENTS[incidentId]; } +export default FLAGGED_ACCOUNTS_WARNING; diff --git a/explorer/src/providers/accounts/index.tsx b/explorer/src/providers/accounts/index.tsx index c616f6e4f8a19a..2dead415a7122c 100644 --- a/explorer/src/providers/accounts/index.tsx +++ b/explorer/src/providers/accounts/index.tsx @@ -28,7 +28,6 @@ import { NonceAccount } from "validators/accounts/nonce"; import { SysvarAccount } from "validators/accounts/sysvar"; import { ConfigAccount } from "validators/accounts/config"; import { ParsedAddressLookupTableAccount } from "validators/accounts/address-lookup-table"; -import { FlaggedAccountsProvider } from "./flagged-accounts"; import { ProgramDataAccount, ProgramDataAccountInfo, @@ -177,9 +176,7 @@ export function AccountsProvider({ children }: AccountsProviderProps) { - - {children} - + {children} diff --git a/explorer/src/utils/anchor.tsx b/explorer/src/utils/anchor.tsx index 84ed84446361ab..8ae0e6c1ee24a9 100644 --- a/explorer/src/utils/anchor.tsx +++ b/explorer/src/utils/anchor.tsx @@ -6,6 +6,7 @@ import { useAnchorProgram } from "providers/anchor"; import { getProgramName } from "utils/tx"; import { snakeToTitleCase, camelToTitleCase, numberWithSeparator } from "utils"; import { + IdlField, IdlInstruction, IdlType, IdlTypeDef, @@ -279,8 +280,43 @@ function mapField( ); } else { - const enumValue = Object.keys(value)[0]; - return ( + const enumVariantName = Object.keys(value)[0]; + const variant = fieldType.type.variants.find( + (val) => + val.name.toLocaleLowerCase() === enumVariantName.toLocaleLowerCase() + ); + + return variant && variant.fields ? ( + + + {Object.entries(value[enumVariantName]).map( + ([innerKey, innerValue]: [string, any], index) => { + const innerFieldType = variant.fields![index]; + if (!innerFieldType) { + throw Error( + `Could not type definition for ${innerKey} field in user-defined struct ${fieldType.name}` + ); + } + return mapField( + innerKey, + innerValue, + (innerFieldType as any).name + ? 
(innerFieldType as IdlField).type + : (innerFieldType as IdlType), + idl, + key, + nestingLevel + 1 + ); + } + )} + + + ) : ( - {camelToTitleCase(enumValue)} + {camelToTitleCase(enumVariantName)} ); } diff --git a/geyser-plugin-manager/src/accounts_update_notifier.rs b/geyser-plugin-manager/src/accounts_update_notifier.rs index 5bbc0cd6032756..34a43d8958f31d 100644 --- a/geyser-plugin-manager/src/accounts_update_notifier.rs +++ b/geyser-plugin-manager/src/accounts_update_notifier.rs @@ -134,7 +134,7 @@ impl AccountsUpdateNotifierImpl { executable: stored_account_meta.account_meta.executable, rent_epoch: stored_account_meta.account_meta.rent_epoch, data: stored_account_meta.data, - write_version: stored_account_meta.meta.write_version, + write_version: stored_account_meta.meta.write_version_obsolete, txn_signature: None, }) } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index df1bf4c0ed4360..6c0efad5ba0838 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1800,7 +1800,7 @@ impl ClusterInfo { .unwrap() } - fn handle_batch_prune_messages(&self, messages: Vec<(Pubkey, PruneData)>) { + fn handle_batch_prune_messages(&self, messages: Vec) { let _st = ScopedTimer::from(&self.stats.handle_batch_prune_messages_time); if messages.is_empty() { return; @@ -1808,22 +1808,19 @@ impl ClusterInfo { self.stats .prune_message_count .add_relaxed(messages.len() as u64); - self.stats.prune_message_len.add_relaxed( - messages - .iter() - .map(|(_, data)| data.prunes.len() as u64) - .sum(), - ); + self.stats + .prune_message_len + .add_relaxed(messages.iter().map(|data| data.prunes.len() as u64).sum()); let mut prune_message_timeout = 0; let mut bad_prune_destination = 0; let self_pubkey = self.id(); { let _st = ScopedTimer::from(&self.stats.process_prune); let now = timestamp(); - for (from, data) in messages { + for data in messages { match self.gossip.process_prune_msg( &self_pubkey, - &from, + &data.pubkey, &data.destination, &data.prunes, data.wallclock, @@ -2263,12 +2260,7 @@ impl ClusterInfo { let origins: HashSet<_> = { let _st = ScopedTimer::from(&self.stats.process_push_message); let now = timestamp(); - messages - .into_iter() - .flat_map(|(from, crds_values)| { - self.gossip.process_push_message(&from, crds_values, now) - }) - .collect() + self.gossip.process_push_message(messages, now) }; // Generate prune messages. 
let self_pubkey = self.id(); @@ -2294,7 +2286,6 @@ impl ClusterInfo { let prune_messages: Vec<_> = { let gossip_crds = self.gossip.crds.read().unwrap(); let wallclock = timestamp(); - let self_pubkey = self.id(); thread_pool.install(|| { prunes .into_par_iter() @@ -2429,7 +2420,7 @@ impl ClusterInfo { check_duplicate_instance(&data)?; push_messages.push((from, data)); } - Protocol::PruneMessage(from, data) => prune_messages.push((from, data)), + Protocol::PruneMessage(_from, data) => prune_messages.push(data), Protocol::PingMessage(ping) => ping_messages.push((from_addr, ping)), Protocol::PongMessage(pong) => pong_messages.push((from_addr, pong)), } diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index 14d7d57aaa50c9..7df1b90419bb74 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -79,6 +79,7 @@ pub struct Crds { #[derive(PartialEq, Eq, Debug)] pub enum CrdsError { + DuplicatePush(/*num dups:*/ u8), InsertFailed, UnknownStakes, } @@ -115,6 +116,8 @@ pub struct VersionedCrdsValue { pub(crate) local_timestamp: u64, /// value hash pub(crate) value_hash: Hash, + /// Number of times duplicates of this value are recevied from gossip push. + num_push_dups: u8, } #[derive(Clone, Copy, Default)] @@ -140,6 +143,7 @@ impl VersionedCrdsValue { value, local_timestamp, value_hash, + num_push_dups: 0u8, } } } @@ -263,17 +267,25 @@ impl Crds { entry.insert(value); Ok(()) } - Entry::Occupied(entry) => { + Entry::Occupied(mut entry) => { self.stats.lock().unwrap().record_fail(&value, route); trace!( "INSERT FAILED data: {} new.wallclock: {}", value.value.label(), value.value.wallclock(), ); + // Identify if the message is outdated (as opposed to + // duplicate) by comparing value hashes. if entry.get().value_hash != value.value_hash { self.purged.push_back((value.value_hash, now)); + Err(CrdsError::InsertFailed) + } else if matches!(route, GossipRoute::PushMessage) { + let entry = entry.get_mut(); + entry.num_push_dups = entry.num_push_dups.saturating_add(1); + Err(CrdsError::DuplicatePush(entry.num_push_dups)) + } else { + Err(CrdsError::InsertFailed) } - Err(CrdsError::InsertFailed) } } } diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 88cf5b930f6a1b..34d867e38b713b 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -47,15 +47,10 @@ impl CrdsGossip { /// Returns unique origins' pubkeys of upserted values. pub fn process_push_message( &self, - from: &Pubkey, - values: Vec, + messages: Vec<(/*from:*/ Pubkey, Vec)>, now: u64, ) -> HashSet { - self.push - .process_push_message(&self.crds, from, values, now) - .into_iter() - .filter_map(Result::ok) - .collect() + self.push.process_push_message(&self.crds, messages, now) } /// Remove redundant paths in the network. 
@@ -68,8 +63,7 @@ impl CrdsGossip { where I: IntoIterator, { - self.push - .prune_received_cache_many(self_pubkey, origins, stakes) + self.push.prune_received_cache(self_pubkey, origins, stakes) } pub fn new_push_messages( @@ -163,11 +157,9 @@ impl CrdsGossip { wallclock: u64, now: u64, ) -> Result<(), CrdsGossipError> { - let expired = now > wallclock + self.push.prune_timeout; - if expired { - return Err(CrdsGossipError::PruneMessageTimeout); - } - if self_pubkey == destination { + if now > wallclock.saturating_add(self.push.prune_timeout) { + Err(CrdsGossipError::PruneMessageTimeout) + } else if self_pubkey == destination { self.push.process_prune_msg(self_pubkey, peer, origin); Ok(()) } else { @@ -318,10 +310,6 @@ impl CrdsGossip { timeouts: &HashMap, ) -> usize { let mut rv = 0; - if now > 5 * self.push.msg_timeout { - let min = now - 5 * self.push.msg_timeout; - self.push.purge_old_received_cache(min); - } if now > self.pull.crds_timeout { //sanity check assert_eq!(timeouts[self_pubkey], std::u64::MAX); diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 4b39fa764c0916..d98489363c06d2 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -15,11 +15,11 @@ use { crate::{ cluster_info::{Ping, CRDS_UNIQUE_PUBKEY_CAPACITY}, contact_info::ContactInfo, - crds::{Crds, Cursor, GossipRoute}, + crds::{Crds, CrdsError, Cursor, GossipRoute}, crds_gossip::{get_stake, get_weight}, - crds_gossip_error::CrdsGossipError, crds_value::CrdsValue, ping_pong::PingCache, + received_cache::ReceivedCache, weighted_shuffle::WeightedShuffle, }, bincode::serialized_size, @@ -49,15 +49,15 @@ use { }, }; -pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30; -pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6; +pub(crate) const CRDS_GOSSIP_NUM_ACTIVE: usize = 30; +const CRDS_GOSSIP_PUSH_FANOUT: usize = 6; // With a fanout of 6, a 1000 node cluster should only take ~4 hops to converge. // However since pushes are stake weighed, some trailing nodes // might need more time to receive values. 30 seconds should be plenty. pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000; -pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500; -pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15; -pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3; +const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500; +const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15; +const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3; // Do not push to peers which have not been updated for this long. const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000; @@ -69,17 +69,9 @@ pub struct CrdsGossipPush { /// Cursor into the crds table for values to push. crds_cursor: Mutex, /// Cache that tracks which validators a message was received from - /// bool indicates it has been pruned. 
- /// /// This cache represents a lagging view of which validators /// currently have this node in their `active_set` - #[allow(clippy::type_complexity)] - received_cache: Mutex< - HashMap< - Pubkey, // origin/owner - HashMap, - >, - >, + received_cache: Mutex, last_pushed_to: RwLock>, num_active: usize, push_fanout: usize, @@ -97,7 +89,7 @@ impl Default for CrdsGossipPush { max_bytes: PACKET_DATA_SIZE * 64, active_set: RwLock::default(), crds_cursor: Mutex::default(), - received_cache: Mutex::default(), + received_cache: Mutex::new(ReceivedCache::new(2 * CRDS_UNIQUE_PUBKEY_CAPACITY)), last_pushed_to: RwLock::new(LruCache::new(CRDS_UNIQUE_PUBKEY_CAPACITY)), num_active: CRDS_GOSSIP_NUM_ACTIVE, push_fanout: CRDS_GOSSIP_PUSH_FANOUT, @@ -115,12 +107,7 @@ impl CrdsGossipPush { crds.read().unwrap().get_entries(&mut cursor).count() } - fn prune_stake_threshold(self_stake: u64, origin_stake: u64) -> u64 { - let min_path_stake = self_stake.min(origin_stake); - ((CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT * min_path_stake as f64).round() as u64).max(1) - } - - pub(crate) fn prune_received_cache_many( + pub(crate) fn prune_received_cache( &self, self_pubkey: &Pubkey, origins: I, // Unique pubkeys of crds values' owners. @@ -133,81 +120,19 @@ impl CrdsGossipPush { origins .into_iter() .flat_map(|origin| { - let peers = Self::prune_received_cache( - self_pubkey, - &origin, - stakes, - received_cache.deref_mut(), - ); - peers.into_iter().zip(repeat(origin)) + received_cache + .prune( + self_pubkey, + origin, + CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT, + CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES, + stakes, + ) + .zip(repeat(origin)) }) .into_group_map() } - fn prune_received_cache( - self_pubkey: &Pubkey, - origin: &Pubkey, - stakes: &HashMap, - received_cache: &mut HashMap< - Pubkey, // origin/owner - HashMap, - >, - ) -> Vec { - let origin_stake = stakes.get(origin).unwrap_or(&0); - let self_stake = stakes.get(self_pubkey).unwrap_or(&0); - let peers = match received_cache.get_mut(origin) { - None => return Vec::default(), - Some(peers) => peers, - }; - let peer_stake_total: u64 = peers - .iter() - .filter(|(_, (pruned, _))| !pruned) - .filter_map(|(peer, _)| stakes.get(peer)) - .sum(); - let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake); - if peer_stake_total < prune_stake_threshold { - return Vec::new(); - } - let mut rng = rand::thread_rng(); - let shuffled_staked_peers = { - let peers: Vec<_> = peers - .iter() - .filter(|(_, (pruned, _))| !pruned) - .filter_map(|(peer, _)| Some((*peer, *stakes.get(peer)?))) - .filter(|(_, stake)| *stake > 0) - .collect(); - let weights: Vec<_> = peers.iter().map(|(_, stake)| *stake).collect(); - WeightedShuffle::new("prune-received-cache", &weights) - .shuffle(&mut rng) - .map(move |i| peers[i]) - }; - let mut keep = HashSet::new(); - let mut peer_stake_sum = 0; - keep.insert(*origin); - for (peer, stake) in shuffled_staked_peers { - if peer == *origin { - continue; - } - keep.insert(peer); - peer_stake_sum += stake; - if peer_stake_sum >= prune_stake_threshold - && keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES - { - break; - } - } - for (peer, (pruned, _)) in peers.iter_mut() { - if !*pruned && !keep.contains(peer) { - *pruned = true; - } - } - peers - .keys() - .filter(|peer| !keep.contains(peer)) - .copied() - .collect() - } - fn wallclock_window(&self, now: u64) -> impl RangeBounds { now.saturating_sub(self.msg_timeout)..=now.saturating_add(self.msg_timeout) } @@ -218,45 +143,36 @@ impl CrdsGossipPush { pub(crate) fn process_push_message( 
&self, crds: &RwLock, - from: &Pubkey, - values: Vec, + messages: Vec<(/*from:*/ Pubkey, Vec)>, now: u64, - ) -> Vec> { - self.num_total.fetch_add(values.len(), Ordering::Relaxed); - let values: Vec<_> = { - let wallclock_window = self.wallclock_window(now); - let mut received_cache = self.received_cache.lock().unwrap(); - values - .into_iter() - .map(|value| { - if !wallclock_window.contains(&value.wallclock()) { - return Err(CrdsGossipError::PushMessageTimeout); - } - let origin = value.pubkey(); - let peers = received_cache.entry(origin).or_default(); - peers - .entry(*from) - .and_modify(|(_pruned, timestamp)| *timestamp = now) - .or_insert((/*pruned:*/ false, now)); - Ok(value) - }) - .collect() - }; + ) -> HashSet { + let mut received_cache = self.received_cache.lock().unwrap(); let mut crds = crds.write().unwrap(); - values - .into_iter() - .map(|value| { - let value = value?; + let wallclock_window = self.wallclock_window(now); + let mut origins = HashSet::with_capacity(messages.len()); + for (from, values) in messages { + self.num_total.fetch_add(values.len(), Ordering::Relaxed); + for value in values { + if !wallclock_window.contains(&value.wallclock()) { + continue; + } let origin = value.pubkey(); match crds.insert(value, now, GossipRoute::PushMessage) { - Ok(()) => Ok(origin), + Ok(()) => { + received_cache.record(origin, from, /*num_dups:*/ 0); + origins.insert(origin); + } + Err(CrdsError::DuplicatePush(num_dups)) => { + received_cache.record(origin, from, usize::from(num_dups)); + self.num_old.fetch_add(1, Ordering::Relaxed); + } Err(_) => { self.num_old.fetch_add(1, Ordering::Relaxed); - Err(CrdsGossipError::PushMessageOldVersion) } } - }) - .collect() + } + } + origins } /// New push message to broadcast to peers. @@ -326,7 +242,12 @@ impl CrdsGossipPush { } /// Add the `from` to the peer's filter of nodes. - pub fn process_prune_msg(&self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) { + pub(crate) fn process_prune_msg( + &self, + self_pubkey: &Pubkey, + peer: &Pubkey, + origins: &[Pubkey], + ) { if let Some(filter) = self.active_set.read().unwrap().get(peer) { for origin in origins { if origin != self_pubkey { @@ -477,14 +398,6 @@ impl CrdsGossipPush { .collect() } - /// Purge received push message cache - pub(crate) fn purge_old_received_cache(&self, min_time: u64) { - self.received_cache.lock().unwrap().retain(|_, v| { - v.retain(|_, (_, t)| *t > min_time); - !v.is_empty() - }); - } - // Only for tests and simulations. 
pub(crate) fn mock_clone(&self) -> Self { let active_set = { @@ -502,7 +415,7 @@ impl CrdsGossipPush { } clone }; - let received_cache = self.received_cache.lock().unwrap().clone(); + let received_cache = self.received_cache.lock().unwrap().mock_clone(); let crds_cursor = *self.crds_cursor.lock().unwrap(); Self { active_set: RwLock::new(active_set), @@ -533,68 +446,6 @@ mod tests { ) } - #[test] - fn test_prune() { - let crds = RwLock::::default(); - let push = CrdsGossipPush::default(); - let mut stakes = HashMap::new(); - - let self_id = solana_sdk::pubkey::new_rand(); - let origin = solana_sdk::pubkey::new_rand(); - stakes.insert(self_id, 100); - stakes.insert(origin, 100); - - let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &origin, 0, - ))); - let low_staked_peers = (0..10).map(|_| solana_sdk::pubkey::new_rand()); - let mut low_staked_set = HashSet::new(); - low_staked_peers.for_each(|p| { - push.process_push_message(&crds, &p, vec![value.clone()], 0); - low_staked_set.insert(p); - stakes.insert(p, 1); - }); - - let pruned = { - let mut received_cache = push.received_cache.lock().unwrap(); - CrdsGossipPush::prune_received_cache( - &self_id, - &origin, - &stakes, - received_cache.deref_mut(), - ) - }; - assert!( - pruned.is_empty(), - "should not prune if min threshold has not been reached" - ); - - let high_staked_peer = solana_sdk::pubkey::new_rand(); - let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10; - stakes.insert(high_staked_peer, high_stake); - push.process_push_message(&crds, &high_staked_peer, vec![value], 0); - - let pruned = { - let mut received_cache = push.received_cache.lock().unwrap(); - CrdsGossipPush::prune_received_cache( - &self_id, - &origin, - &stakes, - received_cache.deref_mut(), - ) - }; - assert!( - pruned.len() < low_staked_set.len() + 1, - "should not prune all peers" - ); - pruned.iter().for_each(|p| { - assert!( - low_staked_set.contains(p), - "only low staked peers should be pruned" - ); - }); - } - #[test] fn test_process_push_one() { let crds = RwLock::::default(); @@ -606,16 +457,15 @@ mod tests { let label = value.label(); // push a new message assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value.clone()], 0), - [Ok(label.pubkey())], + push.process_push_message(&crds, vec![(Pubkey::default(), vec![value.clone()])], 0), + [label.pubkey()].into_iter().collect(), ); assert_eq!(crds.read().unwrap().get::<&CrdsValue>(&label), Some(&value)); // push it again - assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], 0), - [Err(CrdsGossipError::PushMessageOldVersion)], - ); + assert!(push + .process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0) + .is_empty()); } #[test] fn test_process_push_old_version() { @@ -627,17 +477,16 @@ mod tests { // push a new message assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], 0), - [Ok(ci.id)], + push.process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0), + [ci.id].into_iter().collect() ); // push an old version ci.wallclock = 0; let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); - assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], 0), - [Err(CrdsGossipError::PushMessageOldVersion)], - ); + assert!(push + .process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0) + .is_empty()); } #[test] fn test_process_push_timeout() { @@ -649,18 +498,16 @@ mod tests { // push a version to far in the future 
ci.wallclock = timeout + 1; let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())); - assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], 0), - [Err(CrdsGossipError::PushMessageTimeout)], - ); + assert!(push + .process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0) + .is_empty()); // push a version to far in the past ci.wallclock = 0; let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); - assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], timeout + 1), - [Err(CrdsGossipError::PushMessageTimeout)] - ); + assert!(push + .process_push_message(&crds, vec![(Pubkey::default(), vec![value])], timeout + 1) + .is_empty()); } #[test] fn test_process_push_update() { @@ -673,16 +520,16 @@ mod tests { // push a new message assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value_old], 0), - [Ok(origin)], + push.process_push_message(&crds, vec![(Pubkey::default(), vec![value_old])], 0), + [origin].into_iter().collect() ); // push an old version ci.wallclock = 1; let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], 0), - [Ok(origin)], + push.process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0), + [origin].into_iter().collect() ); } #[test] @@ -997,8 +844,8 @@ mod tests { expected.insert(peer.label().pubkey(), vec![new_msg.clone()]); let origin = new_msg.pubkey(); assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![new_msg], 0), - [Ok(origin)] + push.process_push_message(&crds, vec![(Pubkey::default(), vec![new_msg])], 0), + [origin].into_iter().collect() ); assert_eq!(push.active_set.read().unwrap().len(), 1); assert_eq!(push.new_push_messages(&crds, 0).0, expected); @@ -1030,8 +877,12 @@ mod tests { ); let crds = RwLock::new(crds); assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![peers[2].clone()], now), - [Ok(origin[2])], + push.process_push_message( + &crds, + vec![(Pubkey::default(), vec![peers[2].clone()])], + now + ), + [origin[2]].into_iter().collect() ); let ping_cache = Mutex::new(ping_cache); push.refresh_push_active_set( @@ -1092,8 +943,8 @@ mod tests { let expected = HashMap::new(); let origin = new_msg.pubkey(); assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![new_msg.clone()], 0), - [Ok(origin)], + push.process_push_message(&crds, vec![(Pubkey::default(), vec![new_msg.clone()])], 0), + [origin].into_iter().collect() ); push.process_prune_msg( &self_id, @@ -1132,8 +983,8 @@ mod tests { let expected = HashMap::new(); let origin = new_msg.pubkey(); assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![new_msg], 1), - [Ok(origin)], + push.process_push_message(&crds, vec![(Pubkey::default(), vec![new_msg])], 1), + [origin].into_iter().collect() ); assert_eq!(push.new_push_messages(&crds, 0).0, expected); } @@ -1148,8 +999,8 @@ mod tests { let label = value.label(); // push a new message assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value.clone()], 0), - [Ok(label.pubkey())] + push.process_push_message(&crds, vec![(Pubkey::default(), vec![value.clone()])], 0), + [label.pubkey()].into_iter().collect() ); assert_eq!( crds.write().unwrap().get::<&CrdsValue>(&label), @@ -1157,18 +1008,13 @@ mod tests { ); // push it again - assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value.clone()], 0), - [Err(CrdsGossipError::PushMessageOldVersion)], - ); 
- - // purge the old pushed - push.purge_old_received_cache(1); + assert!(push + .process_push_message(&crds, vec![(Pubkey::default(), vec![value.clone()])], 0) + .is_empty()); // push it again - assert_eq!( - push.process_push_message(&crds, &Pubkey::default(), vec![value], 0), - [Err(CrdsGossipError::PushMessageOldVersion)], - ); + assert!(push + .process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0) + .is_empty()); } } diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index 7b37240f896a8e..4172bfeef2c5fd 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -19,6 +19,7 @@ pub mod epoch_slots; pub mod gossip_error; pub mod gossip_service; pub mod ping_pong; +mod received_cache; pub mod weighted_shuffle; #[macro_use] diff --git a/gossip/src/received_cache.rs b/gossip/src/received_cache.rs new file mode 100644 index 00000000000000..a77a758e2ae722 --- /dev/null +++ b/gossip/src/received_cache.rs @@ -0,0 +1,191 @@ +use { + itertools::Itertools, + lru::LruCache, + solana_sdk::pubkey::Pubkey, + std::{cmp::Reverse, collections::HashMap}, +}; + +// For each origin, tracks which nodes have sent messages from that origin and +// their respective score in terms of timeliness of delivered messages. +pub(crate) struct ReceivedCache(LruCache); + +#[derive(Clone, Default)] +struct ReceivedCacheEntry { + nodes: HashMap, + num_upserts: usize, +} + +impl ReceivedCache { + // Minimum number of upserts before a cache entry can be pruned. + const MIN_NUM_UPSERTS: usize = 20; + + pub(crate) fn new(capacity: usize) -> Self { + Self(LruCache::new(capacity)) + } + + pub(crate) fn record(&mut self, origin: Pubkey, node: Pubkey, num_dups: usize) { + match self.0.get_mut(&origin) { + Some(entry) => entry.record(node, num_dups), + None => { + let mut entry = ReceivedCacheEntry::default(); + entry.record(node, num_dups); + self.0.put(origin, entry); + } + } + } + + pub(crate) fn prune( + &mut self, + pubkey: &Pubkey, // This node. + origin: Pubkey, // CRDS value owner. + stake_threshold: f64, + min_ingress_nodes: usize, + stakes: &HashMap, + ) -> impl Iterator { + match self.0.peek_mut(&origin) { + None => None, + Some(entry) if entry.num_upserts < Self::MIN_NUM_UPSERTS => None, + Some(entry) => Some( + std::mem::take(entry) + .prune(pubkey, &origin, stake_threshold, min_ingress_nodes, stakes) + .filter(move |node| node != &origin), + ), + } + .into_iter() + .flatten() + } + + // Only for tests and simulations. + pub(crate) fn mock_clone(&self) -> Self { + let mut cache = LruCache::new(self.0.cap()); + for (&origin, entry) in self.0.iter().rev() { + cache.put(origin, entry.clone()); + } + Self(cache) + } +} + +impl ReceivedCacheEntry { + // Limit how big the cache can get if it is spammed + // with old messages with random pubkeys. + const CAPACITY: usize = 50; + // Threshold for the number of duplicates before which a message + // is counted as timely towards node's score. + const NUM_DUPS_THRESHOLD: usize = 2; + + fn record(&mut self, node: Pubkey, num_dups: usize) { + if num_dups == 0 { + self.num_upserts = self.num_upserts.saturating_add(1); + } + // If the message has been timely enough increment node's score. + if num_dups < Self::NUM_DUPS_THRESHOLD { + let score = self.nodes.entry(node).or_default(); + *score = score.saturating_add(1); + } else if self.nodes.len() < Self::CAPACITY { + // Ensure that node is inserted into the cache for later pruning. + let _ = self.nodes.entry(node).or_default(); + } + } + + fn prune( + self, + pubkey: &Pubkey, // This node. 
+ origin: &Pubkey, // CRDS value owner. + stake_threshold: f64, + min_ingress_nodes: usize, + stakes: &HashMap, + ) -> impl Iterator { + debug_assert!((0.0..=1.0).contains(&stake_threshold)); + debug_assert!(self.num_upserts >= ReceivedCache::MIN_NUM_UPSERTS); + // Enforce a minimum aggregate ingress stake; see: + // https://github.com/solana-labs/solana/issues/3214 + let min_ingress_stake = { + let stake = stakes.get(pubkey).min(stakes.get(origin)); + (stake.copied().unwrap_or_default() as f64 * stake_threshold) as u64 + }; + self.nodes + .into_iter() + .map(|(node, score)| { + let stake = stakes.get(&node).copied().unwrap_or_default(); + (node, score, stake) + }) + .sorted_unstable_by_key(|&(_, score, stake)| Reverse((score, stake))) + .scan(0u64, |acc, (node, _score, stake)| { + let old = *acc; + *acc = acc.saturating_add(stake); + Some((node, old)) + }) + .skip(min_ingress_nodes) + .skip_while(move |&(_, stake)| stake < min_ingress_stake) + .map(|(node, _stake)| node) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + std::{collections::HashSet, iter::repeat_with}, + }; + + #[test] + fn test_received_cache() { + let mut cache = ReceivedCache::new(/*capacity:*/ 100); + let pubkey = Pubkey::new_unique(); + let origin = Pubkey::new_unique(); + let records = vec![ + vec![3, 1, 7, 5], + vec![7, 6, 5, 2], + vec![2, 0, 0, 2], + vec![3, 5, 0, 6], + vec![6, 2, 6, 2], + ]; + let nodes: Vec<_> = repeat_with(Pubkey::new_unique) + .take(records.len()) + .collect(); + for (node, records) in nodes.iter().zip(records) { + for (num_dups, k) in records.into_iter().enumerate() { + for _ in 0..k { + cache.record(origin, *node, num_dups); + } + } + } + assert_eq!(cache.0.get(&origin).unwrap().num_upserts, 21); + let scores: HashMap = [ + (nodes[0], 4), + (nodes[1], 13), + (nodes[2], 2), + (nodes[3], 8), + (nodes[4], 8), + ] + .into_iter() + .collect(); + assert_eq!(cache.0.get(&origin).unwrap().nodes, scores); + let stakes = [ + (nodes[0], 6), + (nodes[1], 1), + (nodes[2], 5), + (nodes[3], 3), + (nodes[4], 7), + (pubkey, 9), + (origin, 9), + ] + .into_iter() + .collect(); + let prunes: HashSet = [nodes[0], nodes[2], nodes[3]].into_iter().collect(); + assert_eq!( + cache + .mock_clone() + .prune(&pubkey, origin, 0.5, 2, &stakes) + .collect::>(), + prunes + ); + let prunes: HashSet = [nodes[0], nodes[2]].into_iter().collect(); + assert_eq!( + cache + .prune(&pubkey, origin, 1.0, 0, &stakes) + .collect::>(), + prunes + ); + } +} diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index a0543903e01959..301d67d85d8344 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -290,8 +290,10 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver }; m.wallclock = now; node.gossip.process_push_message( - &Pubkey::default(), - vec![CrdsValue::new_unsigned(CrdsData::ContactInfo(m))], + vec![( + Pubkey::default(), + vec![CrdsValue::new_unsigned(CrdsData::ContactInfo(m))], + )], now, ); }); @@ -364,7 +366,7 @@ fn network_run_push( .get(&to) .unwrap() .gossip - .process_push_message(&from, msgs.clone(), now) + .process_push_message(vec![(from, msgs.clone())], now) .into_iter() .collect(); let prunes_map = network @@ -655,7 +657,11 @@ fn test_star_network_push_ring_200() { let thread_pool = build_gossip_thread_pool(); network_simulator(&thread_pool, &mut network, 0.9); } + +// With the new pruning logic, this test is no longer valid and can be deleted. +// Ignoring it for now until the pruning code is stable. 
#[test] +#[ignore] #[serial] fn test_connected_staked_network() { solana_logger::setup(); diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index b2243d9d0ec659..c9101174ef8467 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -4076,25 +4076,7 @@ fn main() { .rooted_slot_iterator(start_root) .expect("Failed to get rooted slot"); - let mut slot_hash = Vec::new(); - for (i, slot) in iter.into_iter().enumerate() { - if i > num_roots { - break; - } - if slot <= max_height as u64 { - let blockhash = blockstore - .get_slot_entries(slot, 0) - .unwrap() - .last() - .unwrap() - .hash; - slot_hash.push((slot, blockhash)); - } else { - break; - } - } - - let mut output_file: Box = + let mut output: Box = if let Some(path) = arg_matches.value_of("slot_list") { match File::create(path) { Ok(file) => Box::new(file), @@ -4104,16 +4086,20 @@ fn main() { Box::new(stdout()) }; - slot_hash + iter.take(num_roots) + .take_while(|slot| *slot <= max_height as u64) + .collect::>() .into_iter() .rev() - .enumerate() - .for_each(|(i, (slot, hash))| { - if i < num_roots { - output_file - .write_all(format!("{slot:?}: {hash:?}\n").as_bytes()) - .expect("failed to write"); - } + .for_each(|slot| { + let blockhash = blockstore + .get_slot_entries(slot, 0) + .unwrap() + .last() + .unwrap() + .hash; + + writeln!(output, "{}: {:?}", slot, blockhash).expect("failed to write"); }); } ("latest-optimistic-slots", Some(arg_matches)) => { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 9c88a0d5c7c4e5..00fbf87c4211bc 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3353,11 +3353,11 @@ fn update_slot_meta( reference_tick: u8, received_data_shreds: &ShredIndex, ) -> Vec<(u32, u32)> { - let maybe_first_insert = slot_meta.received == 0; + let first_insert = slot_meta.received == 0; // Index is zero-indexed, while the "received" height starts from 1, // so received = index + 1 for the same shred. slot_meta.received = cmp::max(u64::from(index) + 1, slot_meta.received); - if maybe_first_insert && slot_meta.received > 0 { + if first_insert { // predict the timestamp of what would have been the first shred in this slot let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND; slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed; diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index f36ac9b64ef8d6..2fa114528075cf 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -88,7 +88,8 @@ mod stats; mod traits; pub type Nonce = u32; -pub const SIZE_OF_NONCE: usize = 4; +const_assert_eq!(SIZE_OF_NONCE, 4); +pub const SIZE_OF_NONCE: usize = std::mem::size_of::(); /// The following constants are computed by hand, and hardcoded. /// `test_shred_constants` ensures that the values are correct. 
diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 90b686f5c8f74b..04a626c8199bbf 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -16,7 +16,7 @@ use { shredder::{self, ReedSolomonCache}, }, assert_matches::debug_assert_matches, - itertools::Itertools, + itertools::{Either, Itertools}, rayon::{prelude::*, ThreadPool}, reed_solomon_erasure::Error::{InvalidIndex, TooFewParityShards, TooFewShards}, solana_perf::packet::deserialize_from_with_limit, @@ -208,11 +208,14 @@ impl ShredData { return Err(Error::InvalidShardSize(shard_size)); } let data_header = deserialize_from_with_limit(&mut cursor)?; - Ok(Self { + let shred = Self { common_header, data_header, payload: shard, - }) + }; + // Merkle proof is not erasure coded and is not yet available here. + shred.sanitize(/*verify_merkle_proof:*/ false)?; + Ok(shred) } fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error> { @@ -238,9 +241,7 @@ impl ShredData { if !matches!(shred_variant, ShredVariant::MerkleData(_)) { return Err(Error::InvalidShredVariant); } - if !verify_merkle_proof { - debug_assert_matches!(self.verify_merkle_proof(), Ok(true)); - } else if !self.verify_merkle_proof()? { + if verify_merkle_proof && !self.verify_merkle_proof()? { return Err(Error::InvalidMerkleProof); } shred_data::sanitize(self) @@ -342,11 +343,14 @@ impl ShredCode { let mut cursor = Cursor::new(&mut shard[..]); bincode::serialize_into(&mut cursor, &common_header)?; bincode::serialize_into(&mut cursor, &coding_header)?; - Ok(Self { + let shred = Self { common_header, coding_header, payload: shard, - }) + }; + // Merkle proof is not erasure coded and is not yet available here. + shred.sanitize(/*verify_merkle_proof:*/ false)?; + Ok(shred) } fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error> { @@ -372,9 +376,7 @@ impl ShredCode { if !matches!(shred_variant, ShredVariant::MerkleCode(_)) { return Err(Error::InvalidShredVariant); } - if !verify_merkle_proof { - debug_assert_matches!(self.verify_merkle_proof(), Ok(true)); - } else if !self.verify_merkle_proof()? { + if verify_merkle_proof && !self.verify_merkle_proof()? { return Err(Error::InvalidMerkleProof); } shred_code::sanitize(self) @@ -771,6 +773,8 @@ pub(super) fn recover( return Err(Error::InvalidMerkleProof); } shred.set_merkle_branch(&merkle_branch)?; + // Already sanitized in Shred{Code,Data}::from_recovered_shard. + debug_assert_matches!(shred.sanitize(/*verify_merkle_proof:*/ true), Ok(())); // Assert that shred payload is fully populated. debug_assert_eq!(shred, { let shred = shred.payload().clone(); @@ -778,15 +782,12 @@ pub(super) fn recover( }); } } - shreds + Ok(shreds .into_iter() .zip(mask) .filter(|(_, mask)| !mask) - .map(|(shred, _)| { - shred.sanitize(/*verify_merkle_proof:*/ false)?; - Ok(shred) - }) - .collect() + .map(|(shred, _)| shred) + .collect()) } // Maps number of (code + data) shreds to MerkleBranch.proof.len(). @@ -874,13 +875,16 @@ pub(super) fn make_shreds_from_data( } data = rest; } - if !data.is_empty() { + // If shreds.is_empty() then the data argument was empty. In that case we + // want to generate one data shred with empty data. + if !data.is_empty() || shreds.is_empty() { // Find the Merkle proof_size and data_buffer_size // which can embed the remaining data. 
let (proof_size, data_buffer_size) = (1u8..32) .find_map(|proof_size| { let data_buffer_size = ShredData::capacity(proof_size).ok()?; let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size; + let num_data_shreds = num_data_shreds.max(1); let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds); (proof_size == get_proof_size(erasure_batch_size)) .then_some((proof_size, data_buffer_size)) @@ -888,7 +892,13 @@ pub(super) fn make_shreds_from_data( .ok_or(Error::UnknownProofSize)?; common_header.shred_variant = ShredVariant::MerkleData(proof_size); common_header.fec_set_index = common_header.index; - for shred in data.chunks(data_buffer_size) { + let chunks = if data.is_empty() { + // Generate one data shred with empty data. + Either::Left(std::iter::once(data)) + } else { + Either::Right(data.chunks(data_buffer_size)) + }; + for shred in chunks { let shred = new_shred_data(common_header, data_header, shred); shreds.push(shred); common_header.index += 1; @@ -1273,12 +1283,8 @@ mod test { assert_eq!(shreds.iter().map(Shred::signature).dedup().count(), 1); for size in num_data_shreds..num_shreds { let mut shreds = shreds.clone(); - let mut removed_shreds = Vec::new(); - while shreds.len() > size { - let index = rng.gen_range(0, shreds.len()); - removed_shreds.push(shreds.swap_remove(index)); - } shreds.shuffle(rng); + let mut removed_shreds = shreds.split_off(size); // Should at least contain one coding shred. if shreds.iter().all(|shred| { matches!( @@ -1336,9 +1342,9 @@ mod test { #[test_case(46800)] fn test_make_shreds_from_data(data_size: usize) { let mut rng = rand::thread_rng(); - let data_size = data_size.saturating_sub(16).max(1); + let data_size = data_size.saturating_sub(16); let reed_solomon_cache = ReedSolomonCache::default(); - for data_size in (data_size..data_size + 32).step_by(3) { + for data_size in data_size..data_size + 32 { run_make_shreds_from_data(&mut rng, data_size, &reed_solomon_cache); } } @@ -1391,7 +1397,7 @@ mod test { Shred::ShredData(shred) => Some(shred), }) .collect(); - // Assert that the input data can be recovered from data sherds. + // Assert that the input data can be recovered from data shreds. 
assert_eq!( data, data_shreds diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 1064c7288b6fef..639988505806fb 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -295,7 +295,7 @@ pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [PacketBatch]) { .for_each(|p| sign_shred_cpu(keypair, p)); }); }); - inc_new_counter_debug!("ed25519_shred_verify_cpu", packet_count); + inc_new_counter_debug!("ed25519_shred_sign_cpu", packet_count); } pub fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache) -> PinnedVec { diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index 7ea16eccb5a1dd..06b4c8d299953f 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -132,7 +132,7 @@ fn test_multi_fec_block_different_size_coding() { let recovered_data = Shredder::try_recovery(all_shreds, &reed_solomon_cache).unwrap(); // Necessary in order to ensure the last shred in the slot // is part of the recovered set, and that the below `index` - // calcuation in the loop is correct + // calculation in the loop is correct assert!(fec_data_shreds.len() % 2 == 0); for (i, recovered_shred) in recovered_data.into_iter().enumerate() { let index = first_data_index + (i * 2) + 1; diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 044b94dcf4e0ee..a2b32fec3620c8 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -27,6 +27,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { new_hard_forks: config.new_hard_forks.clone(), known_validators: config.known_validators.clone(), repair_validators: config.repair_validators.clone(), + repair_whitelist: config.repair_whitelist.clone(), gossip_validators: config.gossip_validators.clone(), halt_on_known_validators_accounts_hash_mismatch: config .halt_on_known_validators_accounts_hash_mismatch, diff --git a/local-cluster/tests/local_cluster_flakey.rs b/local-cluster/tests/local_cluster_flakey.rs index 88a87b27a49dc2..4f3a98463a8fed 100644 --- a/local-cluster/tests/local_cluster_flakey.rs +++ b/local-cluster/tests/local_cluster_flakey.rs @@ -38,6 +38,27 @@ fn test_optimistic_confirmation_violation_without_tower() { do_test_optimistic_confirmation_violation_with_or_without_tower(false); } +enum RunResult { + Success, + FailNoViolation, + FailViolation, +} + +fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: bool) { + let mut retry = 10; + while retry > 0 { + match do_test_optimistic_confirmation_violation_with_or_without_tower_inner(with_tower) { + RunResult::Success => { + return; + } + _ => { + retry -= 1; + } + } + } + panic!("optimistic confirmation violation with or without tower failed after 10 trials"); +} + // A bit convoluted test case; but this roughly follows this test theoretical scenario: // // Step 1: You have validator A + B with 31% and 36% of the stake. Run only validator B: @@ -78,7 +99,9 @@ fn test_optimistic_confirmation_violation_without_tower() { // With the persisted tower: // `A` should not be able to generate a switching proof.
// -fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: bool) { +fn do_test_optimistic_confirmation_violation_with_or_without_tower_inner( + with_tower: bool, +) -> RunResult { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 4 nodes @@ -346,13 +369,17 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b let expects_optimistic_confirmation_violation = !with_tower; if bad_vote_detected != expects_optimistic_confirmation_violation { if bad_vote_detected { - panic!("No violation expected because of persisted tower!"); + error!("No violation expected because of persisted tower!"); + return RunResult::FailNoViolation; } else { - panic!("Violation expected because of removed persisted tower!"); + error!("Violation expected because of removed persisted tower!"); + return RunResult::FailViolation; } } else if bad_vote_detected { info!("THIS TEST expected violations. And indeed, there was some, because of removed persisted tower."); } else { info!("THIS TEST expected no violation. And indeed, there was none, thanks to persisted tower."); } + + RunResult::Success } diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs index fcd5ab1a3b7f93..7d4186ccb6a810 100644 --- a/net-utils/src/ip_echo_server.rs +++ b/net-utils/src/ip_echo_server.rs @@ -130,7 +130,7 @@ async fn process_connection( .await??; debug!("Connection established to tcp/{}", *tcp_port); - let _ = tcp_stream.shutdown(); + tcp_stream.shutdown().await?; } } let response = IpEchoServerResponse { diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 6e06454858eb0b..f588d57c4f8ccc 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -835,7 +835,6 @@ impl PohRecorder { let poh = Arc::new(Mutex::new(Poh::new_with_slot_info( last_entry_hash, poh_config.hashes_per_tick, - ticks_per_slot, tick_number, ))); diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index a1eccd5d5a9e5a..47028f5ecdc0ce 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -153,6 +153,7 @@ fn create_executor_from_bytes( create_executor_metrics.load_elf_us = load_elf_time.as_us(); let executable = executable?; let mut verify_code_time = Measure::start("verify_code_time"); + #[allow(unused_mut)] let mut verified_executable = VerifiedExecutable::::from_executable(executable) .map_err(|err| { diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs index 316358b0cf70c6..271429326a0b63 100644 --- a/quic-client/src/quic_client.rs +++ b/quic-client/src/quic_client.rs @@ -174,7 +174,7 @@ impl TpuConnection for QuicTpuConnection { let _lock = ASYNC_TASK_SEMAPHORE.acquire(); let inner = self.inner.clone(); - let _ = RUNTIME + _ = RUNTIME .spawn(async move { send_wire_transaction_async(inner, wire_transaction).await }); Ok(()) } @@ -182,8 +182,7 @@ impl TpuConnection for QuicTpuConnection { fn send_wire_transaction_batch_async(&self, buffers: Vec>) -> TransportResult<()> { let _lock = ASYNC_TASK_SEMAPHORE.acquire(); let inner = self.inner.clone(); - let _ = - RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await }); + _ = RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await }); Ok(()) } } diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 21572e6dabe4d4..d32f799a968c2e 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -157,6 +157,14 @@ before execting it in the virtual machine.", 
.takes_value(true) .possible_values(["json", "json-compact"]), ) + .arg( + Arg::new("trace") + .help("Output instruction trace") + .short('t') + .long("trace") + .takes_value(true) + .value_name("FILE"), + ) .get_matches(); let loader_id = bpf_loader::id(); @@ -257,6 +265,7 @@ before execting it in the virtual machine.", } .unwrap(); + #[allow(unused_mut)] let mut verified_executable = VerifiedExecutable::::from_executable(executable) .map_err(|err| format!("Executable verifier failed: {err:?}")) @@ -296,6 +305,23 @@ before execting it in the virtual machine.", } let (instruction_count, result) = vm.execute_program(matches.value_of("use").unwrap() != "jit"); let duration = Instant::now() - start_time; + if matches.occurrences_of("trace") > 0 { + let trace_log = vm.env.context_object_pointer.trace_log.as_slice(); + if matches.value_of("trace").unwrap() == "stdout" { + analysis + .analyze() + .disassemble_trace_log(&mut std::io::stdout(), trace_log) + .unwrap(); + } else { + analysis + .analyze() + .disassemble_trace_log( + &mut File::create(matches.value_of("trace").unwrap()).unwrap(), + trace_log, + ) + .unwrap(); + } + } drop(vm); let output = Output { diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 9df2b0417df6ce..dfd57fe4fce0b5 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -2225,10 +2225,14 @@ fn optimize_filters(filters: &mut [RpcFilterType]) { match &compare.bytes { #[allow(deprecated)] Binary(bytes) | Base58(bytes) => { - compare.bytes = Bytes(bs58::decode(bytes).into_vec().unwrap()); + if let Ok(bytes) = bs58::decode(bytes).into_vec() { + compare.bytes = Bytes(bytes); + } } Base64(bytes) => { - compare.bytes = Bytes(base64::decode(bytes).unwrap()); + if let Ok(bytes) = base64::decode(bytes) { + compare.bytes = Bytes(bytes); + } } _ => {} } diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 18438c886ba35f..79cab842122b79 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -13,6 +13,7 @@ use { }, }, crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender}, + itertools::Either, rayon::prelude::*, serde::Serialize, solana_account_decoder::{parse_token::is_known_spl_token_id, UiAccount, UiAccountEncoding}, @@ -45,7 +46,7 @@ use { cell::RefCell, collections::{HashMap, VecDeque}, io::Cursor, - iter, str, + str, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, RwLock, Weak, @@ -129,7 +130,7 @@ impl std::fmt::Debug for NotificationEntry { } #[allow(clippy::type_complexity)] -fn check_commitment_and_notify( +fn check_commitment_and_notify( params: &P, subscription: &SubscriptionInfo, bank_forks: &Arc>, @@ -142,8 +143,9 @@ fn check_commitment_and_notify( where S: Clone + Serialize, B: Fn(&Bank, &P) -> X, - F: Fn(X, &P, Slot, Arc) -> (Box>, Slot), + F: Fn(X, &P, Slot, Arc) -> (I, Slot), X: Clone + Default, + I: IntoIterator, { let mut notified = false; let bank = bank_forks.read().unwrap().get(slot); @@ -370,36 +372,23 @@ fn filter_account_result( params: &AccountSubscriptionParams, last_notified_slot: Slot, bank: Arc, -) -> (Box>, Slot) { +) -> (Option, Slot) { // If the account is not found, `last_modified_slot` will default to zero and // we will notify clients that the account no longer exists if we haven't already let (account, last_modified_slot) = result.unwrap_or_default(); // If last_modified_slot < last_notified_slot this means that we last notified for a fork // and should notify that the account state has been reverted. 
- let results: Box> = if last_modified_slot != last_notified_slot { + let account = (last_modified_slot != last_notified_slot).then(|| { if is_known_spl_token_id(account.owner()) && params.encoding == UiAccountEncoding::JsonParsed { - Box::new(iter::once(get_parsed_token_account( - bank, - ¶ms.pubkey, - account, - ))) + get_parsed_token_account(bank, ¶ms.pubkey, account) } else { - Box::new(iter::once(UiAccount::encode( - ¶ms.pubkey, - &account, - params.encoding, - None, - None, - ))) + UiAccount::encode(¶ms.pubkey, &account, params.encoding, None, None) } - } else { - Box::new(iter::empty()) - }; - - (results, last_modified_slot) + }); + (account, last_modified_slot) } fn filter_signature_result( @@ -407,11 +396,11 @@ fn filter_signature_result( _params: &SignatureSubscriptionParams, last_notified_slot: Slot, _bank: Arc, -) -> (Box>, Slot) { +) -> (Option, Slot) { ( - Box::new(result.into_iter().map(|result| { + result.map(|result| { RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: result.err() }) - })), + }), last_notified_slot, ) } @@ -421,7 +410,7 @@ fn filter_program_results( params: &ProgramSubscriptionParams, last_notified_slot: Slot, bank: Arc, -) -> (Box>, Slot) { +) -> (impl Iterator, Slot) { let accounts_is_empty = accounts.is_empty(); let encoding = params.encoding; let filters = params.filters.clone(); @@ -430,20 +419,19 @@ fn filter_program_results( .iter() .all(|filter_type| filter_type.allows(account)) }); - let accounts: Box> = - if is_known_spl_token_id(¶ms.pubkey) - && params.encoding == UiAccountEncoding::JsonParsed - && !accounts_is_empty - { - Box::new(get_parsed_token_accounts(bank, keyed_accounts)) - } else { - Box::new( - keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount { - pubkey: pubkey.to_string(), - account: UiAccount::encode(&pubkey, &account, encoding, None, None), - }), - ) - }; + let accounts = if is_known_spl_token_id(¶ms.pubkey) + && params.encoding == UiAccountEncoding::JsonParsed + && !accounts_is_empty + { + let accounts = get_parsed_token_accounts(bank, keyed_accounts); + Either::Left(accounts) + } else { + let accounts = keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount { + pubkey: pubkey.to_string(), + account: UiAccount::encode(&pubkey, &account, encoding, None, None), + }); + Either::Right(accounts) + }; (accounts, last_notified_slot) } @@ -452,18 +440,13 @@ fn filter_logs_results( _params: &LogsSubscriptionParams, last_notified_slot: Slot, _bank: Arc, -) -> (Box>, Slot) { - match logs { - None => (Box::new(iter::empty()), last_notified_slot), - Some(logs) => ( - Box::new(logs.into_iter().map(|log| RpcLogsResponse { - signature: log.signature.to_string(), - err: log.result.err(), - logs: log.log_messages, - })), - last_notified_slot, - ), - } +) -> (impl Iterator, Slot) { + let responses = logs.into_iter().flatten().map(|log| RpcLogsResponse { + signature: log.signature.to_string(), + err: log.result.err(), + logs: log.log_messages, + }); + (responses, last_notified_slot) } fn initial_last_notified_slot( diff --git a/runtime/benches/append_vec.rs b/runtime/benches/append_vec.rs index 9ff8e7bd647cbc..8f66e8585c1bfe 100644 --- a/runtime/benches/append_vec.rs +++ b/runtime/benches/append_vec.rs @@ -40,7 +40,7 @@ fn append_account( StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &accounts, vec![&hash], - vec![storage_meta.write_version], + vec![storage_meta.write_version_obsolete], ); let res = vec.append_accounts(&storable_accounts, 0); res.and_then(|res| 
res.first().cloned()) diff --git a/runtime/src/account_storage.rs b/runtime/src/account_storage.rs new file mode 100644 index 00000000000000..7d28d020b672f1 --- /dev/null +++ b/runtime/src/account_storage.rs @@ -0,0 +1,106 @@ +//! Manage the map of slot -> append vecs + +use { + crate::accounts_db::{AccountStorageEntry, AppendVecId, SlotStores, SnapshotStorage}, + dashmap::DashMap, + solana_sdk::clock::Slot, + std::{ + collections::{hash_map::RandomState, HashMap}, + sync::{Arc, RwLock}, + }, +}; + +pub type AccountStorageMap = DashMap; + +#[derive(Clone, Default, Debug)] +pub struct AccountStorage { + map: AccountStorageMap, +} + +impl AccountStorage { + pub(crate) fn get_account_storage_entry( + &self, + slot: Slot, + store_id: AppendVecId, + ) -> Option> { + self.get_slot_stores(slot) + .and_then(|storage_map| storage_map.read().unwrap().get(&store_id).cloned()) + } + + pub fn get_slot_stores(&self, slot: Slot) -> Option { + self.map.get(&slot).map(|result| result.value().clone()) + } + + pub(crate) fn get_slot_storage_entries(&self, slot: Slot) -> Option { + self.get_slot_stores(slot) + .map(|res| res.read().unwrap().values().cloned().collect()) + } + + pub(crate) fn slot_store_count(&self, slot: Slot, store_id: AppendVecId) -> Option { + self.get_account_storage_entry(slot, store_id) + .map(|store| store.count()) + } + + pub(crate) fn all_slots(&self) -> Vec { + self.map.iter().map(|iter_item| *iter_item.key()).collect() + } + + pub(crate) fn extend(&mut self, source: AccountStorageMap) { + self.map.extend(source.into_iter()) + } + + pub(crate) fn remove(&self, slot: &Slot) -> Option<(Slot, SlotStores)> { + self.map.remove(slot) + } + + pub(crate) fn iter(&self) -> dashmap::iter::Iter { + self.map.iter() + } + pub(crate) fn get( + &self, + slot: &Slot, + ) -> Option> { + self.map.get(slot) + } + pub(crate) fn insert(&self, slot: Slot, store: Arc) { + let slot_storages: SlotStores = self.get_slot_stores(slot).unwrap_or_else(|| + // DashMap entry.or_insert() returns a RefMut, essentially a write lock, + // which is dropped after this block ends, minimizing time held by the lock. + // However, we still want to persist the reference to the `SlotStores` behind + // the lock, hence we clone it out, (`SlotStores` is an Arc so is cheap to clone). 
+ self + .map + .entry(slot) + .or_insert(Arc::new(RwLock::new(HashMap::new()))) + .clone()); + + assert!(slot_storages + .write() + .unwrap() + .insert(store.append_vec_id(), store) + .is_none()); + } + #[cfg(test)] + pub(crate) fn insert_empty_at_slot(&self, slot: Slot) { + self.map + .entry(slot) + .or_insert(Arc::new(RwLock::new(HashMap::new()))); + } + #[cfg(test)] + pub(crate) fn len(&self) -> usize { + self.map.len() + } +} + +#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)] +pub enum AccountStorageStatus { + Available = 0, + Full = 1, + Candidate = 2, +} + +impl Default for AccountStorageStatus { + fn default() -> Self { + Self::Available + } +} diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 45b611490aa823..66b13d61ab047b 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -21,6 +21,7 @@ use { crate::{ account_info::{AccountInfo, Offset, StorageLocation, StoredSize}, + account_storage::{AccountStorage, AccountStorageStatus}, accounts_background_service::{DroppedSlotsSender, SendDroppedBankCallback}, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, accounts_hash::{ @@ -744,7 +745,9 @@ impl<'a> LoadedAccount<'a> { pub fn write_version(&self) -> StoredMetaWriteVersion { match self { - LoadedAccount::Stored(stored_account_meta) => stored_account_meta.meta.write_version, + LoadedAccount::Stored(stored_account_meta) => { + stored_account_meta.meta.write_version_obsolete + } LoadedAccount::Cached(_) => CACHE_VIRTUAL_WRITE_VERSION, } } @@ -837,55 +840,6 @@ impl<'a> ReadableAccount for LoadedAccount<'a> { } } -pub type AccountStorageMap = DashMap; - -#[derive(Clone, Default, Debug)] -pub struct AccountStorage { - pub map: AccountStorageMap, -} - -impl AccountStorage { - fn get_account_storage_entry( - &self, - slot: Slot, - store_id: AppendVecId, - ) -> Option> { - self.get_slot_stores(slot) - .and_then(|storage_map| storage_map.read().unwrap().get(&store_id).cloned()) - } - - pub fn get_slot_stores(&self, slot: Slot) -> Option { - self.map.get(&slot).map(|result| result.value().clone()) - } - - fn get_slot_storage_entries(&self, slot: Slot) -> Option>> { - self.get_slot_stores(slot) - .map(|res| res.read().unwrap().values().cloned().collect()) - } - - fn slot_store_count(&self, slot: Slot, store_id: AppendVecId) -> Option { - self.get_account_storage_entry(slot, store_id) - .map(|store| store.count()) - } - - fn all_slots(&self) -> Vec { - self.map.iter().map(|iter_item| *iter_item.key()).collect() - } -} - -#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)] -pub enum AccountStorageStatus { - Available = 0, - Full = 1, - Candidate = 2, -} - -impl Default for AccountStorageStatus { - fn default() -> Self { - Self::Available - } -} - #[derive(Debug)] pub enum BankHashVerificationError { MismatchedAccountHash, @@ -1191,7 +1145,7 @@ impl RecycleStores { self.entries.iter() } - fn add_entries(&mut self, new_entries: Vec>) { + fn add_entries(&mut self, new_entries: SnapshotStorage) { self.total_bytes += new_entries.iter().map(|e| e.total_bytes()).sum::(); let now = Instant::now(); for new_entry in new_entries { @@ -1199,7 +1153,7 @@ impl RecycleStores { } } - fn expire_old_entries(&mut self) -> Vec> { + fn expire_old_entries(&mut self) -> SnapshotStorage { let mut expired = vec![]; let now = Instant::now(); let mut expired_bytes = 0; @@ -1271,7 +1225,7 @@ pub struct AccountsDb { /// true iff we want to skip the initial hash calculation on 
startup pub skip_initial_hash_calc: bool, - pub storage: AccountStorage, + pub(crate) storage: AccountStorage, pub accounts_cache: AccountsCache, @@ -1288,9 +1242,6 @@ pub struct AccountsDb { /// Set of shrinkable stores organized by map of slot to append_vec_id pub shrink_candidate_slots: Mutex, - /// Legacy shrink slots to support non-cached code-path. - pub shrink_candidate_slots_v1: Mutex>, - pub(crate) write_version: AtomicU64, /// Set of storage paths to pick from @@ -2314,7 +2265,6 @@ impl AccountsDb { recycle_stores: RwLock::new(RecycleStores::default()), uncleaned_pubkeys: DashMap::new(), next_id: AtomicAppendVecId::new(0), - shrink_candidate_slots_v1: Mutex::new(Vec::new()), shrink_candidate_slots: Mutex::new(HashMap::new()), write_cache_limit_bytes: None, write_version: AtomicU64::new(0), @@ -3721,8 +3671,8 @@ impl AccountsDb { match stored_accounts.entry(*new_entry.account.pubkey()) { Entry::Occupied(mut occupied_entry) => { assert!( - new_entry.account.meta.write_version - > occupied_entry.get().account.meta.write_version + new_entry.account.meta.write_version_obsolete + > occupied_entry.get().account.meta.write_version_obsolete ); occupied_entry.insert(new_entry); } @@ -3925,7 +3875,7 @@ impl AccountsDb { for alive_account in &shrink_collect.alive_accounts { accounts.push(&alive_account.account); hashes.push(alive_account.account.hash); - write_versions.push(alive_account.account.meta.write_version); + write_versions.push(alive_account.account.meta.write_version_obsolete); } find_alive_elapsed.stop(); @@ -4029,7 +3979,7 @@ impl AccountsDb { slot: Slot, should_retain: impl Fn(&AccountStorageEntry) -> bool, add_dirty_stores: bool, - ) -> (usize, Vec>) { + ) -> (usize, SnapshotStorage) { let mut dead_storages = Vec::default(); let remaining_stores = if let Some(slot_stores) = self.storage.get_slot_stores(slot) { let mut list = slot_stores.write().unwrap(); @@ -4054,7 +4004,7 @@ impl AccountsDb { pub(crate) fn drop_or_recycle_stores( &self, - dead_storages: Vec>, + dead_storages: SnapshotStorage, stats: &ShrinkStats, ) { let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time"); @@ -4087,23 +4037,17 @@ impl AccountsDb { slot: Slot, aligned_total: u64, ) -> Arc { - let shrunken_store = if let Some(new_store) = - self.try_recycle_and_insert_store(slot, aligned_total, aligned_total + 1024) - { - new_store - } else { - let maybe_shrink_paths = self.shrink_paths.read().unwrap(); - if let Some(ref shrink_paths) = *maybe_shrink_paths { - self.create_and_insert_store_with_paths( - slot, - aligned_total, - "shrink-w-path", - shrink_paths, - ) - } else { - self.create_and_insert_store(slot, aligned_total, "shrink") - } - }; + let shrunken_store = self + .try_recycle_store(slot, aligned_total, aligned_total + 1024) + .unwrap_or_else(|| { + let maybe_shrink_paths = self.shrink_paths.read().unwrap(); + let (shrink_paths, from) = maybe_shrink_paths + .as_ref() + .map(|paths| (paths, "shrink-w-path")) + .unwrap_or_else(|| (&self.paths, "shrink")); + self.create_store(slot, aligned_total, from, shrink_paths) + }); + self.insert_store(slot, Arc::clone(&shrunken_store)); shrunken_store } @@ -4113,8 +4057,7 @@ impl AccountsDb { debug!("shrink_slot_forced: slot: {}", slot); if let Some(stores_lock) = self.storage.get_slot_stores(slot) { - let stores: Vec> = - stores_lock.read().unwrap().values().cloned().collect(); + let stores: SnapshotStorage = stores_lock.read().unwrap().values().cloned().collect(); if !Self::is_shrinking_productive(slot, stores.iter()) { return 0; } @@ 
-4297,10 +4240,9 @@ impl AccountsDb { } fn get_storages_for_slot(&self, slot: Slot) -> Option { - self.storage.map.get(&slot).map(|storages| { + self.storage.get(&slot).map(|storages| { // per slot, get the storages. There should usually only be 1. storages - .value() .read() .unwrap() .values() @@ -4596,7 +4538,6 @@ impl AccountsDb { // all storages have been removed from here and recycled or dropped assert!(self .storage - .map .remove(slot) .unwrap() .1 @@ -4902,7 +4843,7 @@ impl AccountsDb { // If the slot is not in the cache, then all the account information must have // been flushed. This is guaranteed because we only remove the rooted slot from // the cache *after* we've finished flushing in `flush_slot_cache`. - let storage_maps: Vec> = self + let storage_maps: SnapshotStorage = self .storage .get_slot_storage_entries(slot) .unwrap_or_default(); @@ -5629,22 +5570,7 @@ impl AccountsDb { } fn insert_store(&self, slot: Slot, store: Arc) { - let slot_storages: SlotStores = self.storage.get_slot_stores(slot).unwrap_or_else(|| - // DashMap entry.or_insert() returns a RefMut, essentially a write lock, - // which is dropped after this block ends, minimizing time held by the lock. - // However, we still want to persist the reference to the `SlotStores` behind - // the lock, hence we clone it out, (`SlotStores` is an Arc so is cheap to clone). - self.storage - .map - .entry(slot) - .or_insert(Arc::new(RwLock::new(HashMap::new()))) - .clone()); - - assert!(slot_storages - .write() - .unwrap() - .insert(store.append_vec_id(), store) - .is_none()); + self.storage.insert(slot, store) } pub fn create_drop_bank_callback( @@ -5801,7 +5727,7 @@ impl AccountsDb { let mut remove_storage_entries_elapsed = Measure::start("remove_storage_entries_elapsed"); for remove_slot in removed_slots { // Remove the storage entries and collect some metrics - if let Some((_, slot_storages_to_be_removed)) = self.storage.map.remove(remove_slot) { + if let Some((_, slot_storages_to_be_removed)) = self.storage.remove(remove_slot) { { let r_slot_removed_storages = slot_storages_to_be_removed.read().unwrap(); total_removed_storage_entries += r_slot_removed_storages.len(); @@ -6765,7 +6691,7 @@ impl AccountsDb { let mut oldest_slot = std::u64::MAX; let mut total_bytes = 0; let mut total_alive_bytes = 0; - for iter_item in self.storage.map.iter() { + for iter_item in self.storage.iter() { let slot = iter_item.key(); let slot_stores = iter_item.value().read().unwrap(); total_count += slot_stores.len(); @@ -7029,10 +6955,12 @@ impl AccountsDb { Vec::<(StoredMetaWriteVersion, Option>)>::with_capacity(len); for storage in storages { let mut iterator = storage.accounts.account_iter(); - if let Some(item) = iterator - .next() - .map(|stored_account| (stored_account.meta.write_version, Some(stored_account))) - { + if let Some(item) = iterator.next().map(|stored_account| { + ( + stored_account.meta.write_version_obsolete, + Some(stored_account), + ) + }) { current.push(item); progress.push(iterator); } @@ -7058,7 +6986,10 @@ impl AccountsDb { scanner.found_account(&LoadedAccount::Stored(account.1.unwrap())); } let next = progress[min_index].next().map(|stored_account| { - (stored_account.meta.write_version, Some(stored_account)) + ( + stored_account.meta.write_version_obsolete, + Some(stored_account), + ) }); match next { Some(item) => { @@ -7074,11 +7005,7 @@ impl AccountsDb { } } - fn update_old_slot_stats( - &self, - stats: &HashStats, - sub_storages: Option<&Vec>>, - ) { + fn update_old_slot_stats(&self, stats: &HashStats, 
sub_storages: Option<&SnapshotStorage>) { if let Some(sub_storages) = sub_storages { stats.roots_older_than_epoch.fetch_add(1, Ordering::Relaxed); let mut ancients = 0; @@ -7134,7 +7061,7 @@ impl AccountsDb { /// return true iff storages are valid for loading from cache fn hash_storage_info( hasher: &mut impl StdHasher, - storages: Option<&Vec>>, + storages: Option<&SnapshotStorage>, slot: Slot, ) -> bool { if let Some(sub_storages) = storages { @@ -7236,7 +7163,7 @@ impl AccountsDb { hash ); if load_from_cache { - if let Ok(mapped_file) = cache_hash_data.load_map(&Path::new(&file_name)) { + if let Ok(mapped_file) = cache_hash_data.load_map(&file_name) { return Some(mapped_file); } } @@ -7278,8 +7205,7 @@ impl AccountsDb { assert!(!file_name.is_empty()); (!r.is_empty() && r.iter().any(|b| !b.is_empty())).then(|| { // error if we can't write this - let file_name = Path::new(&file_name); - cache_hash_data.save(Path::new(&file_name), &r).unwrap(); + cache_hash_data.save(&file_name, &r).unwrap(); cache_hash_data.load_map(&file_name).unwrap() }) }) @@ -8168,7 +8094,7 @@ impl AccountsDb { pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, ) { let mut measure = Measure::start("clean_stored_dead_slots-ms"); - let mut stores: Vec> = vec![]; + let mut stores: SnapshotStorage = vec![]; // get all stores in a vec so we can iterate in parallel for slot in dead_slots.iter() { if let Some(slot_storage) = self.storage.get_slot_stores(*slot) { @@ -8586,7 +8512,6 @@ impl AccountsDb { let mut m = Measure::start("get slots"); let slots = self .storage - .map .iter() .map(|k| *k.key() as Slot) .collect::>(); @@ -8607,11 +8532,10 @@ impl AccountsDb { .map(|ancestors| ancestors.contains_key(slot)) .unwrap_or_default()) { - self.storage.map.get(slot).map_or_else( + self.storage.get(slot).map_or_else( || None, |item| { let storages = item - .value() .read() .unwrap() .values() @@ -8668,7 +8592,7 @@ impl AccountsDb { let mut accounts_map = GenerateIndexAccountsMap::with_capacity(num_accounts); storage_maps.iter().for_each(|storage| { storage.accounts.account_iter().for_each(|stored_account| { - let this_version = stored_account.meta.write_version; + let this_version = stored_account.meta.write_version_obsolete; let pubkey = stored_account.pubkey(); assert!(!self.is_filler_account(pubkey)); match accounts_map.entry(*pubkey) { @@ -8918,7 +8842,7 @@ impl AccountsDb { .take(per_pass) .collect::>(); roots_in_this_pass.into_par_iter().for_each(|slot| { - let storage_maps: Vec> = self + let storage_maps: SnapshotStorage = self .storage .get_slot_storage_entries(*slot) .unwrap_or_default(); @@ -9032,11 +8956,12 @@ impl AccountsDb { for (index, slot) in slots.iter().enumerate() { let mut scan_time = Measure::start("scan"); log_status.report(index as u64); - let storage_maps: Vec> = self - .storage - .get_slot_storage_entries(*slot) + let storage_maps = self.storage.get_slot_storage_entries(*slot); + let accounts_map = storage_maps + .as_ref() + .map(|storage_maps| self.process_storage_slot(storage_maps)) .unwrap_or_default(); - let accounts_map = self.process_storage_slot(&storage_maps); + scan_time.stop(); scan_time_sum += scan_time.as_us(); Self::update_storage_info( @@ -9351,7 +9276,7 @@ impl AccountsDb { ) { // store count and size for each storage let mut storage_size_storages_time = Measure::start("storage_size_storages"); - for slot_stores in self.storage.map.iter() { + for slot_stores in self.storage.iter() { for (id, store) in slot_stores.value().read().unwrap().iter() { // Should be default at 
this point assert_eq!(store.alive_bytes(), 0); @@ -9568,7 +9493,7 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let accounts_hash_cache_path = temp_dir.path(); self.scan_snapshot_stores_with_cache( - &CacheHashData::new(&accounts_hash_cache_path), + &CacheHashData::new(accounts_hash_cache_path), storage, stats, bins, @@ -9693,7 +9618,7 @@ pub mod tests { let hash = Hash::new(&[2; 32]); let stored_meta = StoredMeta { /// global write version - write_version: 0, + write_version_obsolete: 0, /// key for the account pubkey, data_len: 43, @@ -9761,22 +9686,22 @@ pub mod tests { let pubkey4 = solana_sdk::pubkey::new_rand(); let meta = StoredMeta { - write_version: 5, + write_version_obsolete: 5, pubkey, data_len: 7, }; let meta2 = StoredMeta { - write_version: 5, + write_version_obsolete: 5, pubkey: pubkey2, data_len: 7, }; let meta3 = StoredMeta { - write_version: 5, + write_version_obsolete: 5, pubkey: pubkey3, data_len: 7, }; let meta4 = StoredMeta { - write_version: 5, + write_version_obsolete: 5, pubkey: pubkey4, data_len: 7, }; @@ -10457,7 +10382,7 @@ pub mod tests { }; let result = accounts_db.scan_account_storage_no_bank( - &CacheHashData::new(&accounts_hash_cache_path), + &CacheHashData::new(accounts_hash_cache_path), &CalcAccountsHashConfig::default(), &get_storage_refs(&storages), test_scan, @@ -10972,7 +10897,7 @@ pub mod tests { assert!(db.load_without_fixed_root(&ancestors, &key).is_none()); assert!(db.bank_hashes.read().unwrap().get(&unrooted_slot).is_none()); assert!(db.accounts_cache.slot_cache(unrooted_slot).is_none()); - assert!(db.storage.map.get(&unrooted_slot).is_none()); + assert!(db.storage.get(&unrooted_slot).is_none()); assert!(db.accounts_index.get_account_read_entry(&key).is_none()); assert!(db .accounts_index @@ -11218,7 +11143,7 @@ pub mod tests { let mut append_vec_histogram = HashMap::new(); let mut all_storages = vec![]; - for slot_storage in accounts.storage.map.iter() { + for slot_storage in accounts.storage.iter() { all_storages.extend(slot_storage.read().unwrap().values().cloned()) } for storage in all_storages { @@ -11254,7 +11179,7 @@ pub mod tests { if pass == 1 { accounts.add_root_and_flush_write_cache(0); - assert_eq!(accounts.storage.map.len(), 1); + assert_eq!(accounts.storage.len(), 1); let stores = &accounts.storage.get_slot_stores(0).unwrap(); let r_stores = stores.read().unwrap(); assert_eq!(r_stores.len(), 1); @@ -11284,7 +11209,7 @@ pub mod tests { let flush = pass == i + 2; if flush { accounts.add_root_and_flush_write_cache(0); - assert_eq!(accounts.storage.map.len(), 1); + assert_eq!(accounts.storage.len(), 1); let stores = &accounts.storage.get_slot_stores(0).unwrap(); let r_stores = stores.read().unwrap(); assert_eq!(r_stores.len(), 1); @@ -11355,7 +11280,7 @@ pub mod tests { accounts.print_accounts_stats("pre-clean"); accounts.add_root_and_flush_write_cache(1); accounts.clean_accounts_for_tests(); - assert!(accounts.storage.map.get(&0).is_none()); + assert!(accounts.storage.get(&0).is_none()); //new value is there let ancestors = vec![(1, 1)].into_iter().collect(); @@ -12402,7 +12327,7 @@ pub mod tests { let executable = true; let rent_epoch = 2; let meta = StoredMeta { - write_version: 5, + write_version_obsolete: 5, pubkey: Pubkey::new_unique(), data_len: 7, }; @@ -14249,7 +14174,7 @@ pub mod tests { } } - fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec> { + fn slot_stores(db: &AccountsDb, slot: Slot) -> SnapshotStorage { db.storage .get_slot_storage_entries(slot) .unwrap_or_default() @@ -14624,7 +14549,7 @@ pub mod tests { 
accounts_db.add_root(slot); accounts_db.flush_accounts_cache(true, None); - let mut storage_maps: Vec> = accounts_db + let mut storage_maps: SnapshotStorage = accounts_db .storage .get_slot_storage_entries(slot) .unwrap_or_default(); @@ -15968,7 +15893,7 @@ pub mod tests { accounts.add_root_and_flush_write_cache(slot0); // fake out the store count to avoid the assert - for slot_stores in accounts.storage.map.iter() { + for slot_stores in accounts.storage.iter() { for (_id, store) in slot_stores.value().read().unwrap().iter() { store.alive_bytes.store(0, Ordering::Release); } @@ -15984,8 +15909,8 @@ pub mod tests { }, ); accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); - assert_eq!(accounts.storage.map.len(), 1); - for slot_stores in accounts.storage.map.iter() { + assert_eq!(accounts.storage.len(), 1); + for slot_stores in accounts.storage.iter() { for (id, store) in slot_stores.value().read().unwrap().iter() { assert_eq!(id, &0); assert_eq!(store.count_and_status.read().unwrap().0, 3); @@ -17689,7 +17614,7 @@ pub mod tests { .write() .unwrap() .insert(slot0, BankHashInfo::default()); - db.storage.map.insert(slot0, Arc::default()); + db.storage.insert_empty_at_slot(slot0); assert!(!db.bank_hashes.read().unwrap().is_empty()); db.accounts_index.add_root(slot0); db.accounts_index.add_uncleaned_roots([slot0].into_iter()); @@ -17702,16 +17627,12 @@ pub mod tests { } fn insert_store(db: &AccountsDb, append_vec: Arc) { - let mut hm = HashMap::default(); - hm.insert(append_vec.append_vec_id(), Arc::clone(&append_vec)); - db.storage - .map - .insert(append_vec.slot(), Arc::new(RwLock::new(hm))); + db.storage.insert(append_vec.slot(), append_vec); } #[test] #[should_panic( - expected = "assertion failed: self.storage.map.remove(slot).unwrap().1.read().unwrap().is_empty()" + expected = "assertion failed: self.storage.remove(slot).unwrap().1.read().unwrap().is_empty()" )] fn test_handle_dropped_roots_for_ancient_assert() { solana_logger::setup(); diff --git a/runtime/src/accounts_db/geyser_plugin_utils.rs b/runtime/src/accounts_db/geyser_plugin_utils.rs index ab301f13c081ba..866a5628408d51 100644 --- a/runtime/src/accounts_db/geyser_plugin_utils.rs +++ b/runtime/src/accounts_db/geyser_plugin_utils.rs @@ -96,9 +96,9 @@ impl AccountsDb { accounts.for_each(|account| { account_len += 1; if let Some(previous_write_version) = previous_write_version { - assert!(previous_write_version < account.meta.write_version); + assert!(previous_write_version < account.meta.write_version_obsolete); } - previous_write_version = Some(account.meta.write_version); + previous_write_version = Some(account.meta.write_version_obsolete); if notified_accounts.contains(&account.meta.pubkey) { notify_stats.skipped_accounts += 1; return; diff --git a/runtime/src/ancient_append_vecs.rs b/runtime/src/ancient_append_vecs.rs index 0582459dbb8824..a939b3ec38193b 100644 --- a/runtime/src/ancient_append_vecs.rs +++ b/runtime/src/ancient_append_vecs.rs @@ -135,7 +135,7 @@ pub mod tests { let hash = Hash::new(&[2; 32]); let stored_meta = StoredMeta { /// global write version - write_version: 0, + write_version_obsolete: 0, /// key for the account pubkey, data_len: 43, diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 35acde83061dd2..a17f3766120300 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -150,7 +150,10 @@ impl<'a: 'b, 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: #[derive(Clone, PartialEq, Eq, Debug)] pub struct StoredMeta { 
/// global write version - pub write_version: StoredMetaWriteVersion, + /// This will be made completely obsolete such that we stop storing it. + /// We will not support multiple append vecs per slot anymore, so this concept is no longer necessary. + /// Order of stores of an account to an append vec will determine 'latest' account data per pubkey. + pub write_version_obsolete: StoredMetaWriteVersion, /// key for the account pub pubkey: Pubkey, pub data_len: u64, @@ -656,7 +659,7 @@ impl AppendVec { data_len: account .map(|account| account.data().len()) .unwrap_or_default() as u64, - write_version: accounts.write_version(i), + write_version_obsolete: accounts.write_version(i), }; let meta_ptr = &stored_meta as *const StoredMeta; let account_meta_ptr = &account_meta as *const AccountMeta; @@ -716,7 +719,7 @@ pub mod tests { StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &account_data, vec![&hash], - vec![data.0.write_version], + vec![data.0.write_version_obsolete], ); self.append_accounts(&storable_accounts, 0) diff --git a/runtime/src/append_vec/test_utils.rs b/runtime/src/append_vec/test_utils.rs index a731b458590ab1..99064add647ca7 100644 --- a/runtime/src/append_vec/test_utils.rs +++ b/runtime/src/append_vec/test_utils.rs @@ -39,7 +39,7 @@ pub fn create_test_account(sample: usize) -> (StoredMeta, AccountSharedData) { let mut account = AccountSharedData::new(sample as u64, 0, &Pubkey::default()); account.set_data((0..data_len).map(|_| data_len as u8).collect()); let stored_meta = StoredMeta { - write_version: 0, + write_version_obsolete: 0, pubkey: Pubkey::default(), data_len: data_len as u64, }; diff --git a/runtime/src/bucket_map_holder.rs b/runtime/src/bucket_map_holder.rs index 765a487bdbbd72..51c15f2d1b3436 100644 --- a/runtime/src/bucket_map_holder.rs +++ b/runtime/src/bucket_map_holder.rs @@ -8,7 +8,7 @@ use { solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig}, solana_measure::measure::Measure, solana_sdk::{ - clock::{Slot, SLOT_MS}, + clock::{Slot, DEFAULT_MS_PER_SLOT}, timing::AtomicInterval, }, std::{ @@ -22,7 +22,7 @@ use { }; pub type Age = u8; -const AGE_MS: u64 = SLOT_MS; // match one age per slot time +const AGE_MS: u64 = DEFAULT_MS_PER_SLOT; // match one age per slot time // 10 GB limit for in-mem idx. In practice, we don't get this high. This tunes how aggressively to save items we expect to use soon. 
pub const DEFAULT_DISK_INDEX: Option = Some(10_000); diff --git a/runtime/src/cache_hash_data.rs b/runtime/src/cache_hash_data.rs index 5849c2fc1b42c4..3d22b2e7558722 100644 --- a/runtime/src/cache_hash_data.rs +++ b/runtime/src/cache_hash_data.rs @@ -112,7 +112,7 @@ impl CacheHashDataFile { } } - fn new_map(file: &Path, capacity: u64) -> Result { + fn new_map(file: impl AsRef, capacity: u64) -> Result { let mut data = OpenOptions::new() .read(true) .write(true) @@ -129,7 +129,7 @@ impl CacheHashDataFile { Ok(unsafe { MmapMut::map_mut(&data).unwrap() }) } - fn load_map(file: &Path) -> Result { + fn load_map(file: impl AsRef) -> Result { let data = OpenOptions::new() .read(true) .write(true) @@ -140,7 +140,7 @@ impl CacheHashDataFile { } } -pub type PreExistingCacheFiles = HashSet; +pub type PreExistingCacheFiles = HashSet; pub struct CacheHashData { cache_folder: PathBuf, pre_existing_cache_files: Arc>, @@ -155,11 +155,11 @@ impl Drop for CacheHashData { } impl CacheHashData { - pub fn new + std::fmt::Debug>(parent_folder: &P) -> CacheHashData { + pub fn new(parent_folder: impl AsRef) -> CacheHashData { let cache_folder = Self::get_cache_root_path(parent_folder); - std::fs::create_dir_all(cache_folder.clone()) - .unwrap_or_else(|_| panic!("error creating cache dir: {cache_folder:?}")); + std::fs::create_dir_all(&cache_folder) + .unwrap_or_else(|_| panic!("error creating cache dir: {}", cache_folder.display())); let result = CacheHashData { cache_folder, @@ -182,12 +182,12 @@ impl CacheHashData { } fn get_cache_files(&self) { if self.cache_folder.is_dir() { - let dir = fs::read_dir(self.cache_folder.clone()); + let dir = fs::read_dir(&self.cache_folder); if let Ok(dir) = dir { let mut pre_existing = self.pre_existing_cache_files.lock().unwrap(); for entry in dir.flatten() { if let Some(name) = entry.path().file_name() { - pre_existing.insert(name.to_str().unwrap().to_string()); + pre_existing.insert(PathBuf::from(name)); } } self.stats.lock().unwrap().cache_file_count += pre_existing.len(); @@ -195,15 +195,15 @@ impl CacheHashData { } } - fn get_cache_root_path>(parent_folder: &P) -> PathBuf { + fn get_cache_root_path(parent_folder: impl AsRef) -> PathBuf { parent_folder.as_ref().join("calculate_accounts_hash_cache") } #[cfg(test)] /// load from 'file_name' into 'accumulator' - pub(crate) fn load + std::fmt::Debug>( + pub(crate) fn load( &self, - file_name: &P, + file_name: impl AsRef, accumulator: &mut SavedType, start_bin_index: usize, bin_calculator: &PubkeyBinCalculator24, @@ -218,9 +218,9 @@ impl CacheHashData { } /// map 'file_name' into memory - pub(crate) fn load_map + std::fmt::Debug>( + pub(crate) fn load_map( &self, - file_name: &P, + file_name: impl AsRef, ) -> Result { let mut stats = CacheHashDataStats::default(); let result = self.map(file_name, &mut stats); @@ -229,13 +229,13 @@ impl CacheHashData { } /// create and return a MappedCacheFile for a cache file path - fn map + std::fmt::Debug>( + fn map( &self, - file_name: &P, + file_name: impl AsRef, stats: &mut CacheHashDataStats, ) -> Result { - let path = self.cache_folder.join(file_name); - let file_len = std::fs::metadata(path.clone())?.len(); + let path = self.cache_folder.join(&file_name); + let file_len = std::fs::metadata(&path)?.len(); let mut m1 = Measure::start("read_file"); let mmap = CacheHashDataFile::load_map(&path)?; m1.stop(); @@ -269,17 +269,16 @@ impl CacheHashData { cache_file.capacity = capacity; assert_eq!( capacity, file_len, - "expected: {capacity}, len on disk: {file_len} {path:?}, entries: {entries}, 
cell_size: {cell_size}" + "expected: {capacity}, len on disk: {file_len} {}, entries: {entries}, cell_size: {cell_size}", path.display(), ); stats.total_entries = entries; stats.cache_file_size += capacity as usize; - let file_name_lookup = file_name.as_ref().to_str().unwrap().to_string(); self.pre_existing_cache_files .lock() .unwrap() - .remove(&file_name_lookup); + .remove(file_name.as_ref()); stats.loaded_from_cache += 1; stats.entries_loaded_from_cache += entries; @@ -288,7 +287,11 @@ impl CacheHashData { } /// save 'data' to 'file_name' - pub fn save(&self, file_name: &Path, data: &SavedTypeSlice) -> Result<(), std::io::Error> { + pub fn save( + &self, + file_name: impl AsRef, + data: &SavedTypeSlice, + ) -> Result<(), std::io::Error> { let mut stats = CacheHashDataStats::default(); let result = self.save_internal(file_name, data, &mut stats); self.stats.lock().unwrap().accumulate(&stats); @@ -297,16 +300,14 @@ impl CacheHashData { fn save_internal( &self, - file_name: &Path, + file_name: impl AsRef, data: &SavedTypeSlice, stats: &mut CacheHashDataStats, ) -> Result<(), std::io::Error> { let mut m = Measure::start("save"); let cache_path = self.cache_folder.join(file_name); - let create = true; - if create { - let _ignored = remove_file(&cache_path); - } + // overwrite any existing file at this path + let _ignored = remove_file(&cache_path); let cell_size = std::mem::size_of::() as u64; let mut m1 = Measure::start("create save"); let entries = data @@ -390,9 +391,8 @@ pub mod tests { } } let cache = CacheHashData::new(&tmpdir); - let file_name = "test"; - let file = Path::new(file_name).to_path_buf(); - cache.save(&file, &data_this_pass).unwrap(); + let file_name = PathBuf::from("test"); + cache.save(&file_name, &data_this_pass).unwrap(); cache.get_cache_files(); assert_eq!( cache @@ -401,11 +401,11 @@ pub mod tests { .unwrap() .iter() .collect::>(), - vec![file_name] + vec![&file_name], ); let mut accum = (0..bins_per_pass).into_iter().map(|_| vec![]).collect(); cache - .load(&file, &mut accum, start_bin_this_pass, &bin_calculator) + .load(&file_name, &mut accum, start_bin_this_pass, &bin_calculator) .unwrap(); if flatten_data { bin_data( diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index b6369d90f6e8e3..e5cd2a8a2de700 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -7,6 +7,7 @@ extern crate lazy_static; pub mod account_info; pub mod account_overrides; pub mod account_rent_state; +pub mod account_storage; pub mod accounts; pub mod accounts_background_service; pub mod accounts_cache; diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs index dab6bd9b73b34f..5077e8bba4f29a 100644 --- a/runtime/src/rent_collector.rs +++ b/runtime/src/rent_collector.rs @@ -1,15 +1,12 @@ //! 
calculate and collect rent from Accounts -use { - log::*, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - clock::Epoch, - epoch_schedule::EpochSchedule, - genesis_config::GenesisConfig, - incinerator, - pubkey::Pubkey, - rent::{Rent, RentDue}, - }, +use solana_sdk::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + clock::Epoch, + epoch_schedule::EpochSchedule, + genesis_config::GenesisConfig, + incinerator, + pubkey::Pubkey, + rent::{Rent, RentDue}, }; #[derive(Serialize, Deserialize, Clone, PartialEq, Debug, AbiExample)] @@ -101,19 +98,6 @@ impl RentCollector { // we know this account is not exempt let due = self.rent.due_amount(account.data().len(), years_elapsed); - - // we expect rent_epoch to always be one of: {0, self.epoch-1, self.epoch, self.epoch+1} - if account_rent_epoch != 0 - && (account_rent_epoch + 1 < self.epoch || account_rent_epoch > self.epoch + 1) - { - // this should not occur in a running validator - if due == 0 { - inc_new_counter_info!("rent-collector-rent-epoch-range-large-exempt", 1); - } else { - inc_new_counter_info!("rent-collector-rent-epoch-range-large-paying", 1); - } - } - RentDue::Paying(due) } } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 5582a3a1b9f6b7..34d12db01167fd 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -733,7 +733,7 @@ where .write() .unwrap() .insert(snapshot_slot, snapshot_bank_hash_info); - accounts_db.storage.map.extend(storage.into_iter()); + accounts_db.storage.extend(storage); accounts_db .next_id .store(next_append_vec_id, Ordering::Release); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 7765bcfa3874e3..27643699f1c9cc 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -3,8 +3,9 @@ use { super::*, crate::{ + account_storage::AccountStorageMap, accounts::{test_utils::create_test_accounts, Accounts}, - accounts_db::{get_temp_accounts_paths, AccountShrinkThreshold, AccountStorageMap}, + accounts_db::{get_temp_accounts_paths, AccountShrinkThreshold}, accounts_hash::AccountsHash, append_vec::AppendVec, bank::{Bank, BankTestConfig}, diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 6622b9c167faf1..2e1591bfe0e301 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -3,7 +3,7 @@ use { crate::{ accounts_db::{ - AccountStorageEntry, AccountsDb, GetUniqueAccountsResult, PurgeStats, StoreReclaims, + AccountsDb, GetUniqueAccountsResult, PurgeStats, SnapshotStorage, StoreReclaims, }, bank::Bank, builtins, static_ids, @@ -27,7 +27,7 @@ use { collections::HashSet, sync::{ atomic::{AtomicUsize, Ordering}, - Arc, Mutex, + Mutex, }, }, }; @@ -273,7 +273,7 @@ impl<'a> SnapshotMinimizer<'a> { fn process_snapshot_storages( &self, minimized_slot_set: DashSet, - ) -> (Vec, Vec>) { + ) -> (Vec, SnapshotStorage) { let snapshot_storages = self .accounts_db() .get_snapshot_storages(..=self.starting_slot, None) @@ -299,11 +299,7 @@ impl<'a> SnapshotMinimizer<'a> { } /// Creates new storage replacing `storages` that contains only accounts in `minimized_account_set`. - fn filter_storages( - &self, - storages: Vec>, - dead_storages: &Mutex>>, - ) { + fn filter_storages(&self, storages: SnapshotStorage, dead_storages: &Mutex) { let slot = storages.first().unwrap().slot(); let GetUniqueAccountsResult { stored_accounts, .. 
@@ -363,7 +359,7 @@ impl<'a> SnapshotMinimizer<'a> { for alive_account in keep_accounts { accounts.push(&alive_account.account); hashes.push(alive_account.account.hash); - write_versions.push(alive_account.account.meta.write_version); + write_versions.push(alive_account.account.meta.write_version_obsolete); } let new_storage = self.accounts_db().get_store_for_shrink(slot, aligned_total); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 11f349040d6c3b..b3ec73a53f29d7 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1,7 +1,8 @@ use { crate::{ + account_storage::AccountStorageMap, accounts_db::{ - AccountShrinkThreshold, AccountStorageMap, AccountsDbConfig, AtomicAppendVecId, + AccountShrinkThreshold, AccountsDbConfig, AtomicAppendVecId, CalcAccountsHashDataSource, SnapshotStorage, SnapshotStorages, }, accounts_index::AccountSecondaryIndexes, diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 097f3ad158a8cf..35701121922471 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -3,7 +3,8 @@ use { super::{get_io_error, snapshot_version_from_file, SnapshotError, SnapshotVersion}, crate::{ - accounts_db::{AccountStorageEntry, AccountStorageMap, AppendVecId, AtomicAppendVecId}, + account_storage::AccountStorageMap, + accounts_db::{AccountStorageEntry, AppendVecId, AtomicAppendVecId}, serde_snapshot::{ self, remap_and_reconstruct_single_storage, snapshot_storage_lengths_from_fields, SerdeStyle, SerializedAppendVecId, diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs index bd751162ec8155..a9cd5b94ddfce9 100644 --- a/runtime/src/storable_accounts.rs +++ b/runtime/src/storable_accounts.rs @@ -195,7 +195,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> self.1[index].hash } fn write_version(&self, index: usize) -> u64 { - self.1[index].meta.write_version + self.1[index].meta.write_version_obsolete } } @@ -238,7 +238,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> self.1[index].account.hash } fn write_version(&self, index: usize) -> u64 { - self.1[index].account.meta.write_version + self.1[index].account.meta.write_version_obsolete } } #[cfg(test)] @@ -281,7 +281,7 @@ pub mod tests { let executable = false; let rent_epoch = 0; let meta = StoredMeta { - write_version: 5, + write_version_obsolete: 5, pubkey: pk, data_len: 7, }; @@ -338,7 +338,7 @@ pub mod tests { account.clone(), starting_slot % max_slots, StoredMeta { - write_version: 0, // just something + write_version_obsolete: 0, // just something pubkey: pk, data_len: u64::MAX, // just something }, diff --git a/runtime/store-tool/src/main.rs b/runtime/store-tool/src/main.rs index 642b81b68b514e..acfe2ae9519609 100644 --- a/runtime/store-tool/src/main.rs +++ b/runtime/store-tool/src/main.rs @@ -42,7 +42,7 @@ fn main() { info!( " account: {:?} version: {} lamports: {} data: {} hash: {:?}", account.pubkey(), - account.meta.write_version, + account.meta.write_version_obsolete, account.account_meta.lamports, account.meta.data_len, account.hash @@ -59,7 +59,7 @@ fn main() { fn is_account_zeroed(account: &StoredAccountMeta) -> bool { account.hash == &Hash::default() && account.meta.data_len == 0 - && account.meta.write_version == 0 + && account.meta.write_version_obsolete == 0 && account.pubkey() == &Pubkey::default() && account.clone_account() == AccountSharedData::default() } 
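The hunks above swap hand-written container types for the `SnapshotStorage` alias from `accounts_db`. As a rough, self-contained sketch of that intent (the alias definitions below are an assumption for illustration, not quoted from the crate):

use std::sync::Arc;

// Hypothetical stand-in for runtime::accounts_db::AccountStorageEntry, only so
// the alias shape can be shown in isolation.
struct AccountStorageEntry;

// Assumed shape of the aliases: one slot's storage entries, and one entry list per slot.
type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
type SnapshotStorages = Vec<SnapshotStorage>;

fn main() {
    let one_slot: SnapshotStorage = vec![Arc::new(AccountStorageEntry)];
    let all_slots: SnapshotStorages = vec![one_slot];
    // Signatures such as slot_stores() or filter_storages() can then name
    // SnapshotStorage directly instead of repeating the nested container type.
    assert_eq!(all_slots.len(), 1);
}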
diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 00000000000000..f8c2abbb3da936 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "1.65.0" diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index 52431796f654ab..45f49f218b15c7 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -7,7 +7,7 @@ //! by their slot number, and some slots do not contain a block. //! //! An approximation of the passage of real-world time can be calculated by -//! multiplying a number of slots by [`SLOT_MS`], which is a constant target +//! multiplying a number of slots by [`DEFAULT_MS_PER_SLOT`], which is a constant target //! time for the network to produce slots. Note though that this method suffers //! a variable amount of drift, as the network does not produce slots at exactly //! the target rate, and the greater number of slots being calculated for, the @@ -33,11 +33,9 @@ static_assertions::const_assert_eq!(MS_PER_TICK, 6); /// The number of milliseconds per tick (6). pub const MS_PER_TICK: u64 = 1000 / DEFAULT_TICKS_PER_SECOND; -#[cfg(test)] -static_assertions::const_assert_eq!(SLOT_MS, 400); - +#[deprecated(since = "1.15.0", note = "Please use DEFAULT_MS_PER_SLOT instead")] /// The expected duration of a slot (400 milliseconds). -pub const SLOT_MS: u64 = (DEFAULT_TICKS_PER_SLOT * 1000) / DEFAULT_TICKS_PER_SECOND; +pub const SLOT_MS: u64 = DEFAULT_MS_PER_SLOT; // At 160 ticks/s, 64 ticks per slot implies that leader rotation and voting will happen // every 400 ms. A fast voting cadence ensures faster finality and convergence @@ -74,6 +72,7 @@ pub const NUM_CONSECUTIVE_LEADER_SLOTS: u64 = 4; #[cfg(test)] static_assertions::const_assert_eq!(DEFAULT_MS_PER_SLOT, 400); +/// The expected duration of a slot (400 milliseconds). pub const DEFAULT_MS_PER_SLOT: u64 = 1_000 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND; pub const DEFAULT_S_PER_SLOT: f64 = DEFAULT_TICKS_PER_SLOT as f64 / DEFAULT_TICKS_PER_SECOND as f64; diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index fe94448464914a..c4a1f250469a52 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -566,9 +566,17 @@ async fn handle_connection( let last_update = last_update.clone(); tokio::spawn(async move { let mut maybe_batch = None; + // The min guards against a value so small that the task wakes up unnecessarily + // often and wastes CPU cycles. The max guards against waiting too long, which + // delays exit and causes some test failures when the timeout value is large. + // Within these bounds, the heuristic is to wake up 10 times per configured + // timeout to check for exit when there is no data.
+ let exit_check_interval = + (wait_for_chunk_timeout_ms / 10).clamp(10, 1000); + let mut start = Instant::now(); while !stream_exit.load(Ordering::Relaxed) { if let Ok(chunk) = tokio::time::timeout( - Duration::from_millis(wait_for_chunk_timeout_ms), + Duration::from_millis(exit_check_interval), stream.read_chunk(PACKET_DATA_SIZE, false), ) .await @@ -585,12 +593,16 @@ async fn handle_connection( last_update.store(timing::timestamp(), Ordering::Relaxed); break; } + start = Instant::now(); } else { - debug!("Timeout in receiving on stream"); - stats - .total_stream_read_timeouts - .fetch_add(1, Ordering::Relaxed); - break; + let elapse = Instant::now() - start; + if elapse.as_millis() as u64 > wait_for_chunk_timeout_ms { + debug!("Timeout in receiving on stream"); + stats + .total_stream_read_timeouts + .fetch_add(1, Ordering::Relaxed); + break; + } } } stats.total_streams.fetch_sub(1, Ordering::Relaxed); @@ -1020,7 +1032,7 @@ pub mod test { MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS, stats.clone(), - 1000, + 2000, ) .unwrap(); (t, exit, receiver, server_address, stats) diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index e0aed308534930..151a8cc317392a 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -974,6 +974,10 @@ impl TestValidator { pub fn bank_forks(&self) -> Arc> { self.validator.as_ref().unwrap().bank_forks.clone() } + + pub fn repair_whitelist(&self) -> Arc>> { + Arc::new(RwLock::new(HashSet::default())) + } } impl Drop for TestValidator { diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index eb2996fc1ee020..f7441235da159a 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -17,7 +17,7 @@ use { signature::{read_keypair_file, Keypair, Signer}, }, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, error, fmt::{self, Display}, net::SocketAddr, @@ -33,6 +33,7 @@ pub struct AdminRpcRequestMetadataPostInit { pub cluster_info: Arc, pub bank_forks: Arc>, pub vote_account: Pubkey, + pub repair_whitelist: Arc>>, } #[derive(Clone)] @@ -80,6 +81,11 @@ pub struct AdminRpcContactInfo { pub shred_version: u16, } +#[derive(Debug, Deserialize, Serialize)] +pub struct AdminRpcRepairWhitelist { + pub whitelist: Vec, +} + impl From for AdminRpcContactInfo { fn from(contact_info: ContactInfo) -> Self { let ContactInfo { @@ -133,6 +139,12 @@ impl Display for AdminRpcContactInfo { } } +impl Display for AdminRpcRepairWhitelist { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f, "Repair whitelist: {:?}", &self.whitelist) + } +} + #[rpc] pub trait AdminRpc { type Metadata; @@ -183,6 +195,12 @@ pub trait AdminRpc { #[rpc(meta, name = "contactInfo")] fn contact_info(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "repairWhitelist")] + fn repair_whitelist(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "setRepairWhitelist")] + fn set_repair_whitelist(&self, meta: Self::Metadata, whitelist: Vec) -> Result<()>; } pub struct AdminRpcImpl; @@ -321,6 +339,35 @@ impl AdminRpc for AdminRpcImpl { fn contact_info(&self, meta: Self::Metadata) -> Result { meta.with_post_init(|post_init| Ok(post_init.cluster_info.my_contact_info().into())) } + + fn repair_whitelist(&self, meta: Self::Metadata) -> Result { + debug!("repair_whitelist request received"); + + meta.with_post_init(|post_init| { + let whitelist: Vec<_> = post_init + .repair_whitelist + .read() + .unwrap() + .iter() + .copied() + .collect(); + Ok(AdminRpcRepairWhitelist 
{ whitelist }) + }) + } + + fn set_repair_whitelist(&self, meta: Self::Metadata, whitelist: Vec) -> Result<()> { + debug!("set_repair_whitelist request received"); + + let whitelist: HashSet = whitelist.into_iter().collect(); + meta.with_post_init(|post_init| { + *post_init.repair_whitelist.write().unwrap() = whitelist; + warn!( + "Repair whitelist set to {:?}", + &post_init.repair_whitelist.read().unwrap() + ); + Ok(()) + }) + } } impl AdminRpcImpl { diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index b1b4b319769cf8..148bd047db4a89 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -498,6 +498,7 @@ fn main() { bank_forks: test_validator.bank_forks(), cluster_info: test_validator.cluster_info(), vote_account: test_validator.vote_account_address(), + repair_whitelist: test_validator.repair_whitelist(), }); if let Some(dashboard) = dashboard { dashboard.run(Duration::from_millis(250)); diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 31f9ef878a4a5d..1b94dc180e995e 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -714,6 +714,18 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .help("A list of validators to request repairs from. If specified, repair will not \ request from validators outside this set [default: all validators]") ) + .arg( + Arg::with_name("repair_whitelist") + .hidden(true) + .long("repair-whitelist") + .validator(is_pubkey) + .value_name("VALIDATOR IDENTITY") + .multiple(true) + .takes_value(true) + .help("A list of validators to prioritize repairs from. If specified, repair requests \ + from validators in the list will be prioritized over requests from other validators. 
\ + [default: all validators]") + ) .arg( Arg::with_name("gossip_validators") .long("gossip-validator") @@ -1387,6 +1399,46 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .help("Output display mode") ) ) + .subcommand( + SubCommand::with_name("repair-whitelist") + .about("Manage the validator's repair protocol whitelist") + .setting(AppSettings::SubcommandRequiredElseHelp) + .setting(AppSettings::InferSubcommands) + .subcommand( + SubCommand::with_name("get") + .about("Display the validator's repair protocol whitelist") + .arg( + Arg::with_name("output") + .long("output") + .takes_value(true) + .value_name("MODE") + .possible_values(&["json", "json-compact"]) + .help("Output display mode") + ) + ) + .subcommand( + SubCommand::with_name("set") + .about("Set the validator's repair protocol whitelist") + .setting(AppSettings::ArgRequiredElseHelp) + .arg( + Arg::with_name("whitelist") + .long("whitelist") + .validator(is_pubkey) + .value_name("VALIDATOR IDENTITY") + .multiple(true) + .takes_value(true) + .help("Set the validator's repair protocol whitelist") + ) + .after_help("Note: repair protocol whitelist changes only apply to the currently \ + running validator instance") + ) + .subcommand( + SubCommand::with_name("remove-all") + .about("Clear the validator's repair protocol whitelist") + .after_help("Note: repair protocol whitelist changes only apply to the currently \ + running validator instance") + ) + ) .subcommand( SubCommand::with_name("init") .about("Initialize the ledger directory then exit") diff --git a/validator/src/main.rs b/validator/src/main.rs index a8fe626704989c..46748d7ab34ff3 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -344,6 +344,22 @@ fn wait_for_restart_window( Ok(()) } +fn set_repair_whitelist( + ledger_path: &Path, + whitelist: Vec, +) -> Result<(), Box> { + let admin_client = admin_rpc_service::connect(ledger_path); + admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.set_repair_whitelist(whitelist).await }) + .map_err(|err| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!("setRepairWhitelist request failed: {}", err), + ) + })?; + Ok(()) +} + /// Returns the default fifo shred storage size (include both data and coding /// shreds) based on the validator config. 
fn default_fifo_shred_storage_size(vc: &ValidatorConfig) -> Option { @@ -667,6 +683,59 @@ pub fn main() { }); return; } + ("repair-whitelist", Some(repair_whitelist_subcommand_matches)) => { + match repair_whitelist_subcommand_matches.subcommand() { + ("get", Some(subcommand_matches)) => { + let output_mode = subcommand_matches.value_of("output"); + let admin_client = admin_rpc_service::connect(&ledger_path); + let repair_whitelist = admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.repair_whitelist().await }) + .unwrap_or_else(|err| { + eprintln!("Repair whitelist query failed: {}", err); + exit(1); + }); + if let Some(mode) = output_mode { + match mode { + "json" => println!( + "{}", + serde_json::to_string_pretty(&repair_whitelist).unwrap() + ), + "json-compact" => { + print!("{}", serde_json::to_string(&repair_whitelist).unwrap()) + } + _ => unreachable!(), + } + } else { + print!("{}", repair_whitelist); + } + return; + } + ("set", Some(subcommand_matches)) => { + let whitelist = if subcommand_matches.is_present("whitelist") { + let validators_set: HashSet<_> = + values_t_or_exit!(subcommand_matches, "whitelist", Pubkey) + .into_iter() + .collect(); + validators_set.into_iter().collect::>() + } else { + return; + }; + set_repair_whitelist(&ledger_path, whitelist).unwrap_or_else(|err| { + eprintln!("{err}"); + exit(1); + }); + return; + } + ("remove-all", _) => { + set_repair_whitelist(&ledger_path, Vec::default()).unwrap_or_else(|err| { + eprintln!("{err}"); + exit(1); + }); + return; + } + _ => unreachable!(), + } + } _ => unreachable!(), }; @@ -786,6 +855,13 @@ pub fn main() { "repair_validators", "--repair-validator", ); + let repair_whitelist = validators_set( + &identity_keypair.pubkey(), + &matches, + "repair_whitelist", + "--repair-whitelist", + ); + let repair_whitelist = Arc::new(RwLock::new(repair_whitelist.unwrap_or_default())); let gossip_validators = validators_set( &identity_keypair.pubkey(), &matches, @@ -1094,6 +1170,7 @@ pub fn main() { wait_for_supermajority: value_t!(matches, "wait_for_supermajority", Slot).ok(), known_validators, repair_validators, + repair_whitelist: repair_whitelist.clone(), gossip_validators, wal_recovery_mode, poh_verify: !matches.is_present("skip_poh_verify"), @@ -1572,6 +1649,7 @@ pub fn main() { bank_forks: validator.bank_forks.clone(), cluster_info: validator.cluster_info.clone(), vote_account, + repair_whitelist, }); if let Some(filename) = init_complete_file {
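As a companion to the streamer/src/nonblocking/quic.rs change above, here is a minimal standalone sketch of the exit-check heuristic it introduces; the helper function name and the sample timeout values are illustrative only, not taken from the crate:

use std::time::{Duration, Instant};

// Wake roughly 10 times per configured timeout, but never more often than every
// 10 ms and never less often than every 1000 ms (mirrors the clamp in handle_connection).
fn exit_check_interval_ms(wait_for_chunk_timeout_ms: u64) -> u64 {
    (wait_for_chunk_timeout_ms / 10).clamp(10, 1000)
}

fn main() {
    assert_eq!(exit_check_interval_ms(50), 10); // small timeout: floor at 10 ms
    assert_eq!(exit_check_interval_ms(2000), 200); // 10 wake-ups across 2 s
    assert_eq!(exit_check_interval_ms(60_000), 1000); // large timeout: cap at 1 s

    // The read loop only counts a stream as timed out once the total elapsed time
    // exceeds the full timeout, not on every short wake-up.
    let start = Instant::now();
    let timed_out = start.elapsed() > Duration::from_millis(2000);
    assert!(!timed_out);
}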