From 0e12172ddd44f0b44130581f82e595084a553fd0 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 11 Mar 2024 11:34:08 -0400
Subject: [PATCH] Moves accounts benches into accounts-db crate (#164)

---
 accounts-db/benches/accounts.rs | 343 ++++++++++++++++++++++++++++++++
 runtime/benches/accounts.rs     | 340 +------------------------------
 2 files changed, 349 insertions(+), 334 deletions(-)
 create mode 100644 accounts-db/benches/accounts.rs

diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs
new file mode 100644
index 00000000000000..9b3b70600a60a2
--- /dev/null
+++ b/accounts-db/benches/accounts.rs
@@ -0,0 +1,343 @@
+#![feature(test)]
+#![allow(clippy::arithmetic_side_effects)]
+
+extern crate test;
+
+use {
+    dashmap::DashMap,
+    rand::Rng,
+    rayon::iter::{IntoParallelRefIterator, ParallelIterator},
+    solana_accounts_db::{
+        accounts::{AccountAddressFilter, Accounts},
+        accounts_db::{
+            test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb,
+            VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
+        },
+        accounts_index::{AccountSecondaryIndexes, ScanConfig},
+        ancestors::Ancestors,
+    },
+    solana_sdk::{
+        account::{Account, AccountSharedData, ReadableAccount},
+        genesis_config::ClusterType,
+        hash::Hash,
+        pubkey::Pubkey,
+        rent_collector::RentCollector,
+        sysvar::epoch_schedule::EpochSchedule,
+    },
+    std::{
+        collections::{HashMap, HashSet},
+        path::PathBuf,
+        sync::{Arc, RwLock},
+        thread::Builder,
+    },
+    test::Bencher,
+};
+
+fn new_accounts_db(account_paths: Vec<PathBuf>) -> AccountsDb {
+    AccountsDb::new_with_config(
+        account_paths,
+        &ClusterType::Development,
+        AccountSecondaryIndexes::default(),
+        AccountShrinkThreshold::default(),
+        Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
+        None,
+        Arc::default(),
+    )
+}
+
+#[bench]
+fn bench_accounts_hash_bank_hash(bencher: &mut Bencher) {
+    let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]);
+    let accounts = Accounts::new(Arc::new(accounts_db));
+    let mut pubkeys: Vec<Pubkey> = vec![];
+    let num_accounts = 60_000;
+    let slot = 0;
+    create_test_accounts(&accounts, &mut pubkeys, num_accounts, slot);
+    let ancestors = Ancestors::from(vec![0]);
+    let (_, total_lamports) = accounts
+        .accounts_db
+        .update_accounts_hash_for_tests(0, &ancestors, false, false);
+    accounts.add_root(slot);
+    accounts.accounts_db.flush_accounts_cache(true, Some(slot));
+    bencher.iter(|| {
+        assert!(accounts.verify_accounts_hash_and_lamports(
+            0,
+            total_lamports,
+            None,
+            VerifyAccountsHashAndLamportsConfig {
+                ancestors: &ancestors,
+                test_hash_calculation: false,
+                epoch_schedule: &EpochSchedule::default(),
+                rent_collector: &RentCollector::default(),
+                ignore_mismatch: false,
+                store_detailed_debug_info: false,
+                use_bg_thread_pool: false,
+            }
+        ))
+    });
+}
+
+#[bench]
+fn bench_update_accounts_hash(bencher: &mut Bencher) {
+    solana_logger::setup();
+    let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]);
+    let accounts = Accounts::new(Arc::new(accounts_db));
+    let mut pubkeys: Vec<Pubkey> = vec![];
+    create_test_accounts(&accounts, &mut pubkeys, 50_000, 0);
+    let ancestors = Ancestors::from(vec![0]);
+    bencher.iter(|| {
+        accounts
+            .accounts_db
+            .update_accounts_hash_for_tests(0, &ancestors, false, false);
+    });
+}
+
+#[bench]
+fn bench_accounts_delta_hash(bencher: &mut Bencher) {
+    solana_logger::setup();
+    let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]);
+    let accounts = Accounts::new(Arc::new(accounts_db));
+    let mut pubkeys: Vec<Pubkey> = vec![];
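+    // Pre-populate the db with 100k accounts up front; only the delta-hash
+    // calculation inside bencher.iter() below is measured.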
+    create_test_accounts(&accounts, &mut pubkeys, 100_000, 0);
+    bencher.iter(|| {
+        accounts.accounts_db.calculate_accounts_delta_hash(0);
+    });
+}
+
+#[bench]
+fn bench_delete_dependencies(bencher: &mut Bencher) {
+    solana_logger::setup();
+    let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delete_deps")]);
+    let accounts = Accounts::new(Arc::new(accounts_db));
+    let mut old_pubkey = Pubkey::default();
+    let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+    for i in 0..1000 {
+        let pubkey = solana_sdk::pubkey::new_rand();
+        let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner());
+        accounts.store_slow_uncached(i, &pubkey, &account);
+        accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
+        old_pubkey = pubkey;
+        accounts.add_root(i);
+    }
+    bencher.iter(|| {
+        accounts.accounts_db.clean_accounts_for_tests();
+    });
+}
+
+fn store_accounts_with_possible_contention<F: 'static>(
+    bench_name: &str,
+    bencher: &mut Bencher,
+    reader_f: F,
+) where
+    F: Fn(&Accounts, &[Pubkey]) + Send + Copy,
+{
+    let num_readers = 5;
+    let accounts_db = new_accounts_db(vec![PathBuf::from(
+        std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()),
+    )
+    .join(bench_name)]);
+    let accounts = Arc::new(Accounts::new(Arc::new(accounts_db)));
+    let num_keys = 1000;
+    let slot = 0;
+
+    let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
+        .take(num_keys)
+        .collect();
+    let accounts_data: Vec<_> = std::iter::repeat(Account {
+        lamports: 1,
+        ..Default::default()
+    })
+    .take(num_keys)
+    .collect();
+    let storable_accounts: Vec<_> = pubkeys.iter().zip(accounts_data.iter()).collect();
+    accounts.store_accounts_cached((slot, storable_accounts.as_slice()));
+    accounts.add_root(slot);
+    accounts
+        .accounts_db
+        .flush_accounts_cache_slot_for_tests(slot);
+
+    let pubkeys = Arc::new(pubkeys);
+    for i in 0..num_readers {
+        let accounts = accounts.clone();
+        let pubkeys = pubkeys.clone();
+        Builder::new()
+            .name(format!("reader{i:02}"))
+            .spawn(move || {
+                reader_f(&accounts, &pubkeys);
+            })
+            .unwrap();
+    }
+
+    let num_new_keys = 1000;
+    bencher.iter(|| {
+        let new_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
+            .take(num_new_keys)
+            .collect();
+        let new_storable_accounts: Vec<_> = new_pubkeys.iter().zip(accounts_data.iter()).collect();
+        // Write to a different slot than the one being read from. Because
+        // new account pubkeys are written on every iteration, each store
+        // will compete for the accounts index lock.
+        accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice()));
+    });
+}
+
+#[bench]
+fn bench_concurrent_read_write(bencher: &mut Bencher) {
+    store_accounts_with_possible_contention(
+        "concurrent_read_write",
+        bencher,
+        |accounts, pubkeys| {
+            let mut rng = rand::thread_rng();
+            loop {
+                let i = rng.gen_range(0..pubkeys.len());
+                test::black_box(
+                    accounts
+                        .load_without_fixed_root(&Ancestors::default(), &pubkeys[i])
+                        .unwrap(),
+                );
+            }
+        },
+    )
+}
+
+#[bench]
+fn bench_concurrent_scan_write(bencher: &mut Bencher) {
+    store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop {
+        test::black_box(
+            accounts
+                .load_by_program(
+                    &Ancestors::default(),
+                    0,
+                    AccountSharedData::default().owner(),
+                    &ScanConfig::default(),
+                )
+                .unwrap(),
+        );
+    })
+}
+
+#[bench]
+#[ignore]
+fn bench_dashmap_single_reader_with_n_writers(bencher: &mut Bencher) {
+    let num_readers = 5;
+    let num_keys = 10000;
+    let map = Arc::new(DashMap::new());
+    for i in 0..num_keys {
+        map.insert(i, i);
+    }
+    for _ in 0..num_readers {
+        let map = map.clone();
+        Builder::new()
+            .name("readers".to_string())
+            .spawn(move || loop {
+                test::black_box(map.entry(5).or_insert(2));
+            })
+            .unwrap();
+    }
+    bencher.iter(|| {
+        for _ in 0..num_keys {
+            test::black_box(map.get(&5).unwrap().value());
+        }
+    })
+}
+
+#[bench]
+#[ignore]
+fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) {
+    let num_readers = 5;
+    let num_keys = 10000;
+    let map = Arc::new(RwLock::new(HashMap::new()));
+    for i in 0..num_keys {
+        map.write().unwrap().insert(i, i);
+    }
+    for _ in 0..num_readers {
+        let map = map.clone();
+        Builder::new()
+            .name("readers".to_string())
+            .spawn(move || loop {
+                test::black_box(map.write().unwrap().get(&5));
+            })
+            .unwrap();
+    }
+    bencher.iter(|| {
+        for _ in 0..num_keys {
+            test::black_box(map.read().unwrap().get(&5));
+        }
+    })
+}
+
+fn setup_bench_dashmap_iter() -> (Arc<Accounts>, DashMap<Pubkey, (AccountSharedData, Hash)>) {
+    let accounts_db = new_accounts_db(vec![PathBuf::from(
+        std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()),
+    )
+    .join("bench_dashmap_par_iter")]);
+    let accounts = Arc::new(Accounts::new(Arc::new(accounts_db)));
+
+    let dashmap = DashMap::new();
+    let num_keys = std::env::var("NUM_BENCH_KEYS")
+        .map(|num_keys| num_keys.parse::<usize>().unwrap())
+        .unwrap_or_else(|_| 10000);
+    for _ in 0..num_keys {
+        dashmap.insert(
+            Pubkey::new_unique(),
+            (
+                AccountSharedData::new(1, 0, AccountSharedData::default().owner()),
+                Hash::new_unique(),
+            ),
+        );
+    }
+
+    (accounts, dashmap)
+}
+
+#[bench]
+fn bench_dashmap_par_iter(bencher: &mut Bencher) {
+    let (accounts, dashmap) = setup_bench_dashmap_iter();
+
+    bencher.iter(|| {
+        test::black_box(accounts.accounts_db.thread_pool.install(|| {
+            dashmap
+                .par_iter()
+                .map(|cached_account| (*cached_account.key(), cached_account.value().1))
+                .collect::<Vec<(Pubkey, Hash)>>()
+        }));
+    });
+}
+
+#[bench]
+fn bench_dashmap_iter(bencher: &mut Bencher) {
+    let (_accounts, dashmap) = setup_bench_dashmap_iter();
+
+    bencher.iter(|| {
+        test::black_box(
+            dashmap
+                .iter()
+                .map(|cached_account| (*cached_account.key(), cached_account.value().1))
+                .collect::<Vec<(Pubkey, Hash)>>(),
+        );
+    });
+}
+
+#[bench]
+fn bench_load_largest_accounts(b: &mut Bencher) {
+    let accounts_db = new_accounts_db(Vec::new());
+    let accounts = Accounts::new(Arc::new(accounts_db));
+    let mut rng = rand::thread_rng();
+    for _ in 0..10_000 {
+        let lamports = rng.gen();
+        let pubkey = Pubkey::new_unique();
+        let account = AccountSharedData::new(lamports, 0, &Pubkey::default());
+        accounts.store_slow_uncached(0, &pubkey, &account);
+    }
+    let ancestors = Ancestors::from(vec![0]);
+    let bank_id = 0;
+    b.iter(|| {
+        accounts.load_largest_accounts(
+            &ancestors,
+            bank_id,
+            20,
+            &HashSet::new(),
+            AccountAddressFilter::Exclude,
+        )
+    });
+}
diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs
index b99425b1507cab..9d09b5c3650c97 100644
--- a/runtime/benches/accounts.rs
+++ b/runtime/benches/accounts.rs
@@ -4,50 +4,19 @@
 extern crate test;
 
 use {
-    dashmap::DashMap,
-    rand::Rng,
-    rayon::iter::{IntoParallelRefIterator, ParallelIterator},
-    solana_accounts_db::{
-        accounts::{AccountAddressFilter, Accounts},
-        accounts_db::{
-            test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb,
-            VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
-        },
-        accounts_index::{AccountSecondaryIndexes, ScanConfig},
-        ancestors::Ancestors,
-        epoch_accounts_hash::EpochAccountsHash,
-    },
+    solana_accounts_db::epoch_accounts_hash::EpochAccountsHash,
     solana_runtime::bank::*,
     solana_sdk::{
-        account::{Account, AccountSharedData, ReadableAccount},
-        genesis_config::{create_genesis_config, ClusterType},
+        account::{AccountSharedData, ReadableAccount},
+        genesis_config::create_genesis_config,
         hash::Hash,
         lamports::LamportsError,
         pubkey::Pubkey,
-        rent_collector::RentCollector,
-        sysvar::epoch_schedule::EpochSchedule,
-    },
-    std::{
-        collections::{HashMap, HashSet},
-        path::PathBuf,
-        sync::{Arc, RwLock},
-        thread::Builder,
     },
+    std::{path::PathBuf, sync::Arc},
     test::Bencher,
 };
 
-fn new_accounts_db(account_paths: Vec<PathBuf>) -> AccountsDb {
-    AccountsDb::new_with_config(
-        account_paths,
-        &ClusterType::Development,
-        AccountSecondaryIndexes::default(),
-        AccountShrinkThreshold::default(),
-        Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
-        None,
-        Arc::default(),
-    )
-}
-
 fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) -> Result<(), LamportsError> {
     for t in 0..num {
         let pubkey = solana_sdk::pubkey::new_rand();
@@ -62,7 +31,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) -> Result<()
 }
 
 #[bench]
-fn test_accounts_create(bencher: &mut Bencher) {
+fn bench_accounts_create(bencher: &mut Bencher) {
     let (genesis_config, _) = create_genesis_config(10_000);
     let bank0 = Bank::new_with_paths_for_benches(&genesis_config, vec![PathBuf::from("bench_a0")]);
     bencher.iter(|| {
@@ -72,7 +41,7 @@ fn test_accounts_create(bencher: &mut Bencher) {
 }
 
 #[bench]
-fn test_accounts_squash(bencher: &mut Bencher) {
+fn bench_accounts_squash(bencher: &mut Bencher) {
     let (mut genesis_config, _) = create_genesis_config(100_000);
     genesis_config.rent.burn_percent = 100; // Avoid triggering an assert in Bank::distribute_rent_to_validators()
     let mut prev_bank = Arc::new(Bank::new_with_paths_for_benches(
@@ -108,300 +77,3 @@ fn test_accounts_squash(bencher: &mut Bencher) {
         prev_bank = next_bank;
     });
 }
-
-#[bench]
-fn test_accounts_hash_bank_hash(bencher: &mut Bencher) {
-    let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]);
-    let accounts = Accounts::new(Arc::new(accounts_db));
-    let mut pubkeys: Vec<Pubkey> = vec![];
-    let num_accounts = 60_000;
-    let slot = 0;
-    create_test_accounts(&accounts, &mut pubkeys, num_accounts, slot);
-    let ancestors = Ancestors::from(vec![0]);
-    let (_, total_lamports) = accounts
-        .accounts_db
-        .update_accounts_hash_for_tests(0, &ancestors, false, false);
-    accounts.add_root(slot);
-    accounts.accounts_db.flush_accounts_cache(true, Some(slot));
-    bencher.iter(|| {
-        assert!(accounts.verify_accounts_hash_and_lamports(
-            0,
-            total_lamports,
-            None,
-            VerifyAccountsHashAndLamportsConfig {
-                ancestors: &ancestors,
-                test_hash_calculation: false,
-                epoch_schedule: &EpochSchedule::default(),
-                rent_collector: &RentCollector::default(),
-                ignore_mismatch: false,
-                store_detailed_debug_info: false,
-                use_bg_thread_pool: false,
-            }
-        ))
-    });
-}
-
-#[bench]
-fn test_update_accounts_hash(bencher: &mut Bencher) {
-    solana_logger::setup();
-    let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]);
-    let accounts = Accounts::new(Arc::new(accounts_db));
-    let mut pubkeys: Vec<Pubkey> = vec![];
-    create_test_accounts(&accounts, &mut pubkeys, 50_000, 0);
-    let ancestors = Ancestors::from(vec![0]);
-    bencher.iter(|| {
-        accounts
-            .accounts_db
-            .update_accounts_hash_for_tests(0, &ancestors, false, false);
-    });
-}
-
-#[bench]
-fn test_accounts_delta_hash(bencher: &mut Bencher) {
-    solana_logger::setup();
-    let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]);
-    let accounts = Accounts::new(Arc::new(accounts_db));
-    let mut pubkeys: Vec<Pubkey> = vec![];
-    create_test_accounts(&accounts, &mut pubkeys, 100_000, 0);
-    bencher.iter(|| {
-        accounts.accounts_db.calculate_accounts_delta_hash(0);
-    });
-}
-
-#[bench]
-fn bench_delete_dependencies(bencher: &mut Bencher) {
-    solana_logger::setup();
-    let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delete_deps")]);
-    let accounts = Accounts::new(Arc::new(accounts_db));
-    let mut old_pubkey = Pubkey::default();
-    let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
-    for i in 0..1000 {
-        let pubkey = solana_sdk::pubkey::new_rand();
-        let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner());
-        accounts.store_slow_uncached(i, &pubkey, &account);
-        accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
-        old_pubkey = pubkey;
-        accounts.add_root(i);
-    }
-    bencher.iter(|| {
-        accounts.accounts_db.clean_accounts_for_tests();
-    });
-}
-
-fn store_accounts_with_possible_contention<F: 'static>(
-    bench_name: &str,
-    bencher: &mut Bencher,
-    reader_f: F,
-) where
-    F: Fn(&Accounts, &[Pubkey]) + Send + Copy,
-{
-    let num_readers = 5;
-    let accounts_db = new_accounts_db(vec![PathBuf::from(
-        std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()),
-    )
-    .join(bench_name)]);
-    let accounts = Arc::new(Accounts::new(Arc::new(accounts_db)));
-    let num_keys = 1000;
-    let slot = 0;
-
-    let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
-        .take(num_keys)
-        .collect();
-    let accounts_data: Vec<_> = std::iter::repeat(Account {
-        lamports: 1,
-        ..Default::default()
-    })
-    .take(num_keys)
-    .collect();
-    let storable_accounts: Vec<_> = pubkeys.iter().zip(accounts_data.iter()).collect();
-    accounts.store_accounts_cached((slot, storable_accounts.as_slice()));
-    accounts.add_root(slot);
-    accounts
-        .accounts_db
-        .flush_accounts_cache_slot_for_tests(slot);
-
-    let pubkeys = Arc::new(pubkeys);
-    for i in 0..num_readers {
-        let accounts = accounts.clone();
-        let pubkeys = pubkeys.clone();
-        Builder::new()
-            .name(format!("reader{i:02}"))
-            .spawn(move || {
-                reader_f(&accounts, &pubkeys);
-            })
-            .unwrap();
-    }
-
-    let num_new_keys = 1000;
-    bencher.iter(|| {
-        let new_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
-            .take(num_new_keys)
-            .collect();
-        let new_storable_accounts: Vec<_> = new_pubkeys.iter().zip(accounts_data.iter()).collect();
-        // Write to a different slot than the one being read from. Because
-        // there's a new account pubkey being written to every time, will
-        // compete for the accounts index lock on every store
-        accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice()));
-    });
-}
-
-#[bench]
-fn bench_concurrent_read_write(bencher: &mut Bencher) {
-    store_accounts_with_possible_contention(
-        "concurrent_read_write",
-        bencher,
-        |accounts, pubkeys| {
-            let mut rng = rand::thread_rng();
-            loop {
-                let i = rng.gen_range(0..pubkeys.len());
-                test::black_box(
-                    accounts
-                        .load_without_fixed_root(&Ancestors::default(), &pubkeys[i])
-                        .unwrap(),
-                );
-            }
-        },
-    )
-}
-
-#[bench]
-fn bench_concurrent_scan_write(bencher: &mut Bencher) {
-    store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop {
-        test::black_box(
-            accounts
-                .load_by_program(
-                    &Ancestors::default(),
-                    0,
-                    AccountSharedData::default().owner(),
-                    &ScanConfig::default(),
-                )
-                .unwrap(),
-        );
-    })
-}
-
-#[bench]
-#[ignore]
-fn bench_dashmap_single_reader_with_n_writers(bencher: &mut Bencher) {
-    let num_readers = 5;
-    let num_keys = 10000;
-    let map = Arc::new(DashMap::new());
-    for i in 0..num_keys {
-        map.insert(i, i);
-    }
-    for _ in 0..num_readers {
-        let map = map.clone();
-        Builder::new()
-            .name("readers".to_string())
-            .spawn(move || loop {
-                test::black_box(map.entry(5).or_insert(2));
-            })
-            .unwrap();
-    }
-    bencher.iter(|| {
-        for _ in 0..num_keys {
-            test::black_box(map.get(&5).unwrap().value());
-        }
-    })
-}
-
-#[bench]
-#[ignore]
-fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) {
-    let num_readers = 5;
-    let num_keys = 10000;
-    let map = Arc::new(RwLock::new(HashMap::new()));
-    for i in 0..num_keys {
-        map.write().unwrap().insert(i, i);
-    }
-    for _ in 0..num_readers {
-        let map = map.clone();
-        Builder::new()
-            .name("readers".to_string())
-            .spawn(move || loop {
-                test::black_box(map.write().unwrap().get(&5));
-            })
-            .unwrap();
-    }
-    bencher.iter(|| {
-        for _ in 0..num_keys {
-            test::black_box(map.read().unwrap().get(&5));
-        }
-    })
-}
-
-fn setup_bench_dashmap_iter() -> (Arc<Accounts>, DashMap<Pubkey, (AccountSharedData, Hash)>) {
-    let accounts_db = new_accounts_db(vec![PathBuf::from(
-        std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()),
-    )
-    .join("bench_dashmap_par_iter")]);
-    let accounts = Arc::new(Accounts::new(Arc::new(accounts_db)));
-
-    let dashmap = DashMap::new();
-    let num_keys = std::env::var("NUM_BENCH_KEYS")
-        .map(|num_keys| num_keys.parse::<usize>().unwrap())
-        .unwrap_or_else(|_| 10000);
-    for _ in 0..num_keys {
-        dashmap.insert(
-            Pubkey::new_unique(),
-            (
-                AccountSharedData::new(1, 0, AccountSharedData::default().owner()),
-                Hash::new_unique(),
-            ),
-        );
-    }
-
-    (accounts, dashmap)
-}
-
-#[bench]
-fn bench_dashmap_par_iter(bencher: &mut Bencher) {
-    let (accounts, dashmap) = setup_bench_dashmap_iter();
-
-    bencher.iter(|| {
-        test::black_box(accounts.accounts_db.thread_pool.install(|| {
-            dashmap
-                .par_iter()
-                .map(|cached_account| (*cached_account.key(), cached_account.value().1))
-                .collect::<Vec<(Pubkey, Hash)>>()
-        }));
-    });
-}
-
-#[bench]
-fn bench_dashmap_iter(bencher: &mut Bencher) {
-    let (_accounts, dashmap) = setup_bench_dashmap_iter();
-
-    bencher.iter(|| {
-        test::black_box(
-            dashmap
-                .iter()
-                .map(|cached_account| (*cached_account.key(), cached_account.value().1))
-                .collect::<Vec<(Pubkey, Hash)>>(),
-        );
-    });
-}
-
-#[bench]
-fn bench_load_largest_accounts(b: &mut Bencher) {
-    let accounts_db = new_accounts_db(Vec::new());
-    let accounts = Accounts::new(Arc::new(accounts_db));
-    let mut rng = rand::thread_rng();
-    for _ in 0..10_000 {
-        let lamports = rng.gen();
-        let pubkey = Pubkey::new_unique();
-        let account = AccountSharedData::new(lamports, 0, &Pubkey::default());
-        accounts.store_slow_uncached(0, &pubkey, &account);
-    }
-    let ancestors = Ancestors::from(vec![0]);
-    let bank_id = 0;
-    b.iter(|| {
-        accounts.load_largest_accounts(
-            &ancestors,
-            bank_id,
-            20,
-            &HashSet::new(),
-            AccountAddressFilter::Exclude,
-        )
-    });
-}