From 72a968fbe8df4692bbb008fd71d11bcd37fd8f72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 1 Jul 2022 00:06:24 +0200 Subject: [PATCH 001/100] Fix/system instruction processor tests (#26338) * Make mock_process_instruction() stricter by invoking the verify() method. * Fixes broken test cases. --- program-runtime/src/invoke_context.rs | 3 ++- runtime/src/system_instruction_processor.rs | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 0d4bbf3b1f563d..7b3c364ec751c7 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -1196,7 +1196,8 @@ pub fn mock_process_instruction( &program_indices, instruction_data, ) - .and_then(|_| process_instruction(1, &mut invoke_context)); + .and_then(|_| process_instruction(1, &mut invoke_context)) + .and_then(|_| invoke_context.verify(&preparation.instruction_accounts, &program_indices)); invoke_context.pop().unwrap(); assert_eq!(result, expected_result); let mut transaction_accounts = transaction_context.deconstruct_without_keys().unwrap(); diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 6ad7aecaa5afb8..e69c8680c515a7 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -916,7 +916,7 @@ mod tests { #[test] fn test_request_more_than_allowed_data_length() { let from = Pubkey::new_unique(); - let from_account = AccountSharedData::new(100, 0, &Pubkey::new_unique()); + let from_account = AccountSharedData::new(100, 0, &system_program::id()); let to = Pubkey::new_unique(); let to_account = AccountSharedData::new(0, 0, &Pubkey::default()); let instruction_accounts = vec![ @@ -1359,7 +1359,7 @@ mod tests { #[test] fn test_transfer_lamports() { let from = Pubkey::new_unique(); - let from_account = AccountSharedData::new(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter + let from_account = AccountSharedData::new(100, 0, &system_program::id()); let to = Pubkey::new(&[3; 32]); let to_account = AccountSharedData::new(1, 0, &to); // account owner should not matter let transaction_accounts = vec![(from, from_account), (to, to_account)]; @@ -1439,7 +1439,7 @@ mod tests { let from_seed = "42".to_string(); let from_owner = system_program::id(); let from = Pubkey::create_with_seed(&base, from_seed.as_str(), &from_owner).unwrap(); - let from_account = AccountSharedData::new(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter + let from_account = AccountSharedData::new(100, 0, &system_program::id()); let to = Pubkey::new(&[3; 32]); let to_account = AccountSharedData::new(1, 0, &to); // account owner should not matter let transaction_accounts = From 94685e1222b3289859a447d62fadea20898241e0 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Thu, 30 Jun 2022 17:56:15 -0700 Subject: [PATCH 002/100] Implement randomized pruning of QUIC connection from staked peers (#26299) --- Cargo.lock | 1 + core/src/staked_nodes_updater_service.rs | 2 +- programs/bpf/Cargo.lock | 1 + streamer/Cargo.toml | 1 + streamer/src/nonblocking/quic.rs | 240 +++++++++++++++++------ streamer/src/quic.rs | 14 ++ streamer/src/streamer.rs | 2 +- 7 files changed, 201 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df37853f86c095..a40406c770771e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6156,6 +6156,7 @@ dependencies = [ "crossbeam-channel", "futures-util", 
"histogram", + "indexmap", "itertools", "libc", "log", diff --git a/core/src/staked_nodes_updater_service.rs b/core/src/staked_nodes_updater_service.rs index e26740a3c7a8cb..23a3587b0d1f00 100644 --- a/core/src/staked_nodes_updater_service.rs +++ b/core/src/staked_nodes_updater_service.rs @@ -42,7 +42,7 @@ impl StakedNodesUpdaterService { &cluster_info, ) { let mut shared = shared_staked_nodes.write().unwrap(); - shared.total_stake = total_stake as f64; + shared.total_stake = total_stake; shared.stake_map = new_ip_to_stake; } } diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 7d5cb0c362757b..cf966d1b296103 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -5446,6 +5446,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", + "indexmap", "itertools", "libc", "log", diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 3dcde3f090b990..7a89d077315eda 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -13,6 +13,7 @@ edition = "2021" crossbeam-channel = "0.5" futures-util = "0.3.21" histogram = "0.6.9" +indexmap = "1.8.1" itertools = "0.10.3" libc = "0.2.126" log = "0.4.17" diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 1b55d958ec7447..133f26ca63424c 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -5,11 +5,13 @@ use { }, crossbeam_channel::Sender, futures_util::stream::StreamExt, + indexmap::map::{Entry, IndexMap}, percentage::Percentage, quinn::{ Connecting, Connection, Endpoint, EndpointConfig, Incoming, IncomingUniStreams, NewConnection, VarInt, }, + rand::{thread_rng, Rng}, solana_perf::packet::PacketBatch, solana_sdk::{ packet::{Packet, PACKET_DATA_SIZE}, @@ -18,11 +20,10 @@ use { timing, }, std::{ - collections::{hash_map::Entry, HashMap}, net::{IpAddr, SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, RwLock, + Arc, Mutex, MutexGuard, RwLock, }, time::{Duration, Instant}, }, @@ -116,6 +117,21 @@ pub async fn run_server( } } +fn prune_unstaked_connection_table( + unstaked_connection_table: &mut MutexGuard, + max_unstaked_connections: usize, + stats: Arc, +) { + if unstaked_connection_table.total_size >= max_unstaked_connections { + const PRUNE_TABLE_TO_PERCENTAGE: u8 = 90; + let max_percentage_full = Percentage::from(PRUNE_TABLE_TO_PERCENTAGE); + + let max_connections = max_percentage_full.apply_to(max_unstaked_connections); + let num_pruned = unstaked_connection_table.prune_oldest(max_connections); + stats.num_evictions.fetch_add(num_pruned, Ordering::Relaxed); + } +} + async fn setup_connection( connection: Connecting, unstaked_connection_table: Arc>, @@ -138,70 +154,102 @@ async fn setup_connection( let remote_addr = connection.remote_address(); - let (mut connection_table_l, stake) = { - const PRUNE_TABLE_TO_PERCENTAGE: u8 = 90; - let max_percentage_full = Percentage::from(PRUNE_TABLE_TO_PERCENTAGE); - + let table_and_stake = { let staked_nodes = staked_nodes.read().unwrap(); if let Some(stake) = staked_nodes.stake_map.get(&remote_addr.ip()) { let stake = *stake; - let total_stake = staked_nodes.total_stake; drop(staked_nodes); + let mut connection_table_l = staked_connection_table.lock().unwrap(); if connection_table_l.total_size >= max_staked_connections { - let max_connections = max_percentage_full.apply_to(max_staked_connections); - let num_pruned = connection_table_l.prune_oldest(max_connections); - stats.num_evictions.fetch_add(num_pruned, Ordering::Relaxed); + let num_pruned = 
connection_table_l.prune_random(stake); + if num_pruned == 0 { + if max_unstaked_connections > 0 { + // If we couldn't prune a connection in the staked connection table, let's + // put this connection in the unstaked connection table. If needed, prune a + // connection from the unstaked connection table. + connection_table_l = unstaked_connection_table.lock().unwrap(); + prune_unstaked_connection_table( + &mut connection_table_l, + max_unstaked_connections, + stats.clone(), + ); + Some((connection_table_l, stake)) + } else { + stats + .connection_add_failed_on_pruning + .fetch_add(1, Ordering::Relaxed); + None + } + } else { + stats.num_evictions.fetch_add(num_pruned, Ordering::Relaxed); + Some((connection_table_l, stake)) + } + } else { + Some((connection_table_l, stake)) } - connection.set_max_concurrent_uni_streams( - VarInt::from_u64( - ((stake as f64 / total_stake as f64) * QUIC_TOTAL_STAKED_CONCURRENT_STREAMS) - as u64, - ) - .unwrap(), - ); - (connection_table_l, stake) - } else { + } else if max_unstaked_connections > 0 { drop(staked_nodes); let mut connection_table_l = unstaked_connection_table.lock().unwrap(); - if connection_table_l.total_size >= max_unstaked_connections { - let max_connections = max_percentage_full.apply_to(max_unstaked_connections); - let num_pruned = connection_table_l.prune_oldest(max_connections); - stats.num_evictions.fetch_add(num_pruned, Ordering::Relaxed); - } - connection.set_max_concurrent_uni_streams( - VarInt::from_u64(QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS as u64).unwrap(), + prune_unstaked_connection_table( + &mut connection_table_l, + max_unstaked_connections, + stats.clone(), ); - (connection_table_l, 0) + Some((connection_table_l, 0)) + } else { + None } }; - if stake != 0 || max_unstaked_connections > 0 { - if let Some((last_update, stream_exit)) = connection_table_l.try_add_connection( - &remote_addr, - Some(connection), - timing::timestamp(), - max_connections_per_ip, - ) { - let table_type = connection_table_l.peer_type; - drop(connection_table_l); - let stats = stats.clone(); - let connection_table = match table_type { - ConnectionPeerType::Unstaked => unstaked_connection_table.clone(), - ConnectionPeerType::Staked => staked_connection_table.clone(), - }; - tokio::spawn(handle_connection( - uni_streams, - packet_sender, - remote_addr, - last_update, - connection_table, - stream_exit, - stats, + if let Some((mut connection_table_l, stake)) = table_and_stake { + let table_type = connection_table_l.peer_type; + let max_uni_streams = match table_type { + ConnectionPeerType::Unstaked => { + VarInt::from_u64(QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS as u64) + } + ConnectionPeerType::Staked => { + let staked_nodes = staked_nodes.read().unwrap(); + VarInt::from_u64( + ((stake as f64 / staked_nodes.total_stake as f64) + * QUIC_TOTAL_STAKED_CONCURRENT_STREAMS) as u64, + ) + } + }; + + if let Ok(max_uni_streams) = max_uni_streams { + connection.set_max_concurrent_uni_streams(max_uni_streams); + + if let Some((last_update, stream_exit)) = connection_table_l.try_add_connection( + &remote_addr, + Some(connection), stake, - )); + timing::timestamp(), + max_connections_per_ip, + ) { + drop(connection_table_l); + let stats = stats.clone(); + let connection_table = match table_type { + ConnectionPeerType::Unstaked => unstaked_connection_table.clone(), + ConnectionPeerType::Staked => staked_connection_table.clone(), + }; + tokio::spawn(handle_connection( + uni_streams, + packet_sender, + remote_addr, + last_update, + connection_table, + stream_exit, + stats, + 
stake, + )); + } else { + stats.connection_add_failed.fetch_add(1, Ordering::Relaxed); + } } else { - stats.connection_add_failed.fetch_add(1, Ordering::Relaxed); + stats + .connection_add_failed_invalid_stream_count + .fetch_add(1, Ordering::Relaxed); } } else { connection.close(0u32.into(), &[0u8]); @@ -387,6 +435,7 @@ fn handle_chunk( #[derive(Debug)] struct ConnectionEntry { exit: Arc, + stake: u64, last_update: Arc, port: u16, connection: Option, @@ -395,12 +444,14 @@ struct ConnectionEntry { impl ConnectionEntry { fn new( exit: Arc, + stake: u64, last_update: Arc, port: u16, connection: Option, ) -> Self { Self { exit, + stake, last_update, port, connection, @@ -429,7 +480,7 @@ enum ConnectionPeerType { // Map of IP to list of connection entries struct ConnectionTable { - table: HashMap>, + table: IndexMap>, total_size: usize, peer_type: ConnectionPeerType, } @@ -439,7 +490,7 @@ struct ConnectionTable { impl ConnectionTable { fn new(peer_type: ConnectionPeerType) -> Self { Self { - table: HashMap::default(), + table: IndexMap::default(), total_size: 0, peer_type, } @@ -473,10 +524,47 @@ impl ConnectionTable { num_pruned } + fn connection_stake(&self, index: usize) -> Option { + self.table + .get_index(index) + .and_then(|(_, connection_vec)| connection_vec.first()) + .map(|connection| connection.stake) + } + + // Randomly select two connections, and evict the one with lower stake. If the stakes of both + // the connections are higher than the threshold_stake, reject the pruning attempt, and return 0. + fn prune_random(&mut self, threshold_stake: u64) -> usize { + let mut num_pruned = 0; + let mut rng = thread_rng(); + // The candidate1 and candidate2 could potentially be the same. If so, the stake of the candidate + // will be compared just against the threshold_stake. 
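+        // Comparing two uniformly random entries and evicting the lower-staked
+        // one ("power of two choices" sampling) biases eviction toward
+        // low-stake connections without keeping the table sorted by stake.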
+        let candidate1 = rng.gen_range(0, self.table.len());
+        let candidate2 = rng.gen_range(0, self.table.len());
+
+        let candidate1_stake = self.connection_stake(candidate1).unwrap_or(0);
+        let candidate2_stake = self.connection_stake(candidate2).unwrap_or(0);
+
+        if candidate1_stake < threshold_stake || candidate2_stake < threshold_stake {
+            let removed = if candidate1_stake < candidate2_stake {
+                self.table.swap_remove_index(candidate1)
+            } else {
+                self.table.swap_remove_index(candidate2)
+            };
+
+            if let Some((_, removed_value)) = removed {
+                self.total_size -= removed_value.len();
+                num_pruned += removed_value.len();
+            }
+        }
+
+        num_pruned
+    }
+
     fn try_add_connection(
         &mut self,
         addr: &SocketAddr,
         connection: Option<Connection>,
+        stake: u64,
         last_update: u64,
         max_connections_per_ip: usize,
     ) -> Option<(Arc<AtomicU64>, Arc<AtomicBool>)> {
@@ -491,6 +579,7 @@ impl ConnectionTable {
             let last_update = Arc::new(AtomicU64::new(last_update));
             connection_entry.push(ConnectionEntry::new(
                 exit.clone(),
+                stake,
                 last_update.clone(),
                 addr.port(),
                 connection,
@@ -818,7 +907,7 @@ pub mod test {
         staked_nodes
             .stake_map
             .insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 100000);
-        staked_nodes.total_stake = 100000_f64;
+        staked_nodes.total_stake = 100000;
 
         let (t, exit, receiver, server_address, stats) = setup_quic_server(Some(staked_nodes));
         check_multiple_writes(receiver, server_address).await;
@@ -918,12 +1007,12 @@ pub mod test {
             .collect();
         for (i, socket) in sockets.iter().enumerate() {
             table
-                .try_add_connection(socket, None, i as u64, max_connections_per_ip)
+                .try_add_connection(socket, None, 0, i as u64, max_connections_per_ip)
                 .unwrap();
         }
         num_entries += 1;
         table
-            .try_add_connection(&sockets[0], None, 5, max_connections_per_ip)
+            .try_add_connection(&sockets[0], None, 0, 5, max_connections_per_ip)
             .unwrap();
 
         let new_size = 3;
@@ -942,6 +1031,40 @@ pub mod test {
         assert_eq!(table.total_size, 0);
     }
 
+    #[test]
+    fn test_prune_table_random() {
+        use std::net::Ipv4Addr;
+        solana_logger::setup();
+        let mut table = ConnectionTable::new(ConnectionPeerType::Staked);
+        let num_entries = 5;
+        let max_connections_per_ip = 10;
+        let sockets: Vec<_> = (0..num_entries)
+            .into_iter()
+            .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
+            .collect();
+        for (i, socket) in sockets.iter().enumerate() {
+            table
+                .try_add_connection(
+                    socket,
+                    None,
+                    (i + 1) as u64,
+                    i as u64,
+                    max_connections_per_ip,
+                )
+                .unwrap();
+        }
+
+        // Try pruning with threshold stake less than all the entries in the table
+        // It should fail to prune (i.e. return 0 number of pruned entries)
+        let pruned = table.prune_random(0);
+        assert_eq!(pruned, 0);
+
+        // Try pruning with threshold stake higher than all the entries in the table
+        // It should succeed to prune (i.e.
return 1 number of pruned entries) + let pruned = table.prune_random(num_entries as u64 + 1); + assert_eq!(pruned, 1); + } + #[test] fn test_remove_connections() { use std::net::Ipv4Addr; @@ -955,11 +1078,11 @@ pub mod test { .collect(); for (i, socket) in sockets.iter().enumerate() { table - .try_add_connection(socket, None, (i * 2) as u64, max_connections_per_ip) + .try_add_connection(socket, None, 0, (i * 2) as u64, max_connections_per_ip) .unwrap(); table - .try_add_connection(socket, None, (i * 2 + 1) as u64, max_connections_per_ip) + .try_add_connection(socket, None, 0, (i * 2 + 1) as u64, max_connections_per_ip) .unwrap(); } @@ -969,6 +1092,7 @@ pub mod test { .try_add_connection( &single_connection_addr, None, + 0, (num_ips * 2) as u64, max_connections_per_ip, ) diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index f1c17c15655b90..2b49f05c5bba32 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -158,7 +158,9 @@ pub struct StreamStats { pub(crate) total_stream_read_timeouts: AtomicUsize, pub(crate) num_evictions: AtomicUsize, pub(crate) connection_add_failed: AtomicUsize, + pub(crate) connection_add_failed_invalid_stream_count: AtomicUsize, pub(crate) connection_add_failed_unstaked_node: AtomicUsize, + pub(crate) connection_add_failed_on_pruning: AtomicUsize, pub(crate) connection_setup_timeout: AtomicUsize, pub(crate) connection_removed: AtomicUsize, pub(crate) connection_remove_failed: AtomicUsize, @@ -198,12 +200,24 @@ impl StreamStats { self.connection_add_failed.swap(0, Ordering::Relaxed), i64 ), + ( + "connection_add_failed_invalid_stream_count", + self.connection_add_failed_invalid_stream_count + .swap(0, Ordering::Relaxed), + i64 + ), ( "connection_add_failed_unstaked_node", self.connection_add_failed_unstaked_node .swap(0, Ordering::Relaxed), i64 ), + ( + "connection_add_failed_on_pruning", + self.connection_add_failed_on_pruning + .swap(0, Ordering::Relaxed), + i64 + ), ( "connection_removed", self.connection_removed.swap(0, Ordering::Relaxed), diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 18383181127043..4dad6135356e1c 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -27,7 +27,7 @@ use { // Total stake and nodes => stake map #[derive(Default)] pub struct StakedNodes { - pub total_stake: f64, + pub total_stake: u64, pub stake_map: HashMap, } From fdb186ba3b9126c44ce64411cbe7efc8f1fcb1c4 Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Fri, 1 Jul 2022 02:14:33 +0000 Subject: [PATCH 003/100] pacify nightly clippy --- cli/src/cli.rs | 2 +- runtime/benches/bank.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 2d8807fca909dd..6e360dee084792 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1923,7 +1923,7 @@ mod tests { assert_eq!( parse_command(&test_resolve_signer, &default_signer, &mut None).unwrap(), CliCommandInfo { - command: CliCommand::ResolveSigner(Some(keypair_file.clone())), + command: CliCommand::ResolveSigner(Some(keypair_file)), signers: vec![], } ); diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index 38cf6bde58d6da..139cd15692414a 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -119,6 +119,7 @@ fn async_bencher(bank: &Arc, bank_client: &BankClient, transactions: &[Tra } } +#[allow(clippy::type_complexity)] fn do_bench_transactions( bencher: &mut Bencher, bench_work: &dyn Fn(&Arc, &BankClient, &[Transaction]), From 3fcdc45092b969baeb7273de6596399d98277366 Mon Sep 17 00:00:00 2001 From: Mark 
Daniel <83738282+Mark-777-0@users.noreply.github.com> Date: Thu, 30 Jun 2022 23:20:22 -0700 Subject: [PATCH 004/100] fix: web3.js; maxRetries no longer stripped when zero (#26345) Co-authored-by: Mark D <83738282+markusmark1@users.noreply.github.com> --- web3.js/src/connection.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web3.js/src/connection.ts b/web3.js/src/connection.ts index 3f20a81f0efe68..2906864b1d1e29 100644 --- a/web3.js/src/connection.ts +++ b/web3.js/src/connection.ts @@ -4442,7 +4442,7 @@ export class Connection { const preflightCommitment = (options && options.preflightCommitment) || this.commitment; - if (options && options.maxRetries) { + if (options && options.maxRetries != null) { config.maxRetries = options.maxRetries; } if (options && options.minContextSlot != null) { From 47c596ff121a646cee0592f13e40e17c1553afc6 Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Fri, 1 Jul 2022 00:55:32 -0700 Subject: [PATCH 005/100] Bump --contact-debug-interval default to 2m from 10s --- validator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index 69fa203434098a..62a18b8a22bd8b 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -942,7 +942,7 @@ pub fn main() { .long("contact-debug-interval") .value_name("CONTACT_DEBUG_INTERVAL") .takes_value(true) - .default_value("10000") + .default_value("120000") .help("Milliseconds between printing contact debug from gossip."), ) .arg( From d6b53fb1736b892e25fa82bb2070f66a33c4bc1d Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Fri, 1 Jul 2022 10:42:23 -0600 Subject: [PATCH 006/100] BigtableUploadService: recheck first_available_block (#26347) Recheck first_available_block --- ledger/src/bigtable_upload_service.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ledger/src/bigtable_upload_service.rs b/ledger/src/bigtable_upload_service.rs index 22d149d3249b10..812f87cf8704a9 100644 --- a/ledger/src/bigtable_upload_service.rs +++ b/ledger/src/bigtable_upload_service.rs @@ -112,6 +112,9 @@ impl BigTableUploadService { Err(err) => { warn!("bigtable: upload_confirmed_blocks: {}", err); std::thread::sleep(std::time::Duration::from_secs(2)); + if start_slot == 0 { + start_slot = blockstore.get_first_available_block().unwrap_or_default(); + } } } } From 728986ddf4e054a77747e9f748951ad631d7cc7f Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 1 Jul 2022 12:39:35 -0500 Subject: [PATCH 007/100] Spell correction and add remove slot in AccountDB purge assert message (#26358) nit spell. 
add remove_slot in AccountsDb purge assert message
---
 runtime/src/accounts_db.rs    | 6 +++++-
 runtime/src/accounts_index.rs | 4 ++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index dc2a8f44ec7494..e65e7290f785e4 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -5166,7 +5166,11 @@ impl AccountsDb {
             .fetch_add(handle_reclaims_elapsed.as_us(), Ordering::Relaxed);
         // After handling the reclaimed entries, this slot's
         // storage entries should be purged from self.storage
-        assert!(self.storage.get_slot_stores(remove_slot).is_none());
+        assert!(
+            self.storage.get_slot_stores(remove_slot).is_none(),
+            "slot {} is not none",
+            remove_slot
+        );
     }
 
     #[allow(clippy::needless_collect)]
diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs
index bfd85d4defd084..cc6650af52d5d0 100644
--- a/runtime/src/accounts_index.rs
+++ b/runtime/src/accounts_index.rs
@@ -1711,9 +1711,9 @@ impl AccountsIndex {
             is_slot_list_empty = slot_list.is_empty();
         });
 
-        // If the slot list is empty, remove the pubkey from `account_maps`.  Make sure to grab the
+        // If the slot list is empty, remove the pubkey from `account_maps`. Make sure to grab the
         // lock and double check the slot list is still empty, because another writer could have
-        // locked and inserted the pubkey inbetween when `is_slot_list_empty=true` and the call to
+        // locked and inserted the pubkey in-between when `is_slot_list_empty=true` and the call to
         // remove() below.
         if is_slot_list_empty {
             let w_maps = self.get_account_maps_write_lock(pubkey);

From 88b5b7e30a0e94d1108ff6bf0a89756fc41cf2e3 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Fri, 1 Jul 2022 13:12:52 -0500
Subject: [PATCH 008/100] improve log (#26360)

---
 runtime/src/accounts.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs
index 47f9a21235a39d..2b9040930a7f9e 100644
--- a/runtime/src/accounts.rs
+++ b/runtime/src/accounts.rs
@@ -827,7 +827,7 @@ impl Accounts {
                 can_cached_slot_be_unflushed,
                 ignore_mismatch,
             ) {
-                warn!("verify_bank_hash failed: {:?}", err);
+                warn!("verify_bank_hash failed: {:?}, slot: {}", err, slot);
                 false
             } else {
                 true

From e24b64dd7152c6c15e6cf83f84fe46caac70f01f Mon Sep 17 00:00:00 2001
From: steveluscher
Date: Fri, 1 Jul 2022 22:23:25 -0700
Subject: [PATCH 009/100] fix: `getPerformanceSamples` no longer breaks when the connection has a default commitment

---
 web3.js/src/connection.ts       | 10 ++++--
 web3.js/test/connection.test.ts | 54 ++++++++++++++++++++-------------
 2 files changed, 40 insertions(+), 24 deletions(-)

diff --git a/web3.js/src/connection.ts b/web3.js/src/connection.ts
index 2906864b1d1e29..5bf6e7287ebdd5 100644
--- a/web3.js/src/connection.ts
+++ b/web3.js/src/connection.ts
@@ -195,6 +195,9 @@ type RpcRequest = (methodName: string, args: Array<any>) => Promise<any>;
 
 type RpcBatchRequest = (requests: RpcParams[]) => Promise<any[]>;
 
+const NO_COMMITMENT =
+  typeof Symbol === 'function' ? Symbol('NO_COMMITMENT') : ({} as const);
+
 /**
  * @internal
  */
@@ -3431,7 +3434,7 @@ export class Connection {
   async getRecentPerformanceSamples(
     limit?: number,
   ): Promise<Array<PerfSample>> {
-    const args = this._buildArgs(limit ? [limit] : []);
+    const args = this._buildArgs(limit ?
[limit] : [], NO_COMMITMENT); const unsafeRes = await this._rpcRequest( 'getRecentPerformanceSamples', args, @@ -5062,11 +5065,12 @@ export class Connection { _buildArgs( args: Array, - override?: Commitment, + override?: Commitment | typeof NO_COMMITMENT, encoding?: 'jsonParsed' | 'base64', extra?: any, ): Array { - const commitment = override || this._commitment; + const commitment = + override === NO_COMMITMENT ? undefined : override || this._commitment; if (commitment || encoding || extra) { let options: any = {}; if (encoding) { diff --git a/web3.js/test/connection.test.ts b/web3.js/test/connection.test.ts index b796b2a29f8bca..a9d18af911081a 100644 --- a/web3.js/test/connection.test.ts +++ b/web3.js/test/connection.test.ts @@ -3154,29 +3154,41 @@ describe('Connection', function () { expect(supply.nonCirculatingAccounts.length).to.eq(0); }); - it('get performance samples', async () => { - await mockRpcResponse({ - method: 'getRecentPerformanceSamples', - params: [], - value: [ - { - slot: 1234, - numTransactions: 1000, - numSlots: 60, - samplePeriodSecs: 60, - }, - ], - }); + [undefined, 'confirmed' as Commitment].forEach(function (commitment) { + describe.only( + "when the connection's default commitment is `" + commitment + '`', + () => { + let connectionWithCommitment: Connection; + beforeEach(() => { + connectionWithCommitment = new Connection(url, commitment); + }); + it('get performance samples', async () => { + await mockRpcResponse({ + method: 'getRecentPerformanceSamples', + params: [], + value: [ + { + slot: 1234, + numTransactions: 1000, + numSlots: 60, + samplePeriodSecs: 60, + }, + ], + }); - const perfSamples = await connection.getRecentPerformanceSamples(); - expect(Array.isArray(perfSamples)).to.be.true; + const perfSamples = + await connectionWithCommitment.getRecentPerformanceSamples(); + expect(Array.isArray(perfSamples)).to.be.true; - if (perfSamples.length > 0) { - expect(perfSamples[0].slot).to.be.greaterThan(0); - expect(perfSamples[0].numTransactions).to.be.greaterThan(0); - expect(perfSamples[0].numSlots).to.be.greaterThan(0); - expect(perfSamples[0].samplePeriodSecs).to.be.greaterThan(0); - } + if (perfSamples.length > 0) { + expect(perfSamples[0].slot).to.be.greaterThan(0); + expect(perfSamples[0].numTransactions).to.be.greaterThan(0); + expect(perfSamples[0].numSlots).to.be.greaterThan(0); + expect(perfSamples[0].samplePeriodSecs).to.be.greaterThan(0); + } + }); + }, + ); }); it('get performance samples limit too high', async () => { From b1f8baf6ad53f79cd90d13128c18c8a2d5a693ca Mon Sep 17 00:00:00 2001 From: steveluscher Date: Fri, 1 Jul 2022 22:31:02 -0700 Subject: [PATCH 010/100] test: maybe don't disable all the web3.js tests --- web3.js/test/connection.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web3.js/test/connection.test.ts b/web3.js/test/connection.test.ts index a9d18af911081a..c7520aa9ac1b2f 100644 --- a/web3.js/test/connection.test.ts +++ b/web3.js/test/connection.test.ts @@ -3155,7 +3155,7 @@ describe('Connection', function () { }); [undefined, 'confirmed' as Commitment].forEach(function (commitment) { - describe.only( + describe( "when the connection's default commitment is `" + commitment + '`', () => { let connectionWithCommitment: Connection; From 6b82235fab155ef80e990309d693bf2045e2ee4e Mon Sep 17 00:00:00 2001 From: steveluscher Date: Fri, 1 Jul 2022 22:34:05 -0700 Subject: [PATCH 011/100] fix: a much simpler way to prevent getRecentPerformanceSamples from sending a commitment --- web3.js/src/connection.ts | 
11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/web3.js/src/connection.ts b/web3.js/src/connection.ts index 5bf6e7287ebdd5..c398d02423a5bb 100644 --- a/web3.js/src/connection.ts +++ b/web3.js/src/connection.ts @@ -195,9 +195,6 @@ type RpcRequest = (methodName: string, args: Array) => Promise; type RpcBatchRequest = (requests: RpcParams[]) => Promise; -const NO_COMMITMENT = - typeof Symbol === 'function' ? Symbol('NO_COMMITMENT') : ({} as const); - /** * @internal */ @@ -3434,10 +3431,9 @@ export class Connection { async getRecentPerformanceSamples( limit?: number, ): Promise> { - const args = this._buildArgs(limit ? [limit] : [], NO_COMMITMENT); const unsafeRes = await this._rpcRequest( 'getRecentPerformanceSamples', - args, + limit ? [limit] : [], ); const res = create(unsafeRes, GetRecentPerformanceSamplesRpcResult); if ('error' in res) { @@ -5065,12 +5061,11 @@ export class Connection { _buildArgs( args: Array, - override?: Commitment | typeof NO_COMMITMENT, + override?: Commitment, encoding?: 'jsonParsed' | 'base64', extra?: any, ): Array { - const commitment = - override === NO_COMMITMENT ? undefined : override || this._commitment; + const commitment = override || this._commitment; if (commitment || encoding || extra) { let options: any = {}; if (encoding) { From 5f3b7bdd166ebec9326d0bea9ff7428a3e2168e4 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sat, 2 Jul 2022 11:50:01 -0500 Subject: [PATCH 012/100] prevent ledger tool from calculating hash on non-rooted slots (#26355) --- ledger/src/blockstore_processor.rs | 10 +++- runtime/src/bank.rs | 77 ++++++++++++++++++++++-------- 2 files changed, 65 insertions(+), 22 deletions(-) diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 72fcc65959e88e..28854bdacf5a97 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -23,7 +23,7 @@ use { accounts_update_notifier_interface::AccountsUpdateNotifier, bank::{ Bank, RentDebits, TransactionBalancesSet, TransactionExecutionDetails, - TransactionExecutionResult, TransactionResults, + TransactionExecutionResult, TransactionResults, VerifyBankHash, }, bank_forks::BankForks, bank_utils, @@ -1422,7 +1422,13 @@ fn load_frozen_forks( if slot >= halt_at_slot { bank.force_flush_accounts_cache(); let can_cached_slot_be_unflushed = true; - let _ = bank.verify_bank_hash(false, can_cached_slot_be_unflushed, true); + // note that this slot may not be a root + let _ = bank.verify_bank_hash(VerifyBankHash { + test_hash_calculation: false, + can_cached_slot_be_unflushed, + ignore_mismatch: true, + require_rooted_bank: false, + }); break; } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index a1f346d5e317e5..44d993c24dc7b3 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -169,6 +169,14 @@ use { }, }; +/// params to `verify_bank_hash` +pub struct VerifyBankHash { + pub test_hash_calculation: bool, + pub can_cached_slot_be_unflushed: bool, + pub ignore_mismatch: bool, + pub require_rooted_bank: bool, +} + #[derive(Debug, Default)] struct RewardsMetrics { load_vote_and_stake_accounts_us: AtomicU64, @@ -6699,21 +6707,34 @@ impl Bank { /// snapshot. /// Only called from startup or test code. 
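+    /// If `require_rooted_bank` is set and this bank is not a root, the check
+    /// is retried on the bank's rooted ancestor, since the accounts hash
+    /// calculation does not include unrooted slots; it panics if no parent
+    /// bank exists.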
#[must_use] - pub fn verify_bank_hash( - &self, - test_hash_calculation: bool, - can_cached_slot_be_unflushed: bool, - ignore_mismatch: bool, - ) -> bool { + pub fn verify_bank_hash(&self, config: VerifyBankHash) -> bool { + if config.require_rooted_bank + && !self + .rc + .accounts + .accounts_db + .accounts_index + .is_alive_root(self.slot()) + { + if let Some(parent) = self.parent() { + info!("{} is not a root, so attempting to verify bank hash on parent bank at slot: {}", self.slot(), parent.slot()); + return parent.verify_bank_hash(config); + } else { + // this will result in mismatch errors + // accounts hash calc doesn't include unrooted slots + panic!("cannot verify bank hash when bank is not a root"); + } + } + self.rc.accounts.verify_bank_hash_and_lamports( self.slot(), &self.ancestors, self.capitalization(), - test_hash_calculation, + config.test_hash_calculation, self.epoch_schedule(), &self.rent_collector, - can_cached_slot_be_unflushed, - ignore_mismatch, + config.can_cached_slot_be_unflushed, + config.ignore_mismatch, ) } @@ -6943,7 +6964,12 @@ impl Bank { let (mut verify, verify_time_us) = if !self.rc.accounts.accounts_db.skip_initial_hash_calc { info!("verify_bank_hash.."); let mut verify_time = Measure::start("verify_bank_hash"); - let verify = self.verify_bank_hash(test_hash_calculation, false, false); + let verify = self.verify_bank_hash(VerifyBankHash { + test_hash_calculation, + can_cached_slot_be_unflushed: false, + ignore_mismatch: false, + require_rooted_bank: false, + }); verify_time.stop(); (verify, verify_time.as_us()) } else { @@ -10132,6 +10158,17 @@ pub(crate) mod tests { } } + impl VerifyBankHash { + fn default_for_test() -> Self { + Self { + test_hash_calculation: true, + can_cached_slot_be_unflushed: false, + ignore_mismatch: false, + require_rooted_bank: false, + } + } + } + // Test that purging 0 lamports accounts works. 
#[test] fn test_purge_empty_accounts() { @@ -10195,17 +10232,17 @@ pub(crate) mod tests { ); assert_eq!(bank1.get_account(&keypair.pubkey()), None); - assert!(bank0.verify_bank_hash(true, false, false)); + assert!(bank0.verify_bank_hash(VerifyBankHash::default_for_test())); // Squash and then verify hash_internal value bank0.freeze(); bank0.squash(); - assert!(bank0.verify_bank_hash(true, false, false)); + assert!(bank0.verify_bank_hash(VerifyBankHash::default_for_test())); bank1.freeze(); bank1.squash(); bank1.update_accounts_hash(); - assert!(bank1.verify_bank_hash(true, false, false)); + assert!(bank1.verify_bank_hash(VerifyBankHash::default_for_test())); // keypair should have 0 tokens on both forks assert_eq!(bank0.get_account(&keypair.pubkey()), None); @@ -10213,7 +10250,7 @@ pub(crate) mod tests { bank1.force_flush_accounts_cache(); bank1.clean_accounts(false, false, None); - assert!(bank1.verify_bank_hash(true, false, false)); + assert!(bank1.verify_bank_hash(VerifyBankHash::default_for_test())); } #[test] @@ -11320,7 +11357,7 @@ pub(crate) mod tests { info!("transfer 2 {}", pubkey2); bank2.transfer(amount, &mint_keypair, &pubkey2).unwrap(); bank2.update_accounts_hash(); - assert!(bank2.verify_bank_hash(true, false, false)); + assert!(bank2.verify_bank_hash(VerifyBankHash::default_for_test())); } #[test] @@ -11345,19 +11382,19 @@ pub(crate) mod tests { // Checkpointing should never modify the checkpoint's state once frozen let bank0_state = bank0.hash_internal_state(); bank2.update_accounts_hash(); - assert!(bank2.verify_bank_hash(true, false, false)); + assert!(bank2.verify_bank_hash(VerifyBankHash::default_for_test())); let bank3 = Bank::new_from_parent(&bank0, &solana_sdk::pubkey::new_rand(), 2); assert_eq!(bank0_state, bank0.hash_internal_state()); - assert!(bank2.verify_bank_hash(true, false, false)); + assert!(bank2.verify_bank_hash(VerifyBankHash::default_for_test())); bank3.update_accounts_hash(); - assert!(bank3.verify_bank_hash(true, false, false)); + assert!(bank3.verify_bank_hash(VerifyBankHash::default_for_test())); let pubkey2 = solana_sdk::pubkey::new_rand(); info!("transfer 2 {}", pubkey2); bank2.transfer(amount, &mint_keypair, &pubkey2).unwrap(); bank2.update_accounts_hash(); - assert!(bank2.verify_bank_hash(true, false, false)); - assert!(bank3.verify_bank_hash(true, false, false)); + assert!(bank2.verify_bank_hash(VerifyBankHash::default_for_test())); + assert!(bank3.verify_bank_hash(VerifyBankHash::default_for_test())); } #[test] From 6f6e5172d3f443882d9128303da6a4ee916d62f4 Mon Sep 17 00:00:00 2001 From: steveluscher Date: Tue, 7 Jun 2022 20:08:52 -0700 Subject: [PATCH 013/100] fix: ingest only the relevant properties when constructing `Transactions` --- web3.js/src/transaction.ts | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/web3.js/src/transaction.ts b/web3.js/src/transaction.ts index 4a9c842f1120bc..686dd587a3c72b 100644 --- a/web3.js/src/transaction.ts +++ b/web3.js/src/transaction.ts @@ -258,17 +258,25 @@ export class Transaction { ) { if (!opts) { return; - } else if ( - Object.prototype.hasOwnProperty.call(opts, 'lastValidBlockHeight') - ) { - const newOpts = opts as TransactionBlockhashCtor; - Object.assign(this, newOpts); - this.recentBlockhash = newOpts.blockhash; - this.lastValidBlockHeight = newOpts.lastValidBlockHeight; + } + if (opts.feePayer) { + this.feePayer = opts.feePayer; + } + if (opts.signatures) { + this.signatures = opts.signatures; + } + if (Object.prototype.hasOwnProperty.call(opts, 
'lastValidBlockHeight')) { + const {blockhash, lastValidBlockHeight} = + opts as TransactionBlockhashCtor; + this.recentBlockhash = blockhash; + this.lastValidBlockHeight = lastValidBlockHeight; } else { - const oldOpts = opts as TransactionCtorFields_DEPRECATED; - Object.assign(this, oldOpts); - this.recentBlockhash = oldOpts.recentBlockhash; + const {recentBlockhash, nonceInfo} = + opts as TransactionCtorFields_DEPRECATED; + if (nonceInfo) { + this.nonceInfo = nonceInfo; + } + this.recentBlockhash = recentBlockhash; } } From 627d91fb20272afa5ce790fbec01b40a29fb9852 Mon Sep 17 00:00:00 2001 From: steveluscher Date: Tue, 7 Jun 2022 20:40:01 -0700 Subject: [PATCH 014/100] chore: move `checkBlockHeight` into block where it's used --- web3.js/src/connection.ts | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/web3.js/src/connection.ts b/web3.js/src/connection.ts index c398d02423a5bb..39fa03801d7347 100644 --- a/web3.js/src/connection.ts +++ b/web3.js/src/connection.ts @@ -3051,15 +3051,6 @@ export class Connection { } }); - const checkBlockHeight = async () => { - try { - const blockHeight = await this.getBlockHeight(commitment); - return blockHeight; - } catch (_e) { - return -1; - } - }; - const expiryPromise = new Promise< | {__type: TransactionStatus.BLOCKHEIGHT_EXCEEDED} | {__type: TransactionStatus.TIMED_OUT; timeoutMs: number} @@ -3088,6 +3079,14 @@ export class Connection { } else { let config = strategy as BlockheightBasedTransactionConfirmationStrategy; + const checkBlockHeight = async () => { + try { + const blockHeight = await this.getBlockHeight(commitment); + return blockHeight; + } catch (_e) { + return -1; + } + }; (async () => { let currentBlockHeight = await checkBlockHeight(); if (done) return; From 5450f978445cff9ac9959d47a7429e0b78c873e3 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Mon, 4 Jul 2022 15:23:42 +0200 Subject: [PATCH 015/100] explorer: Update address map program name to address lookup table (#26388) --- explorer/src/utils/tx.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/explorer/src/utils/tx.ts b/explorer/src/utils/tx.ts index 63a5a909fe7d32..0bf4dc17c5527f 100644 --- a/explorer/src/utils/tx.ts +++ b/explorer/src/utils/tx.ts @@ -23,7 +23,7 @@ import { TokenInfoMap } from "@solana/spl-token-registry"; export enum PROGRAM_NAMES { // native built-ins - ADDRESS_MAP = "Address Map Program", + ADDRESS_LOOKUP_TABLE = "Address Lookup Table Program", COMPUTE_BUDGET = "Compute Budget Program", CONFIG = "Config Program", STAKE = "Stake Program", @@ -114,8 +114,8 @@ export type ProgramInfo = { export const PROGRAM_INFO_BY_ID: { [address: string]: ProgramInfo } = { // native built-ins - AddressMap111111111111111111111111111111111: { - name: PROGRAM_NAMES.ADDRESS_MAP, + AddressLookupTab1e1111111111111111111111111: { + name: PROGRAM_NAMES.ADDRESS_LOOKUP_TABLE, deployments: ALL_CLUSTERS, }, ComputeBudget111111111111111111111111111111: { From 95ae82e074e5756cb60243d6d529fefcd72ff908 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 5 Jul 2022 09:23:06 -0500 Subject: [PATCH 016/100] add info to ledger tool for total execution time (#26368) --- ledger-tool/src/main.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 51f27492f9f375..27dec4d50264d8 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1156,6 +1156,7 @@ fn main() { .max(rent.minimum_balance(StakeState::size_of())) .to_string(); + let mut 
measure_total_execution_time = Measure::start("ledger tool"); let matches = App::new(crate_name!()) .about(crate_description!()) .version(solana_version::version!()) @@ -3715,5 +3716,7 @@ fn main() { } _ => unreachable!(), }; + measure_total_execution_time.stop(); + info!("{}", measure_total_execution_time); } } From 9ec38a3191dce8b08af91cbc0904b0c58254d406 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Tue, 5 Jul 2022 09:23:23 -0500 Subject: [PATCH 017/100] Cleanup snapshot integration tests (#26390) --- Cargo.lock | 23 + core/Cargo.toml | 1 + core/tests/snapshots.rs | 1952 +++++++++++++++++++-------------------- 3 files changed, 985 insertions(+), 991 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a40406c770771e..c9044dd13bf853 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4981,6 +4981,7 @@ dependencies = [ "sysctl", "systemstat", "tempfile", + "test-case", "thiserror", "tokio", "trees", @@ -6810,6 +6811,28 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" +[[package]] +name = "test-case" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196e8a70562e252cc51eaaaee3ecddc39803d9b7fd4a772b7c7dae7cdf42a859" +dependencies = [ + "test-case-macros", +] + +[[package]] +name = "test-case-macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dd461f47ade621665c9f4e44b20449341769911c253275dc5cb03726cbb852c" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro-error", + "proc-macro2 1.0.38", + "quote 1.0.18", + "syn 1.0.93", +] + [[package]] name = "textwrap" version = "0.11.0" diff --git a/core/Cargo.toml b/core/Cargo.toml index f706ac09c0ad47..781f8975b2e621 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -74,6 +74,7 @@ solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" } solana-stake-program = { path = "../programs/stake", version = "=1.11.2" } static_assertions = "1.1.0" systemstat = "0.1.11" +test-case = "2.1.0" [target."cfg(unix)".dependencies] sysctl = "0.4.4" diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index d6b65214c53a11..2ad8c06235bbd9 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -1,1079 +1,1049 @@ -// Long-running bank_forks tests #![allow(clippy::integer_arithmetic)] -macro_rules! 
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS { - ($x:ident, $y:ident, $z:ident) => { - #[allow(non_snake_case)] - mod $z { - use super::*; - - const SNAPSHOT_VERSION: SnapshotVersion = SnapshotVersion::$x; - const CLUSTER_TYPE: ClusterType = ClusterType::$y; - - #[test] - fn test_bank_forks_status_cache_snapshot_n() { - run_test_bank_forks_status_cache_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE) - } - - #[test] - fn test_bank_forks_snapshot_n() { - run_test_bank_forks_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE) - } - - #[test] - fn test_concurrent_snapshot_packaging() { - run_test_concurrent_snapshot_packaging(SNAPSHOT_VERSION, CLUSTER_TYPE) - } - - #[test] - fn test_slots_to_snapshot() { - run_test_slots_to_snapshot(SNAPSHOT_VERSION, CLUSTER_TYPE) - } - - #[test] - fn test_bank_forks_incremental_snapshot_n() { - run_test_bank_forks_incremental_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE) - } - - #[test] - fn test_snapshots_with_background_services() { - run_test_snapshots_with_background_services(SNAPSHOT_VERSION, CLUSTER_TYPE) - } - } - }; -} - -#[cfg(test)] -mod tests { - use { - bincode::serialize_into, - crossbeam_channel::unbounded, - fs_extra::dir::CopyOptions, - itertools::Itertools, - log::{info, trace}, - solana_core::{ - accounts_hash_verifier::AccountsHashVerifier, - snapshot_packager_service::SnapshotPackagerService, +use { + bincode::serialize_into, + crossbeam_channel::unbounded, + fs_extra::dir::CopyOptions, + itertools::Itertools, + log::{info, trace}, + solana_core::{ + accounts_hash_verifier::AccountsHashVerifier, + snapshot_packager_service::SnapshotPackagerService, + }, + solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_runtime::{ + accounts_background_service::{ + AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, SnapshotRequestHandler, }, - solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, - solana_runtime::{ - accounts_background_service::{ - AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, - SnapshotRequestHandler, - }, - accounts_db::{self, ACCOUNTS_DB_CONFIG_FOR_TESTING}, - accounts_index::AccountSecondaryIndexes, - bank::{Bank, BankSlotDelta}, - bank_forks::BankForks, - genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - snapshot_archive_info::FullSnapshotArchiveInfo, - snapshot_config::SnapshotConfig, - snapshot_package::{ - AccountsPackage, PendingAccountsPackage, PendingSnapshotPackage, SnapshotPackage, - SnapshotType, - }, - snapshot_utils::{self, ArchiveFormat, SnapshotVersion}, - status_cache::MAX_CACHE_ENTRIES, + accounts_db::{self, ACCOUNTS_DB_CONFIG_FOR_TESTING}, + accounts_index::AccountSecondaryIndexes, + bank::{Bank, BankSlotDelta}, + bank_forks::BankForks, + genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, + snapshot_archive_info::FullSnapshotArchiveInfo, + snapshot_config::SnapshotConfig, + snapshot_package::{ + AccountsPackage, PendingAccountsPackage, PendingSnapshotPackage, SnapshotPackage, + SnapshotType, }, - solana_sdk::{ - clock::Slot, - genesis_config::{ClusterType, GenesisConfig}, - hash::{hashv, Hash}, - pubkey::Pubkey, - signature::{Keypair, Signer}, - system_transaction, - timing::timestamp, + snapshot_utils::{ + self, ArchiveFormat, + SnapshotVersion::{self, V1_2_0}, }, - solana_streamer::socket::SocketAddrSpace, - std::{ - collections::HashSet, - fs, - io::{Error, ErrorKind}, - path::PathBuf, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, RwLock, - }, - time::Duration, + status_cache::MAX_CACHE_ENTRIES, + 
}, + solana_sdk::{ + clock::Slot, + genesis_config::{ + ClusterType::{self, Development, Devnet, MainnetBeta, Testnet}, + GenesisConfig, }, - tempfile::TempDir, - }; - - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Development, V1_2_0_Development); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Devnet, V1_2_0_Devnet); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Testnet, V1_2_0_Testnet); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, MainnetBeta, V1_2_0_MainnetBeta); - - struct SnapshotTestConfig { - accounts_dir: TempDir, - bank_snapshots_dir: TempDir, - full_snapshot_archives_dir: TempDir, - incremental_snapshot_archives_dir: TempDir, - snapshot_config: SnapshotConfig, - bank_forks: BankForks, - genesis_config_info: GenesisConfigInfo, - } - - impl SnapshotTestConfig { - fn new( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - accounts_hash_interval_slots: Slot, - full_snapshot_archive_interval_slots: Slot, - incremental_snapshot_archive_interval_slots: Slot, - ) -> SnapshotTestConfig { - let accounts_dir = TempDir::new().unwrap(); - let bank_snapshots_dir = TempDir::new().unwrap(); - let full_snapshot_archives_dir = TempDir::new().unwrap(); - let incremental_snapshot_archives_dir = TempDir::new().unwrap(); - // validator_stake_lamports should be non-zero otherwise stake - // account will not be stored in accounts-db but still cached in - // bank stakes which results in mismatch when banks are loaded from - // snapshots. - let mut genesis_config_info = create_genesis_config_with_leader( - 10_000, // mint_lamports - &solana_sdk::pubkey::new_rand(), // validator_pubkey - 1, // validator_stake_lamports - ); - genesis_config_info.genesis_config.cluster_type = cluster_type; - let bank0 = Bank::new_with_paths_for_tests( - &genesis_config_info.genesis_config, - vec![accounts_dir.path().to_path_buf()], - None, - None, - AccountSecondaryIndexes::default(), - false, - accounts_db::AccountShrinkThreshold::default(), - false, - ); - bank0.freeze(); - let mut bank_forks = BankForks::new(bank0); - bank_forks.accounts_hash_interval_slots = accounts_hash_interval_slots; - - let snapshot_config = SnapshotConfig { - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots, - full_snapshot_archives_dir: full_snapshot_archives_dir.path().to_path_buf(), - incremental_snapshot_archives_dir: incremental_snapshot_archives_dir - .path() - .to_path_buf(), - bank_snapshots_dir: bank_snapshots_dir.path().to_path_buf(), - snapshot_version, - ..SnapshotConfig::default() - }; - bank_forks.set_snapshot_config(Some(snapshot_config.clone())); - SnapshotTestConfig { - accounts_dir, - bank_snapshots_dir, - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, - snapshot_config, - bank_forks, - genesis_config_info, - } - } - } - - fn restore_from_snapshot( - old_bank_forks: &BankForks, - old_last_slot: Slot, - old_genesis_config: &GenesisConfig, - account_paths: &[PathBuf], - ) { - let full_snapshot_archives_dir = old_bank_forks - .snapshot_config - .as_ref() - .map(|c| &c.full_snapshot_archives_dir) - .unwrap(); - - let old_last_bank = old_bank_forks.get(old_last_slot).unwrap(); + hash::{hashv, Hash}, + pubkey::Pubkey, + signature::{Keypair, Signer}, + system_transaction, + timing::timestamp, + }, + solana_streamer::socket::SocketAddrSpace, + std::{ + collections::HashSet, + fs, + io::{Error, ErrorKind}, + path::PathBuf, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + 
time::Duration, + }, + tempfile::TempDir, + test_case::test_case, +}; + +struct SnapshotTestConfig { + accounts_dir: TempDir, + bank_snapshots_dir: TempDir, + full_snapshot_archives_dir: TempDir, + incremental_snapshot_archives_dir: TempDir, + snapshot_config: SnapshotConfig, + bank_forks: BankForks, + genesis_config_info: GenesisConfigInfo, +} - let check_hash_calculation = false; - let full_snapshot_archive_path = snapshot_utils::build_full_snapshot_archive_path( - full_snapshot_archives_dir, - old_last_bank.slot(), - &old_last_bank.get_accounts_hash(), - ArchiveFormat::TarBzip2, +impl SnapshotTestConfig { + fn new( + snapshot_version: SnapshotVersion, + cluster_type: ClusterType, + accounts_hash_interval_slots: Slot, + full_snapshot_archive_interval_slots: Slot, + incremental_snapshot_archive_interval_slots: Slot, + ) -> SnapshotTestConfig { + let accounts_dir = TempDir::new().unwrap(); + let bank_snapshots_dir = TempDir::new().unwrap(); + let full_snapshot_archives_dir = TempDir::new().unwrap(); + let incremental_snapshot_archives_dir = TempDir::new().unwrap(); + // validator_stake_lamports should be non-zero otherwise stake + // account will not be stored in accounts-db but still cached in + // bank stakes which results in mismatch when banks are loaded from + // snapshots. + let mut genesis_config_info = create_genesis_config_with_leader( + 10_000, // mint_lamports + &solana_sdk::pubkey::new_rand(), // validator_pubkey + 1, // validator_stake_lamports ); - let full_snapshot_archive_info = - FullSnapshotArchiveInfo::new_from_path(full_snapshot_archive_path).unwrap(); - - let (deserialized_bank, _timing) = snapshot_utils::bank_from_snapshot_archives( - account_paths, - &old_bank_forks - .snapshot_config - .as_ref() - .unwrap() - .bank_snapshots_dir, - &full_snapshot_archive_info, - None, - old_genesis_config, + genesis_config_info.genesis_config.cluster_type = cluster_type; + let bank0 = Bank::new_with_paths_for_tests( + &genesis_config_info.genesis_config, + vec![accounts_dir.path().to_path_buf()], None, None, AccountSecondaryIndexes::default(), false, - None, accounts_db::AccountShrinkThreshold::default(), - check_hash_calculation, - false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - ) - .unwrap(); - - let bank = old_bank_forks.get(deserialized_bank.slot()).unwrap(); - assert_eq!(bank.as_ref(), &deserialized_bank); - } - - // creates banks up to "last_slot" and runs the input function `f` on each bank created - // also marks each bank as root and generates snapshots - // finally tries to restore from the last bank's snapshot and compares the restored bank to the - // `last_slot` bank - fn run_bank_forks_snapshot_n( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - last_slot: Slot, - f: F, - set_root_interval: u64, - ) where - F: Fn(&mut Bank, &Keypair), - { - solana_logger::setup(); - // Set up snapshotting config - let mut snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, - cluster_type, - set_root_interval, - set_root_interval, - Slot::MAX, ); - - let bank_forks = &mut snapshot_test_config.bank_forks; - let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; - - let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); - let request_sender = AbsRequestSender::new(snapshot_request_sender); - let snapshot_request_handler = SnapshotRequestHandler { - snapshot_config: snapshot_test_config.snapshot_config.clone(), - snapshot_request_receiver, - pending_accounts_package: PendingAccountsPackage::default(), + 
bank0.freeze(); + let mut bank_forks = BankForks::new(bank0); + bank_forks.accounts_hash_interval_slots = accounts_hash_interval_slots; + + let snapshot_config = SnapshotConfig { + full_snapshot_archive_interval_slots, + incremental_snapshot_archive_interval_slots, + full_snapshot_archives_dir: full_snapshot_archives_dir.path().to_path_buf(), + incremental_snapshot_archives_dir: incremental_snapshot_archives_dir + .path() + .to_path_buf(), + bank_snapshots_dir: bank_snapshots_dir.path().to_path_buf(), + snapshot_version, + ..SnapshotConfig::default() }; - for slot in 1..=last_slot { - let mut bank = Bank::new_from_parent(&bank_forks[slot - 1], &Pubkey::default(), slot); - f(&mut bank, mint_keypair); - let bank = bank_forks.insert(bank); - // Set root to make sure we don't end up with too many account storage entries - // and to allow snapshotting of bank and the purging logic on status_cache to - // kick in - if slot % set_root_interval == 0 || slot == last_slot { - // set_root should send a snapshot request - bank_forks.set_root(bank.slot(), &request_sender, None); - bank.update_accounts_hash(); - snapshot_request_handler.handle_snapshot_requests(false, false, 0, &mut None); - } - } - - // Generate a snapshot package for last bank - let last_bank = bank_forks.get(last_slot).unwrap(); - let snapshot_config = &snapshot_test_config.snapshot_config; - let bank_snapshots_dir = &snapshot_config.bank_snapshots_dir; - let last_bank_snapshot_info = - snapshot_utils::get_highest_bank_snapshot_pre(bank_snapshots_dir) - .expect("no bank snapshots found in path"); - let slot_deltas = last_bank.status_cache.read().unwrap().root_slot_deltas(); - let accounts_package = AccountsPackage::new( - &last_bank, - &last_bank_snapshot_info, + bank_forks.set_snapshot_config(Some(snapshot_config.clone())); + SnapshotTestConfig { + accounts_dir, bank_snapshots_dir, - slot_deltas, - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - last_bank.get_snapshot_storages(None), - ArchiveFormat::TarBzip2, - snapshot_version, - None, - Some(SnapshotType::FullSnapshot), - ) - .unwrap(); - solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( - accounts_package.snapshot_links.path(), - accounts_package.slot, - &last_bank.get_accounts_hash(), - ); - let snapshot_package = - SnapshotPackage::new(accounts_package, last_bank.get_accounts_hash()); - snapshot_utils::archive_snapshot_package( - &snapshot_package, - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - snapshot_config.maximum_full_snapshot_archives_to_retain, - snapshot_config.maximum_incremental_snapshot_archives_to_retain, - ) - .unwrap(); - - // Restore bank from snapshot - let account_paths = &[snapshot_test_config.accounts_dir.path().to_path_buf()]; - let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; - restore_from_snapshot(bank_forks, last_slot, genesis_config, account_paths); + full_snapshot_archives_dir, + incremental_snapshot_archives_dir, + snapshot_config, + bank_forks, + genesis_config_info, + } } +} - fn run_test_bank_forks_snapshot_n( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - ) { - // create banks up to slot 4 and create 1 new account in each bank. 
test that bank 4 snapshots - // and restores correctly - run_bank_forks_snapshot_n( - snapshot_version, - cluster_type, - 4, - |bank, mint_keypair| { - let key1 = Keypair::new().pubkey(); - let tx = - system_transaction::transfer(mint_keypair, &key1, 1, bank.last_blockhash()); - assert_eq!(bank.process_transaction(&tx), Ok(())); - - let key2 = Keypair::new().pubkey(); - let tx = - system_transaction::transfer(mint_keypair, &key2, 0, bank.last_blockhash()); - assert_eq!(bank.process_transaction(&tx), Ok(())); +fn restore_from_snapshot( + old_bank_forks: &BankForks, + old_last_slot: Slot, + old_genesis_config: &GenesisConfig, + account_paths: &[PathBuf], +) { + let full_snapshot_archives_dir = old_bank_forks + .snapshot_config + .as_ref() + .map(|c| &c.full_snapshot_archives_dir) + .unwrap(); - bank.freeze(); - }, - 1, - ); - } + let old_last_bank = old_bank_forks.get(old_last_slot).unwrap(); + + let check_hash_calculation = false; + let full_snapshot_archive_path = snapshot_utils::build_full_snapshot_archive_path( + full_snapshot_archives_dir, + old_last_bank.slot(), + &old_last_bank.get_accounts_hash(), + ArchiveFormat::TarBzip2, + ); + let full_snapshot_archive_info = + FullSnapshotArchiveInfo::new_from_path(full_snapshot_archive_path).unwrap(); + + let (deserialized_bank, _timing) = snapshot_utils::bank_from_snapshot_archives( + account_paths, + &old_bank_forks + .snapshot_config + .as_ref() + .unwrap() + .bank_snapshots_dir, + &full_snapshot_archive_info, + None, + old_genesis_config, + None, + None, + AccountSecondaryIndexes::default(), + false, + None, + accounts_db::AccountShrinkThreshold::default(), + check_hash_calculation, + false, + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + ) + .unwrap(); + + let bank = old_bank_forks.get(deserialized_bank.slot()).unwrap(); + assert_eq!(bank.as_ref(), &deserialized_bank); +} - fn goto_end_of_slot(bank: &mut Bank) { - let mut tick_hash = bank.last_blockhash(); - loop { - tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); - bank.register_tick(&tick_hash); - if tick_hash == bank.last_blockhash() { - bank.freeze(); - return; - } +// creates banks up to "last_slot" and runs the input function `f` on each bank created +// also marks each bank as root and generates snapshots +// finally tries to restore from the last bank's snapshot and compares the restored bank to the +// `last_slot` bank +fn run_bank_forks_snapshot_n( + snapshot_version: SnapshotVersion, + cluster_type: ClusterType, + last_slot: Slot, + f: F, + set_root_interval: u64, +) where + F: Fn(&mut Bank, &Keypair), +{ + solana_logger::setup(); + // Set up snapshotting config + let mut snapshot_test_config = SnapshotTestConfig::new( + snapshot_version, + cluster_type, + set_root_interval, + set_root_interval, + Slot::MAX, + ); + + let bank_forks = &mut snapshot_test_config.bank_forks; + let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; + + let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); + let request_sender = AbsRequestSender::new(snapshot_request_sender); + let snapshot_request_handler = SnapshotRequestHandler { + snapshot_config: snapshot_test_config.snapshot_config.clone(), + snapshot_request_receiver, + pending_accounts_package: PendingAccountsPackage::default(), + }; + for slot in 1..=last_slot { + let mut bank = Bank::new_from_parent(&bank_forks[slot - 1], &Pubkey::default(), slot); + f(&mut bank, mint_keypair); + let bank = bank_forks.insert(bank); + // Set root to make sure we don't end up with too many account storage 
entries + // and to allow snapshotting of bank and the purging logic on status_cache to + // kick in + if slot % set_root_interval == 0 || slot == last_slot { + // set_root should send a snapshot request + bank_forks.set_root(bank.slot(), &request_sender, None); + bank.update_accounts_hash(); + snapshot_request_handler.handle_snapshot_requests(false, false, 0, &mut None); } } - fn run_test_concurrent_snapshot_packaging( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - ) { - solana_logger::setup(); - - // Set up snapshotting config - let mut snapshot_test_config = - SnapshotTestConfig::new(snapshot_version, cluster_type, 1, 1, Slot::MAX); - - let bank_forks = &mut snapshot_test_config.bank_forks; - let snapshot_config = &snapshot_test_config.snapshot_config; - let bank_snapshots_dir = &snapshot_config.bank_snapshots_dir; - let full_snapshot_archives_dir = &snapshot_config.full_snapshot_archives_dir; - let incremental_snapshot_archives_dir = &snapshot_config.incremental_snapshot_archives_dir; - let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; - let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; - - // Take snapshot of zeroth bank - let bank0 = bank_forks.get(0).unwrap(); - let storages = bank0.get_snapshot_storages(None); - snapshot_utils::add_bank_snapshot(bank_snapshots_dir, &bank0, &storages, snapshot_version) - .unwrap(); - - // Set up snapshotting channels - let real_pending_accounts_package = PendingAccountsPackage::default(); - let fake_pending_accounts_package = PendingAccountsPackage::default(); - - // Create next MAX_CACHE_ENTRIES + 2 banks and snapshots. Every bank will get snapshotted - // and the snapshot purging logic will run on every snapshot taken. This means the three - // (including snapshot for bank0 created above) earliest snapshots will get purged by the - // time this loop is done. - - // Also, make a saved copy of the state of the snapshot for a bank with - // bank.slot == saved_slot, so we can use it for a correctness check later. 
- let saved_snapshots_dir = TempDir::new().unwrap(); - let saved_accounts_dir = TempDir::new().unwrap(); - let saved_slot = 4; - let mut saved_archive_path = None; + // Generate a snapshot package for last bank + let last_bank = bank_forks.get(last_slot).unwrap(); + let snapshot_config = &snapshot_test_config.snapshot_config; + let bank_snapshots_dir = &snapshot_config.bank_snapshots_dir; + let last_bank_snapshot_info = snapshot_utils::get_highest_bank_snapshot_pre(bank_snapshots_dir) + .expect("no bank snapshots found in path"); + let slot_deltas = last_bank.status_cache.read().unwrap().root_slot_deltas(); + let accounts_package = AccountsPackage::new( + &last_bank, + &last_bank_snapshot_info, + bank_snapshots_dir, + slot_deltas, + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + last_bank.get_snapshot_storages(None), + ArchiveFormat::TarBzip2, + snapshot_version, + None, + Some(SnapshotType::FullSnapshot), + ) + .unwrap(); + solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( + accounts_package.snapshot_links.path(), + accounts_package.slot, + &last_bank.get_accounts_hash(), + ); + let snapshot_package = SnapshotPackage::new(accounts_package, last_bank.get_accounts_hash()); + snapshot_utils::archive_snapshot_package( + &snapshot_package, + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + snapshot_config.maximum_full_snapshot_archives_to_retain, + snapshot_config.maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap(); + + // Restore bank from snapshot + let account_paths = &[snapshot_test_config.accounts_dir.path().to_path_buf()]; + let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; + restore_from_snapshot(bank_forks, last_slot, genesis_config, account_paths); +} - for forks in 0..snapshot_utils::MAX_BANK_SNAPSHOTS_TO_RETAIN + 2 { - let bank = Bank::new_from_parent( - &bank_forks[forks as u64], - &Pubkey::default(), - (forks + 1) as u64, - ); - let slot = bank.slot(); +#[test_case(V1_2_0, Development)] +#[test_case(V1_2_0, Devnet)] +#[test_case(V1_2_0, Testnet)] +#[test_case(V1_2_0, MainnetBeta)] +fn test_bank_forks_snapshot(snapshot_version: SnapshotVersion, cluster_type: ClusterType) { + // create banks up to slot 4 and create 1 new account in each bank. 
test that bank 4 snapshots + // and restores correctly + run_bank_forks_snapshot_n( + snapshot_version, + cluster_type, + 4, + |bank, mint_keypair| { let key1 = Keypair::new().pubkey(); - let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); + let tx = system_transaction::transfer(mint_keypair, &key1, 1, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); - bank.squash(); - - let pending_accounts_package = { - if slot == saved_slot as u64 { - // Only send one package on the real pending_accounts_package so that the - // packaging service doesn't take forever to run the packaging logic on all - // MAX_CACHE_ENTRIES later - &real_pending_accounts_package - } else { - &fake_pending_accounts_package - } - }; - - snapshot_utils::snapshot_bank( - &bank, - vec![], - pending_accounts_package, - bank_snapshots_dir, - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, - snapshot_config.snapshot_version, - snapshot_config.archive_format, - None, - Some(SnapshotType::FullSnapshot), - ) - .unwrap(); - bank_forks.insert(bank); - if slot == saved_slot as u64 { - // Find the relevant snapshot storages - let snapshot_storage_files: HashSet<_> = bank_forks[slot] - .get_snapshot_storages(None) - .into_iter() - .flatten() - .map(|s| s.get_path()) - .collect(); - - // Only save off the files returned by `get_snapshot_storages`. This is because - // some of the storage entries in the accounts directory may be filtered out by - // `get_snapshot_storages()` and will not be included in the snapshot. Ultimately, - // this means copying natively everything in `accounts_dir` to the `saved_accounts_dir` - // will lead to test failure by mismatch when `saved_accounts_dir` is compared to - // the unpacked snapshot later in this test's call to `verify_snapshot_archive()`. - for file in snapshot_storage_files { - fs::copy( - &file, - &saved_accounts_dir.path().join(file.file_name().unwrap()), - ) - .unwrap(); - } - let last_snapshot_path = fs::read_dir(bank_snapshots_dir) - .unwrap() - .filter_map(|entry| { - let e = entry.unwrap(); - let file_path = e.path(); - let file_name = file_path.file_name().unwrap(); - file_name - .to_str() - .map(|s| s.parse::().ok().map(|_| file_path.clone())) - .unwrap_or(None) - }) - .sorted() - .last() - .unwrap(); - // only save off the snapshot of this slot, we don't need the others. - let options = CopyOptions::new(); - fs_extra::dir::copy(&last_snapshot_path, &saved_snapshots_dir, &options).unwrap(); - - saved_archive_path = Some(snapshot_utils::build_full_snapshot_archive_path( - full_snapshot_archives_dir, - slot, - // this needs to match the hash value that we reserialize with later. It is complicated, so just use default. - // This hash value is just used to build the file name. Since this is mocked up test code, it is sufficient to pass default here. 
- &Hash::default(), - ArchiveFormat::TarBzip2, - )); - } - } + let key2 = Keypair::new().pubkey(); + let tx = system_transaction::transfer(mint_keypair, &key2, 0, bank.last_blockhash()); + assert_eq!(bank.process_transaction(&tx), Ok(())); - // Purge all the outdated snapshots, including the ones needed to generate the package - // currently sitting in the channel - snapshot_utils::purge_old_bank_snapshots(bank_snapshots_dir); + bank.freeze(); + }, + 1, + ); +} - let mut bank_snapshots = snapshot_utils::get_bank_snapshots_pre(&bank_snapshots_dir); - bank_snapshots.sort_unstable(); - assert!(bank_snapshots - .into_iter() - .map(|path| path.slot) - .eq(3..=snapshot_utils::MAX_BANK_SNAPSHOTS_TO_RETAIN as u64 + 2)); - - // Create a SnapshotPackagerService to create tarballs from all the pending - // SnapshotPackage's on the channel. By the time this service starts, we have already - // purged the first two snapshots, which are needed by every snapshot other than - // the last two snapshots. However, the packaging service should still be able to - // correctly construct the earlier snapshots because the SnapshotPackage's on the - // channel hold hard links to these deleted snapshots. We verify this is the case below. - let exit = Arc::new(AtomicBool::new(false)); - - let cluster_info = Arc::new(ClusterInfo::new( - ContactInfo::default(), - Arc::new(Keypair::new()), - SocketAddrSpace::Unspecified, - )); - - let pending_snapshot_package = PendingSnapshotPackage::default(); - let snapshot_packager_service = SnapshotPackagerService::new( - pending_snapshot_package.clone(), - None, - &exit, - &cluster_info, - snapshot_config.clone(), - true, - ); +fn goto_end_of_slot(bank: &mut Bank) { + let mut tick_hash = bank.last_blockhash(); + loop { + tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); + bank.register_tick(&tick_hash); + if tick_hash == bank.last_blockhash() { + bank.freeze(); + return; + } + } +} - let _package_receiver = std::thread::Builder::new() - .name("package-receiver".to_string()) - .spawn(move || { - let accounts_package = real_pending_accounts_package - .lock() - .unwrap() - .take() - .unwrap(); - solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( - accounts_package.snapshot_links.path(), - accounts_package.slot, - &Hash::default(), - ); - let snapshot_package = SnapshotPackage::new(accounts_package, Hash::default()); - pending_snapshot_package - .lock() - .unwrap() - .replace(snapshot_package); - - // Wait until the package is consumed by SnapshotPackagerService - while pending_snapshot_package.lock().unwrap().is_some() { - std::thread::sleep(Duration::from_millis(100)); - } - - // Shutdown SnapshotPackagerService - exit.store(true, Ordering::Relaxed); - }) - .unwrap(); +#[test_case(V1_2_0, Development)] +#[test_case(V1_2_0, Devnet)] +#[test_case(V1_2_0, Testnet)] +#[test_case(V1_2_0, MainnetBeta)] +fn test_concurrent_snapshot_packaging( + snapshot_version: SnapshotVersion, + cluster_type: ClusterType, +) { + solana_logger::setup(); + + // Set up snapshotting config + let mut snapshot_test_config = + SnapshotTestConfig::new(snapshot_version, cluster_type, 1, 1, Slot::MAX); + + let bank_forks = &mut snapshot_test_config.bank_forks; + let snapshot_config = &snapshot_test_config.snapshot_config; + let bank_snapshots_dir = &snapshot_config.bank_snapshots_dir; + let full_snapshot_archives_dir = &snapshot_config.full_snapshot_archives_dir; + let incremental_snapshot_archives_dir = &snapshot_config.incremental_snapshot_archives_dir; + let mint_keypair = 
&snapshot_test_config.genesis_config_info.mint_keypair; + let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; + + // Take snapshot of zeroth bank + let bank0 = bank_forks.get(0).unwrap(); + let storages = bank0.get_snapshot_storages(None); + snapshot_utils::add_bank_snapshot(bank_snapshots_dir, &bank0, &storages, snapshot_version) + .unwrap(); - // Wait for service to finish - snapshot_packager_service - .join() - .expect("SnapshotPackagerService exited with error"); + // Set up snapshotting channels + let real_pending_accounts_package = PendingAccountsPackage::default(); + let fake_pending_accounts_package = PendingAccountsPackage::default(); + + // Create next MAX_CACHE_ENTRIES + 2 banks and snapshots. Every bank will get snapshotted + // and the snapshot purging logic will run on every snapshot taken. This means the three + // (including snapshot for bank0 created above) earliest snapshots will get purged by the + // time this loop is done. + + // Also, make a saved copy of the state of the snapshot for a bank with + // bank.slot == saved_slot, so we can use it for a correctness check later. + let saved_snapshots_dir = TempDir::new().unwrap(); + let saved_accounts_dir = TempDir::new().unwrap(); + let saved_slot = 4; + let mut saved_archive_path = None; + + for forks in 0..snapshot_utils::MAX_BANK_SNAPSHOTS_TO_RETAIN + 2 { + let bank = Bank::new_from_parent( + &bank_forks[forks as u64], + &Pubkey::default(), + (forks + 1) as u64, + ); + let slot = bank.slot(); + let key1 = Keypair::new().pubkey(); + let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); + assert_eq!(bank.process_transaction(&tx), Ok(())); + bank.squash(); - // Check the archive we cached the state for earlier was generated correctly + let pending_accounts_package = { + if slot == saved_slot as u64 { + // Only send one package on the real pending_accounts_package so that the + // packaging service doesn't take forever to run the packaging logic on all + // MAX_CACHE_ENTRIES later + &real_pending_accounts_package + } else { + &fake_pending_accounts_package + } + }; - // before we compare, stick an empty status_cache in this dir so that the package comparison works - // This is needed since the status_cache is added by the packager and is not collected from - // the source dir for snapshots - snapshot_utils::serialize_snapshot_data_file( - &saved_snapshots_dir - .path() - .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME), - |stream| { - serialize_into(stream, &[] as &[BankSlotDelta])?; - Ok(()) - }, + snapshot_utils::snapshot_bank( + &bank, + vec![], + pending_accounts_package, + bank_snapshots_dir, + full_snapshot_archives_dir, + incremental_snapshot_archives_dir, + snapshot_config.snapshot_version, + snapshot_config.archive_format, + None, + Some(SnapshotType::FullSnapshot), ) .unwrap(); - // files were saved off before we reserialized the bank in the hacked up accounts_hash_verifier stand-in. - solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( - saved_snapshots_dir.path(), - saved_slot, - &Hash::default(), - ); + bank_forks.insert(bank); + if slot == saved_slot as u64 { + // Find the relevant snapshot storages + let snapshot_storage_files: HashSet<_> = bank_forks[slot] + .get_snapshot_storages(None) + .into_iter() + .flatten() + .map(|s| s.get_path()) + .collect(); + + // Only save off the files returned by `get_snapshot_storages`. 
This is because + // some of the storage entries in the accounts directory may be filtered out by + // `get_snapshot_storages()` and will not be included in the snapshot. Ultimately, + // this means copying natively everything in `accounts_dir` to the `saved_accounts_dir` + // will lead to test failure by mismatch when `saved_accounts_dir` is compared to + // the unpacked snapshot later in this test's call to `verify_snapshot_archive()`. + for file in snapshot_storage_files { + fs::copy( + &file, + &saved_accounts_dir.path().join(file.file_name().unwrap()), + ) + .unwrap(); + } + let last_snapshot_path = fs::read_dir(bank_snapshots_dir) + .unwrap() + .filter_map(|entry| { + let e = entry.unwrap(); + let file_path = e.path(); + let file_name = file_path.file_name().unwrap(); + file_name + .to_str() + .map(|s| s.parse::().ok().map(|_| file_path.clone())) + .unwrap_or(None) + }) + .sorted() + .last() + .unwrap(); + // only save off the snapshot of this slot, we don't need the others. + let options = CopyOptions::new(); + fs_extra::dir::copy(&last_snapshot_path, &saved_snapshots_dir, &options).unwrap(); - snapshot_utils::verify_snapshot_archive( - saved_archive_path.unwrap(), - saved_snapshots_dir.path(), - saved_accounts_dir.path(), - ArchiveFormat::TarBzip2, - snapshot_utils::VerifyBank::NonDeterministic(saved_slot), - ); + saved_archive_path = Some(snapshot_utils::build_full_snapshot_archive_path( + full_snapshot_archives_dir, + slot, + // this needs to match the hash value that we reserialize with later. It is complicated, so just use default. + // This hash value is just used to build the file name. Since this is mocked up test code, it is sufficient to pass default here. + &Hash::default(), + ArchiveFormat::TarBzip2, + )); + } } - fn run_test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: ClusterType) { - solana_logger::setup(); - let num_set_roots = MAX_CACHE_ENTRIES * 2; - - for add_root_interval in &[1, 3, 9] { - let (snapshot_sender, _snapshot_receiver) = unbounded(); - // Make sure this test never clears bank.slots_since_snapshot - let mut snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, - cluster_type, - (*add_root_interval * num_set_roots * 2) as Slot, - (*add_root_interval * num_set_roots * 2) as Slot, - Slot::MAX, + // Purge all the outdated snapshots, including the ones needed to generate the package + // currently sitting in the channel + snapshot_utils::purge_old_bank_snapshots(bank_snapshots_dir); + + let mut bank_snapshots = snapshot_utils::get_bank_snapshots_pre(&bank_snapshots_dir); + bank_snapshots.sort_unstable(); + assert!(bank_snapshots + .into_iter() + .map(|path| path.slot) + .eq(3..=snapshot_utils::MAX_BANK_SNAPSHOTS_TO_RETAIN as u64 + 2)); + + // Create a SnapshotPackagerService to create tarballs from all the pending + // SnapshotPackage's on the channel. By the time this service starts, we have already + // purged the first two snapshots, which are needed by every snapshot other than + // the last two snapshots. However, the packaging service should still be able to + // correctly construct the earlier snapshots because the SnapshotPackage's on the + // channel hold hard links to these deleted snapshots. We verify this is the case below. 
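+    // Why the hard links matter, in miniature: unlinking a file's original
+    // path does not reclaim its contents while another link (or open handle)
+    // still references the inode, which is what lets the packaging service
+    // read bank snapshots that purge_old_bank_snapshots() already deleted.
+    // A minimal illustrative sketch, never invoked by this test; the file
+    // names are hypothetical and it assumes a filesystem with hard-link
+    // support:
+    #[allow(dead_code)]
+    fn hard_link_outlives_purge() -> std::io::Result<()> {
+        std::fs::write("bank_snapshot_100", b"serialized bank state")?;
+        // The AccountsPackage-style link, created before any purging happens.
+        std::fs::hard_link("bank_snapshot_100", "package_link_100")?;
+        // Equivalent of purge_old_bank_snapshots() removing the original path.
+        std::fs::remove_file("bank_snapshot_100")?;
+        // The data is still fully readable through the surviving link.
+        assert_eq!(
+            std::fs::read("package_link_100")?,
+            b"serialized bank state".to_vec()
+        );
+        std::fs::remove_file("package_link_100")
+    }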
+ let exit = Arc::new(AtomicBool::new(false)); + + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::default(), + Arc::new(Keypair::new()), + SocketAddrSpace::Unspecified, + )); + + let pending_snapshot_package = PendingSnapshotPackage::default(); + let snapshot_packager_service = SnapshotPackagerService::new( + pending_snapshot_package.clone(), + None, + &exit, + &cluster_info, + snapshot_config.clone(), + true, + ); + + let _package_receiver = std::thread::Builder::new() + .name("package-receiver".to_string()) + .spawn(move || { + let accounts_package = real_pending_accounts_package + .lock() + .unwrap() + .take() + .unwrap(); + solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( + accounts_package.snapshot_links.path(), + accounts_package.slot, + &Hash::default(), ); - let mut current_bank = snapshot_test_config.bank_forks[0].clone(); - let request_sender = AbsRequestSender::new(snapshot_sender); - for _ in 0..num_set_roots { - for _ in 0..*add_root_interval { - let new_slot = current_bank.slot() + 1; - let new_bank = - Bank::new_from_parent(¤t_bank, &Pubkey::default(), new_slot); - snapshot_test_config.bank_forks.insert(new_bank); - current_bank = snapshot_test_config.bank_forks[new_slot].clone(); - } - snapshot_test_config.bank_forks.set_root( - current_bank.slot(), - &request_sender, - None, - ); + let snapshot_package = SnapshotPackage::new(accounts_package, Hash::default()); + pending_snapshot_package + .lock() + .unwrap() + .replace(snapshot_package); + + // Wait until the package is consumed by SnapshotPackagerService + while pending_snapshot_package.lock().unwrap().is_some() { + std::thread::sleep(Duration::from_millis(100)); } - let num_old_slots = num_set_roots * *add_root_interval - MAX_CACHE_ENTRIES + 1; - let expected_slots_to_snapshot = - num_old_slots as u64..=num_set_roots as u64 * *add_root_interval as u64; + // Shutdown SnapshotPackagerService + exit.store(true, Ordering::Relaxed); + }) + .unwrap(); - let slots_to_snapshot = snapshot_test_config - .bank_forks - .get(snapshot_test_config.bank_forks.root()) - .unwrap() - .status_cache - .read() - .unwrap() - .roots() - .iter() - .cloned() - .sorted(); - assert!(slots_to_snapshot.into_iter().eq(expected_slots_to_snapshot)); - } - } + // Wait for service to finish + snapshot_packager_service + .join() + .expect("SnapshotPackagerService exited with error"); + + // Check the archive we cached the state for earlier was generated correctly + + // before we compare, stick an empty status_cache in this dir so that the package comparison works + // This is needed since the status_cache is added by the packager and is not collected from + // the source dir for snapshots + snapshot_utils::serialize_snapshot_data_file( + &saved_snapshots_dir + .path() + .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME), + |stream| { + serialize_into(stream, &[] as &[BankSlotDelta])?; + Ok(()) + }, + ) + .unwrap(); + + // files were saved off before we reserialized the bank in the hacked up accounts_hash_verifier stand-in. 
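+    // Note the symmetry: saved_archive_path was built earlier with
+    // Hash::default() as its accounts hash, and the package-receiver thread
+    // above also reserialized with Hash::default(), so this reserialization
+    // of the saved copy must use the same value or the bank file names and
+    // contents checked by verify_snapshot_archive() below would disagree.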
+ solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( + saved_snapshots_dir.path(), + saved_slot, + &Hash::default(), + ); + + snapshot_utils::verify_snapshot_archive( + saved_archive_path.unwrap(), + saved_snapshots_dir.path(), + saved_accounts_dir.path(), + ArchiveFormat::TarBzip2, + snapshot_utils::VerifyBank::NonDeterministic(saved_slot), + ); +} - fn run_test_bank_forks_status_cache_snapshot_n( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - ) { - // create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time - // this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves - // ahead. Also tests the status_cache purge and status cache snapshotting. - // Makes sure that the last bank is restored correctly - let key1 = Keypair::new().pubkey(); - let key2 = Keypair::new().pubkey(); - for set_root_interval in &[1, 4] { - run_bank_forks_snapshot_n( - snapshot_version, - cluster_type, - (MAX_CACHE_ENTRIES * 2) as u64, - |bank, mint_keypair| { - let tx = system_transaction::transfer( - mint_keypair, - &key1, - 1, - bank.parent().unwrap().last_blockhash(), - ); - assert_eq!(bank.process_transaction(&tx), Ok(())); - let tx = system_transaction::transfer( - mint_keypair, - &key2, - 1, - bank.parent().unwrap().last_blockhash(), - ); - assert_eq!(bank.process_transaction(&tx), Ok(())); - goto_end_of_slot(bank); - }, - *set_root_interval, - ); +#[test_case(V1_2_0, Development)] +#[test_case(V1_2_0, Devnet)] +#[test_case(V1_2_0, Testnet)] +#[test_case(V1_2_0, MainnetBeta)] +fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: ClusterType) { + solana_logger::setup(); + let num_set_roots = MAX_CACHE_ENTRIES * 2; + + for add_root_interval in &[1, 3, 9] { + let (snapshot_sender, _snapshot_receiver) = unbounded(); + // Make sure this test never clears bank.slots_since_snapshot + let mut snapshot_test_config = SnapshotTestConfig::new( + snapshot_version, + cluster_type, + (*add_root_interval * num_set_roots * 2) as Slot, + (*add_root_interval * num_set_roots * 2) as Slot, + Slot::MAX, + ); + let mut current_bank = snapshot_test_config.bank_forks[0].clone(); + let request_sender = AbsRequestSender::new(snapshot_sender); + for _ in 0..num_set_roots { + for _ in 0..*add_root_interval { + let new_slot = current_bank.slot() + 1; + let new_bank = Bank::new_from_parent(¤t_bank, &Pubkey::default(), new_slot); + snapshot_test_config.bank_forks.insert(new_bank); + current_bank = snapshot_test_config.bank_forks[new_slot].clone(); + } + snapshot_test_config + .bank_forks + .set_root(current_bank.slot(), &request_sender, None); } - } - - fn run_test_bank_forks_incremental_snapshot_n( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - ) { - solana_logger::setup(); - const SET_ROOT_INTERVAL: Slot = 2; - const INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = SET_ROOT_INTERVAL * 2; - const FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 5; - const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2 - 1; + let num_old_slots = num_set_roots * *add_root_interval - MAX_CACHE_ENTRIES + 1; + let expected_slots_to_snapshot = + num_old_slots as u64..=num_set_roots as u64 * *add_root_interval as u64; - info!("Running bank forks incremental snapshot test, full snapshot interval: {} slots, incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, 
INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, LAST_SLOT, SET_ROOT_INTERVAL); + let slots_to_snapshot = snapshot_test_config + .bank_forks + .get(snapshot_test_config.bank_forks.root()) + .unwrap() + .status_cache + .read() + .unwrap() + .roots() + .iter() + .cloned() + .sorted(); + assert!(slots_to_snapshot.into_iter().eq(expected_slots_to_snapshot)); + } +} - let mut snapshot_test_config = SnapshotTestConfig::new( +#[test_case(V1_2_0, Development)] +#[test_case(V1_2_0, Devnet)] +#[test_case(V1_2_0, Testnet)] +#[test_case(V1_2_0, MainnetBeta)] +fn test_bank_forks_status_cache_snapshot( + snapshot_version: SnapshotVersion, + cluster_type: ClusterType, +) { + // create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time + // this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves + // ahead. Also tests the status_cache purge and status cache snapshotting. + // Makes sure that the last bank is restored correctly + let key1 = Keypair::new().pubkey(); + let key2 = Keypair::new().pubkey(); + for set_root_interval in &[1, 4] { + run_bank_forks_snapshot_n( snapshot_version, cluster_type, - SET_ROOT_INTERVAL, - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + (MAX_CACHE_ENTRIES * 2) as u64, + |bank, mint_keypair| { + let tx = system_transaction::transfer( + mint_keypair, + &key1, + 1, + bank.parent().unwrap().last_blockhash(), + ); + assert_eq!(bank.process_transaction(&tx), Ok(())); + let tx = system_transaction::transfer( + mint_keypair, + &key2, + 1, + bank.parent().unwrap().last_blockhash(), + ); + assert_eq!(bank.process_transaction(&tx), Ok(())); + goto_end_of_slot(bank); + }, + *set_root_interval, ); - trace!("SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}", - snapshot_test_config.accounts_dir.path().display(), snapshot_test_config.bank_snapshots_dir.path().display(), snapshot_test_config.full_snapshot_archives_dir.path().display(), snapshot_test_config.incremental_snapshot_archives_dir.path().display()); + } +} - let bank_forks = &mut snapshot_test_config.bank_forks; - let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; +#[test_case(V1_2_0, Development)] +#[test_case(V1_2_0, Devnet)] +#[test_case(V1_2_0, Testnet)] +#[test_case(V1_2_0, MainnetBeta)] +fn test_bank_forks_incremental_snapshot( + snapshot_version: SnapshotVersion, + cluster_type: ClusterType, +) { + solana_logger::setup(); + + const SET_ROOT_INTERVAL: Slot = 2; + const INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = SET_ROOT_INTERVAL * 2; + const FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 5; + const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2 - 1; + + info!("Running bank forks incremental snapshot test, full snapshot interval: {} slots, incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, LAST_SLOT, SET_ROOT_INTERVAL); - let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); - let request_sender = AbsRequestSender::new(snapshot_request_sender); - let snapshot_request_handler = SnapshotRequestHandler { - snapshot_config: snapshot_test_config.snapshot_config.clone(), - snapshot_request_receiver, - pending_accounts_package: PendingAccountsPackage::default(), - }; + let mut snapshot_test_config = 
SnapshotTestConfig::new( + snapshot_version, + cluster_type, + SET_ROOT_INTERVAL, + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + ); + trace!("SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}", + snapshot_test_config.accounts_dir.path().display(), snapshot_test_config.bank_snapshots_dir.path().display(), snapshot_test_config.full_snapshot_archives_dir.path().display(), snapshot_test_config.incremental_snapshot_archives_dir.path().display()); - let mut last_full_snapshot_slot = None; - for slot in 1..=LAST_SLOT { - // Make a new bank and perform some transactions - let bank = { - let bank = Bank::new_from_parent(&bank_forks[slot - 1], &Pubkey::default(), slot); + let bank_forks = &mut snapshot_test_config.bank_forks; + let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; - let key = Keypair::new().pubkey(); - let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); - assert_eq!(bank.process_transaction(&tx), Ok(())); + let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); + let request_sender = AbsRequestSender::new(snapshot_request_sender); + let snapshot_request_handler = SnapshotRequestHandler { + snapshot_config: snapshot_test_config.snapshot_config.clone(), + snapshot_request_receiver, + pending_accounts_package: PendingAccountsPackage::default(), + }; - let key = Keypair::new().pubkey(); - let tx = system_transaction::transfer(mint_keypair, &key, 0, bank.last_blockhash()); - assert_eq!(bank.process_transaction(&tx), Ok(())); + let mut last_full_snapshot_slot = None; + for slot in 1..=LAST_SLOT { + // Make a new bank and perform some transactions + let bank = { + let bank = Bank::new_from_parent(&bank_forks[slot - 1], &Pubkey::default(), slot); - while !bank.is_complete() { - bank.register_tick(&Hash::new_unique()); - } - - bank_forks.insert(bank) - }; - - // Set root to make sure we don't end up with too many account storage entries - // and to allow snapshotting of bank and the purging logic on status_cache to - // kick in - if slot % SET_ROOT_INTERVAL == 0 { - // set_root sends a snapshot request - bank_forks.set_root(bank.slot(), &request_sender, None); - bank.update_accounts_hash(); - snapshot_request_handler.handle_snapshot_requests( - false, - false, - 0, - &mut last_full_snapshot_slot, - ); - } + let key = Keypair::new().pubkey(); + let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); + assert_eq!(bank.process_transaction(&tx), Ok(())); - // Since AccountsBackgroundService isn't running, manually make a full snapshot archive - // at the right interval - if snapshot_utils::should_take_full_snapshot(slot, FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS) - { - make_full_snapshot_archive(&bank, &snapshot_test_config.snapshot_config).unwrap(); - } - // Similarly, make an incremental snapshot archive at the right interval, but only if - // there's been at least one full snapshot first, and a full snapshot wasn't already - // taken at this slot. 
- // - // Then, after making an incremental snapshot, restore the bank and verify it is correct - else if snapshot_utils::should_take_incremental_snapshot( - slot, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - last_full_snapshot_slot, - ) && slot != last_full_snapshot_slot.unwrap() - { - make_incremental_snapshot_archive( - &bank, - last_full_snapshot_slot.unwrap(), - &snapshot_test_config.snapshot_config, - ) - .unwrap(); + let key = Keypair::new().pubkey(); + let tx = system_transaction::transfer(mint_keypair, &key, 0, bank.last_blockhash()); + assert_eq!(bank.process_transaction(&tx), Ok(())); - restore_from_snapshots_and_check_banks_are_equal( - &bank, - &snapshot_test_config.snapshot_config, - snapshot_test_config.accounts_dir.path().to_path_buf(), - &snapshot_test_config.genesis_config_info.genesis_config, - ) - .unwrap(); + while !bank.is_complete() { + bank.register_tick(&Hash::new_unique()); } - } - } - fn make_full_snapshot_archive( - bank: &Bank, - snapshot_config: &SnapshotConfig, - ) -> snapshot_utils::Result<()> { - let slot = bank.slot(); - info!("Making full snapshot archive from bank at slot: {}", slot); - let bank_snapshot_info = - snapshot_utils::get_bank_snapshots_pre(&snapshot_config.bank_snapshots_dir) - .into_iter() - .find(|elem| elem.slot == slot) - .ok_or_else(|| { - Error::new( - ErrorKind::Other, - "did not find bank snapshot with this path", - ) - })?; - snapshot_utils::package_and_archive_full_snapshot( - bank, - &bank_snapshot_info, - &snapshot_config.bank_snapshots_dir, - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - bank.get_snapshot_storages(None), - snapshot_config.archive_format, - snapshot_config.snapshot_version, - snapshot_config.maximum_full_snapshot_archives_to_retain, - snapshot_config.maximum_incremental_snapshot_archives_to_retain, - )?; + bank_forks.insert(bank) + }; - Ok(()) - } + // Set root to make sure we don't end up with too many account storage entries + // and to allow snapshotting of bank and the purging logic on status_cache to + // kick in + if slot % SET_ROOT_INTERVAL == 0 { + // set_root sends a snapshot request + bank_forks.set_root(bank.slot(), &request_sender, None); + bank.update_accounts_hash(); + snapshot_request_handler.handle_snapshot_requests( + false, + false, + 0, + &mut last_full_snapshot_slot, + ); + } - fn make_incremental_snapshot_archive( - bank: &Bank, - incremental_snapshot_base_slot: Slot, - snapshot_config: &SnapshotConfig, - ) -> snapshot_utils::Result<()> { - let slot = bank.slot(); - info!( - "Making incremental snapshot archive from bank at slot: {}, and base slot: {}", - slot, incremental_snapshot_base_slot, - ); - let bank_snapshot_info = - snapshot_utils::get_bank_snapshots_pre(&snapshot_config.bank_snapshots_dir) - .into_iter() - .find(|elem| elem.slot == slot) - .ok_or_else(|| { - Error::new( - ErrorKind::Other, - "did not find bank snapshot with this path", - ) - })?; - let storages = bank.get_snapshot_storages(Some(incremental_snapshot_base_slot)); - snapshot_utils::package_and_archive_incremental_snapshot( - bank, - incremental_snapshot_base_slot, - &bank_snapshot_info, - &snapshot_config.bank_snapshots_dir, - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - storages, - snapshot_config.archive_format, - snapshot_config.snapshot_version, - snapshot_config.maximum_full_snapshot_archives_to_retain, - snapshot_config.maximum_incremental_snapshot_archives_to_retain, - )?; + // Since 
AccountsBackgroundService isn't running, manually make a full snapshot archive + // at the right interval + if snapshot_utils::should_take_full_snapshot(slot, FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS) { + make_full_snapshot_archive(&bank, &snapshot_test_config.snapshot_config).unwrap(); + } + // Similarly, make an incremental snapshot archive at the right interval, but only if + // there's been at least one full snapshot first, and a full snapshot wasn't already + // taken at this slot. + // + // Then, after making an incremental snapshot, restore the bank and verify it is correct + else if snapshot_utils::should_take_incremental_snapshot( + slot, + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + last_full_snapshot_slot, + ) && slot != last_full_snapshot_slot.unwrap() + { + make_incremental_snapshot_archive( + &bank, + last_full_snapshot_slot.unwrap(), + &snapshot_test_config.snapshot_config, + ) + .unwrap(); - Ok(()) + restore_from_snapshots_and_check_banks_are_equal( + &bank, + &snapshot_test_config.snapshot_config, + snapshot_test_config.accounts_dir.path().to_path_buf(), + &snapshot_test_config.genesis_config_info.genesis_config, + ) + .unwrap(); + } } +} - fn restore_from_snapshots_and_check_banks_are_equal( - bank: &Bank, - snapshot_config: &SnapshotConfig, - accounts_dir: PathBuf, - genesis_config: &GenesisConfig, - ) -> snapshot_utils::Result<()> { - let (deserialized_bank, ..) = snapshot_utils::bank_from_latest_snapshot_archives( - &snapshot_config.bank_snapshots_dir, - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - &[accounts_dir], - genesis_config, - None, - None, - AccountSecondaryIndexes::default(), - false, - None, - accounts_db::AccountShrinkThreshold::default(), - false, - false, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - )?; +fn make_full_snapshot_archive( + bank: &Bank, + snapshot_config: &SnapshotConfig, +) -> snapshot_utils::Result<()> { + let slot = bank.slot(); + info!("Making full snapshot archive from bank at slot: {}", slot); + let bank_snapshot_info = + snapshot_utils::get_bank_snapshots_pre(&snapshot_config.bank_snapshots_dir) + .into_iter() + .find(|elem| elem.slot == slot) + .ok_or_else(|| { + Error::new( + ErrorKind::Other, + "did not find bank snapshot with this path", + ) + })?; + snapshot_utils::package_and_archive_full_snapshot( + bank, + &bank_snapshot_info, + &snapshot_config.bank_snapshots_dir, + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + bank.get_snapshot_storages(None), + snapshot_config.archive_format, + snapshot_config.snapshot_version, + snapshot_config.maximum_full_snapshot_archives_to_retain, + snapshot_config.maximum_incremental_snapshot_archives_to_retain, + )?; + + Ok(()) +} - assert_eq!(bank, &deserialized_bank); +fn make_incremental_snapshot_archive( + bank: &Bank, + incremental_snapshot_base_slot: Slot, + snapshot_config: &SnapshotConfig, +) -> snapshot_utils::Result<()> { + let slot = bank.slot(); + info!( + "Making incremental snapshot archive from bank at slot: {}, and base slot: {}", + slot, incremental_snapshot_base_slot, + ); + let bank_snapshot_info = + snapshot_utils::get_bank_snapshots_pre(&snapshot_config.bank_snapshots_dir) + .into_iter() + .find(|elem| elem.slot == slot) + .ok_or_else(|| { + Error::new( + ErrorKind::Other, + "did not find bank snapshot with this path", + ) + })?; + let storages = bank.get_snapshot_storages(Some(incremental_snapshot_base_slot)); + 
snapshot_utils::package_and_archive_incremental_snapshot( + bank, + incremental_snapshot_base_slot, + &bank_snapshot_info, + &snapshot_config.bank_snapshots_dir, + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + storages, + snapshot_config.archive_format, + snapshot_config.snapshot_version, + snapshot_config.maximum_full_snapshot_archives_to_retain, + snapshot_config.maximum_incremental_snapshot_archives_to_retain, + )?; + + Ok(()) +} - Ok(()) - } +fn restore_from_snapshots_and_check_banks_are_equal( + bank: &Bank, + snapshot_config: &SnapshotConfig, + accounts_dir: PathBuf, + genesis_config: &GenesisConfig, +) -> snapshot_utils::Result<()> { + let (deserialized_bank, ..) = snapshot_utils::bank_from_latest_snapshot_archives( + &snapshot_config.bank_snapshots_dir, + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + &[accounts_dir], + genesis_config, + None, + None, + AccountSecondaryIndexes::default(), + false, + None, + accounts_db::AccountShrinkThreshold::default(), + false, + false, + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + )?; + + assert_eq!(bank, &deserialized_bank); + + Ok(()) +} - /// Spin up the background services fully and test taking snapshots - fn run_test_snapshots_with_background_services( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, - ) { - solana_logger::setup(); - - const SET_ROOT_INTERVAL_SLOTS: Slot = 2; - const BANK_SNAPSHOT_INTERVAL_SLOTS: Slot = SET_ROOT_INTERVAL_SLOTS * 2; - const INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = BANK_SNAPSHOT_INTERVAL_SLOTS * 3; - const FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 5; - const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 3 - + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2; - - info!("Running snapshots with background services test..."); - trace!( - "Test configuration parameters:\ +/// Spin up the background services fully and test taking snapshots +#[test_case(V1_2_0, Development)] +#[test_case(V1_2_0, Devnet)] +#[test_case(V1_2_0, Testnet)] +#[test_case(V1_2_0, MainnetBeta)] +fn test_snapshots_with_background_services( + snapshot_version: SnapshotVersion, + cluster_type: ClusterType, +) { + solana_logger::setup(); + + const SET_ROOT_INTERVAL_SLOTS: Slot = 2; + const BANK_SNAPSHOT_INTERVAL_SLOTS: Slot = SET_ROOT_INTERVAL_SLOTS * 2; + const INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = BANK_SNAPSHOT_INTERVAL_SLOTS * 3; + const FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 5; + const LAST_SLOT: Slot = + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 3 + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2; + + info!("Running snapshots with background services test..."); + trace!( + "Test configuration parameters:\ \n\tfull snapshot archive interval: {} slots\ \n\tincremental snapshot archive interval: {} slots\ \n\tbank snapshot interval: {} slots\ \n\tset root interval: {} slots\ \n\tlast slot: {}", - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - BANK_SNAPSHOT_INTERVAL_SLOTS, - SET_ROOT_INTERVAL_SLOTS, - LAST_SLOT - ); - - let snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, - cluster_type, - BANK_SNAPSHOT_INTERVAL_SLOTS, - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - ); - - let node_keypair = Arc::new(Keypair::new()); - let cluster_info = Arc::new(ClusterInfo::new( - 
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), - node_keypair, - SocketAddrSpace::Unspecified, - )); - - let (pruned_banks_sender, pruned_banks_receiver) = unbounded(); - let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); - let pending_accounts_package = PendingAccountsPackage::default(); - let pending_snapshot_package = PendingSnapshotPackage::default(); - - let bank_forks = Arc::new(RwLock::new(snapshot_test_config.bank_forks)); - let callback = bank_forks - .read() - .unwrap() - .root_bank() - .rc - .accounts - .accounts_db - .create_drop_bank_callback(pruned_banks_sender); - for bank in bank_forks.read().unwrap().banks().values() { - bank.set_callback(Some(Box::new(callback.clone()))); - } - - let abs_request_sender = AbsRequestSender::new(snapshot_request_sender); - let snapshot_request_handler = Some(SnapshotRequestHandler { - snapshot_config: snapshot_test_config.snapshot_config.clone(), - snapshot_request_receiver, - pending_accounts_package: Arc::clone(&pending_accounts_package), - }); - let abs_request_handler = AbsRequestHandler { - snapshot_request_handler, - pruned_banks_receiver, - }; - - let exit = Arc::new(AtomicBool::new(false)); - let snapshot_packager_service = SnapshotPackagerService::new( - pending_snapshot_package.clone(), - None, - &exit, - &cluster_info, - snapshot_test_config.snapshot_config.clone(), - true, - ); - - let accounts_hash_verifier = AccountsHashVerifier::new( - pending_accounts_package, - Some(pending_snapshot_package), - &exit, - &cluster_info, - None, - false, - 0, - Some(snapshot_test_config.snapshot_config.clone()), - ); - - let accounts_background_service = AccountsBackgroundService::new( - bank_forks.clone(), - &exit, - abs_request_handler, - false, - true, - None, - ); - - let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; - for slot in 1..=LAST_SLOT { - // Make a new bank and perform some transactions - let bank = { - let bank = Bank::new_from_parent( - &bank_forks.read().unwrap().get(slot - 1).unwrap(), - &Pubkey::default(), - slot, - ); - - let key = Keypair::new().pubkey(); - let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); - assert_eq!(bank.process_transaction(&tx), Ok(())); + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + BANK_SNAPSHOT_INTERVAL_SLOTS, + SET_ROOT_INTERVAL_SLOTS, + LAST_SLOT + ); + + let snapshot_test_config = SnapshotTestConfig::new( + snapshot_version, + cluster_type, + BANK_SNAPSHOT_INTERVAL_SLOTS, + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + ); + + let node_keypair = Arc::new(Keypair::new()); + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), + node_keypair, + SocketAddrSpace::Unspecified, + )); + + let (pruned_banks_sender, pruned_banks_receiver) = unbounded(); + let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); + let pending_accounts_package = PendingAccountsPackage::default(); + let pending_snapshot_package = PendingSnapshotPackage::default(); + + let bank_forks = Arc::new(RwLock::new(snapshot_test_config.bank_forks)); + let callback = bank_forks + .read() + .unwrap() + .root_bank() + .rc + .accounts + .accounts_db + .create_drop_bank_callback(pruned_banks_sender); + for bank in bank_forks.read().unwrap().banks().values() { + bank.set_callback(Some(Box::new(callback.clone()))); + } - let key = Keypair::new().pubkey(); - let tx = 
system_transaction::transfer(mint_keypair, &key, 0, bank.last_blockhash()); - assert_eq!(bank.process_transaction(&tx), Ok(())); + let abs_request_sender = AbsRequestSender::new(snapshot_request_sender); + let snapshot_request_handler = Some(SnapshotRequestHandler { + snapshot_config: snapshot_test_config.snapshot_config.clone(), + snapshot_request_receiver, + pending_accounts_package: Arc::clone(&pending_accounts_package), + }); + let abs_request_handler = AbsRequestHandler { + snapshot_request_handler, + pruned_banks_receiver, + }; - while !bank.is_complete() { - bank.register_tick(&Hash::new_unique()); - } + let exit = Arc::new(AtomicBool::new(false)); + let snapshot_packager_service = SnapshotPackagerService::new( + pending_snapshot_package.clone(), + None, + &exit, + &cluster_info, + snapshot_test_config.snapshot_config.clone(), + true, + ); + + let accounts_hash_verifier = AccountsHashVerifier::new( + pending_accounts_package, + Some(pending_snapshot_package), + &exit, + &cluster_info, + None, + false, + 0, + Some(snapshot_test_config.snapshot_config.clone()), + ); + + let accounts_background_service = AccountsBackgroundService::new( + bank_forks.clone(), + &exit, + abs_request_handler, + false, + true, + None, + ); + + let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; + for slot in 1..=LAST_SLOT { + // Make a new bank and perform some transactions + let bank = { + let bank = Bank::new_from_parent( + &bank_forks.read().unwrap().get(slot - 1).unwrap(), + &Pubkey::default(), + slot, + ); - bank_forks.write().unwrap().insert(bank) - }; + let key = Keypair::new().pubkey(); + let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); + assert_eq!(bank.process_transaction(&tx), Ok(())); - // Call `BankForks::set_root()` to cause bank snapshots to be taken - if slot % SET_ROOT_INTERVAL_SLOTS == 0 { - bank_forks - .write() - .unwrap() - .set_root(slot, &abs_request_sender, None); - bank.update_accounts_hash(); - } + let key = Keypair::new().pubkey(); + let tx = system_transaction::transfer(mint_keypair, &key, 0, bank.last_blockhash()); + assert_eq!(bank.process_transaction(&tx), Ok(())); - // Sleep for a second when making a snapshot archive so the background services get a - // chance to run (and since FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS is a multiple of - // INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, we only need to check the one here). - if slot % INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS == 0 { - std::thread::sleep(Duration::from_secs(1)); + while !bank.is_complete() { + bank.register_tick(&Hash::new_unique()); } - } - // NOTE: The 5 seconds of sleeping is arbitrary. This should be plenty of time since the - // snapshots should be quite small. If this test fails at `unwrap()` or because the bank - // slots do not match, increase this sleep duration. - info!("Sleeping for 5 seconds to give background services time to process snapshot archives..."); - std::thread::sleep(Duration::from_secs(5)); - info!("Awake! Rebuilding bank from latest snapshot archives..."); - - let (deserialized_bank, ..) 
= snapshot_utils::bank_from_latest_snapshot_archives( - &snapshot_test_config.snapshot_config.bank_snapshots_dir, - &snapshot_test_config - .snapshot_config - .full_snapshot_archives_dir, - &snapshot_test_config - .snapshot_config - .incremental_snapshot_archives_dir, - &[snapshot_test_config.accounts_dir.as_ref().to_path_buf()], - &snapshot_test_config.genesis_config_info.genesis_config, - None, - None, - AccountSecondaryIndexes::default(), - false, - None, - accounts_db::AccountShrinkThreshold::default(), - false, - false, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - ) - .unwrap(); + bank_forks.write().unwrap().insert(bank) + }; - assert_eq!(deserialized_bank.slot(), LAST_SLOT); - assert_eq!( - &deserialized_bank, + // Call `BankForks::set_root()` to cause bank snapshots to be taken + if slot % SET_ROOT_INTERVAL_SLOTS == 0 { bank_forks - .read() - .unwrap() - .get(deserialized_bank.slot()) + .write() .unwrap() - .as_ref() - ); + .set_root(slot, &abs_request_sender, None); + bank.update_accounts_hash(); + } - // Stop the background services - info!("Shutting down background services..."); - exit.store(true, Ordering::Relaxed); - accounts_background_service.join().unwrap(); - accounts_hash_verifier.join().unwrap(); - snapshot_packager_service.join().unwrap(); + // Sleep for a second when making a snapshot archive so the background services get a + // chance to run (and since FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS is a multiple of + // INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, we only need to check the one here). + if slot % INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS == 0 { + std::thread::sleep(Duration::from_secs(1)); + } } + + // NOTE: The 5 seconds of sleeping is arbitrary. This should be plenty of time since the + // snapshots should be quite small. If this test fails at `unwrap()` or because the bank + // slots do not match, increase this sleep duration. + info!( + "Sleeping for 5 seconds to give background services time to process snapshot archives..." + ); + std::thread::sleep(Duration::from_secs(5)); + info!("Awake! Rebuilding bank from latest snapshot archives..."); + + let (deserialized_bank, ..) = snapshot_utils::bank_from_latest_snapshot_archives( + &snapshot_test_config.snapshot_config.bank_snapshots_dir, + &snapshot_test_config + .snapshot_config + .full_snapshot_archives_dir, + &snapshot_test_config + .snapshot_config + .incremental_snapshot_archives_dir, + &[snapshot_test_config.accounts_dir.as_ref().to_path_buf()], + &snapshot_test_config.genesis_config_info.genesis_config, + None, + None, + AccountSecondaryIndexes::default(), + false, + None, + accounts_db::AccountShrinkThreshold::default(), + false, + false, + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + ) + .unwrap(); + + assert_eq!(deserialized_bank.slot(), LAST_SLOT); + assert_eq!( + &deserialized_bank, + bank_forks + .read() + .unwrap() + .get(deserialized_bank.slot()) + .unwrap() + .as_ref() + ); + + // Stop the background services + info!("Shutting down background services..."); + exit.store(true, Ordering::Relaxed); + accounts_background_service.join().unwrap(); + accounts_hash_verifier.join().unwrap(); + snapshot_packager_service.join().unwrap(); } From 61f0a7d9c36ddc2e8612b4fc7b317e79ebc3141f Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 5 Jul 2022 14:29:44 +0000 Subject: [PATCH 018/100] replaces Mutex with RwLock (#26370) Mutex causes superfluous lock contention when a read-only reference suffices. 
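The converted pattern is read-mostly: many banking threads poll the recorder
(bank(), bank_start(), tick_height()) for every batch, while only a few call
sites mutate it (set_bank(), tick(), reset()). With a Mutex every poll
excludes every other poll; with an RwLock readers share the lock and only
writers serialize. A minimal sketch of the access pattern follows — it uses a
stand-in Recorder struct rather than the real PohRecorder, and the thread and
iteration counts are arbitrary:

use std::{
    sync::{Arc, RwLock},
    thread,
};

// Stand-in for PohRecorder: read-mostly state shared across worker threads.
struct Recorder {
    tick_height: u64,
}

fn main() {
    let recorder = Arc::new(RwLock::new(Recorder { tick_height: 0 }));

    // Readers take shared locks and do not block one another, which is the
    // whole point of the Mutex -> RwLock swap.
    let readers: Vec<_> = (0..4)
        .map(|_| {
            let recorder = Arc::clone(&recorder);
            thread::spawn(move || {
                for _ in 0..1_000 {
                    let r = recorder.read().unwrap(); // like bank_start()
                    let _ = r.tick_height;
                }
            })
        })
        .collect();

    // Writers still serialize, like set_bank()/tick()/reset().
    for _ in 0..10 {
        recorder.write().unwrap().tick_height += 1;
    }

    for t in readers {
        t.join().unwrap();
    }
}

Taking read() where mutation is needed fails to compile, since a read guard
only hands out a shared reference; the conversion is therefore checked by the
compiler, and the only remaining hazard is pessimistically taking write()
where read() would do.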
--- banking-bench/src/main.rs | 16 ++-- core/benches/banking_stage.rs | 4 +- core/src/banking_stage.rs | 116 ++++++++++++------------- core/src/cluster_info_vote_listener.rs | 10 +-- core/src/fetch_stage.rs | 12 +-- core/src/replay_stage.rs | 32 +++---- core/src/tpu.rs | 4 +- core/src/tvu.rs | 4 +- core/src/validator.rs | 6 +- core/src/voting_service.rs | 6 +- core/src/warm_quic_cache_service.rs | 6 +- poh/src/poh_recorder.rs | 6 +- poh/src/poh_service.rs | 32 +++---- rpc/src/cluster_tpu_info.rs | 10 +-- rpc/src/rpc_service.rs | 4 +- 15 files changed, 134 insertions(+), 134 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index ea0438c36abebe..7f318ef5a11fd3 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -30,7 +30,7 @@ use { }, solana_streamer::socket::SocketAddrSpace, std::{ - sync::{atomic::Ordering, Arc, Mutex, RwLock}, + sync::{atomic::Ordering, Arc, RwLock}, thread::sleep, time::{Duration, Instant}, }, @@ -39,7 +39,7 @@ use { fn check_txs( receiver: &Arc>, ref_tx_count: usize, - poh_recorder: &Arc>, + poh_recorder: &Arc>, ) -> bool { let mut total = 0; let now = Instant::now(); @@ -55,7 +55,7 @@ fn check_txs( if now.elapsed().as_secs() > 60 { break; } - if poh_recorder.lock().unwrap().bank().is_none() { + if poh_recorder.read().unwrap().bank().is_none() { no_bank = true; break; } @@ -358,7 +358,7 @@ fn main() { DEFAULT_TPU_CONNECTION_POOL_SIZE, )), ); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); // This is so that the signal_receiver does not go out of scope after the closure. // If it is dropped before poh_service, then poh_service will error when @@ -396,7 +396,7 @@ fn main() { if bank.get_signature_status(&tx.signatures[0]).is_some() { break; } - if poh_recorder.lock().unwrap().bank().is_none() { + if poh_recorder.read().unwrap().bank().is_none() { break; } sleep(Duration::from_millis(5)); @@ -418,7 +418,7 @@ fn main() { let mut poh_time = Measure::start("poh_time"); poh_recorder - .lock() + .write() .unwrap() .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1))); poh_time.stop(); @@ -439,8 +439,8 @@ fn main() { std::u64::MAX, ); - poh_recorder.lock().unwrap().set_bank(&bank, false); - assert!(poh_recorder.lock().unwrap().bank().is_some()); + poh_recorder.write().unwrap().set_bank(&bank, false); + assert!(poh_recorder.read().unwrap().bank().is_some()); if bank.slot() > 32 { leader_schedule_cache.set_root(&bank); bank_forks.set_root(root, &AbsRequestSender::default(), None); diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index fe788dca2ce277..2562ad3789daae 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -74,7 +74,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) { let (exit, poh_recorder, poh_service, _signal_receiver) = create_test_recorder(&bank, &blockstore, None, None); - let recorder = poh_recorder.lock().unwrap().recorder(); + let recorder = poh_recorder.read().unwrap().recorder(); let tx = test_tx(); let transactions = vec![tx; 4194304]; @@ -233,7 +233,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { Arc::new(RwLock::new(CostModel::default())), Arc::new(ConnectionCache::default()), ); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let chunk_len = verified.len() / CHUNKS; let mut start = 0; diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 7cde63c052bd11..8a3f4e3a9a42af 
100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -69,7 +69,7 @@ use { rc::Rc, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, @@ -411,7 +411,7 @@ impl BankingStage { #[allow(clippy::new_ret_no_self)] pub fn new( cluster_info: &Arc, - poh_recorder: &Arc>, + poh_recorder: &Arc>, verified_receiver: BankingPacketReceiver, tpu_verified_vote_receiver: BankingPacketReceiver, verified_vote_receiver: BankingPacketReceiver, @@ -437,7 +437,7 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] pub fn new_num_threads( cluster_info: &Arc, - poh_recorder: &Arc>, + poh_recorder: &Arc>, verified_receiver: BankingPacketReceiver, tpu_verified_vote_receiver: BankingPacketReceiver, verified_vote_receiver: BankingPacketReceiver, @@ -539,7 +539,7 @@ impl BankingStage { connection_cache: &ConnectionCache, forward_option: &ForwardOption, cluster_info: &ClusterInfo, - poh_recorder: &Arc>, + poh_recorder: &Arc>, socket: &UdpSocket, filter_forwarding_results: &FilterForwardingResults, data_budget: &DataBudget, @@ -640,7 +640,7 @@ impl BankingStage { pub fn consume_buffered_packets( my_pubkey: &Pubkey, max_tx_ingestion_ns: u128, - poh_recorder: &Arc>, + poh_recorder: &Arc>, buffered_packet_batches: &mut UnprocessedPacketBatches, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -672,8 +672,8 @@ impl BankingStage { // TODO: Right now we iterate through buffer and try the highest weighted transaction once // but we should retry the highest weighted transactions more often. let (bank_start, poh_recorder_lock_time) = measure!( - poh_recorder.lock().unwrap().bank_start(), - "poh_recorder_lock", + poh_recorder.read().unwrap().bank_start(), + "poh_recorder.read", ); slot_metrics_tracker.increment_consume_buffered_packets_poh_recorder_lock_us( poh_recorder_lock_time.as_us(), @@ -718,7 +718,7 @@ impl BankingStage { { let poh_recorder_lock_time = { let (poh_recorder_locked, poh_recorder_lock_time) = - measure!(poh_recorder.lock().unwrap(), "poh_recorder_lock"); + measure!(poh_recorder.read().unwrap(), "poh_recorder.read"); reached_end_of_slot = Some(EndOfSlot { next_slot_leader: poh_recorder_locked.next_slot_leader(), @@ -783,7 +783,7 @@ impl BankingStage { // packet batches in buffer let poh_recorder_lock_time = { let (poh_recorder_locked, poh_recorder_lock_time) = - measure!(poh_recorder.lock().unwrap(), "poh_recorder_lock"); + measure!(poh_recorder.read().unwrap(), "poh_recorder.read"); reached_end_of_slot = Some(EndOfSlot { next_slot_leader: poh_recorder_locked.next_slot_leader(), @@ -907,7 +907,7 @@ impl BankingStage { fn process_buffered_packets( my_pubkey: &Pubkey, socket: &UdpSocket, - poh_recorder: &Arc>, + poh_recorder: &Arc>, cluster_info: &ClusterInfo, buffered_packet_batches: &mut UnprocessedPacketBatches, forward_option: &ForwardOption, @@ -930,7 +930,7 @@ impl BankingStage { would_be_leader, would_be_leader_shortly, ) = { - let poh = poh_recorder.lock().unwrap(); + let poh = poh_recorder.read().unwrap(); bank_start = poh.bank_start(); ( poh.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET), @@ -1037,7 +1037,7 @@ impl BankingStage { forward_option: &ForwardOption, cluster_info: &ClusterInfo, buffered_packet_batches: &mut UnprocessedPacketBatches, - poh_recorder: &Arc>, + poh_recorder: &Arc>, socket: &UdpSocket, hold: bool, data_budget: &DataBudget, @@ -1100,7 +1100,7 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] fn 
process_loop( verified_receiver: &BankingPacketReceiver, - poh_recorder: &Arc>, + poh_recorder: &Arc>, cluster_info: &ClusterInfo, recv_start: &mut Instant, forward_option: ForwardOption, @@ -1112,7 +1112,7 @@ impl BankingStage { cost_model: Arc>, connection_cache: Arc, ) { - let recorder = poh_recorder.lock().unwrap().recorder(); + let recorder = poh_recorder.read().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let mut buffered_packet_batches = UnprocessedPacketBatches::with_capacity(batch_limit); let mut banking_stage_stats = BankingStageStats::new(id); @@ -2237,35 +2237,35 @@ impl BankingStage { pub(crate) fn next_leader_tpu( cluster_info: &ClusterInfo, - poh_recorder: &Mutex, + poh_recorder: &RwLock, ) -> Option<(Pubkey, std::net::SocketAddr)> { next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu) } fn next_leader_tpu_forwards( cluster_info: &ClusterInfo, - poh_recorder: &Mutex, + poh_recorder: &RwLock, ) -> Option<(Pubkey, std::net::SocketAddr)> { next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_forwards) } pub(crate) fn next_leader_tpu_vote( cluster_info: &ClusterInfo, - poh_recorder: &Mutex, + poh_recorder: &RwLock, ) -> Option<(Pubkey, std::net::SocketAddr)> { next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_vote) } fn next_leader_x( cluster_info: &ClusterInfo, - poh_recorder: &Mutex, + poh_recorder: &RwLock, port_selector: F, ) -> Option<(Pubkey, std::net::SocketAddr)> where F: FnOnce(&ContactInfo) -> SocketAddr, { let leader_pubkey = poh_recorder - .lock() + .read() .unwrap() .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET); if let Some(leader_pubkey) = leader_pubkey { @@ -2711,11 +2711,11 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let pubkey = solana_sdk::pubkey::new_rand(); let keypair2 = Keypair::new(); let pubkey2 = solana_sdk::pubkey::new_rand(); @@ -2740,7 +2740,7 @@ mod tests { assert!(entry_receiver.try_recv().is_err()); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -2933,11 +2933,11 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); let process_transactions_batch_output = BankingStage::process_and_record_transactions( @@ -2964,8 +2964,8 @@ mod tests { assert!(commit_transactions_result.is_ok()); // Tick up to max tick height - while poh_recorder.lock().unwrap().tick_height() != bank.max_tick_height() { - poh_recorder.lock().unwrap().tick(); + while poh_recorder.read().unwrap().tick_height() != bank.max_tick_height() { + poh_recorder.write().unwrap().tick(); } let mut done = false; @@ -3021,7 +3021,7 @@ mod tests { ); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3068,11 +3068,11 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let 
poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); let process_transactions_batch_output = BankingStage::process_and_record_transactions( @@ -3104,7 +3104,7 @@ mod tests { ); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3141,11 +3141,11 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); let qos_service = QosService::new(Arc::new(RwLock::new(CostModel::default())), 1); @@ -3229,7 +3229,7 @@ mod tests { assert_eq!(get_tx_count(), 2); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3240,10 +3240,10 @@ mod tests { fn simulate_poh( record_receiver: CrossbeamReceiver, - poh_recorder: &Arc>, + poh_recorder: &Arc>, ) -> JoinHandle<()> { let poh_recorder = poh_recorder.clone(); - let is_exited = poh_recorder.lock().unwrap().is_exited.clone(); + let is_exited = poh_recorder.read().unwrap().is_exited.clone(); let tick_producer = Builder::new() .name("solana-simulate_poh".to_string()) .spawn(move || loop { @@ -3293,9 +3293,9 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); @@ -3312,7 +3312,7 @@ mod tests { ); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3431,7 +3431,7 @@ mod tests { // record let recorder = poh_recorder.recorder(); - let poh_simulator = simulate_poh(record_receiver, &Arc::new(Mutex::new(poh_recorder))); + let poh_simulator = simulate_poh(record_receiver, &Arc::new(RwLock::new(poh_recorder))); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); @@ -3493,9 +3493,9 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); @@ -3512,7 +3512,7 @@ mod tests { ); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3698,11 +3698,11 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let shreds = entries_to_test_shreds(&entries, bank.slot(), 0, true, 0); 
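// The conversion rule applied throughout this patch: read-only accessors
// such as bank(), has_bank(), tick_height() and recorder() move from lock()
// to the shared read() guard, while mutations such as tick(), set_bank(),
// reset() and schedule_dummy_max_height_reached_failure() take the
// exclusive write() guard.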
blockstore.insert_shreds(shreds, None, false).unwrap(); @@ -3756,7 +3756,7 @@ mod tests { assert_eq!(actual_tx_results, expected_tx_results); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3859,11 +3859,11 @@ mod tests { Arc::new(AtomicBool::default()), ); let recorder = poh_recorder.recorder(); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let shreds = entries_to_test_shreds(&entries, bank.slot(), 0, true, 0); blockstore.insert_shreds(shreds, None, false).unwrap(); @@ -3914,7 +3914,7 @@ mod tests { } ); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -3929,7 +3929,7 @@ mod tests { ) -> ( Vec, Arc, - Arc>, + Arc>, Receiver, JoinHandle<()>, ) { @@ -3956,7 +3956,7 @@ mod tests { &Arc::new(PohConfig::default()), exit, ); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); // Set up unparallelizable conflicting transactions let pubkey0 = solana_sdk::pubkey::new_rand(); @@ -3984,7 +3984,7 @@ mod tests { { let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = setup_conflicting_transactions(ledger_path.path()); - let recorder = poh_recorder.lock().unwrap().recorder(); + let recorder = poh_recorder.read().unwrap().recorder(); let num_conflicting_transactions = transactions.len(); let deserialized_packets = unprocessed_packet_batches::transactions_to_deserialized_packets(&transactions) @@ -3999,7 +3999,7 @@ mod tests { let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); // When the working bank in poh_recorder is None, no packets should be processed - assert!(!poh_recorder.lock().unwrap().has_bank()); + assert!(!poh_recorder.read().unwrap().has_bank()); let max_tx_processing_ns = std::u128::MAX; BankingStage::consume_buffered_packets( &Pubkey::default(), @@ -4020,7 +4020,7 @@ mod tests { // Processes one packet per iteration of the loop let num_packets_to_process_per_iteration = num_conflicting_transactions; for num_expected_unprocessed in (0..num_conflicting_transactions).rev() { - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); BankingStage::consume_buffered_packets( &Pubkey::default(), max_tx_processing_ns, @@ -4042,7 +4042,7 @@ mod tests { } } poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); @@ -4069,9 +4069,9 @@ mod tests { // each iteration of this loop will process one element of the batch per iteration of the // loop. 
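// (interrupted_iteration below selects which element of the batch triggers
// the simulated max-height failure injected further down.)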
let interrupted_iteration = 1; - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); let poh_recorder_ = poh_recorder.clone(); - let recorder = poh_recorder_.lock().unwrap().recorder(); + let recorder = poh_recorder_.read().unwrap().recorder(); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); // Start up thread to process the banks let t_consume = Builder::new() @@ -4126,7 +4126,7 @@ mod tests { finished_packet_receiver.recv().unwrap(); if i == interrupted_iteration { poh_recorder - .lock() + .write() .unwrap() .schedule_dummy_max_height_reached_failure(); } @@ -4135,7 +4135,7 @@ mod tests { t_consume.join().unwrap(); poh_recorder - .lock() + .read() .unwrap() .is_exited .store(true, Ordering::Relaxed); diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 02d858a1a1e3cb..98a31b7b08aea8 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -48,7 +48,7 @@ use { iter::repeat, sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{self, sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -234,7 +234,7 @@ impl ClusterInfoVoteListener { exit: Arc, cluster_info: Arc, verified_packets_sender: BankingPacketSender, - poh_recorder: Arc>, + poh_recorder: Arc>, vote_tracker: Arc, bank_forks: Arc>, subscriptions: Arc, @@ -375,7 +375,7 @@ impl ClusterInfoVoteListener { fn bank_send_loop( exit: Arc, verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver, - poh_recorder: Arc>, + poh_recorder: Arc>, verified_packets_sender: &BankingPacketSender, ) -> Result<()> { let mut verified_vote_packets = VerifiedVotePackets::default(); @@ -388,7 +388,7 @@ impl ClusterInfoVoteListener { } let would_be_leader = poh_recorder - .lock() + .read() .unwrap() .would_be_leader(3 * slot_hashes::MAX_ENTRIES as u64 * DEFAULT_TICKS_PER_SLOT); @@ -409,7 +409,7 @@ impl ClusterInfoVoteListener { // Always set this to avoid taking the poh lock too often time_since_lock = Instant::now(); // We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS` - let current_working_bank = poh_recorder.lock().unwrap().bank(); + let current_working_bank = poh_recorder.read().unwrap().bank(); if let Some(current_working_bank) = current_working_bank { Self::check_for_leader_bank_and_send_votes( &mut bank_vote_sender_state_option, diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index d15e9c9bd0d4de..c041739d7c8d4e 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -20,7 +20,7 @@ use { net::UdpSocket, sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, + Arc, RwLock, }, thread::{self, sleep, Builder, JoinHandle}, time::Duration, @@ -38,7 +38,7 @@ impl FetchStage { tpu_forwards_sockets: Vec, tpu_vote_sockets: Vec, exit: &Arc, - poh_recorder: &Arc>, + poh_recorder: &Arc>, coalesce_ms: u64, ) -> (Self, PacketBatchReceiver, PacketBatchReceiver) { let (sender, receiver) = unbounded(); @@ -73,7 +73,7 @@ impl FetchStage { vote_sender: &PacketBatchSender, forward_sender: &PacketBatchSender, forward_receiver: PacketBatchReceiver, - poh_recorder: &Arc>, + poh_recorder: &Arc>, coalesce_ms: u64, in_vote_only_mode: Option>, ) -> Self { @@ -98,7 +98,7 @@ impl FetchStage { fn handle_forwarded_packets( recvr: &PacketBatchReceiver, sendr: &PacketBatchSender, - poh_recorder: &Arc>, + poh_recorder: &Arc>, ) -> Result<()> { let mark_forwarded = |packet: &mut Packet| { packet.meta.flags |= 
PacketFlags::FORWARDED; @@ -119,7 +119,7 @@ impl FetchStage { } if poh_recorder - .lock() + .read() .unwrap() .would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET.saturating_mul(DEFAULT_TICKS_PER_SLOT)) { @@ -147,7 +147,7 @@ impl FetchStage { vote_sender: &PacketBatchSender, forward_sender: &PacketBatchSender, forward_receiver: PacketBatchReceiver, - poh_recorder: &Arc>, + poh_recorder: &Arc>, coalesce_ms: u64, in_vote_only_mode: Option>, ) -> Self { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index a70f763f6e71bb..2cd43155ac4aa6 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -71,7 +71,7 @@ use { result, sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, @@ -360,7 +360,7 @@ impl ReplayStage { cluster_info: Arc, ledger_signal_receiver: Receiver, duplicate_slots_receiver: DuplicateSlotReceiver, - poh_recorder: Arc>, + poh_recorder: Arc>, maybe_process_blockstore: Option, vote_tracker: Arc, cluster_slots: Arc, @@ -476,7 +476,7 @@ impl ReplayStage { ); generate_new_bank_forks_time.stop(); - let mut tpu_has_bank = poh_recorder.lock().unwrap().has_bank(); + let mut tpu_has_bank = poh_recorder.read().unwrap().has_bank(); let mut replay_active_banks_time = Measure::start("replay_active_banks_time"); let mut ancestors = bank_forks.read().unwrap().ancestors(); @@ -833,7 +833,7 @@ impl ReplayStage { let mut start_leader_time = Measure::start("start_leader_time"); let mut dump_then_repair_correct_slots_time = Measure::start("dump_then_repair_correct_slots_time"); // Used for correctness check - let poh_bank = poh_recorder.lock().unwrap().bank(); + let poh_bank = poh_recorder.read().unwrap().bank(); // Dump any duplicate slots that have been confirmed by the network in // anticipation of repairing the confirmed version of the slot. 
// @@ -868,7 +868,7 @@ impl ReplayStage { transaction_status_sender.is_some(), ); - let poh_bank = poh_recorder.lock().unwrap().bank(); + let poh_bank = poh_recorder.read().unwrap().bank(); if let Some(bank) = poh_bank { Self::log_leader_change( &my_pubkey, @@ -1000,11 +1000,11 @@ impl ReplayStage { } fn retransmit_latest_unpropagated_leader_slot( - poh_recorder: &Arc>, + poh_recorder: &Arc>, retransmit_slots_sender: &RetransmitSlotsSender, progress: &mut ProgressMap, ) { - let start_slot = poh_recorder.lock().unwrap().start_slot(); + let start_slot = poh_recorder.read().unwrap().start_slot(); if let (false, Some(latest_leader_slot)) = progress.get_leader_propagation_slot_must_exist(start_slot) @@ -1545,7 +1545,7 @@ impl ReplayStage { fn maybe_start_leader( my_pubkey: &Pubkey, bank_forks: &Arc>, - poh_recorder: &Arc>, + poh_recorder: &Arc>, leader_schedule_cache: &Arc, rpc_subscriptions: &Arc, progress_map: &mut ProgressMap, @@ -1554,12 +1554,12 @@ impl ReplayStage { has_new_vote_been_rooted: bool, track_transaction_indexes: bool, ) { - // all the individual calls to poh_recorder.lock() are designed to + // all the individual calls to poh_recorder.read() are designed to // increase granularity, decrease contention - assert!(!poh_recorder.lock().unwrap().has_bank()); + assert!(!poh_recorder.read().unwrap().has_bank()); - let (poh_slot, parent_slot) = match poh_recorder.lock().unwrap().reached_leader_slot() { + let (poh_slot, parent_slot) = match poh_recorder.read().unwrap().reached_leader_slot() { PohLeaderStatus::Reached { poh_slot, parent_slot, @@ -1674,7 +1674,7 @@ impl ReplayStage { let tpu_bank = bank_forks.write().unwrap().insert(tpu_bank); poh_recorder - .lock() + .write() .unwrap() .set_bank(&tpu_bank, track_transaction_indexes); } else { @@ -2150,7 +2150,7 @@ impl ReplayStage { my_pubkey: &Pubkey, blockstore: &Blockstore, bank: &Arc, - poh_recorder: &Mutex, + poh_recorder: &RwLock, leader_schedule_cache: &LeaderScheduleCache, ) { let next_leader_slot = leader_schedule_cache.next_leader_slot( @@ -2161,7 +2161,7 @@ impl ReplayStage { GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, ); poh_recorder - .lock() + .write() .unwrap() .reset(bank.clone(), next_leader_slot); @@ -3271,7 +3271,7 @@ pub(crate) mod tests { my_pubkey: Pubkey, cluster_info: ClusterInfo, leader_schedule_cache: Arc, - poh_recorder: Mutex, + poh_recorder: RwLock, tower: Tower, rpc_subscriptions: Arc, pub vote_simulator: VoteSimulator, @@ -3322,7 +3322,7 @@ pub(crate) mod tests { // PohRecorder let working_bank = bank_forks.read().unwrap().working_bank(); - let poh_recorder = Mutex::new( + let poh_recorder = RwLock::new( PohRecorder::new( working_bank.tick_height(), working_bank.last_blockhash(), diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 720d7cf3c5c4d9..72acd127db0baa 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -36,7 +36,7 @@ use { }, std::{ net::UdpSocket, - sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, + sync::{atomic::AtomicBool, Arc, RwLock}, thread, }, }; @@ -73,7 +73,7 @@ impl Tpu { #[allow(clippy::too_many_arguments)] pub fn new( cluster_info: &Arc, - poh_recorder: &Arc>, + poh_recorder: &Arc>, entry_receiver: Receiver, retransmit_slots_receiver: RetransmitSlotsReceiver, sockets: TpuSockets, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 7b4de2cdd31c31..6e14d19794ac5e 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -52,7 +52,7 @@ use { std::{ collections::HashSet, net::UdpSocket, - sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, + sync::{atomic::AtomicBool, Arc, RwLock}, thread, }, }; @@ 
-105,7 +105,7 @@ impl Tvu { blockstore: Arc, ledger_signal_receiver: Receiver, rpc_subscriptions: &Arc, - poh_recorder: &Arc>, + poh_recorder: &Arc>, maybe_process_block_store: Option, tower_storage: Arc, leader_schedule_cache: &Arc, diff --git a/core/src/validator.rs b/core/src/validator.rs index 0a8245d326806f..6e6b4b07914751 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -106,7 +106,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -341,7 +341,7 @@ pub struct Validator { serve_repair_service: ServeRepairService, completed_data_sets_service: CompletedDataSetsService, snapshot_packager_service: Option, - poh_recorder: Arc>, + poh_recorder: Arc>, poh_service: PohService, tpu: Tpu, tvu: Tvu, @@ -755,7 +755,7 @@ impl Validator { exit.clone(), ) }; - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let use_quic = UseQUIC::new(use_quic).expect("Failed to initialize QUIC flags"); let connection_cache = Arc::new(ConnectionCache::new(use_quic, tpu_connection_pool_size)); diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 4f7e585d61ae73..29cf4699dd575e 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -7,7 +7,7 @@ use { solana_runtime::bank_forks::BankForks, solana_sdk::{clock::Slot, transaction::Transaction}, std::{ - sync::{Arc, Mutex, RwLock}, + sync::{Arc, RwLock}, thread::{self, Builder, JoinHandle}, }, }; @@ -41,7 +41,7 @@ impl VotingService { pub fn new( vote_receiver: Receiver, cluster_info: Arc, - poh_recorder: Arc>, + poh_recorder: Arc>, tower_storage: Arc, bank_forks: Arc>, ) -> Self { @@ -66,7 +66,7 @@ impl VotingService { pub fn handle_vote( cluster_info: &ClusterInfo, - poh_recorder: &Mutex, + poh_recorder: &RwLock, tower_storage: &dyn TowerStorage, vote_op: VoteOp, send_to_tpu_vote_port: bool, diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index 86fb9c80dd8b0a..2632d031019ed9 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -9,7 +9,7 @@ use { std::{ sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, + Arc, RwLock, }, thread::{self, sleep, Builder, JoinHandle}, time::Duration, @@ -28,7 +28,7 @@ impl WarmQuicCacheService { pub fn new( connection_cache: Arc, cluster_info: Arc, - poh_recorder: Arc>, + poh_recorder: Arc>, exit: Arc, ) -> Self { let thread_hdl = Builder::new() @@ -38,7 +38,7 @@ impl WarmQuicCacheService { let mut maybe_last_leader = None; while !exit.load(Ordering::Relaxed) { let leader_pubkey = poh_recorder - .lock() + .read() .unwrap() .leader_after_n_slots((CACHE_OFFSET_SLOT + slot_jitter) as u64); if let Some(leader_pubkey) = leader_pubkey { diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 000d58ab4650ee..e685b950beb6a5 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -32,7 +32,7 @@ use { cmp, sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, + Arc, Mutex, RwLock, }, time::{Duration, Instant}, }, @@ -949,7 +949,7 @@ pub fn create_test_recorder( leader_schedule_cache: Option>, ) -> ( Arc, - Arc>, + Arc>, PohService, Receiver, ) { @@ -973,7 +973,7 @@ pub fn create_test_recorder( ); poh_recorder.set_bank(bank, false); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_service = PohService::new( 
poh_recorder.clone(), &poh_config, diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index fda686f1156eb8..70f4d2f1ed47ac 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -10,7 +10,7 @@ use { std::{ sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, + Arc, Mutex, RwLock, }, thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, @@ -95,7 +95,7 @@ impl PohTiming { impl PohService { pub fn new( - poh_recorder: Arc>, + poh_recorder: Arc>, poh_config: &Arc, poh_exit: &Arc, ticks_per_slot: u64, @@ -163,7 +163,7 @@ impl PohService { } fn sleepy_tick_producer( - poh_recorder: Arc>, + poh_recorder: Arc>, poh_config: &PohConfig, poh_exit: &AtomicBool, record_receiver: Receiver, @@ -180,13 +180,13 @@ impl PohService { ); if remaining_tick_time.is_zero() { last_tick = Instant::now(); - poh_recorder.lock().unwrap().tick(); + poh_recorder.write().unwrap().tick(); } } } pub fn read_record_receiver_and_process( - poh_recorder: &Arc>, + poh_recorder: &Arc>, record_receiver: &Receiver, timeout: Duration, ) { @@ -194,7 +194,7 @@ impl PohService { if let Ok(record) = record { if record .sender - .send(poh_recorder.lock().unwrap().record( + .send(poh_recorder.write().unwrap().record( record.slot, record.mixin, record.transactions, @@ -207,7 +207,7 @@ impl PohService { } fn short_lived_sleepy_tick_producer( - poh_recorder: Arc>, + poh_recorder: Arc>, poh_config: &PohConfig, poh_exit: &AtomicBool, record_receiver: Receiver, @@ -227,7 +227,7 @@ impl PohService { ); if remaining_tick_time.is_zero() { last_tick = Instant::now(); - poh_recorder.lock().unwrap().tick(); + poh_recorder.write().unwrap().tick(); elapsed_ticks += 1; } if poh_exit.load(Ordering::Relaxed) && !warned { @@ -240,7 +240,7 @@ impl PohService { // returns true if we need to tick fn record_or_hash( next_record: &mut Option, - poh_recorder: &Arc>, + poh_recorder: &Arc>, timing: &mut PohTiming, record_receiver: &Receiver, hashes_per_batch: u64, @@ -252,7 +252,7 @@ impl PohService { // received message to record // so, record for as long as we have queued up record requests let mut lock_time = Measure::start("lock"); - let mut poh_recorder_l = poh_recorder.lock().unwrap(); + let mut poh_recorder_l = poh_recorder.write().unwrap(); lock_time.stop(); timing.total_lock_time_ns += lock_time.as_ns(); let mut record_time = Measure::start("record"); @@ -332,14 +332,14 @@ impl PohService { } fn tick_producer( - poh_recorder: Arc>, + poh_recorder: Arc>, poh_exit: &AtomicBool, ticks_per_slot: u64, hashes_per_batch: u64, record_receiver: Receiver, target_ns_per_tick: u64, ) { - let poh = poh_recorder.lock().unwrap().poh.clone(); + let poh = poh_recorder.read().unwrap().poh.clone(); let mut timing = PohTiming::new(); let mut next_record = None; loop { @@ -356,7 +356,7 @@ impl PohService { // Lock PohRecorder only for the final hash. record_or_hash will lock PohRecorder for record calls but not for hashing. 
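// Ticking mutates the recorder's Poh state, so this block still needs the
// exclusive write() guard after the RwLock conversion.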
{ let mut lock_time = Measure::start("lock"); - let mut poh_recorder_l = poh_recorder.lock().unwrap(); + let mut poh_recorder_l = poh_recorder.write().unwrap(); lock_time.stop(); timing.total_lock_time_ns += lock_time.as_ns(); let mut tick_time = Measure::start("tick"); @@ -436,7 +436,7 @@ mod tests { &poh_config, exit.clone(), ); - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let ticks_per_slot = bank.ticks_per_slot(); let bank_slot = bank.slot(); @@ -462,7 +462,7 @@ mod tests { loop { // send some data let mut time = Measure::start("record"); - let _ = poh_recorder.lock().unwrap().record( + let _ = poh_recorder.write().unwrap().record( bank_slot, h1, vec![tx.clone()], @@ -500,7 +500,7 @@ mod tests { hashes_per_batch, record_receiver, ); - poh_recorder.lock().unwrap().set_bank(&bank, false); + poh_recorder.write().unwrap().set_bank(&bank, false); // get some events let mut hashes = 0; diff --git a/rpc/src/cluster_tpu_info.rs b/rpc/src/cluster_tpu_info.rs index bd7ad572d90f3e..7e1982cf50b945 100644 --- a/rpc/src/cluster_tpu_info.rs +++ b/rpc/src/cluster_tpu_info.rs @@ -6,19 +6,19 @@ use { std::{ collections::HashMap, net::SocketAddr, - sync::{Arc, Mutex}, + sync::{Arc, RwLock}, }, }; #[derive(Clone)] pub struct ClusterTpuInfo { cluster_info: Arc, - poh_recorder: Arc>, + poh_recorder: Arc>, recent_peers: HashMap, } impl ClusterTpuInfo { - pub fn new(cluster_info: Arc, poh_recorder: Arc>) -> Self { + pub fn new(cluster_info: Arc, poh_recorder: Arc>) -> Self { Self { cluster_info, poh_recorder, @@ -38,7 +38,7 @@ impl TpuInfo for ClusterTpuInfo { } fn get_leader_tpus(&self, max_count: u64) -> Vec<&SocketAddr> { - let recorder = self.poh_recorder.lock().unwrap(); + let recorder = self.poh_recorder.read().unwrap(); let leaders: Vec<_> = (0..max_count) .filter_map(|i| recorder.leader_after_n_slots(i * NUM_CONSECUTIVE_LEADER_SLOTS)) .collect(); @@ -141,7 +141,7 @@ mod test { .collect(); let leader_info = ClusterTpuInfo { cluster_info, - poh_recorder: Arc::new(Mutex::new(poh_recorder)), + poh_recorder: Arc::new(RwLock::new(poh_recorder)), recent_peers: recent_peers.clone(), }; diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index ba17587012584b..02c2198b9763c2 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -45,7 +45,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{self, Builder, JoinHandle}, }, @@ -342,7 +342,7 @@ impl JsonRpcService { block_commitment_cache: Arc>, blockstore: Arc, cluster_info: Arc, - poh_recorder: Option>>, + poh_recorder: Option>>, genesis_hash: Hash, ledger_path: &Path, validator_exit: Arc>, From 0ab521d5f12d7dc71a5a124f4ec02338ab358e75 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 5 Jul 2022 11:27:01 -0500 Subject: [PATCH 019/100] inc counter when append vec drop fails (#26396) --- runtime/src/append_vec.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 0e50ccec7dd046..babd1cf1580b42 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -198,6 +198,7 @@ impl Drop for AppendVec { // disabled due to many false positive warnings while running tests. 
// blocked by rpc's upgrade to jsonrpc v17 //error!("AppendVec failed to remove {:?}: {:?}", &self.path, e); + inc_new_counter_info!("append_vec_drop_fail", 1); } } } From b83203403f96c8708675c604133e2ef898fbac85 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 5 Jul 2022 12:04:25 -0500 Subject: [PATCH 020/100] Update openssl-src to fix cargo audit (#26410) update openssl-src to fix cargo audit --- Cargo.lock | 4 ++-- programs/bpf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9044dd13bf853..c9699f6c676029 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2870,9 +2870,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-src" -version = "111.20.0+1.1.1o" +version = "111.22.0+1.1.1q" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92892c4f87d56e376e469ace79f1128fdaded07646ddf73aa0be4706ff712dec" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" dependencies = [ "cc", ] diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index cf966d1b296103..ea1210094043ce 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2598,9 +2598,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.20.0+1.1.1o" +version = "111.22.0+1.1.1q" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92892c4f87d56e376e469ace79f1128fdaded07646ddf73aa0be4706ff712dec" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" dependencies = [ "cc", ] From 7e4a5de99c43b0d3d678d0c559af0804d3005a2e Mon Sep 17 00:00:00 2001 From: Nick Rempel Date: Tue, 5 Jul 2022 10:49:42 -0700 Subject: [PATCH 021/100] Refactor ConnectionCache::use_quic (#26235) * Remove UseQuic type Move to storing the UdpSocket on ConnectionCache and accepting a bool * Remove use_quic from ConnectionCache constructor Replace with separate with_udp constructor to force callers to choose --- banking-bench/src/main.rs | 14 +++---- bench-tps/src/main.rs | 18 +++++---- client/src/connection_cache.rs | 60 ++++++++++++------------------ core/src/tvu.rs | 2 +- core/src/validator.rs | 8 ++-- dos/src/main.rs | 25 +++++++------ local-cluster/src/local_cluster.rs | 12 +++--- rpc-test/tests/rpc.rs | 11 +++--- 8 files changed, 71 insertions(+), 79 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 7f318ef5a11fd3..e3a910079dfb2c 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -5,7 +5,7 @@ use { log::*, rand::{thread_rng, Rng}, rayon::prelude::*, - solana_client::connection_cache::{ConnectionCache, UseQUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE}, + solana_client::connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE}, solana_core::banking_stage::BankingStage, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -341,8 +341,11 @@ fn main() { SocketAddrSpace::Unspecified, ); let cluster_info = Arc::new(cluster_info); - let tpu_use_quic = UseQUIC::new(matches.is_present("tpu_use_quic")) - .expect("Failed to initialize QUIC flags"); + let tpu_use_quic = matches.is_present("tpu_use_quic"); + let connection_cache = match tpu_use_quic { + true => ConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE), + false => ConnectionCache::with_udp(DEFAULT_TPU_CONNECTION_POOL_SIZE), + }; let banking_stage = BankingStage::new_num_threads( &cluster_info, &poh_recorder, @@ -353,10 +356,7 
@@ fn main() { None, replay_vote_sender, Arc::new(RwLock::new(CostModel::default())), - Arc::new(ConnectionCache::new( - tpu_use_quic, - DEFAULT_TPU_CONNECTION_POOL_SIZE, - )), + Arc::new(connection_cache), ); poh_recorder.write().unwrap().set_bank(&bank, false); diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 0e2e1e842f296d..5b96ffb029375f 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -8,7 +8,7 @@ use { keypairs::get_keypairs, }, solana_client::{ - connection_cache::{ConnectionCache, UseQUIC}, + connection_cache::ConnectionCache, rpc_client::RpcClient, thin_client::ThinClient, tpu_client::{TpuClient, TpuClientConfig}, @@ -103,9 +103,10 @@ fn main() { do_bench_tps(client, cli_config, keypairs); } ExternalClientType::ThinClient => { - let use_quic = UseQUIC::new(*use_quic).expect("Failed to initialize QUIC flags"); - let connection_cache = - Arc::new(ConnectionCache::new(use_quic, *tpu_connection_pool_size)); + let connection_cache = match use_quic { + true => Arc::new(ConnectionCache::new(*tpu_connection_pool_size)), + false => Arc::new(ConnectionCache::with_udp(*tpu_connection_pool_size)), + }; let client = if let Ok(rpc_addr) = value_t!(matches, "rpc_addr", String) { let rpc = rpc_addr.parse().unwrap_or_else(|e| { @@ -176,16 +177,17 @@ fn main() { json_rpc_url.to_string(), CommitmentConfig::confirmed(), )); - let use_quic = UseQUIC::new(*use_quic).expect("Failed to initialize QUIC flags"); - let connection_cache = - Arc::new(ConnectionCache::new(use_quic, *tpu_connection_pool_size)); + let connection_cache = match use_quic { + true => ConnectionCache::new(*tpu_connection_pool_size), + false => ConnectionCache::with_udp(*tpu_connection_pool_size), + }; let client = Arc::new( TpuClient::new_with_connection_cache( rpc_client, websocket_url, TpuClientConfig::default(), - connection_cache, + Arc::new(connection_cache), ) .unwrap_or_else(|err| { eprintln!("Could not create TpuClient {:?}", err); diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 98da0f85bbbbae..34a56cd24a0cf1 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -17,7 +17,6 @@ use { Arc, RwLock, }, }, - tokio::io, }; // Should be non-zero @@ -219,29 +218,12 @@ impl ConnectionCacheStats { } } -pub enum UseQUIC { - Yes, - No(Arc), -} - -impl UseQUIC { - pub fn new(use_quic: bool) -> io::Result { - if use_quic { - Ok(UseQUIC::Yes) - } else { - let socket = - solana_net_utils::bind_with_any_port(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))?; - Ok(UseQUIC::No(Arc::new(socket))) - } - } -} - pub struct ConnectionCache { map: RwLock>, stats: Arc, last_stats: AtomicInterval, - use_quic: UseQUIC, connection_pool_size: usize, + tpu_udp_socket: Option>, } /// Models the pool of connections @@ -270,25 +252,31 @@ impl ConnectionPool { } impl ConnectionCache { - pub fn new(use_quic: UseQUIC, connection_pool_size: usize) -> Self { + pub fn new(connection_pool_size: usize) -> Self { // The minimum pool size is 1. let connection_pool_size = 1.max(connection_pool_size); Self { - use_quic, + tpu_udp_socket: None, connection_pool_size, ..Self::default() } } - pub fn get_use_quic(&self) -> bool { - match self.use_quic { - UseQUIC::Yes => true, - UseQUIC::No(_) => false, + pub fn with_udp(connection_pool_size: usize) -> Self { + // The minimum pool size is 1. 
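+        // No tpu_udp_socket override here: Self::default() below supplies the
+        // bound UDP socket whenever DEFAULT_TPU_USE_QUIC is false.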
+ let connection_pool_size = 1.max(connection_pool_size); + Self { + connection_pool_size, + ..Self::default() } } + pub fn use_quic(&self) -> bool { + matches!(self.tpu_udp_socket, None) + } + fn create_endpoint(&self) -> Option> { - if self.get_use_quic() { + if self.use_quic() { Some(Arc::new(QuicLazyInitializedEndpoint::new())) } else { None @@ -320,12 +308,12 @@ impl ConnectionCache { }); let (cache_hit, num_evictions, eviction_timing_ms) = if to_create_connection { - let connection = match &self.use_quic { - UseQUIC::Yes => BaseTpuConnection::Quic(Arc::new(QuicClient::new( + let connection = match &self.tpu_udp_socket { + Some(socket) => BaseTpuConnection::Udp(socket.clone()), + None => BaseTpuConnection::Quic(Arc::new(QuicClient::new( endpoint.as_ref().unwrap().clone(), *addr, ))), - UseQUIC::No(socket) => BaseTpuConnection::Udp(socket.clone()), }; let connection = Arc::new(connection); @@ -380,11 +368,7 @@ impl ConnectionCache { let map = self.map.read().unwrap(); get_connection_map_lock_measure.stop(); - let port_offset = if self.get_use_quic() { - QUIC_PORT_OFFSET - } else { - 0 - }; + let port_offset = if self.use_quic() { QUIC_PORT_OFFSET } else { 0 }; let addr = SocketAddr::new(addr.ip(), addr.port() + port_offset); @@ -504,13 +488,17 @@ impl ConnectionCache { impl Default for ConnectionCache { fn default() -> Self { - let use_quic = UseQUIC::new(DEFAULT_TPU_USE_QUIC).expect("Failed to initialize QUIC flags"); Self { map: RwLock::new(IndexMap::with_capacity(MAX_CONNECTIONS)), stats: Arc::new(ConnectionCacheStats::default()), last_stats: AtomicInterval::default(), - use_quic, connection_pool_size: DEFAULT_TPU_CONNECTION_POOL_SIZE, + tpu_udp_socket: (!DEFAULT_TPU_USE_QUIC).then(|| { + Arc::new( + solana_net_utils::bind_with_any_port(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + .expect("Unable to bind to UDP socket"), + ) + }), } } } diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 6e14d19794ac5e..3418bf84c76e7a 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -227,7 +227,7 @@ impl Tvu { bank_forks.clone(), ); - let warm_quic_cache_service = if connection_cache.get_use_quic() { + let warm_quic_cache_service = if connection_cache.use_quic() { Some(WarmQuicCacheService::new( connection_cache.clone(), cluster_info.clone(), diff --git a/core/src/validator.rs b/core/src/validator.rs index 6e6b4b07914751..684791ff30dfbe 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -25,7 +25,7 @@ use { }, crossbeam_channel::{bounded, unbounded, Receiver}, rand::{thread_rng, Rng}, - solana_client::connection_cache::{ConnectionCache, UseQUIC}, + solana_client::connection_cache::ConnectionCache, solana_entry::poh::compute_hash_time_ns, solana_geyser_plugin_manager::geyser_plugin_service::GeyserPluginService, solana_gossip::{ @@ -757,8 +757,10 @@ impl Validator { }; let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - let use_quic = UseQUIC::new(use_quic).expect("Failed to initialize QUIC flags"); - let connection_cache = Arc::new(ConnectionCache::new(use_quic, tpu_connection_pool_size)); + let connection_cache = match use_quic { + true => Arc::new(ConnectionCache::new(tpu_connection_pool_size)), + false => Arc::new(ConnectionCache::with_udp(tpu_connection_pool_size)), + }; let rpc_override_health_check = Arc::new(AtomicBool::new(false)); let ( diff --git a/dos/src/main.rs b/dos/src/main.rs index 9153356188aca5..6fefd16af3536c 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -45,7 +45,7 @@ use { rand::{thread_rng, Rng}, 
solana_bench_tps::{bench::generate_and_fund_keypairs, bench_tps_client::BenchTpsClient}, solana_client::{ - connection_cache::{ConnectionCache, UseQUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE}, + connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE}, rpc_client::RpcClient, tpu_connection::TpuConnection, }, @@ -423,8 +423,10 @@ fn run_dos_transactions( //let connection_cache_stats = Arc::new(ConnectionCacheStats::default()); //let udp_client = UdpTpuConnection::new(target, connection_cache_stats); - let tpu_use_quic = UseQUIC::new(tpu_use_quic).expect("Failed to initialize QUIC flags"); - let connection_cache = ConnectionCache::new(tpu_use_quic, DEFAULT_TPU_CONNECTION_POOL_SIZE); + let connection_cache = match tpu_use_quic { + true => ConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE), + false => ConnectionCache::with_udp(DEFAULT_TPU_CONNECTION_POOL_SIZE), + }; let connection = connection_cache.get_connection(&target); let mut count = 0; @@ -622,14 +624,15 @@ fn main() { exit(1); }); - let tpu_use_quic = - UseQUIC::new(cmd_params.tpu_use_quic).expect("Failed to initialize QUIC flags"); - let connection_cache = Arc::new(ConnectionCache::new( - tpu_use_quic, - DEFAULT_TPU_CONNECTION_POOL_SIZE, - )); - let (client, num_clients) = - get_multi_client(&validators, &SocketAddrSpace::Unspecified, connection_cache); + let connection_cache = match cmd_params.tpu_use_quic { + true => ConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE), + false => ConnectionCache::with_udp(DEFAULT_TPU_CONNECTION_POOL_SIZE), + }; + let (client, num_clients) = get_multi_client( + &validators, + &SocketAddrSpace::Unspecified, + Arc::new(connection_cache), + ); if validators.len() < num_clients { eprintln!( "Error: Insufficient nodes discovered. Expecting {} or more", diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 30192a2d05ec1b..6f3b13e5e0aed5 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -8,7 +8,7 @@ use { log::*, solana_client::{ connection_cache::{ - ConnectionCache, UseQUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_USE_QUIC, + ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_USE_QUIC, }, thin_client::ThinClient, }, @@ -299,17 +299,15 @@ impl LocalCluster { validators.insert(leader_pubkey, cluster_leader); - let tpu_use_quic = - UseQUIC::new(config.tpu_use_quic).expect("Failed to initialize QUIC flags"); let mut cluster = Self { funding_keypair: mint_keypair, entry_point_info: leader_contact_info, validators, genesis_config, - connection_cache: Arc::new(ConnectionCache::new( - tpu_use_quic, - config.tpu_connection_pool_size, - )), + connection_cache: match config.tpu_use_quic { + true => Arc::new(ConnectionCache::new(config.tpu_connection_pool_size)), + false => Arc::new(ConnectionCache::with_udp(config.tpu_connection_pool_size)), + }, }; let node_pubkey_to_vote_key: HashMap> = keys_in_genesis diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index b5190c744a1283..72c8488e7a8325 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -8,7 +8,7 @@ use { solana_account_decoder::UiAccount, solana_client::{ client_error::{ClientErrorKind, Result as ClientResult}, - connection_cache::{ConnectionCache, UseQUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE}, + connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE}, nonblocking::pubsub_client::PubsubClient, rpc_client::RpcClient, rpc_config::{RpcAccountInfoConfig, RpcSignatureSubscribeConfig}, @@ -420,11 +420,10 @@ fn 
run_tpu_send_transaction(tpu_use_quic: bool) { test_validator.rpc_url(), CommitmentConfig::processed(), )); - let tpu_use_quic = UseQUIC::new(tpu_use_quic).expect("Failed to initialize QUIC flags"); - let connection_cache = Arc::new(ConnectionCache::new( - tpu_use_quic, - DEFAULT_TPU_CONNECTION_POOL_SIZE, - )); + let connection_cache = match tpu_use_quic { + true => Arc::new(ConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE)), + false => Arc::new(ConnectionCache::with_udp(DEFAULT_TPU_CONNECTION_POOL_SIZE)), + }; let tpu_client = TpuClient::new_with_connection_cache( rpc_client.clone(), &test_validator.rpc_pubsub_url(), From ce39c1402511c658c27facf3b75862a46639d4a1 Mon Sep 17 00:00:00 2001 From: carllin Date: Tue, 5 Jul 2022 13:58:51 -0500 Subject: [PATCH 022/100] Add end-to-end replay slot metrics (#25752) --- Cargo.lock | 9 ++ core/Cargo.toml | 1 + core/src/lib.rs | 4 + core/src/progress_map.rs | 252 +++++------------------------ ledger/src/blockstore_processor.rs | 202 +++++++++++++++++------ program-runtime/Cargo.toml | 2 + program-runtime/src/lib.rs | 7 + program-runtime/src/timings.rs | 249 +++++++++++++++++++++++++++- programs/bpf/Cargo.lock | 9 ++ runtime/src/bank.rs | 11 ++ 10 files changed, 481 insertions(+), 265 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9699f6c676029..2296d09d9d5d5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1213,6 +1213,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "eager" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe71d579d1812060163dff96056261deb5bf6729b100fa2e36a68b9649ba3d3" + [[package]] name = "ed25519" version = "1.2.0" @@ -4932,6 +4938,7 @@ dependencies = [ "chrono", "crossbeam-channel", "dashmap", + "eager", "etcd-client", "fs_extra", "histogram", @@ -5705,6 +5712,7 @@ version = "1.11.2" dependencies = [ "base64 0.13.0", "bincode", + "eager", "enum-iterator", "itertools", "libc", @@ -5718,6 +5726,7 @@ dependencies = [ "solana-frozen-abi-macro 1.11.2", "solana-logger 1.11.2", "solana-measure", + "solana-metrics", "solana-sdk 1.11.2", "thiserror", ] diff --git a/core/Cargo.toml b/core/Cargo.toml index 781f8975b2e621..0cd8959106f91f 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -21,6 +21,7 @@ bs58 = "0.4.0" chrono = { version = "0.4.11", features = ["serde"] } crossbeam-channel = "0.5" dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } +eager = "0.1.0" etcd-client = { version = "0.8.1", features = ["tls"] } fs_extra = "1.2.0" histogram = "0.6.9" diff --git a/core/src/lib.rs b/core/src/lib.rs index 5062c4a8f0d97b..b47a5f125e89a9 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -1,5 +1,6 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::integer_arithmetic)] +#![recursion_limit = "2048"] //! The `solana` library implements the Solana high-performance blockchain architecture. //! It includes a full Rust implementation of the architecture (see //! 
[Validator](server/struct.Validator.html)) as well as hooks to GPU implementations of its most @@ -78,6 +79,9 @@ pub mod voting_service; pub mod warm_quic_cache_service; pub mod window_service; +#[macro_use] +extern crate eager; + #[macro_use] extern crate log; diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index 24aa6168941519..823d3cbb2a70bd 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -6,7 +6,7 @@ use { replay_stage::SUPERMINORITY_THRESHOLD, }, solana_ledger::blockstore_processor::{ConfirmationProgress, ConfirmationTiming}, - solana_program_runtime::timings::ExecuteTimingType, + solana_program_runtime::{report_execute_timings, timings::ExecuteTimingType}, solana_runtime::{bank::Bank, bank_forks::BankForks, vote_account::VoteAccountsHashMap}, solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, std::{ @@ -43,219 +43,43 @@ impl ReplaySlotStats { num_shreds: u64, bank_complete_time_us: u64, ) { - datapoint_info!( - "replay-slot-stats", - ("slot", slot as i64, i64), - ("fetch_entries_time", self.fetch_elapsed as i64, i64), - ( - "fetch_entries_fail_time", - self.fetch_fail_elapsed as i64, - i64 - ), - ( - "entry_poh_verification_time", - self.poh_verify_elapsed as i64, - i64 - ), - ( - "entry_transaction_verification_time", - self.transaction_verify_elapsed as i64, - i64 - ), - ("replay_time", self.replay_elapsed as i64, i64), - ( - "replay_total_elapsed", - self.started.elapsed().as_micros() as i64, - i64 - ), - ("total_entries", num_entries as i64, i64), - ("total_shreds", num_shreds as i64, i64), - ( - "check_us", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::CheckUs), - i64 - ), - ( - "load_us", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::LoadUs), - i64 - ), - ( - "execute_us", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::ExecuteUs), - i64 - ), - ( - "store_us", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::StoreUs), - i64 - ), - ( - "update_stakes_cache_us", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::UpdateStakesCacheUs), - i64 - ), - ("bank_complete_time_us", bank_complete_time_us, i64), - ( - "total_batches_len", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::TotalBatchesLen), - i64 - ), - ( - "num_execute_batches", - *self - .execute_timings - .metrics - .index(ExecuteTimingType::NumExecuteBatches), - i64 - ), - ( - "execute_details_serialize_us", - self.execute_timings.details.serialize_us, - i64 - ), - ( - "execute_details_create_vm_us", - self.execute_timings.details.create_vm_us, - i64 - ), - ( - "execute_details_execute_inner_us", - self.execute_timings.details.execute_us, - i64 - ), - ( - "execute_details_deserialize_us", - self.execute_timings.details.deserialize_us, - i64 - ), - ( - "execute_details_get_or_create_executor_us", - self.execute_timings.details.get_or_create_executor_us, - i64 - ), - ( - "execute_details_changed_account_count", - self.execute_timings.details.changed_account_count, - i64 - ), - ( - "execute_details_total_account_count", - self.execute_timings.details.total_account_count, - i64 - ), - ( - "execute_details_total_data_size", - self.execute_timings.details.total_data_size, - i64 - ), - ( - "execute_details_data_size_changed", - self.execute_timings.details.data_size_changed, - i64 - ), - ( - "execute_details_create_executor_register_syscalls_us", - self.execute_timings - .details - .create_executor_register_syscalls_us, - i64 - ), - ( - 
"execute_details_create_executor_load_elf_us", - self.execute_timings.details.create_executor_load_elf_us, - i64 - ), - ( - "execute_details_create_executor_verify_code_us", - self.execute_timings.details.create_executor_verify_code_us, - i64 - ), - ( - "execute_details_create_executor_jit_compile_us", - self.execute_timings.details.create_executor_jit_compile_us, - i64 - ), - ( - "execute_accessories_feature_set_clone_us", - self.execute_timings - .execute_accessories - .feature_set_clone_us, - i64 - ), - ( - "execute_accessories_compute_budget_process_transaction_us", - self.execute_timings - .execute_accessories - .compute_budget_process_transaction_us, - i64 - ), - ( - "execute_accessories_get_executors_us", - self.execute_timings.execute_accessories.get_executors_us, - i64 - ), - ( - "execute_accessories_process_message_us", - self.execute_timings.execute_accessories.process_message_us, - i64 - ), - ( - "execute_accessories_update_executors_us", - self.execute_timings.execute_accessories.update_executors_us, - i64 - ), - ( - "execute_accessories_process_instructions_total_us", - self.execute_timings - .execute_accessories - .process_instructions - .total_us, - i64 - ), - ( - "execute_accessories_process_instructions_verify_caller_us", - self.execute_timings - .execute_accessories - .process_instructions - .verify_caller_us, - i64 - ), - ( - "execute_accessories_process_instructions_process_executable_chain_us", - self.execute_timings - .execute_accessories - .process_instructions - .process_executable_chain_us, - i64 - ), - ( - "execute_accessories_process_instructions_verify_callee_us", - self.execute_timings - .execute_accessories - .process_instructions - .verify_callee_us, - i64 - ), - ); + lazy! { + datapoint_info!( + "replay-slot-stats", + ("slot", slot as i64, i64), + ("fetch_entries_time", self.fetch_elapsed as i64, i64), + ( + "fetch_entries_fail_time", + self.fetch_fail_elapsed as i64, + i64 + ), + ( + "entry_poh_verification_time", + self.poh_verify_elapsed as i64, + i64 + ), + ( + "entry_transaction_verification_time", + self.transaction_verify_elapsed as i64, + i64 + ), + ("replay_time", self.replay_elapsed as i64, i64), + ("execute_batches_us", self.execute_batches_us as i64, i64), + ( + "replay_total_elapsed", + self.started.elapsed().as_micros() as i64, + i64 + ), + ("bank_complete_time_us", bank_complete_time_us, i64), + ("total_entries", num_entries as i64, i64), + ("total_shreds", num_shreds as i64, i64), + // Everything inside the `eager!` block will be eagerly expanded before + // evaluation of the rest of the surrounding macro. 
+            eager!{report_execute_timings!(self.execute_timings)}
+        );
+    };
+
+    self.end_to_end_execute_timings.report_stats(slot);
 
     let mut per_pubkey_timings: Vec<_> = self
         .execute_timings
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index 28854bdacf5a97..ce63057303ab0b 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -12,9 +12,9 @@ use {
     solana_entry::entry::{
         self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers,
     },
-    solana_measure::measure::Measure,
+    solana_measure::{measure, measure::Measure},
     solana_metrics::{datapoint_error, inc_new_counter_debug},
-    solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings},
+    solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings, ThreadExecuteTimings},
     solana_rayon_threadlimit::{get_max_thread_count, get_thread_count},
     solana_runtime::{
         accounts_background_service::AbsRequestSender,
@@ -58,7 +58,7 @@ use {
         collections::{HashMap, HashSet},
         path::PathBuf,
         result,
-        sync::{Arc, RwLock},
+        sync::{Arc, Mutex, RwLock},
         time::{Duration, Instant},
     },
     thiserror::Error,
@@ -266,47 +266,91 @@ fn execute_batch(
     first_err.map(|(result, _)| result).unwrap_or(Ok(()))
 }
 
+#[derive(Default)]
+struct ExecuteBatchesInternalMetrics {
+    execution_timings_per_thread: HashMap<usize, ThreadExecuteTimings>,
+    total_batches_len: u64,
+    execute_batches_us: u64,
+}
+
 fn execute_batches_internal(
     bank: &Arc<Bank>,
     batches: &[TransactionBatchWithIndexes],
     entry_callback: Option<&ProcessCallback>,
     transaction_status_sender: Option<&TransactionStatusSender>,
     replay_vote_sender: Option<&ReplayVoteSender>,
-    timings: &mut ExecuteTimings,
     cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
     tx_costs: &[u64],
-) -> Result<()> {
+) -> Result<ExecuteBatchesInternalMetrics> {
     inc_new_counter_debug!("bank-par_execute_entries-count", batches.len());
-    let (results, new_timings): (Vec<Result<()>>, Vec<ExecuteTimings>) =
-        PAR_THREAD_POOL.install(|| {
-            batches
-                .into_par_iter()
-                .enumerate()
-                .map(|(index, batch)| {
-                    let mut timings = ExecuteTimings::default();
-                    let result = execute_batch(
-                        batch,
-                        bank,
-                        transaction_status_sender,
-                        replay_vote_sender,
-                        &mut timings,
-                        cost_capacity_meter.clone(),
-                        tx_costs[index],
-                    );
-                    if let Some(entry_callback) = entry_callback {
-                        entry_callback(bank);
-                    }
-                    (result, timings)
-                })
-                .unzip()
-        });
-    timings.saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, batches.len() as u64);
-    timings.saturating_add_in_place(ExecuteTimingType::NumExecuteBatches, 1);
-    for timing in new_timings {
-        timings.accumulate(&timing);
-    }
+    let execution_timings_per_thread: Mutex<HashMap<usize, ThreadExecuteTimings>> =
+        Mutex::new(HashMap::new());
 
-    first_err(&results)
+    let mut execute_batches_elapsed = Measure::start("execute_batches_elapsed");
+    let results: Vec<Result<()>> = PAR_THREAD_POOL.install(|| {
+        batches
+            .into_par_iter()
+            .enumerate()
+            .map(|(index, transaction_batch_with_indexes)| {
+                let transaction_count = transaction_batch_with_indexes
+                    .batch
+                    .sanitized_transactions()
+                    .len() as u64;
+                let mut timings = ExecuteTimings::default();
+                let (result, execute_batches_time): (Result<()>, Measure) = measure!(
+                    {
+                        let result = execute_batch(
+                            transaction_batch_with_indexes,
+                            bank,
+                            transaction_status_sender,
+                            replay_vote_sender,
+                            &mut timings,
+                            cost_capacity_meter.clone(),
+                            tx_costs[index],
+                        );
+                        if let Some(entry_callback) = entry_callback {
+                            entry_callback(bank);
+                        }
+                        result
+                    },
+                    "execute_batch",
+                );
+
+                let thread_index = PAR_THREAD_POOL.current_thread_index().unwrap();
+                execution_timings_per_thread
+                    .lock()
+                    .unwrap()
+                    .entry(thread_index)
+                    .and_modify(|thread_execution_time| {
+                        let ThreadExecuteTimings {
+                            total_thread_us,
+                            total_transactions_executed,
+                            execute_timings: total_thread_execute_timings,
+                        } = thread_execution_time;
+                        *total_thread_us += execute_batches_time.as_us();
+                        *total_transactions_executed += transaction_count;
+                        total_thread_execute_timings
+                            .saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1);
+                        total_thread_execute_timings.accumulate(&timings);
+                    })
+                    .or_insert(ThreadExecuteTimings {
+                        total_thread_us: execute_batches_time.as_us(),
+                        total_transactions_executed: transaction_count,
+                        execute_timings: timings,
+                    });
+                result
+            })
+            .collect()
+    });
+    execute_batches_elapsed.stop();
+
+    first_err(&results)?;
+
+    Ok(ExecuteBatchesInternalMetrics {
+        execution_timings_per_thread: execution_timings_per_thread.into_inner().unwrap(),
+        total_batches_len: batches.len() as u64,
+        execute_batches_us: execute_batches_elapsed.as_us(),
+    })
 }
 
 fn rebatch_transactions<'a>(
@@ -335,7 +379,7 @@ fn execute_batches(
     entry_callback: Option<&ProcessCallback>,
     transaction_status_sender: Option<&TransactionStatusSender>,
     replay_vote_sender: Option<&ReplayVoteSender>,
-    timings: &mut ExecuteTimings,
+    confirmation_timing: &mut ConfirmationTiming,
     cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
     cost_model: &CostModel,
 ) -> Result<()> {
@@ -414,16 +458,18 @@ fn execute_batches(
         batches
     };
 
-    execute_batches_internal(
+    let execute_batches_internal_metrics = execute_batches_internal(
         bank,
         rebatched_txs,
         entry_callback,
         transaction_status_sender,
         replay_vote_sender,
-        timings,
         cost_capacity_meter,
         &tx_batch_costs,
-    )
+    )?;
+
+    confirmation_timing.process_execute_batches_internal_metrics(execute_batches_internal_metrics);
+    Ok(())
 }
 
 /// Process an ordered list of entries in parallel
@@ -448,8 +494,8 @@ pub fn process_entries_for_tests(
         }
     };
 
-    let mut timings = ExecuteTimings::default();
     let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap();
+    let mut confirmation_timing = ConfirmationTiming::default();
     let mut replay_entries: Vec<_> =
         entry::verify_transactions(entries, Arc::new(verify_transaction))?
             .into_iter()
@@ -464,6 +510,7 @@ pub fn process_entries_for_tests(
                 }
             })
             .collect();
+
     let result = process_entries_with_callback(
         bank,
         &mut replay_entries,
@@ -472,11 +519,11 @@ pub fn process_entries_for_tests(
         transaction_status_sender,
         replay_vote_sender,
         None,
-        &mut timings,
+        &mut confirmation_timing,
         Arc::new(RwLock::new(BlockCostCapacityMeter::default())),
     );
 
-    debug!("process_entries: {:?}", timings);
+    debug!("process_entries: {:?}", confirmation_timing);
     result
 }
 
@@ -490,7 +537,7 @@ fn process_entries_with_callback(
     transaction_status_sender: Option<&TransactionStatusSender>,
     replay_vote_sender: Option<&ReplayVoteSender>,
     transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
-    timings: &mut ExecuteTimings,
+    confirmation_timing: &mut ConfirmationTiming,
     cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
 ) -> Result<()> {
     // accumulator for entries that can be processed in parallel
@@ -517,7 +564,7 @@ fn process_entries_with_callback(
                     entry_callback,
                     transaction_status_sender,
                     replay_vote_sender,
-                    timings,
+                    confirmation_timing,
                     cost_capacity_meter.clone(),
                     &cost_model,
                 )?;
@@ -587,7 +634,7 @@ fn process_entries_with_callback(
                         entry_callback,
                         transaction_status_sender,
                         replay_vote_sender,
-                        timings,
+                        confirmation_timing,
                         cost_capacity_meter.clone(),
                         &cost_model,
                     )?;
@@ -603,7 +650,7 @@ fn process_entries_with_callback(
         entry_callback,
         transaction_status_sender,
         replay_vote_sender,
-        timings,
+        confirmation_timing,
         cost_capacity_meter,
         &cost_model,
     )?;
@@ -765,8 +812,8 @@ pub fn process_blockstore_from_root(
     }
 
     let mut timing = ExecuteTimings::default();
-    // Iterate and replay slots from blockstore starting from `start_slot`
+
+    // Iterate and replay slots from blockstore starting from `start_slot`
     if let Some(start_slot_meta) = blockstore
         .meta(start_slot)
         .unwrap_or_else(|_| panic!("Failed to get meta for slot {}", start_slot))
@@ -926,14 +973,70 @@ fn confirm_full_slot(
     }
 }
 
+#[derive(Debug)]
 pub struct ConfirmationTiming {
     pub started: Instant,
     pub replay_elapsed: u64,
+    pub execute_batches_us: u64,
     pub poh_verify_elapsed: u64,
     pub transaction_verify_elapsed: u64,
     pub fetch_elapsed: u64,
     pub fetch_fail_elapsed: u64,
     pub execute_timings: ExecuteTimings,
+    pub end_to_end_execute_timings: ThreadExecuteTimings,
+}
+
+impl ConfirmationTiming {
+    fn process_execute_batches_internal_metrics(
+        &mut self,
+        execute_batches_internal_metrics: ExecuteBatchesInternalMetrics,
+    ) {
+        let ConfirmationTiming {
+            execute_timings: ref mut cumulative_execute_timings,
+            execute_batches_us: ref mut cumulative_execute_batches_us,
+            ref mut end_to_end_execute_timings,
+            ..
+        } = self;
+
+        saturating_add_assign!(
+            *cumulative_execute_batches_us,
+            execute_batches_internal_metrics.execute_batches_us
+        );
+
+        cumulative_execute_timings.saturating_add_in_place(
+            ExecuteTimingType::TotalBatchesLen,
+            execute_batches_internal_metrics.total_batches_len,
+        );
+        cumulative_execute_timings.saturating_add_in_place(ExecuteTimingType::NumExecuteBatches, 1);
+
+        let mut current_max_thread_execution_time: Option<ThreadExecuteTimings> = None;
+        for (_, thread_execution_time) in execute_batches_internal_metrics
+            .execution_timings_per_thread
+            .into_iter()
+        {
+            let ThreadExecuteTimings {
+                total_thread_us,
+                execute_timings,
+                ..
+            } = &thread_execution_time;
+            cumulative_execute_timings.accumulate(execute_timings);
+            if *total_thread_us
+                > current_max_thread_execution_time
+                    .as_ref()
+                    .map(|thread_execution_time| thread_execution_time.total_thread_us)
+                    .unwrap_or(0)
+            {
+                current_max_thread_execution_time = Some(thread_execution_time);
+            }
+        }
+
+        if let Some(current_max_thread_execution_time) = current_max_thread_execution_time {
+            end_to_end_execute_timings.accumulate(&current_max_thread_execution_time);
+            end_to_end_execute_timings
+                .execute_timings
+                .saturating_add_in_place(ExecuteTimingType::NumExecuteBatches, 1);
+        };
+    }
 }
 
 impl Default for ConfirmationTiming {
@@ -941,11 +1044,13 @@ impl Default for ConfirmationTiming {
         Self {
             started: Instant::now(),
             replay_elapsed: 0,
+            execute_batches_us: 0,
             poh_verify_elapsed: 0,
             transaction_verify_elapsed: 0,
             fetch_elapsed: 0,
             fetch_fail_elapsed: 0,
             execute_timings: ExecuteTimings::default(),
+            end_to_end_execute_timings: ThreadExecuteTimings::default(),
         }
     }
 }
@@ -1104,7 +1209,6 @@ fn confirm_slot_entries(
     assert!(entries.is_some());
 
     let mut replay_elapsed = Measure::start("replay_elapsed");
-    let mut execute_timings = ExecuteTimings::default();
     let cost_capacity_meter = Arc::new(RwLock::new(BlockCostCapacityMeter::default()));
     let mut replay_entries: Vec<_> = entries
         .unwrap()
@@ -1124,15 +1228,13 @@ fn confirm_slot_entries(
             transaction_status_sender,
             replay_vote_sender,
             transaction_cost_metrics_sender,
-            &mut execute_timings,
+            timing,
             cost_capacity_meter,
         )
        .map_err(BlockstoreProcessorError::from);
     replay_elapsed.stop();
     timing.replay_elapsed += replay_elapsed.as_us();
 
-    timing.execute_timings.accumulate(&execute_timings);
-
     // If running signature verification on the GPU, wait for that
     // computation to finish, and get the result of it.
If we did the // signature verification on the CPU, this just returns the diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index 99201beeba4325..0ba4b59be62c14 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -12,6 +12,7 @@ edition = "2021" [dependencies] base64 = "0.13" bincode = "1.3.3" +eager = "0.1.0" itertools = "0.10.1" libc = "0.2.101" libloading = "0.7.0" @@ -22,6 +23,7 @@ serde = { version = "1.0.129", features = ["derive", "rc"] } solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } solana-measure = { path = "../measure", version = "=1.11.2" } +solana-metrics = { path = "../metrics", version = "=1.11.2" } solana-sdk = { path = "../sdk", version = "=1.11.2" } thiserror = "1.0" enum-iterator = "0.8.1" diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index 89a6996b2a4304..2a9be8d8c4e3bd 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -1,6 +1,13 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![deny(clippy::integer_arithmetic)] #![deny(clippy::indexing_slicing)] +#![recursion_limit = "2048"] + +#[macro_use] +extern crate eager; + +#[macro_use] +extern crate solana_metrics; pub mod accounts_data_meter; pub mod compute_budget; diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs index f342b9d5cadce6..41a850f31d469a 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -1,7 +1,7 @@ use { core::fmt, enum_iterator::IntoEnumIterator, - solana_sdk::{pubkey::Pubkey, saturating_add_assign}, + solana_sdk::{clock::Slot, pubkey::Pubkey, saturating_add_assign}, std::{ collections::HashMap, ops::{Index, IndexMut}, @@ -50,6 +50,7 @@ pub enum ExecuteTimingType { NumExecuteBatches, CollectLogsUs, TotalBatchesLen, + UpdateTransactionStatuses, } pub struct Metrics([u64; ExecuteTimingType::ITEM_COUNT]); @@ -79,6 +80,252 @@ impl core::fmt::Debug for Metrics { } } +// The auxiliary variable that must always be provided to eager_macro_rules! must use the +// identifier `eager_1`. Macros declared with `eager_macro_rules!` can then be used inside +// an eager! block. +eager_macro_rules! { $eager_1 + #[macro_export] + macro_rules! 
report_execute_timings { + ($self: expr) => { + ( + "validate_transactions_us", + *$self + .metrics + .index(ExecuteTimingType::CheckUs), + i64 + ), + ( + "load_us", + *$self + .metrics + .index(ExecuteTimingType::LoadUs), + i64 + ), + ( + "execute_us", + *$self + .metrics + .index(ExecuteTimingType::ExecuteUs), + i64 + ), + ( + "collect_logs_us", + *$self + .metrics + .index(ExecuteTimingType::CollectLogsUs), + i64 + ), + ( + "store_us", + *$self + + .metrics + .index(ExecuteTimingType::StoreUs), + i64 + ), + ( + "update_stakes_cache_us", + *$self + + .metrics + .index(ExecuteTimingType::UpdateStakesCacheUs), + i64 + ), + ( + "total_batches_len", + *$self + + .metrics + .index(ExecuteTimingType::TotalBatchesLen), + i64 + ), + ( + "num_execute_batches", + *$self + + .metrics + .index(ExecuteTimingType::NumExecuteBatches), + i64 + ), + ( + "update_transaction_statuses", + *$self + + .metrics + .index(ExecuteTimingType::UpdateTransactionStatuses), + i64 + ), + ( + "execute_details_serialize_us", + $self.details.serialize_us, + i64 + ), + ( + "execute_details_create_vm_us", + $self.details.create_vm_us, + i64 + ), + ( + "execute_details_execute_inner_us", + $self.details.execute_us, + i64 + ), + ( + "execute_details_deserialize_us", + $self.details.deserialize_us, + i64 + ), + ( + "execute_details_get_or_create_executor_us", + $self.details.get_or_create_executor_us, + i64 + ), + ( + "execute_details_changed_account_count", + $self.details.changed_account_count, + i64 + ), + ( + "execute_details_total_account_count", + $self.details.total_account_count, + i64 + ), + ( + "execute_details_total_data_size", + $self.details.total_data_size, + i64 + ), + ( + "execute_details_data_size_changed", + $self.details.data_size_changed, + i64 + ), + ( + "execute_details_create_executor_register_syscalls_us", + $self + .details + .create_executor_register_syscalls_us, + i64 + ), + ( + "execute_details_create_executor_load_elf_us", + $self.details.create_executor_load_elf_us, + i64 + ), + ( + "execute_details_create_executor_verify_code_us", + $self.details.create_executor_verify_code_us, + i64 + ), + ( + "execute_details_create_executor_jit_compile_us", + $self.details.create_executor_jit_compile_us, + i64 + ), + ( + "execute_accessories_feature_set_clone_us", + $self + .execute_accessories + .feature_set_clone_us, + i64 + ), + ( + "execute_accessories_compute_budget_process_transaction_us", + $self + .execute_accessories + .compute_budget_process_transaction_us, + i64 + ), + ( + "execute_accessories_get_executors_us", + $self.execute_accessories.get_executors_us, + i64 + ), + ( + "execute_accessories_process_message_us", + $self.execute_accessories.process_message_us, + i64 + ), + ( + "execute_accessories_update_executors_us", + $self.execute_accessories.update_executors_us, + i64 + ), + ( + "execute_accessories_process_instructions_total_us", + $self + .execute_accessories + .process_instructions + .total_us, + i64 + ), + ( + "execute_accessories_process_instructions_verify_caller_us", + $self + .execute_accessories + .process_instructions + .verify_caller_us, + i64 + ), + ( + "execute_accessories_process_instructions_process_executable_chain_us", + $self + .execute_accessories + .process_instructions + .process_executable_chain_us, + i64 + ), + ( + "execute_accessories_process_instructions_verify_callee_us", + $self + .execute_accessories + .process_instructions + .verify_callee_us, + i64 + ), + } + } +} + +#[derive(Debug, Default)] +pub struct ThreadExecuteTimings { + pub total_thread_us: u64, + pub 
total_transactions_executed: u64, + pub execute_timings: ExecuteTimings, +} + +impl ThreadExecuteTimings { + pub fn report_stats(&self, slot: Slot) { + lazy! { + datapoint_info!( + "replay-slot-end-to-end-stats", + ("slot", slot as i64, i64), + ("total_thread_us", self.total_thread_us as i64, i64), + ("total_transactions_executed", self.total_transactions_executed as i64, i64), + // Everything inside the `eager!` block will be eagerly expanded before + // evaluation of the rest of the surrounding macro. + eager!{report_execute_timings!(self.execute_timings)} + ); + }; + } + + pub fn accumulate(&mut self, other: &ThreadExecuteTimings) { + self.execute_timings.saturating_add_in_place( + ExecuteTimingType::TotalBatchesLen, + *other + .execute_timings + .metrics + .index(ExecuteTimingType::TotalBatchesLen), + ); + self.execute_timings.accumulate(&other.execute_timings); + saturating_add_assign!(self.total_thread_us, other.total_thread_us); + saturating_add_assign!( + self.total_transactions_executed, + other.total_transactions_executed + ); + } +} + #[derive(Debug, Default)] pub struct ExecuteTimings { pub metrics: Metrics, diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index ea1210094043ce..8597619f8ea96c 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -1023,6 +1023,12 @@ dependencies = [ "syn 0.15.44", ] +[[package]] +name = "eager" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe71d579d1812060163dff96056261deb5bf6729b100fa2e36a68b9649ba3d3" + [[package]] name = "ed25519" version = "1.0.1" @@ -4561,6 +4567,7 @@ dependencies = [ "chrono", "crossbeam-channel", "dashmap", + "eager", "etcd-client", "fs_extra", "histogram", @@ -5063,6 +5070,7 @@ version = "1.11.2" dependencies = [ "base64 0.13.0", "bincode", + "eager", "enum-iterator", "itertools", "libc", @@ -5075,6 +5083,7 @@ dependencies = [ "solana-frozen-abi 1.11.2", "solana-frozen-abi-macro 1.11.2", "solana-measure", + "solana-metrics", "solana-sdk 1.11.2", "thiserror", ] diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 44d993c24dc7b3..4395cd272a4862 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4575,6 +4575,7 @@ impl Bank { let transaction_log_collector_config = self.transaction_log_collector_config.read().unwrap(); + let mut collect_logs_time = Measure::start("collect_logs_time"); for (execution_result, tx) in execution_results.iter().zip(sanitized_txs) { if let Some(debug_keys) = &self.transaction_debug_keys { for key in tx.message().account_keys().iter() { @@ -4664,6 +4665,10 @@ impl Bank { } } } + collect_logs_time.stop(); + timings + .saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_time.as_us()); + if *err_count > 0 { debug!( "{} errors of {} txs", @@ -5003,9 +5008,15 @@ impl Bank { update_stakes_cache_time.as_us(), ); + let mut update_transaction_statuses_time = Measure::start("update_transaction_statuses"); self.update_transaction_statuses(sanitized_txs, &execution_results); let fee_collection_results = self.filter_program_errors_and_collect_fee(sanitized_txs, &execution_results); + update_transaction_statuses_time.stop(); + timings.saturating_add_in_place( + ExecuteTimingType::UpdateTransactionStatuses, + update_transaction_statuses_time.as_us(), + ); TransactionResults { fee_collection_results, From 26176af4aad8019024270044e3662920ba6ff473 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 5 Jul 2022 14:49:05 -0500 Subject: [PATCH 023/100] cleanup cli arg help (#26366) --- 
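As context for the help-text changes in the diff below: these two accounts-index
options are mutually exclusive, which the reworded help strings now spell out.
A minimal clap 2.x sketch of that relationship (illustrative only; the
`--accounts-index-memory-limit-mb` long name and the `main` wrapper are assumed
here, and the real definitions also attach an `is_parsable::<usize>` validator):

    use clap::{App, Arg};

    fn main() {
        let _matches = App::new("accounts-index-args-sketch")
            .arg(
                Arg::with_name("accounts_index_memory_limit_mb")
                    .long("accounts-index-memory-limit-mb") // assumed long name
                    .value_name("MEGABYTES")
                    .takes_value(true)
                    .help(
                        "How much memory the accounts index can consume. If this is \
                         exceeded, some account index entries will be stored on disk.",
                    ),
            )
            .arg(
                Arg::with_name("disable_accounts_disk_index")
                    .long("disable-accounts-disk-index")
                    .help("Disable the disk-based accounts index.")
                    // A memory limit is meaningless once the disk index is disabled,
                    // so clap rejects the flag combination outright:
                    .conflicts_with("accounts_index_memory_limit_mb"),
            )
            .get_matches();
    }

With this wiring, passing both flags at once fails with a clap usage error
instead of being silently accepted, which is the behavior the clarified help
text describes.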
 ledger-tool/src/main.rs | 4 ++--
 validator/src/main.rs   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 27dec4d50264d8..a06b31f7250aad 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -1002,10 +1002,10 @@ fn main() {
         .value_name("MEGABYTES")
         .validator(is_parsable::<usize>)
         .takes_value(true)
-        .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk. If missing, the entire index is stored in memory.");
+        .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk.");
     let disable_disk_index = Arg::with_name("disable_accounts_disk_index")
         .long("disable-accounts-disk-index")
-        .help("Disable the disk-based accounts index if it is enabled by default.")
+        .help("Disable the disk-based accounts index. It is enabled by default. The entire accounts index will be kept in memory.")
         .conflicts_with("accounts_index_memory_limit_mb");
     let accountsdb_skip_shrink = Arg::with_name("accounts_db_skip_shrink")
         .long("accounts-db-skip-shrink")
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 62a18b8a22bd8b..21d3e6cc7b4fe1 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1654,7 +1654,7 @@ pub fn main() {
                 .value_name("MEGABYTES")
                 .validator(is_parsable::<usize>)
                 .takes_value(true)
-                .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk. If missing, the entire index is stored in memory."),
+                .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk."),
         )
         .arg(
             Arg::with_name("disable_accounts_disk_index")
From 61f8769039d8d4aae0fe7e25dd6c1036d974b274 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Jul 2022 20:07:08 +0000
Subject: [PATCH 024/100] chore: bump reqwest from 0.11.10 to 0.11.11 (#26162)

* chore: bump reqwest from 0.11.10 to 0.11.11

Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.10 to 0.11.11.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.10...v0.11.11)

---
updated-dependencies:
- dependency-name: reqwest
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 28 ++++++++++------------------ cli/Cargo.toml | 2 +- client/Cargo.toml | 2 +- download-utils/Cargo.toml | 2 +- install/Cargo.toml | 2 +- metrics/Cargo.toml | 2 +- notifier/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 18 +++++------------- rpc-test/Cargo.toml | 2 +- 9 files changed, 22 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2296d09d9d5d5c..75c1422eafc61a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1925,9 +1925,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.5.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -1943,9 +1943,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.14" +version = "0.14.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" +checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ "bytes", "futures-channel", @@ -1956,7 +1956,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa 1.0.1", "pin-project-lite", "socket2", "tokio", @@ -3793,9 +3793,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.10" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb" +checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" dependencies = [ "async-compression", "base64 0.13.0", @@ -3818,14 +3818,15 @@ dependencies = [ "percent-encoding 2.1.0", "pin-project-lite", "rustls 0.20.6", - "rustls-pemfile 0.3.0", + "rustls-pemfile 1.0.0", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", "tokio-rustls 0.23.3", - "tokio-util 0.6.9", + "tokio-util 0.7.1", + "tower-service", "url 2.2.2", "wasm-bindgen", "wasm-bindgen-futures", @@ -3961,15 +3962,6 @@ dependencies = [ "base64 0.13.0", ] -[[package]] -name = "rustls-pemfile" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" -dependencies = [ - "base64 0.13.0", -] - [[package]] name = "rustls-pemfile" version = "1.0.0" diff --git a/cli/Cargo.toml b/cli/Cargo.toml index d3cdff5eecee75..8478b0a8e47cb6 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -22,7 +22,7 @@ humantime = "2.0.1" log = "0.4.17" num-traits = "0.2" pretty-hex = "0.3.0" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } semver = "1.0.10" serde = "1.0.137" serde_derive = "1.0.103" diff --git a/client/Cargo.toml b/client/Cargo.toml index b56824a7499418..e6dc8bfa5d3b72 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -32,7 +32,7 @@ quinn-proto = "0.8.3" rand = "0.7.0" rand_chacha = "0.2.2" rayon = 
"1.5.3" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } rustls = { version = "0.20.6", features = ["dangerous_configuration"] } semver = "1.0.10" serde = "1.0.137" diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml index 553bfc1efdb776..677581933bac02 100644 --- a/download-utils/Cargo.toml +++ b/download-utils/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" console = "0.15.0" indicatif = "0.16.2" log = "0.4.17" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } solana-runtime = { path = "../runtime", version = "=1.11.2" } solana-sdk = { path = "../sdk", version = "=1.11.2" } diff --git a/install/Cargo.toml b/install/Cargo.toml index d58370ab10d0af..b769c7eb00e874 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -22,7 +22,7 @@ dirs-next = "2.0.0" indicatif = "0.16.2" lazy_static = "1.4.0" nix = "0.24.0" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } semver = "1.0.10" serde = { version = "1.0.137", features = ["derive"] } serde_yaml = "0.8.24" diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 35bb2ab00b76e6..c1aecc89eaf1a2 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -14,7 +14,7 @@ crossbeam-channel = "0.5" gethostname = "0.2.3" lazy_static = "1.4.0" log = "0.4.17" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } solana-sdk = { path = "../sdk", version = "=1.11.2" } [dev-dependencies] diff --git a/notifier/Cargo.toml b/notifier/Cargo.toml index 6abbd786f1f707..b64c5dec858404 100644 --- a/notifier/Cargo.toml +++ b/notifier/Cargo.toml @@ -11,7 +11,7 @@ edition = "2021" [dependencies] log = "0.4.17" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } serde_json = "1.0" [lib] diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 8597619f8ea96c..9470c597b55090 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -3377,9 +3377,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.10" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb" +checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" dependencies = [ "async-compression", "base64 0.13.0", @@ -3402,14 +3402,15 @@ dependencies = [ "percent-encoding 2.1.0", "pin-project-lite", "rustls 0.20.6", - "rustls-pemfile 0.3.0", + "rustls-pemfile 1.0.0", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", 
"tokio-rustls 0.23.2", - "tokio-util 0.6.9", + "tokio-util 0.7.1", + "tower-service", "url 2.2.2", "wasm-bindgen", "wasm-bindgen-futures", @@ -3536,15 +3537,6 @@ dependencies = [ "base64 0.13.0", ] -[[package]] -name = "rustls-pemfile" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" -dependencies = [ - "base64 0.13.0", -] - [[package]] name = "rustls-pemfile" version = "1.0.0" diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index 5c0e2ec0f3eb19..7dd55e84e40179 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -16,7 +16,7 @@ bs58 = "0.4.0" crossbeam-channel = "0.5" futures-util = "0.3.21" log = "0.4.17" -reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } serde = "1.0.137" serde_json = "1.0.81" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } From 690efc0b7c1206e75955e1d5b690e6fb647b18f5 Mon Sep 17 00:00:00 2001 From: Jack May Date: Tue, 5 Jul 2022 13:36:37 -0700 Subject: [PATCH 025/100] Missing privkey, add 2nd feature key (#26415) --- sdk/src/feature_set.rs | 5 +++++ sdk/src/secp256k1_instruction.rs | 8 ++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 21698e6e7b1988..a40180a6967a44 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -157,6 +157,10 @@ pub mod libsecp256k1_fail_on_bad_count { solana_sdk::declare_id!("8aXvSuopd1PUj7UhehfXJRg6619RHp8ZvwTyyJHdUYsj"); } +pub mod libsecp256k1_fail_on_bad_count2 { + solana_sdk::declare_id!("54KAoNiUERNoWWUhTWWwXgym94gzoXFVnHyQwPA18V9A"); +} + pub mod instructions_sysvar_owned_by_sysvar { solana_sdk::declare_id!("H3kBSaKdeiUsyHmeHqjJYNc27jesXZ6zWj3zWkowQbkV"); } @@ -478,6 +482,7 @@ lazy_static! 
{ (curve25519_syscall_enabled::id(), "enable curve25519 syscalls"), (versioned_tx_message_enabled::id(), "enable versioned transaction message processing"), (libsecp256k1_fail_on_bad_count::id(), "fail libsec256k1_verify if count appears wrong"), + (libsecp256k1_fail_on_bad_count2::id(), "fail libsec256k1_verify if count appears wrong"), (instructions_sysvar_owned_by_sysvar::id(), "fix owner for instructions sysvar"), (stake_program_advance_activating_credits_observed::id(), "Enable advancing credits observed for activation epoch #19309"), (credits_auto_rewind::id(), "Auto rewind stake's credits_observed if (accidental) vote recreation is detected #22546"), diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index f284e1f2dab5ab..de9bb89fca1c1f 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -3,7 +3,8 @@ use { crate::{ feature_set::{ - libsecp256k1_0_5_upgrade_enabled, libsecp256k1_fail_on_bad_count, FeatureSet, + libsecp256k1_0_5_upgrade_enabled, libsecp256k1_fail_on_bad_count, + libsecp256k1_fail_on_bad_count2, FeatureSet, }, instruction::Instruction, precompiles::PrecompileError, @@ -109,7 +110,10 @@ pub fn verify( return Err(PrecompileError::InvalidInstructionDataSize); } let count = data[0] as usize; - if feature_set.is_active(&libsecp256k1_fail_on_bad_count::id()) && count == 0 && data.len() > 1 + if (feature_set.is_active(&libsecp256k1_fail_on_bad_count::id()) + || feature_set.is_active(&libsecp256k1_fail_on_bad_count2::id())) + && count == 0 + && data.len() > 1 { // count is zero but the instruction data indicates that is probably not // correct, fail the instruction to catch probable invalid secp256k1 From 53b9420562828cdf56cf2f3956c885f0b3ac3a03 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Tue, 5 Jul 2022 16:37:20 -0500 Subject: [PATCH 026/100] Remove `INTO!` macros in serde_snapshot (#26409) --- runtime/src/serde_snapshot.rs | 121 +++++++++++++++------------------- 1 file changed, 53 insertions(+), 68 deletions(-) diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index acd724b7087d42..b66eb9c5e745e2 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -247,52 +247,55 @@ pub(crate) fn bank_from_streams( where R: Read, { - macro_rules! 
INTO {
-        ($style:ident) => {{
+    let (
+        full_snapshot_bank_fields,
+        full_snapshot_accounts_db_fields,
+        incremental_snapshot_bank_fields,
+        incremental_snapshot_accounts_db_fields,
+    ) = match serde_style {
+        SerdeStyle::Newer => {
             let (full_snapshot_bank_fields, full_snapshot_accounts_db_fields) =
-                $style::Context::deserialize_bank_fields(snapshot_streams.full_snapshot_stream)?;
+                newer::Context::deserialize_bank_fields(snapshot_streams.full_snapshot_stream)?;
             let (incremental_snapshot_bank_fields, incremental_snapshot_accounts_db_fields) =
                 if let Some(ref mut incremental_snapshot_stream) =
                     snapshot_streams.incremental_snapshot_stream
                 {
                     let (bank_fields, accounts_db_fields) =
-                        $style::Context::deserialize_bank_fields(incremental_snapshot_stream)?;
+                        newer::Context::deserialize_bank_fields(incremental_snapshot_stream)?;
                     (Some(bank_fields), Some(accounts_db_fields))
                 } else {
                     (None, None)
                 };
-
-            let snapshot_accounts_db_fields = SnapshotAccountsDbFields {
+            (
+                full_snapshot_bank_fields,
                 full_snapshot_accounts_db_fields,
+                incremental_snapshot_bank_fields,
                 incremental_snapshot_accounts_db_fields,
-            };
-            let bank = reconstruct_bank_from_fields(
-                incremental_snapshot_bank_fields.unwrap_or(full_snapshot_bank_fields),
-                snapshot_accounts_db_fields,
-                genesis_config,
-                account_paths,
-                unpacked_append_vec_map,
-                debug_keys,
-                additional_builtins,
-                account_secondary_indexes,
-                caching_enabled,
-                limit_load_slot_count_from_snapshot,
-                shrink_ratio,
-                verify_index,
-                accounts_db_config,
-                accounts_update_notifier,
-                accounts_db_skip_shrink,
-            )?;
-            Ok(bank)
-        }};
-    }
-    match serde_style {
-        SerdeStyle::Newer => INTO!(newer),
-    }
-    .map_err(|err| {
-        warn!("bankrc_from_stream error: {:?}", err);
-        err
-    })
+            )
+        }
+    };
+
+    let snapshot_accounts_db_fields = SnapshotAccountsDbFields {
+        full_snapshot_accounts_db_fields,
+        incremental_snapshot_accounts_db_fields,
+    };
+    reconstruct_bank_from_fields(
+        incremental_snapshot_bank_fields.unwrap_or(full_snapshot_bank_fields),
+        snapshot_accounts_db_fields,
+        genesis_config,
+        account_paths,
+        unpacked_append_vec_map,
+        debug_keys,
+        additional_builtins,
+        account_secondary_indexes,
+        caching_enabled,
+        limit_load_slot_count_from_snapshot,
+        shrink_ratio,
+        verify_index,
+        accounts_db_config,
+        accounts_update_notifier,
+        accounts_db_skip_shrink,
+    )
 }
 
 pub(crate) fn bank_to_stream(
@@ -304,25 +307,16 @@
 where
     W: Write,
 {
-    macro_rules! INTO {
-        ($style:ident) => {
-            bincode::serialize_into(
-                stream,
-                &SerializableBankAndStorage::<$style::Context> {
-                    bank,
-                    snapshot_storages,
-                    phantom: std::marker::PhantomData::default(),
-                },
-            )
-        };
-    }
     match serde_style {
-        SerdeStyle::Newer => INTO!(newer),
+        SerdeStyle::Newer => bincode::serialize_into(
+            stream,
+            &SerializableBankAndStorage::<newer::Context> {
+                bank,
+                snapshot_storages,
+                phantom: std::marker::PhantomData::default(),
+            },
+        ),
     }
-    .map_err(|err| {
-        warn!("bankrc_to_stream error: {:?}", err);
-        err
-    })
 }
 
 #[cfg(test)]
@@ -335,25 +329,16 @@
pub(crate) fn bank_to_stream_no_extra_fields(
 where
     W: Write,
 {
-    macro_rules! INTO {
-        ($style:ident) => {
-            bincode::serialize_into(
-                stream,
-                &SerializableBankAndStorageNoExtra::<$style::Context> {
-                    bank,
-                    snapshot_storages,
-                    phantom: std::marker::PhantomData::default(),
-                },
-            )
-        };
-    }
     match serde_style {
-        SerdeStyle::Newer => INTO!(newer),
+        SerdeStyle::Newer => bincode::serialize_into(
+            stream,
+            &SerializableBankAndStorageNoExtra::<newer::Context> {
+                bank,
+                snapshot_storages,
+                phantom: std::marker::PhantomData::default(),
+            },
+        ),
     }
-    .map_err(|err| {
-        warn!("bankrc_to_stream error: {:?}", err);
-        err
-    })
 }
 
 /// deserialize the bank from 'stream_reader'
From d3a14f5b30111aeced85d121502bac00c8cb627f Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Tue, 5 Jul 2022 21:41:19 +0000
Subject: [PATCH 027/100] simplifies packet/shred sanity checks (#26356)

---
 core/src/shred_fetch_stage.rs | 32 +++++++--------
 ledger/src/shred.rs           | 75 ++++++++++++++++++++++-------------
 2 files changed, 63 insertions(+), 44 deletions(-)

diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs
index 97fed9e92ac93c..a0e836724acdeb 100644
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -11,7 +11,6 @@ use {
     solana_streamer::streamer::{self, PacketBatchReceiver, StreamerReceiveStats},
     std::{
         net::UdpSocket,
-        ops::RangeBounds,
         sync::{atomic::AtomicBool, Arc, RwLock},
         thread::{self, Builder, JoinHandle},
         time::{Duration, Instant},
@@ -63,12 +62,12 @@ impl ShredFetchStage {
             }
             stats.shred_count += packet_batch.len();
             // Limit shreds to 2 epochs away.
-            let slot_bounds = (last_root + 1)..(last_slot + 2 * slots_per_epoch);
+            let max_slot = last_slot + 2 * slots_per_epoch;
             for packet in packet_batch.iter_mut() {
                 if should_discard_packet(
                     packet,
                     last_root,
-                    slot_bounds.clone(),
+                    max_slot,
                     shred_version,
                     &packet_hasher,
                     &mut shreds_received,
@@ -197,14 +196,13 @@ impl ShredFetchStage {
 fn should_discard_packet(
     packet: &Packet,
     root: Slot,
-    // Range of slots to ingest shreds for.
-    slot_bounds: impl RangeBounds<Slot>,
+    max_slot: Slot, // Max slot to ingest shreds for.
    shred_version: u16,
     packet_hasher: &PacketHasher,
     shreds_received: &mut ShredsReceived,
     stats: &mut ShredFetchStats,
 ) -> bool {
-    if should_discard_shred(packet, root, shred_version, slot_bounds, stats) {
+    if should_discard_shred(packet, root, max_slot, shred_version, stats) {
         return true;
     }
     let hash = packet_hasher.hash_packet(packet);
@@ -253,11 +251,11 @@ mod tests {
         let last_root = 0;
         let last_slot = 100;
         let slots_per_epoch = 10;
-        let slot_bounds = (last_root + 1)..(last_slot + 2 * slots_per_epoch);
+        let max_slot = last_slot + 2 * slots_per_epoch;
         assert!(!should_discard_packet(
             &packet,
             last_root,
-            slot_bounds.clone(),
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -272,7 +270,7 @@
         assert!(!should_discard_packet(
             &packet,
             last_root,
-            slot_bounds,
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -290,7 +288,7 @@
         let last_slot = 100;
         let slots_per_epoch = 10;
         let shred_version = 59445;
-        let slot_bounds = (last_root + 1)..(last_slot + 2 * slots_per_epoch);
+        let max_slot = last_slot + 2 * slots_per_epoch;
 
         let hasher = PacketHasher::default();
 
         let mut packet = Packet::default();
         assert!(should_discard_packet(
             &packet,
             last_root,
-            slot_bounds.clone(),
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -321,7 +319,7 @@
         assert!(should_discard_packet(
             &packet,
             3,
-            3..slot_bounds.end,
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -332,7 +330,7 @@
         assert!(should_discard_packet(
             &packet,
             last_root,
-            slot_bounds.clone(),
+            max_slot,
             345, // shred_version
             &hasher,
             &mut shreds_received,
@@ -344,7 +342,7 @@
         assert!(!should_discard_packet(
             &packet,
             last_root,
-            slot_bounds.clone(),
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -355,7 +353,7 @@
         assert!(should_discard_packet(
             &packet,
             last_root,
-            slot_bounds.clone(),
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -379,7 +377,7 @@
         assert!(should_discard_packet(
             &packet,
             last_root,
-            slot_bounds.clone(),
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
@@ -392,7 +390,7 @@
         assert!(should_discard_packet(
             &packet,
             last_root,
-            slot_bounds,
+            max_slot,
             shred_version,
             &hasher,
             &mut shreds_received,
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 656f324fbd7ca7..dda68ff093a309 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -69,7 +69,7 @@ use {
         signature::{Keypair, Signature, Signer},
     },
     static_assertions::const_assert_eq,
-    std::{fmt::Debug, ops::RangeBounds},
+    std::fmt::Debug,
     thiserror::Error,
 };
 
@@ -532,9 +532,7 @@ pub mod layout {
     pub fn get_shred(packet: &Packet) -> Option<&[u8]> {
         let size = get_shred_size(packet)?;
-        let shred = packet.data(..size)?;
-        // Should at least have a signature.
-        (size >= SIZE_OF_SIGNATURE).then(|| shred)
+        packet.data(..size)
     }
 
     pub(crate) fn get_signature(shred: &[u8]) -> Option<Signature> {
@@ -678,15 +676,16 @@ impl TryFrom<u8> for ShredVariant {
     }
 }
 
+// Accepts shreds in the slot range [root + 1, max_slot].
 #[must_use]
 pub fn should_discard_shred(
     packet: &Packet,
     root: Slot,
+    max_slot: Slot,
     shred_version: u16,
-    // Range of slots to ingest shreds for.
-    slot_bounds: impl RangeBounds<Slot>,
     stats: &mut ShredFetchStats,
 ) -> bool {
+    debug_assert!(root < max_slot);
     let shred = match layout::get_shred(packet) {
         None => {
             stats.index_overrun += 1;
@@ -694,9 +693,17 @@ pub fn should_discard_shred(
         }
         Some(shred) => shred,
     };
-    if OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX > shred.len() {
-        stats.index_overrun += 1;
-        return true;
+    match layout::get_version(shred) {
+        None => {
+            stats.index_overrun += 1;
+            return true;
+        }
+        Some(version) => {
+            if version != shred_version {
+                stats.shred_version_mismatch += 1;
+                return true;
+            }
+        }
     }
     let shred_type = match layout::get_shred_type(shred) {
         Ok(shred_type) => shred_type,
@@ -707,7 +714,7 @@
    };
     let slot = match layout::get_slot(shred) {
         Some(slot) => {
-            if !slot_bounds.contains(&slot) {
+            if slot > max_slot {
                 stats.slot_out_of_range += 1;
                 return true;
             }
@@ -725,10 +732,6 @@
             return true;
         }
     };
-    if layout::get_version(shred) != Some(shred_version) {
-        stats.shred_version_mismatch += 1;
-        return true;
-    }
     match shred_type {
         ShredType::Code => {
             if index >= shred_code::MAX_CODE_SHREDS_PER_SLOT as u32 {
@@ -739,7 +742,6 @@
                 stats.slot_out_of_range += 1;
                 return true;
             }
-            false
         }
         ShredType::Data => {
             if index >= MAX_DATA_SHREDS_PER_SLOT as u32 {
@@ -764,9 +766,9 @@
                 stats.slot_out_of_range += 1;
                 return true;
             }
-            false
         }
     }
+    false
 }
 
 pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {
@@ -940,7 +942,7 @@ mod tests {
         let mut packet = Packet::default();
         let root = 1;
         let shred_version = 798;
-        let slot_bounds = ..16;
+        let max_slot = 16;
         let shred = Shred::new_from_data(
             2,   // slot
             3,   // index
@@ -956,8 +958,8 @@
         assert!(!should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(stats, ShredFetchStats::default());
 
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(stats.index_overrun, 1);
 
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(stats.index_overrun, 2);
 
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(stats.index_overrun, 3);
 
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(stats.index_overrun, 4);
 
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(stats.bad_parent_offset, 1);
 
         let shred = Shred::new_from_parity_shard(
             8,   // slot
             2,   // index
             &[], // parity_shard
             10,  // fec_set_index
             30,  // num_data_shreds
             4,   // num_coding_shreds
             1,   // position
             200, // version
         );
         shred.copy_to_packet(&mut packet);
         assert!(!should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
 
         let shred = Shred::new_from_data(
             100, // slot
             MAX_DATA_SHREDS_PER_SLOT as u32, // index
         );
         shred.copy_to_packet(&mut packet);
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(1, stats.index_out_of_bounds);
 
         let shred = Shred::new_from_parity_shard(
             1,   // slot
             3,   // index
             &[], // parity_shard
             10,  // fec_set_index
             30,  // num_data_shreds
             4,   // num_coding_shreds
             3,   // position
-            200, // version
+            shred_version,
         );
         shred.copy_to_packet(&mut packet);
+        assert!(!should_discard_shred(
+            &packet,
+            root,
+            max_slot,
+            shred_version,
+            &mut stats
+        ));
 
         packet.buffer_mut()[OFFSET_OF_SHRED_VARIANT] = u8::MAX;
         assert!(should_discard_shred(
             &packet,
             root,
+            max_slot,
             shred_version,
-            slot_bounds,
             &mut stats
         ));
         assert_eq!(1, stats.bad_shred_type);
+        assert_eq!(stats.shred_version_mismatch, 0);
+
+        packet.buffer_mut()[OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + 1] = u8::MAX;
+        assert!(should_discard_shred(
+            &packet,
+            root,
+            max_slot,
+            shred_version,
+            &mut stats
+        ));
         assert_eq!(1, stats.bad_shred_type);
+        assert_eq!(stats.shred_version_mismatch, 1);
     }
 
     // Asserts that ShredType is backward compatible with u8.
From b7e34aea15a849b0f4d8bb16606ee91bb44384d9 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Tue, 5 Jul 2022 16:45:59 -0500
Subject: [PATCH 028/100] refactor calculate_rent_result (#26416)

refactor calculate_rent_result
---
 runtime/src/rent_collector.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs
index fe2c9d42e55047..046535c16064e3 100644
--- a/runtime/src/rent_collector.rs
+++ b/runtime/src/rent_collector.rs
@@ -172,14 +172,14 @@ impl RentCollector {
             return RentResult::LeaveAloneNoRent;
         }
 
-        let epoch_increment = match rent_due {
+        let new_rent_epoch = match rent_due {
             // Rent isn't collected for the next epoch
             // Make sure to check exempt status again later in current epoch
-            RentDue::Exempt => 0,
+            RentDue::Exempt => self.epoch,
             // Rent is collected for next epoch
-            RentDue::Paying(_) => 1,
+            RentDue::Paying(_) => self.epoch + 1,
         };
-        RentResult::CollectRent((self.epoch + epoch_increment, rent_due.lamports()))
+        RentResult::CollectRent((new_rent_epoch, rent_due.lamports()))
     }
 
     #[must_use = "add to Bank::collected_rent"]
From 90d9118048840872f184ee1665ae4556feb82bae Mon Sep 17 00:00:00 2001
From: Kirill Fomichev
Date: Wed, 6 Jul 2022 02:52:11 +0400
Subject: [PATCH 029/100] Add `get_confirmed_transactions` to
 `storage-bigtable` (#25404)

* Add get_confirmed_transactions to storage-bigtable
* remove zip
* HashMap::new instead of default
* extract txs in order
---
 storage-bigtable/src/lib.rs | 62 +++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs
index 69d3d95861d4e7..ba8e8c0e39dc4b 100644
--- a/storage-bigtable/src/lib.rs
+++ b/storage-bigtable/src/lib.rs
@@ -552,6 +552,68 @@ impl LedgerStorage {
         Ok(transaction_info.into())
     }
 
+    // Fetches and gets a vector of confirmed transactions via a multirow fetch
+    pub async fn get_confirmed_transactions(
+        &self,
+        signatures: &[Signature],
+    ) -> Result<Vec<ConfirmedTransactionWithStatusMeta>> {
+        debug!(
+            "LedgerStorage::get_confirmed_transactions request received: {:?}",
+            signatures
+        );
+        inc_new_counter_debug!("storage-bigtable-query", 1);
+        let mut bigtable = self.connection.client();
+
+        // Fetch transactions info
+        let keys = signatures.iter().map(|s| s.to_string()).collect::<Vec<_>>();
+        let cells = bigtable
+            .get_bincode_cells::<TransactionInfo>("tx", &keys)
+            .await?;
+
+        // Collect by slot
+        let mut order: Vec<(Slot, u32, String)> = Vec::new();
+        let mut slots: HashSet<Slot> = HashSet::new();
+        for cell in cells {
+            if let (signature, Ok(TransactionInfo { slot, index, .. })) = cell {
+                order.push((slot, index, signature));
+                slots.insert(slot);
+            }
+        }
+
+        // Fetch blocks
+        let blocks = self
+            .get_confirmed_blocks_with_data(&slots.into_iter().collect::<Vec<_>>())
+            .await?
+            .collect::<HashMap<_, _>>();
+
+        // Extract transactions
+        Ok(order
+            .into_iter()
+            .filter_map(|(slot, index, signature)| {
+                blocks.get(&slot).and_then(|block| {
+                    block
+                        .transactions
+                        .get(index as usize)
+                        .and_then(|tx_with_meta| {
+                            if tx_with_meta.transaction_signature().to_string() != *signature {
+                                warn!(
+                                    "Transaction info or confirmed block for {} is corrupt",
+                                    signature
+                                );
+                                None
+                            } else {
+                                Some(ConfirmedTransactionWithStatusMeta {
+                                    slot,
+                                    tx_with_meta: tx_with_meta.clone(),
+                                    block_time: block.block_time,
+                                })
+                            }
+                        })
+                })
+            })
+            .collect::<Vec<_>>())
+    }
+
     /// Fetch a confirmed transaction
     pub async fn get_confirmed_transaction(
         &self,
From 2aafef38f910e62265314331f8ec9da14edaca3f Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Tue, 5 Jul 2022 17:53:29 -0500
Subject: [PATCH 030/100] is_shrinking_productive calls avoid arc clone (#26422)

---
 runtime/src/accounts_db.rs | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index e65e7290f785e4..a01f620ddab561 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -3385,7 +3385,7 @@ impl AccountsDb {
         if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
             let stores: Vec<Arc<AccountStorageEntry>> =
                 stores_lock.read().unwrap().values().cloned().collect();
-            if !Self::is_shrinking_productive(slot, &stores) {
+            if !Self::is_shrinking_productive(slot, stores.iter()) {
                 return 0;
             }
             self.do_shrink_slot_stores(slot, stores.iter())
@@ -7140,13 +7140,18 @@ impl AccountsDb {
         aligned_bytes + PAGE_SIZE > total_bytes && num_stores == 1
     }
 
-    fn is_shrinking_productive(slot: Slot, stores: &[Arc<AccountStorageEntry>]) -> bool {
+    fn is_shrinking_productive<'a, I>(slot: Slot, stores: I) -> bool
+    where
+        I: IntoIterator<Item = &'a Arc<AccountStorageEntry>>,
+    {
         let mut alive_count = 0;
         let mut stored_count = 0;
         let mut alive_bytes = 0;
         let mut total_bytes = 0;
+        let mut count = 0;
 
         for store in stores {
+            count += 1;
             alive_count += store.count();
             stored_count += store.approx_stored_count();
             alive_bytes += store.alive_bytes();
             total_bytes += store.total_bytes();
         }
 
         let aligned_bytes = Self::page_align(alive_bytes as u64);
-        if Self::should_not_shrink(aligned_bytes, total_bytes, stores.len()) {
+        if Self::should_not_shrink(aligned_bytes, total_bytes, count) {
             trace!(
                 "shrink_slot_forced ({}, {}): not able to shrink at all: alive/stored: ({} / {}) ({}b / {}b) save: {}",
                 slot,
-                stores.len(),
+                count,
                 alive_count,
                 stored_count,
                 aligned_bytes,
                 total_bytes,
@@ -7237,7 +7242,7 @@ impl AccountsDb {
                     .insert((*slot, store.append_vec_id()), store.clone());
                 dead_slots.insert(*slot);
             } else if self.caching_enabled
-                && Self::is_shrinking_productive(*slot, &[store.clone()])
+                && Self::is_shrinking_productive(*slot, [&store].into_iter())
                 && self.is_candidate_for_shrink(&store, false)
             {
                 // Checking that this single storage entry is ready for shrinking,
From 8eba4d1698de7143efc9acbab09b9918cb682cc9 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Tue, 5 Jul 2022 18:01:02 -0500
Subject: [PATCH 031/100] add 2nd pass at hash calc when failure seen (#26392)

---
 core/src/accounts_hash_verifier.rs | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs
index 351b683d4bdf79..80bd1b1edae5f8 100644
--- a/core/src/accounts_hash_verifier.rs
+++ b/core/src/accounts_hash_verifier.rs
@@ -147,6 +147,30 @@ impl AccountsHashVerifier {
             )
             .unwrap();
 
+        assert_eq!(accounts_package.expected_capitalization, lamports);
+        if let
Some(expected_hash) = accounts_package.accounts_hash_for_testing { + // before we assert, run the hash calc again. This helps track down whether it could have been a failure in a race condition possibly with shrink. + // We could add diagnostics to the hash calc here to produce a per bin cap or something to help narrow down how many pubkeys are different. + if expected_hash != accounts_hash { + let _ = accounts_package + .accounts + .accounts_db + .calculate_accounts_hash_without_index( + &CalcAccountsHashConfig { + use_bg_thread_pool: false, + check_hash: false, + ancestors: None, + use_write_cache: false, + epoch_schedule: &accounts_package.epoch_schedule, + rent_collector: &accounts_package.rent_collector, + }, + &sorted_storages, + HashStats::default(), + ); + } + assert_eq!(expected_hash, accounts_hash); + }; + accounts_package .accounts .accounts_db @@ -155,11 +179,6 @@ impl AccountsHashVerifier { &accounts_package.epoch_schedule, ); - assert_eq!(accounts_package.expected_capitalization, lamports); - if let Some(expected_hash) = accounts_package.accounts_hash_for_testing { - assert_eq!(expected_hash, accounts_hash); - }; - measure_hash.stop(); solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash( accounts_package.snapshot_links.path(), From 75149fd624c7339e7675913eab294a574d84849b Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 5 Jul 2022 23:11:32 -0500 Subject: [PATCH 032/100] move mark_old_slots_as_dirty to be called from ahv (#26411) --- runtime/src/accounts_db.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index a01f620ddab561..685fca08875b86 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6534,8 +6534,8 @@ impl AccountsDb { .collect() } - // storages are sorted by slot and have range info. - // if we know slots_per_epoch, then add all stores older than slots_per_epoch to dirty_stores so clean visits these slots + /// storages are sorted by slot and have range info. 
+    /// if we know slots_per_epoch, then add all stores older than slots_per_epoch to dirty_stores so clean visits these slots
     fn mark_old_slots_as_dirty(&self, storages: &SortedStorages, slots_per_epoch: Option<Slot>) {
         if let Some(slots_per_epoch) = slots_per_epoch {
             let max = storages.max_slot_inclusive();
@@ -6572,8 +6572,6 @@
             min_root,
             Some(slot),
         );
-
-        self.mark_old_slots_as_dirty(&storages, Some(config.epoch_schedule.slots_per_epoch));
         sort_time.stop();
 
         let mut timings = HashStats {
@@ -6760,6 +6758,8 @@
         storages: &SortedStorages<'_>,
         mut stats: HashStats,
     ) -> Result<(Hash, u64), BankHashVerificationError> {
+        self.mark_old_slots_as_dirty(storages, Some(config.epoch_schedule.slots_per_epoch));
+
         let (num_hash_scan_passes, bins_per_pass) = Self::bins_per_pass(self.num_hash_scan_passes);
         let use_bg_thread_pool = config.use_bg_thread_pool;
         let mut scan_and_hash = move || {
From 38216aa7818ff30290572fe61f9a8b1ed1353402 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Tue, 5 Jul 2022 23:12:35 -0500
Subject: [PATCH 033/100] update comment for the next weary traveller (#26413)

---
 runtime/src/snapshot_utils.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index c0b1ce54bf47a0..e25840376833af 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -1847,6 +1847,7 @@ fn get_snapshot_storages(bank: &Bank) -> SnapshotStorages {
 
 /// Convenience function to create a full snapshot archive out of any Bank, regardless of state.
 /// The Bank will be frozen during the process.
+/// This is only called from ledger-tool or tests. Warping is a special case as well.
 ///
 /// Requires:
 ///   - `bank` is complete
@@ -1890,6 +1891,7 @@ pub fn bank_to_full_snapshot_archive(
 
 /// Convenience function to create an incremental snapshot archive out of any Bank, regardless of
 /// state. The Bank will be frozen during the process.
+/// This is only called from ledger-tool or tests. Warping is a special case as well.
 ///
 /// Requires:
From c1d89ad749283d6b4edb4bcd2834736900fe43ce Mon Sep 17 00:00:00 2001
From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com>
Date: Tue, 5 Jul 2022 23:24:58 -0500
Subject: [PATCH 034/100] forward packets by prioritization in desc order
 (#25406)

- Forward packets by prioritization in desc order
- Add support for cost-tracking by transaction-requested compute units
- Hook up account buckets to forwarder
- Add metrics for forwardable batches count
- Remove redundant invalid packets filtering at end of slot since forwarder will do the same when batching forwardable packets
- Add bench test for forwarding
---
 banking-bench/src/main.rs                     |  20 +-
 core/benches/banking_stage.rs                 |   6 +-
 core/benches/unprocessed_packet_batches.rs    |  86 ++-
 core/src/banking_stage.rs                     | 623 +++++++-----------
 .../src/forward_packet_batches_by_accounts.rs | 343 ++++++++++
 core/src/leader_slot_banking_stage_metrics.rs |  30 +-
 ...eader_slot_banking_stage_timing_metrics.rs |   8 -
 core/src/lib.rs                               |   1 +
 core/src/tpu.rs                               |   1 +
 core/src/unprocessed_packet_batches.rs        | 206 +++++-
 runtime/src/cost_tracker.rs                   |  76 ++-
 11 files changed, 946 insertions(+), 454 deletions(-)
 create mode 100644 core/src/forward_packet_batches_by_accounts.rs

diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs
index e3a910079dfb2c..a3c9019ea99a62 100644
--- a/banking-bench/src/main.rs
+++ b/banking-bench/src/main.rs
@@ -249,8 +249,8 @@ fn main() {
     let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
     let (replay_vote_sender, _replay_vote_receiver) = unbounded();
     let bank0 = Bank::new_for_benches(&genesis_config);
-    let mut bank_forks = BankForks::new(bank0);
-    let mut bank = bank_forks.working_bank();
+    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
+    let mut bank = bank_forks.read().unwrap().working_bank();
 
     // set cost tracker limits to MAX so it will not filter out TXs
     bank.write_cost_tracker()
@@ -357,6 +357,7 @@ fn main() {
         replay_vote_sender,
         Arc::new(RwLock::new(CostModel::default())),
         Arc::new(connection_cache),
+        bank_forks.clone(),
     );
     poh_recorder.write().unwrap().set_bank(&bank, false);
 
@@ -428,8 +429,8 @@ fn main() {
                 new_bank_time.stop();
 
                 let mut insert_time = Measure::start("insert_time");
-                bank_forks.insert(new_bank);
-                bank = bank_forks.working_bank();
+                bank_forks.write().unwrap().insert(new_bank);
+                bank = bank_forks.read().unwrap().working_bank();
                 insert_time.stop();
 
                 // set cost tracker limits to MAX so it will not filter out TXs
@@ -443,7 +444,10 @@ fn main() {
                 assert!(poh_recorder.read().unwrap().bank().is_some());
                 if bank.slot() > 32 {
                     leader_schedule_cache.set_root(&bank);
-                    bank_forks.set_root(root, &AbsRequestSender::default(), None);
+                    bank_forks
+                        .write()
+                        .unwrap()
+                        .set_root(root, &AbsRequestSender::default(), None);
                     root += 1;
                 }
                 debug!(
@@ -476,7 +480,11 @@ fn main() {
             }
         }
     }
-    let txs_processed = bank_forks.working_bank().transaction_count();
+    let txs_processed = bank_forks
+        .read()
+        .unwrap()
+        .working_bank()
+        .transaction_count();
     debug!("processed: {} base: {}", txs_processed, base_tx_count);
     eprintln!(
         "{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs
index 2562ad3789daae..fc01bdf1ac3fa5 100644
--- a/core/benches/banking_stage.rs
+++ b/core/benches/banking_stage.rs
@@ -25,7 +25,7 @@ use {
     },
     solana_perf::{packet::to_packet_batches, test_tx::test_tx},
     solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry},
-    solana_runtime::{bank::Bank, cost_model::CostModel},
+    solana_runtime::{bank::Bank, bank_forks::BankForks, cost_model::CostModel},
     solana_sdk::{
         genesis_config::GenesisConfig,
         hash::Hash,
@@ -170,7 +170,8 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
     let mut bank = Bank::new_for_benches(&genesis_config);
     // Allow arbitrary transaction processing time for the purposes of this bench
     bank.ns_per_slot = u128::MAX;
-    let bank = Arc::new(bank);
+    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
+    let bank = bank_forks.read().unwrap().get(0).unwrap();
 
     // set cost tracker limits to MAX so it will not filter out TXs
     bank.write_cost_tracker()
@@ -232,6 +233,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
             s,
             Arc::new(RwLock::new(CostModel::default())),
             Arc::new(ConnectionCache::default()),
+            bank_forks,
         );
         poh_recorder.write().unwrap().set_bank(&bank, false);
 
diff --git a/core/benches/unprocessed_packet_batches.rs b/core/benches/unprocessed_packet_batches.rs
index 1e5caf0130e25b..11a2e46c6d197c 100644
--- a/core/benches/unprocessed_packet_batches.rs
+++ b/core/benches/unprocessed_packet_batches.rs
@@ -5,10 +5,19 @@ extern crate test;
 
 use {
     rand::distributions::{Distribution, Uniform},
-    solana_core::unprocessed_packet_batches::*,
+    solana_core::{
+        banking_stage::*, forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts,
+        unprocessed_packet_batches::*,
+    },
     solana_measure::measure::Measure,
     solana_perf::packet::{Packet, PacketBatch},
+    solana_runtime::{
+        bank::Bank,
+        bank_forks::BankForks,
+        genesis_utils::{create_genesis_config, GenesisConfigInfo},
+    },
     solana_sdk::{hash::Hash, signature::Keypair, system_transaction},
+    std::sync::{Arc, RwLock},
     test::Bencher,
 };
 
@@ -174,3 +183,78 @@ fn bench_unprocessed_packet_batches_randomized_beyond_limit(bencher: &mut Benche
         insert_packet_batches(buffer_capacity, batch_count, packet_per_batch_count, true);
     });
 }
+
+fn build_bank_forks_for_test() -> Arc<RwLock<BankForks>> {
+    let GenesisConfigInfo { genesis_config, ..
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = BankForks::new(bank); + Arc::new(RwLock::new(bank_forks)) +} + +fn buffer_iter_desc_and_forward( + buffer_max_size: usize, + batch_count: usize, + packet_per_batch_count: usize, + randomize: bool, +) { + solana_logger::setup(); + let mut unprocessed_packet_batches = UnprocessedPacketBatches::with_capacity(buffer_max_size); + + // fill buffer + { + let mut timer = Measure::start("fill_buffer"); + (0..batch_count).for_each(|_| { + let (packet_batch, packet_indexes) = if randomize { + build_randomized_packet_batch(packet_per_batch_count) + } else { + build_packet_batch(packet_per_batch_count) + }; + let deserialized_packets = deserialize_packets(&packet_batch, &packet_indexes); + unprocessed_packet_batches.insert_batch(deserialized_packets); + }); + timer.stop(); + log::info!( + "inserted {} batch, elapsed {}", + buffer_max_size, + timer.as_us() + ); + } + + // forward whole buffer + { + let mut timer = Measure::start("forward_time"); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new_with_default_batch_limits( + build_bank_forks_for_test().read().unwrap().root_bank(), + ); + // iter_desc buffer + let filter_forwarding_results = BankingStage::filter_valid_packets_for_forwarding( + &mut unprocessed_packet_batches, + &mut forward_packet_batches_by_accounts, + ); + timer.stop(); + + let batched_filter_forwarding_results: usize = forward_packet_batches_by_accounts + .iter_batches() + .map(|forward_batch| forward_batch.len()) + .sum(); + log::info!( + "filter_forwarding_results {:?}, batched_forwardable packets {}, elapsed {}", + filter_forwarding_results, + batched_filter_forwarding_results, + timer.as_us() + ); + } +} + +#[bench] +#[ignore] +fn bench_forwarding_unprocessed_packet_batches(bencher: &mut Bencher) { + let batch_count = 1_000; + let packet_per_batch_count = 64; + let buffer_capacity = batch_count * packet_per_batch_count; + + bencher.iter(|| { + buffer_iter_desc_and_forward(buffer_capacity, batch_count, packet_per_batch_count, true); + }); +} diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 8a3f4e3a9a42af..48e213dcdeb2fc 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -3,6 +3,7 @@ //! can do its processing in parallel with signature verification on the GPU. 
use { crate::{ + forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, leader_slot_banking_stage_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary}, leader_slot_banking_stage_timing_metrics::{ LeaderExecuteAndCommitTimings, RecordTransactionsTimings, @@ -37,6 +38,7 @@ use { Bank, CommitTransactionCounts, LoadAndExecuteTransactionsOutput, TransactionBalancesSet, TransactionCheckResult, }, + bank_forks::BankForks, bank_utils, cost_model::{CostModel, TransactionCost}, transaction_batch::TransactionBatch, @@ -48,13 +50,10 @@ use { Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU, }, - feature_set, pubkey::Pubkey, saturating_add_assign, timing::{duration_as_ms, timestamp, AtomicInterval}, - transaction::{ - self, AddressLoader, SanitizedTransaction, TransactionError, VersionedTransaction, - }, + transaction::{self, SanitizedTransaction, TransactionError, VersionedTransaction}, transport::TransportError, }, solana_streamer::sendmmsg::batch_send, @@ -151,7 +150,6 @@ pub struct BankingStageStats { current_buffered_packet_batches_count: AtomicUsize, rebuffered_packets_count: AtomicUsize, consumed_buffered_packets_count: AtomicUsize, - end_of_slot_filtered_invalid_count: AtomicUsize, forwarded_transaction_count: AtomicUsize, forwarded_vote_count: AtomicUsize, batch_packet_indexes_len: Histogram, @@ -162,7 +160,6 @@ pub struct BankingStageStats { handle_retryable_packets_elapsed: AtomicU64, filter_pending_packets_elapsed: AtomicU64, packet_conversion_elapsed: AtomicU64, - unprocessed_packet_conversion_elapsed: AtomicU64, transaction_processing_elapsed: AtomicU64, } @@ -204,9 +201,6 @@ impl BankingStageStats { .load(Ordering::Relaxed) + self.filter_pending_packets_elapsed.load(Ordering::Relaxed) + self.packet_conversion_elapsed.load(Ordering::Relaxed) - + self - .unprocessed_packet_conversion_elapsed - .load(Ordering::Relaxed) + self.transaction_processing_elapsed.load(Ordering::Relaxed) + self.forwarded_transaction_count.load(Ordering::Relaxed) as u64 + self.forwarded_vote_count.load(Ordering::Relaxed) as u64 @@ -267,12 +261,6 @@ impl BankingStageStats { .swap(0, Ordering::Relaxed) as i64, i64 ), - ( - "end_of_slot_filtered_invalid_count", - self.end_of_slot_filtered_invalid_count - .swap(0, Ordering::Relaxed) as i64, - i64 - ), ( "forwarded_transaction_count", self.forwarded_transaction_count.swap(0, Ordering::Relaxed) as i64, @@ -312,12 +300,6 @@ impl BankingStageStats { self.packet_conversion_elapsed.swap(0, Ordering::Relaxed) as i64, i64 ), - ( - "unprocessed_packet_conversion_elapsed", - self.unprocessed_packet_conversion_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), ( "transaction_processing_elapsed", self.transaction_processing_elapsed @@ -374,12 +356,6 @@ pub struct BatchedTransactionErrorDetails { pub batched_dropped_txs_per_account_data_total_limit_count: u64, } -#[derive(Debug, Default)] -struct EndOfSlot { - next_slot_leader: Option, - working_bank: Option>, -} - /// Stores the stage's thread handle and output receiver. 
pub struct BankingStage { bank_thread_hdls: Vec>, @@ -400,8 +376,9 @@ pub enum ForwardOption { ForwardTransaction, } -struct FilterForwardingResults<'a> { - forwardable_packets: Vec<&'a Packet>, +#[derive(Debug)] +pub struct FilterForwardingResults { + total_forwardable_packets: usize, total_tracer_packets_in_buffer: usize, total_forwardable_tracer_packets: usize, } @@ -409,6 +386,7 @@ struct FilterForwardingResults<'a> { impl BankingStage { /// Create the stage using `bank`. Exit when `verified_receiver` is dropped. #[allow(clippy::new_ret_no_self)] + #[allow(clippy::too_many_arguments)] pub fn new( cluster_info: &Arc, poh_recorder: &Arc>, @@ -419,6 +397,7 @@ impl BankingStage { gossip_vote_sender: ReplayVoteSender, cost_model: Arc>, connection_cache: Arc, + bank_forks: Arc>, ) -> Self { Self::new_num_threads( cluster_info, @@ -431,6 +410,7 @@ impl BankingStage { gossip_vote_sender, cost_model, connection_cache, + bank_forks, ) } @@ -446,6 +426,7 @@ impl BankingStage { gossip_vote_sender: ReplayVoteSender, cost_model: Arc>, connection_cache: Arc, + bank_forks: Arc>, ) -> Self { assert!(num_threads >= MIN_TOTAL_THREADS); // Single thread to generate entries from many banks. @@ -478,6 +459,7 @@ impl BankingStage { let data_budget = data_budget.clone(); let cost_model = cost_model.clone(); let connection_cache = connection_cache.clone(); + let bank_forks = bank_forks.clone(); Builder::new() .name(format!("solana-banking-stage-tx-{}", i)) .spawn(move || { @@ -494,6 +476,7 @@ impl BankingStage { &data_budget, cost_model, connection_cache, + &bank_forks, ); }) .unwrap() @@ -502,32 +485,53 @@ impl BankingStage { Self { bank_thread_hdls } } - fn filter_valid_packets_for_forwarding<'a>( - deserialized_packets: impl Iterator, - ) -> FilterForwardingResults<'a> { - let mut total_forwardable_tracer_packets = 0; - let mut total_tracer_packets_in_buffer = 0; + // filter forwardable Rcs that: + // 1. are not forwarded, and + // 2. in priority order from max to min, and + // 3. 
not exceeding account bucket limit + // returns forwarded packets count + pub fn filter_valid_packets_for_forwarding( + buffered_packet_batches: &mut UnprocessedPacketBatches, + forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, + ) -> FilterForwardingResults { + let mut total_forwardable_tracer_packets: usize = 0; + let mut total_tracer_packets_in_buffer: usize = 0; + let mut total_forwardable_packets: usize = 0; + let mut dropped_tx_before_forwarding_count: usize = 0; + + let filter_forwardable_packet = |deserialized_packet: &mut DeserializedPacket| -> bool { + let mut result = true; + let is_tracer_packet = deserialized_packet + .immutable_section() + .original_packet() + .meta + .is_tracer_packet(); + if is_tracer_packet { + saturating_add_assign!(total_tracer_packets_in_buffer, 1); + } + if !deserialized_packet.forwarded { + saturating_add_assign!(total_forwardable_packets, 1); + if is_tracer_packet { + saturating_add_assign!(total_forwardable_tracer_packets, 1); + } + result = forward_packet_batches_by_accounts + .add_packet(deserialized_packet.immutable_section().clone()); + if !result { + saturating_add_assign!(dropped_tx_before_forwarding_count, 1); + } + } + result + }; + + buffered_packet_batches.iter_desc(filter_forwardable_packet); + + inc_new_counter_info!( + "banking_stage-dropped_tx_before_forwarding", + dropped_tx_before_forwarding_count + ); + FilterForwardingResults { - forwardable_packets: deserialized_packets - .filter_map(|deserialized_packet| { - let is_tracer_packet = deserialized_packet - .immutable_section() - .original_packet() - .meta - .is_tracer_packet(); - if is_tracer_packet { - total_tracer_packets_in_buffer += 1; - } - if !deserialized_packet.forwarded { - if is_tracer_packet { - total_forwardable_tracer_packets += 1; - } - Some(deserialized_packet.immutable_section().original_packet()) - } else { - None - } - }) - .collect(), + total_forwardable_packets, total_tracer_packets_in_buffer, total_forwardable_tracer_packets, } @@ -535,19 +539,22 @@ impl BankingStage { /// Forwards all valid, unprocessed packets in the buffer, up to a rate limit. Returns /// the number of successfully forwarded packets in second part of tuple - fn forward_buffered_packets( + fn forward_buffered_packets<'a>( connection_cache: &ConnectionCache, forward_option: &ForwardOption, cluster_info: &ClusterInfo, poh_recorder: &Arc>, socket: &UdpSocket, - filter_forwarding_results: &FilterForwardingResults, + forwardable_packets: impl Iterator, data_budget: &DataBudget, banking_stage_stats: &BankingStageStats, - tracer_packet_stats: &mut TracerPacketStats, - ) -> (std::result::Result<(), TransportError>, usize) { + ) -> ( + std::result::Result<(), TransportError>, + usize, + Option, + ) { let leader_and_addr = match forward_option { - ForwardOption::NotForward => return (Ok(()), 0), + ForwardOption::NotForward => return (Ok(()), 0, None), ForwardOption::ForwardTransaction => { next_leader_tpu_forwards(cluster_info, poh_recorder) } @@ -556,20 +563,9 @@ impl BankingStage { }; let (leader_pubkey, addr) = match leader_and_addr { Some(leader_and_addr) => leader_and_addr, - None => return (Ok(()), 0), + None => return (Ok(()), 0, None), }; - let FilterForwardingResults { - forwardable_packets, - total_forwardable_tracer_packets, - .. 
- } = filter_forwarding_results; - - tracer_packet_stats.increment_total_forwardable_tracer_packets( - *total_forwardable_tracer_packets, - leader_pubkey, - ); - const INTERVAL_MS: u64 = 100; const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200; const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000; @@ -582,7 +578,6 @@ impl BankingStage { }); let packet_vec: Vec<_> = forwardable_packets - .iter() .filter_map(|p| { if !p.meta.forwarded() && data_budget.take(p.meta.size) { Some(p.data(..)?.to_vec()) @@ -629,16 +624,16 @@ impl BankingStage { if let Err(err) = res { inc_new_counter_info!("banking_stage-forward_packets-failed-batches", 1); - return (Err(err), 0); + return (Err(err), 0, Some(leader_pubkey)); } } - (Ok(()), packet_vec_len) + (Ok(()), packet_vec_len, Some(leader_pubkey)) } #[allow(clippy::too_many_arguments)] pub fn consume_buffered_packets( - my_pubkey: &Pubkey, + _my_pubkey: &Pubkey, max_tx_ingestion_ns: u128, poh_recorder: &Arc>, buffered_packet_batches: &mut UnprocessedPacketBatches, @@ -655,7 +650,7 @@ impl BankingStage { let mut consumed_buffered_packets_count = 0; let buffered_packets_len = buffered_packet_batches.len(); let mut proc_start = Measure::start("consume_buffered_process"); - let mut reached_end_of_slot: Option = None; + let mut reached_end_of_slot = false; let mut retryable_packets = MinMaxHeap::with_capacity(buffered_packet_batches.capacity()); std::mem::swap( @@ -717,13 +712,10 @@ impl BankingStage { ) { let poh_recorder_lock_time = { - let (poh_recorder_locked, poh_recorder_lock_time) = + let (_poh_recorder_locked, poh_recorder_lock_time) = measure!(poh_recorder.read().unwrap(), "poh_recorder.read"); - reached_end_of_slot = Some(EndOfSlot { - next_slot_leader: poh_recorder_locked.next_slot_leader(), - working_bank: Some(working_bank), - }); + reached_end_of_slot = true; poh_recorder_lock_time }; @@ -776,19 +768,16 @@ impl BankingStage { ); result - } else if reached_end_of_slot.is_some() { + } else if reached_end_of_slot { packets_to_process } else { // mark as end-of-slot to avoid aggressively lock poh for the remaining for // packet batches in buffer let poh_recorder_lock_time = { - let (poh_recorder_locked, poh_recorder_lock_time) = + let (_poh_recorder_locked, poh_recorder_lock_time) = measure!(poh_recorder.read().unwrap(), "poh_recorder.read"); - reached_end_of_slot = Some(EndOfSlot { - next_slot_leader: poh_recorder_locked.next_slot_leader(), - working_bank: None, - }); + reached_end_of_slot = true; poh_recorder_lock_time }; slot_metrics_tracker.increment_consume_buffered_packets_poh_recorder_lock_us( @@ -805,43 +794,12 @@ impl BankingStage { &mut buffered_packet_batches.packet_priority_queue, ); - if let Some(end_of_slot) = &reached_end_of_slot { + if reached_end_of_slot { slot_metrics_tracker .set_end_of_slot_unprocessed_buffer_len(buffered_packet_batches.len() as u64); // We've hit the end of this slot, no need to perform more processing, - // just filter the remaining packets for the invalid (e.g. 
too old) ones - // if the working_bank is available - let mut end_of_slot_filtering_time = Measure::start("end_of_slot_filtering"); - // TODO: This doesn't have to be done at the end of every slot, can instead - // hold multiple unbuffered queues without merging them - - // TODO: update this here to filter the rest of the packets remaining - // TODO: this needs to be done even if there is no end_of_slot.working_bank - // to put retryable packets back in buffer - let end_of_slot_filtered_invalid_count = - Self::filter_unprocessed_packets_at_end_of_slot( - &end_of_slot.working_bank, - buffered_packet_batches, - my_pubkey, - end_of_slot.next_slot_leader, - banking_stage_stats, - ); - - inc_new_counter_info!( - "banking_stage-dropped_tx_before_forwarding", - end_of_slot_filtered_invalid_count - ); - slot_metrics_tracker.increment_end_of_slot_filtered_invalid_count( - end_of_slot_filtered_invalid_count as u64, - ); - banking_stage_stats - .end_of_slot_filtered_invalid_count - .fetch_add(end_of_slot_filtered_invalid_count, Ordering::Relaxed); - - end_of_slot_filtering_time.stop(); - slot_metrics_tracker - .increment_end_of_slot_filtering_us(end_of_slot_filtering_time.as_us()); + // Packet filtering will be done at `forward_packet_batches_by_accounts.add_packet()` } proc_start.stop(); @@ -920,6 +878,7 @@ impl BankingStage { slot_metrics_tracker: &mut LeaderSlotMetricsTracker, connection_cache: &ConnectionCache, tracer_packet_stats: &mut TracerPacketStats, + bank_forks: &Arc>, ) { let ((metrics_action, decision), make_decision_time) = measure!( { @@ -999,6 +958,7 @@ impl BankingStage { banking_stage_stats, connection_cache, tracer_packet_stats, + bank_forks, ), "forward", ); @@ -1021,6 +981,7 @@ impl BankingStage { banking_stage_stats, connection_cache, tracer_packet_stats, + bank_forks, ), "forward_and_hold", ); @@ -1045,6 +1006,7 @@ impl BankingStage { banking_stage_stats: &BankingStageStats, connection_cache: &ConnectionCache, tracer_packet_stats: &mut TracerPacketStats, + bank_forks: &Arc>, ) { if let ForwardOption::NotForward = forward_option { if !hold { @@ -1053,43 +1015,65 @@ impl BankingStage { return; } - let filter_forwarding_result = - Self::filter_valid_packets_for_forwarding(buffered_packet_batches.iter()); - - let forwardable_packets_len = filter_forwarding_result.forwardable_packets.len(); - let (_forward_result, sucessful_forwarded_packets_count) = Self::forward_buffered_packets( - connection_cache, - forward_option, - cluster_info, - poh_recorder, - socket, - &filter_forwarding_result, - data_budget, - banking_stage_stats, - tracer_packet_stats, + // get current root bank from bank_forks, use it to sanitize transaction and + // load all accounts from address loader; + let current_bank = bank_forks.read().unwrap().root_bank(); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new_with_default_batch_limits(current_bank); + let filter_forwarding_result = Self::filter_valid_packets_for_forwarding( + buffered_packet_batches, + &mut forward_packet_batches_by_accounts, ); - let failed_forwarded_packets_count = - forwardable_packets_len.saturating_sub(sucessful_forwarded_packets_count); + forward_packet_batches_by_accounts + .iter_batches() + .filter(|&batch| !batch.is_empty()) + .for_each(|forward_batch| { + slot_metrics_tracker.increment_forwardable_batches_count(1); + + let batched_forwardable_packets_count = forward_batch.len(); + let (_forward_result, sucessful_forwarded_packets_count, leader_pubkey) = + Self::forward_buffered_packets( + 
connection_cache, + forward_option, + cluster_info, + poh_recorder, + socket, + forward_batch.get_forwardable_packets(), + data_budget, + banking_stage_stats, + ); - if failed_forwarded_packets_count > 0 { - slot_metrics_tracker - .increment_failed_forwarded_packets_count(failed_forwarded_packets_count as u64); - slot_metrics_tracker.increment_packet_batch_forward_failure_count(1); - } + if let Some(leader_pubkey) = leader_pubkey { + tracer_packet_stats.increment_total_forwardable_tracer_packets( + filter_forwarding_result.total_forwardable_tracer_packets, + leader_pubkey, + ); + } + let failed_forwarded_packets_count = batched_forwardable_packets_count + .saturating_sub(sucessful_forwarded_packets_count); - if sucessful_forwarded_packets_count > 0 { - slot_metrics_tracker.increment_successful_forwarded_packets_count( - sucessful_forwarded_packets_count as u64, - ); - } + if failed_forwarded_packets_count > 0 { + slot_metrics_tracker.increment_failed_forwarded_packets_count( + failed_forwarded_packets_count as u64, + ); + slot_metrics_tracker.increment_packet_batch_forward_failure_count(1); + } + + if sucessful_forwarded_packets_count > 0 { + slot_metrics_tracker.increment_successful_forwarded_packets_count( + sucessful_forwarded_packets_count as u64, + ); + } + }); if hold { for deserialized_packet in buffered_packet_batches.iter_mut() { deserialized_packet.forwarded = true; } } else { - slot_metrics_tracker - .increment_cleared_from_buffer_after_forward_count(forwardable_packets_len as u64); + slot_metrics_tracker.increment_cleared_from_buffer_after_forward_count( + filter_forwarding_result.total_forwardable_packets as u64, + ); tracer_packet_stats.increment_total_cleared_from_buffer_after_forward( filter_forwarding_result.total_tracer_packets_in_buffer, ); @@ -1111,6 +1095,7 @@ impl BankingStage { data_budget: &DataBudget, cost_model: Arc>, connection_cache: Arc, + bank_forks: &Arc>, ) { let recorder = poh_recorder.read().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -1144,6 +1129,7 @@ impl BankingStage { &mut slot_metrics_tracker, &connection_cache, &mut tracer_packet_stats, + bank_forks, ), "process_buffered_packets", ); @@ -1831,31 +1817,6 @@ impl BankingStage { .collect_vec() } - // This function deserializes packets into transactions, computes the blake3 hash of transaction - // messages, and verifies secp256k1 instructions. A list of sanitized transactions are returned - // with their packet indexes. 
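The rewritten `handle_forwarding` above no longer pushes one flat packet list; it walks the account-bucketed batches and forwards each non-empty batch independently, updating the slot metrics per batch. A stripped-down sketch of that control flow, with a hypothetical `Batch` type and `send` callback standing in for the real `ForwardBatch` and `ConnectionCache` machinery:

```rust
// Each bucket is forwarded on its own, so one saturated hot account can
// only fill its own batch; returns the total count successfully sent.
struct Batch {
    packets: Vec<Vec<u8>>,
}

fn forward_batches(batches: &[Batch], send: impl Fn(&[Vec<u8>]) -> Result<(), ()>) -> usize {
    batches
        .iter()
        .filter(|batch| !batch.packets.is_empty())
        .map(|batch| match send(&batch.packets) {
            Ok(()) => batch.packets.len(), // whole batch counted on success
            Err(()) => 0,                  // a failed batch forwards nothing
        })
        .sum()
}
```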
- #[allow(clippy::needless_collect)] - fn transaction_from_deserialized_packet( - deserialized_packet: &ImmutableDeserializedPacket, - feature_set: &Arc, - votes_only: bool, - address_loader: impl AddressLoader, - ) -> Option { - if votes_only && !deserialized_packet.is_simple_vote() { - return None; - } - - let tx = SanitizedTransaction::try_new( - deserialized_packet.transaction().clone(), - *deserialized_packet.message_hash(), - deserialized_packet.is_simple_vote(), - address_loader, - ) - .ok()?; - tx.verify_precompiles(feature_set).ok()?; - Some(tx) - } - /// This function filters pending packets that are still valid /// # Arguments /// * `transactions` - a batch of transactions deserialized from packets @@ -1934,7 +1895,7 @@ impl BankingStage { deserialized_packets .enumerate() .filter_map(|(i, deserialized_packet)| { - Self::transaction_from_deserialized_packet( + unprocessed_packet_batches::transaction_from_deserialized_packet( deserialized_packet, &bank.feature_set, bank.vote_only_bank(), @@ -2019,54 +1980,6 @@ impl BankingStage { process_transactions_summary } - // Returns the number of packets that were filtered out for - // no longer being valid (could be too old, a duplicate of something - // already processed, etc.) - fn filter_unprocessed_packets_at_end_of_slot( - bank: &Option>, - unprocessed_packets: &mut UnprocessedPacketBatches, - my_pubkey: &Pubkey, - next_leader: Option, - banking_stage_stats: &BankingStageStats, - ) -> usize { - // Check if we are the next leader. If so, let's not filter the packets - // as we'll filter it again while processing the packets. - // Filtering helps if we were going to forward the packets to some other node - let will_still_be_leader = next_leader - .map(|next_leader| next_leader == *my_pubkey) - .unwrap_or(false); - let should_filter_unprocessed_packets = !will_still_be_leader && bank.is_some(); - let original_unprocessed_packets_len = unprocessed_packets.len(); - - if should_filter_unprocessed_packets { - // If `should_filter_unprocessed_packets` is true, then the bank - // must be `Some` - let bank = bank.as_ref().unwrap(); - let mut unprocessed_packet_conversion_time = - Measure::start("unprocessed_packet_conversion"); - - let should_retain = |deserialized_packet: &mut DeserializedPacket| { - Self::transaction_from_deserialized_packet( - deserialized_packet.immutable_section(), - &bank.feature_set, - bank.vote_only_bank(), - bank.as_ref(), - ) - .is_some() - }; - unprocessed_packets.retain(should_retain); - unprocessed_packet_conversion_time.stop(); - banking_stage_stats - .unprocessed_packet_conversion_elapsed - .fetch_add( - unprocessed_packet_conversion_time.as_us(), - Ordering::Relaxed, - ); - } - - original_unprocessed_packets_len.saturating_sub(unprocessed_packets.len()) - } - fn generate_packet_indexes(vers: &PacketBatch) -> Vec { vers.iter() .enumerate() @@ -2298,6 +2211,7 @@ mod tests { }, solana_program_runtime::timings::ProgramTiming, solana_rpc::transaction_status_service::TransactionStatusService, + solana_runtime::bank_forks::BankForks, solana_sdk::{ account::AccountSharedData, hash::Hash, @@ -2309,14 +2223,10 @@ mod tests { poh_config::PohConfig, signature::{Keypair, Signer}, system_transaction, - transaction::{ - MessageHash, SimpleAddressLoader, Transaction, TransactionError, - VersionedTransaction, - }, + transaction::{MessageHash, Transaction, TransactionError, VersionedTransaction}, }, solana_streamer::{recvmmsg::recv_mmsg, socket::SocketAddrSpace}, solana_transaction_status::{TransactionStatusMeta, 
VersionedTransactionWithStatusMeta}, - solana_vote_program::vote_transaction, std::{ borrow::Cow, collections::HashSet, @@ -2338,7 +2248,9 @@ mod tests { #[test] fn test_banking_stage_shutdown1() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap()); let (verified_sender, verified_receiver) = unbounded(); let (gossip_verified_vote_sender, gossip_verified_vote_receiver) = unbounded(); let (tpu_vote_sender, tpu_vote_receiver) = unbounded(); @@ -2364,6 +2276,7 @@ mod tests { gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), Arc::new(ConnectionCache::default()), + bank_forks, ); drop(verified_sender); drop(gossip_verified_vote_sender); @@ -2383,7 +2296,9 @@ mod tests { } = create_genesis_config(2); genesis_config.ticks_per_slot = 4; let num_extra_ticks = 2; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap()); let start_hash = bank.last_blockhash(); let (verified_sender, verified_receiver) = unbounded(); let (tpu_vote_sender, tpu_vote_receiver) = unbounded(); @@ -2414,6 +2329,7 @@ mod tests { gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), Arc::new(ConnectionCache::default()), + bank_forks, ); trace!("sending bank"); drop(verified_sender); @@ -2456,7 +2372,9 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap()); let start_hash = bank.last_blockhash(); let (verified_sender, verified_receiver) = unbounded(); let (tpu_vote_sender, tpu_vote_receiver) = unbounded(); @@ -2489,6 +2407,7 @@ mod tests { gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), Arc::new(ConnectionCache::default()), + bank_forks, ); // fund another account so we can send 2 good transactions in a single batch. @@ -2615,7 +2534,9 @@ mod tests { let entry_receiver = { // start a banking_stage to eat verified receiver - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap()); let blockstore = Arc::new( Blockstore::open(ledger_path.path()) .expect("Expected to be able to open database ledger"), @@ -2641,6 +2562,7 @@ mod tests { gossip_vote_sender, Arc::new(RwLock::new(CostModel::default())), Arc::new(ConnectionCache::default()), + bank_forks, ); // wait for banking_stage to eat the packets @@ -3337,6 +3259,11 @@ mod tests { #[test] fn test_filter_valid_packets() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, .. 
} = create_slow_genesis_config(10); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let current_bank = bank_forks.read().unwrap().root_bank(); + let mut packets: Vec = (0..256) .map(|packets_id| { // packets are deserialized upon receiving, failed packets will not be @@ -3352,43 +3279,69 @@ mod tests { }) .collect_vec(); - let FilterForwardingResults { - forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - } = BankingStage::filter_valid_packets_for_forwarding(packets.iter()); - assert_eq!(forwardable_packets.len(), 256); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_forwardable_tracer_packets, 256); - - // packets in a batch are forwarded in arbitrary order; verify the ports match after - // sorting - let expected_ports: Vec<_> = (0..256).collect(); - let mut forwarded_ports: Vec<_> = forwardable_packets - .into_iter() - .map(|p| p.meta.port) - .collect(); - forwarded_ports.sort_unstable(); - assert_eq!(expected_ports, forwarded_ports); + // all packets are forwarded + { + let mut buffered_packet_batches: UnprocessedPacketBatches = + UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new(current_bank.clone(), 1, 2); + + let FilterForwardingResults { + total_forwardable_packets, + total_tracer_packets_in_buffer, + total_forwardable_tracer_packets, + } = BankingStage::filter_valid_packets_for_forwarding( + &mut buffered_packet_batches, + &mut forward_packet_batches_by_accounts, + ); + assert_eq!(total_forwardable_packets, 256); + assert_eq!(total_tracer_packets_in_buffer, 256); + assert_eq!(total_forwardable_tracer_packets, 256); + + // packets in a batch are forwarded in arbitrary order; verify the ports match after + // sorting + let expected_ports: Vec<_> = (0..256).collect(); + let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts + .iter_batches() + .flat_map(|batch| { + batch + .get_forwardable_packets() + .into_iter() + .map(|p| p.meta.port) + }) + .collect(); + forwarded_ports.sort_unstable(); + assert_eq!(expected_ports, forwarded_ports); + } - let num_already_forwarded = 16; - for packet in &mut packets[0..num_already_forwarded] { - packet.forwarded = true; + // some packets are forwarded + { + let num_already_forwarded = 16; + for packet in &mut packets[0..num_already_forwarded] { + packet.forwarded = true; + } + let mut buffered_packet_batches: UnprocessedPacketBatches = + UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new(current_bank, 1, 2); + let FilterForwardingResults { + total_forwardable_packets, + total_tracer_packets_in_buffer, + total_forwardable_tracer_packets, + } = BankingStage::filter_valid_packets_for_forwarding( + &mut buffered_packet_batches, + &mut forward_packet_batches_by_accounts, + ); + assert_eq!( + total_forwardable_packets, + packets.len() - num_already_forwarded + ); + assert_eq!(total_tracer_packets_in_buffer, packets.len()); + assert_eq!( + total_forwardable_tracer_packets, + packets.len() - num_already_forwarded + ); } - let FilterForwardingResults { - forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - } = BankingStage::filter_valid_packets_for_forwarding(packets.iter()); - assert_eq!( - forwardable_packets.len(), - 
packets.len() - num_already_forwarded - ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_forwarded - ); } #[test] @@ -4164,7 +4117,9 @@ mod tests { .. } = &genesis_config_info; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap()); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new( @@ -4211,6 +4166,7 @@ mod tests { &stats, &connection_cache, &mut TracerPacketStats::new(0), + &bank_forks, ); recv_socket @@ -4263,7 +4219,9 @@ mod tests { validator_pubkey, .. } = &genesis_config_info; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap()); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new( @@ -4325,6 +4283,7 @@ mod tests { &stats, &connection_cache, &mut TracerPacketStats::new(0), + &bank_forks, ); recv_socket @@ -4360,134 +4319,6 @@ mod tests { Blockstore::destroy(ledger_path.path()).unwrap(); } - #[cfg(test)] - fn make_test_packets( - transactions: Vec, - vote_indexes: Vec, - ) -> Vec { - let capacity = transactions.len(); - let mut packet_vector = Vec::with_capacity(capacity); - for tx in transactions.iter() { - packet_vector.push(Packet::from_data(None, &tx).unwrap()); - } - for index in vote_indexes.iter() { - packet_vector[*index].meta.flags |= PacketFlags::SIMPLE_VOTE_TX; - } - - packet_vector - .into_iter() - .map(|p| DeserializedPacket::new(p).unwrap()) - .collect() - } - - #[test] - fn test_transaction_from_deserialized_packet() { - use solana_sdk::feature_set::FeatureSet; - let keypair = Keypair::new(); - let transfer_tx = - system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default()); - let vote_tx = vote_transaction::new_vote_transaction( - vec![42], - Hash::default(), - Hash::default(), - &keypair, - &keypair, - &keypair, - None, - ); - - // packets with no votes - { - let vote_indexes = vec![]; - let packet_vector = - make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes); - - let mut votes_only = false; - let txs = packet_vector.iter().filter_map(|tx| { - BankingStage::transaction_from_deserialized_packet( - tx.immutable_section(), - &Arc::new(FeatureSet::default()), - votes_only, - SimpleAddressLoader::Disabled, - ) - }); - assert_eq!(2, txs.count()); - - votes_only = true; - let txs = packet_vector.iter().filter_map(|tx| { - BankingStage::transaction_from_deserialized_packet( - tx.immutable_section(), - &Arc::new(FeatureSet::default()), - votes_only, - SimpleAddressLoader::Disabled, - ) - }); - assert_eq!(0, txs.count()); - } - - // packets with some votes - { - let vote_indexes = vec![0, 2]; - let packet_vector = make_test_packets( - vec![vote_tx.clone(), transfer_tx, vote_tx.clone()], - vote_indexes, - ); - - let mut votes_only = false; - let txs = packet_vector.iter().filter_map(|tx| { - BankingStage::transaction_from_deserialized_packet( - tx.immutable_section(), - &Arc::new(FeatureSet::default()), - votes_only, - SimpleAddressLoader::Disabled, - ) - }); - assert_eq!(3, txs.count()); - - votes_only = true; - let txs = 
packet_vector.iter().filter_map(|tx| {
-                BankingStage::transaction_from_deserialized_packet(
-                    tx.immutable_section(),
-                    &Arc::new(FeatureSet::default()),
-                    votes_only,
-                    SimpleAddressLoader::Disabled,
-                )
-            });
-            assert_eq!(2, txs.count());
-        }
-
-        // packets with all votes
-        {
-            let vote_indexes = vec![0, 1, 2];
-            let packet_vector = make_test_packets(
-                vec![vote_tx.clone(), vote_tx.clone(), vote_tx],
-                vote_indexes,
-            );
-
-            let mut votes_only = false;
-            let txs = packet_vector.iter().filter_map(|tx| {
-                BankingStage::transaction_from_deserialized_packet(
-                    tx.immutable_section(),
-                    &Arc::new(FeatureSet::default()),
-                    votes_only,
-                    SimpleAddressLoader::Disabled,
-                )
-            });
-            assert_eq!(3, txs.count());
-
-            votes_only = true;
-            let txs = packet_vector.iter().filter_map(|tx| {
-                BankingStage::transaction_from_deserialized_packet(
-                    tx.immutable_section(),
-                    &Arc::new(FeatureSet::default()),
-                    votes_only,
-                    SimpleAddressLoader::Disabled,
-                )
-            });
-            assert_eq!(3, txs.count());
-        }
-    }
-
     #[test]
     fn test_accumulate_batched_transaction_costs() {
         let signature_cost = 1;
diff --git a/core/src/forward_packet_batches_by_accounts.rs b/core/src/forward_packet_batches_by_accounts.rs
new file mode 100644
index 00000000000000..889747ece16cf1
--- /dev/null
+++ b/core/src/forward_packet_batches_by_accounts.rs
@@ -0,0 +1,343 @@
+use {
+    crate::unprocessed_packet_batches::{self, ImmutableDeserializedPacket},
+    solana_perf::packet::Packet,
+    solana_runtime::{
+        bank::Bank,
+        block_cost_limits,
+        cost_tracker::{CostTracker, CostTrackerError},
+    },
+    solana_sdk::pubkey::Pubkey,
+    std::{rc::Rc, sync::Arc},
+};
+
+/// `ForwardBatch` gets half of the default cost_tracker limits, as a smaller batch
+/// allows better granularity in composing forwarding transactions; e.g.,
+/// transactions in each batch are potentially more evenly distributed across accounts.
+const FORWARDED_BLOCK_COMPUTE_RATIO: u32 = 2;
+/// This number divided by `FORWARDED_BLOCK_COMPUTE_RATIO` is the total number of blocks
+/// to forward. To accommodate transactions without a `compute_budget` instruction, which
+/// default to 200_000 compute units, it defaults to 100 batches, enough to forward up to
+/// 12_000 such transactions. (120 such transactions fill up a batch; 100 batches allow
+/// 12_000 transactions.)
+const DEFAULT_NUMBER_OF_BATCHES: u32 = 100;
+
+/// `ForwardBatch` represents one forwardable batch of transactions with a
+/// limited number of total compute units
+#[derive(Debug)]
+pub struct ForwardBatch {
+    cost_tracker: CostTracker,
+    // `forwardable_packets` keeps forwardable packets in a vector in its
+    // original fee prioritized order
+    forwardable_packets: Vec<Rc<ImmutableDeserializedPacket>>,
+}
+
+impl Default for ForwardBatch {
+    /// The default ForwardBatch has a cost_tracker with default limits
+    fn default() -> Self {
+        Self::new(1)
+    }
+}
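The batch-sizing comment above packs in some arithmetic; here is a quick back-of-envelope check, assuming this era's `block_cost_limits::MAX_BLOCK_UNITS` of 48 million compute units (the constant's value is an assumption, not stated in the patch):

```rust
// Checks the "120 transactions per batch, 12_000 total" claim from the
// doc comment, under the assumed 48M-CU block limit.
const MAX_BLOCK_UNITS: u64 = 48_000_000; // assumed era value
const FORWARDED_BLOCK_COMPUTE_RATIO: u64 = 2;
const DEFAULT_NUMBER_OF_BATCHES: u64 = 100;
const DEFAULT_TX_COMPUTE_UNITS: u64 = 200_000; // tx without compute_budget ix

fn main() {
    let batch_block_limit = MAX_BLOCK_UNITS / FORWARDED_BLOCK_COMPUTE_RATIO; // 24_000_000
    let txs_per_batch = batch_block_limit / DEFAULT_TX_COMPUTE_UNITS; // 120
    assert_eq!(txs_per_batch * DEFAULT_NUMBER_OF_BATCHES, 12_000);
}
```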
+
+impl ForwardBatch {
+    /// `ForwardBatch` keeps forwardable packets in a vector in its original fee prioritized
+    /// order. The number of packets is limited by `cost_tracker`, with a customized
+    /// `limit_ratio` to lower (when `limit_ratio` > 1) the `cost_tracker`'s default limits.
+    /// Lower limits yield a smaller batch for forwarding.
+    fn new(limit_ratio: u32) -> Self {
+        let mut cost_tracker = CostTracker::default();
+        cost_tracker.set_limits(
+            block_cost_limits::MAX_WRITABLE_ACCOUNT_UNITS.saturating_div(limit_ratio as u64),
+            block_cost_limits::MAX_BLOCK_UNITS.saturating_div(limit_ratio as u64),
+            block_cost_limits::MAX_VOTE_UNITS.saturating_div(limit_ratio as u64),
+        );
+        Self {
+            cost_tracker,
+            forwardable_packets: Vec::default(),
+        }
+    }
+
+    fn try_add(
+        &mut self,
+        write_lock_accounts: &[Pubkey],
+        compute_units: u64,
+        immutable_packet: Rc<ImmutableDeserializedPacket>,
+    ) -> Result<u64, CostTrackerError> {
+        let res = self.cost_tracker.try_add_requested_cus(
+            write_lock_accounts,
+            compute_units,
+            immutable_packet.is_simple_vote(),
+        );
+
+        if res.is_ok() {
+            self.forwardable_packets.push(immutable_packet);
+        }
+        res
+    }
+
+    pub fn get_forwardable_packets(&self) -> impl Iterator<Item = &Packet> {
+        self.forwardable_packets
+            .iter()
+            .map(|immutable_packet| immutable_packet.original_packet())
+    }
+
+    pub fn len(&self) -> usize {
+        self.forwardable_packets.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.forwardable_packets.is_empty()
+    }
+}
+
+/// To avoid the forward queue being saturated by transactions for a single hot account,
+/// the forwarder groups and sends prioritized transactions by account limit
+/// to allow transactions on non-congested accounts to be forwarded alongside higher fee
+/// transactions that saturate those highly demanded accounts.
+#[derive(Debug)]
+pub struct ForwardPacketBatchesByAccounts {
+    // Need a `bank` to load all accounts for VersionedTransaction. Currently
+    // using the current rooted bank for it.
+    current_bank: Arc<Bank>,
+    // Forwardable packets are staged in a number of batches; each batch is limited
+    // by cost_tracker on both account limit and block limits. Those limits are
+    // set as `limit_ratio` of regular block limits to facilitate quicker iteration.
+    forward_batches: Vec<ForwardBatch>,
+}
+
+impl ForwardPacketBatchesByAccounts {
+    pub fn new_with_default_batch_limits(current_bank: Arc<Bank>) -> Self {
+        Self::new(
+            current_bank,
+            FORWARDED_BLOCK_COMPUTE_RATIO,
+            DEFAULT_NUMBER_OF_BATCHES,
+        )
+    }
+
+    pub fn new(current_bank: Arc<Bank>, limit_ratio: u32, number_of_batches: u32) -> Self {
+        let forward_batches = (0..number_of_batches)
+            .map(|_| ForwardBatch::new(limit_ratio))
+            .collect();
+        Self {
+            current_bank,
+            forward_batches,
+        }
+    }
+
+    pub fn add_packet(&mut self, packet: Rc<ImmutableDeserializedPacket>) -> bool {
+        // do not forward a packet that cannot be sanitized
+        if let Some(sanitized_transaction) =
+            unprocessed_packet_batches::transaction_from_deserialized_packet(
+                &packet,
+                &self.current_bank.feature_set,
+                self.current_bank.vote_only_bank(),
+                self.current_bank.as_ref(),
+            )
+        {
+            // get write_lock_accounts
+            let message = sanitized_transaction.message();
+            let write_lock_accounts: Vec<_> = message
+                .account_keys()
+                .iter()
+                .enumerate()
+                .filter_map(|(i, account_key)| {
+                    if message.is_writable(i) {
+                        Some(*account_key)
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+
+            // get requested CUs
+            let requested_cu = packet.compute_unit_limit();
+
+            // try to fill into forward batches
+            self.add_packet_to_batches(&write_lock_accounts, requested_cu, packet)
+        } else {
+            false
+        }
+    }
+
+    pub fn iter_batches(&self) -> impl Iterator<Item = &ForwardBatch> {
+        self.forward_batches.iter()
+    }
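The `add_packet_to_batches` method that follows implements a first-fit cascade over these batches. A stripped-down illustration of the idea, using a hypothetical `Bucket` type rather than the real `ForwardBatch`:

```rust
// First-fit: try each capacity-limited bucket in order and stop at the
// first one that accepts the cost; the item is dropped if none do.
struct Bucket {
    used: u64,
    limit: u64,
}

impl Bucket {
    fn try_add(&mut self, cost: u64) -> bool {
        if self.used.saturating_add(cost) <= self.limit {
            self.used += cost;
            true
        } else {
            false
        }
    }
}

fn add_to_first_fitting_bucket(buckets: &mut [Bucket], cost: u64) -> bool {
    // `any` short-circuits, so later buckets stay untouched once one fits
    buckets.iter_mut().any(|bucket| bucket.try_add(cost))
}
```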
+
+    /// A transaction is filled into the 'batches' first-fit: if it can't fit into the first
+    /// batch due to cost_tracker limits (e.g., exceeding the account limit or block limit),
+    /// it tries the next batch, until it is either added to one of the 'buckets' or not
+    /// forwarded at all.
+    fn add_packet_to_batches(
+        &mut self,
+        write_lock_accounts: &[Pubkey],
+        compute_units: u64,
+        immutable_packet: Rc<ImmutableDeserializedPacket>,
+    ) -> bool {
+        for forward_batch in self.forward_batches.iter_mut() {
+            if forward_batch
+                .try_add(write_lock_accounts, compute_units, immutable_packet.clone())
+                .is_ok()
+            {
+                return true;
+            }
+        }
+        false
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::unprocessed_packet_batches::{DeserializedPacket, TransactionPriorityDetails},
+        solana_runtime::{
+            bank::Bank,
+            bank_forks::BankForks,
+            genesis_utils::{create_genesis_config, GenesisConfigInfo},
+        },
+        solana_sdk::{hash::Hash, signature::Keypair, system_transaction},
+        std::sync::RwLock,
+    };
+
+    fn build_bank_forks_for_test() -> Arc<RwLock<BankForks>> {
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
+        let bank = Bank::new_for_tests(&genesis_config);
+        let bank_forks = BankForks::new(bank);
+        Arc::new(RwLock::new(bank_forks))
+    }
+
+    fn build_deserialized_packet_for_test(
+        priority: u64,
+        compute_unit_limit: u64,
+    ) -> DeserializedPacket {
+        let tx = system_transaction::transfer(
+            &Keypair::new(),
+            &solana_sdk::pubkey::new_rand(),
+            1,
+            Hash::new_unique(),
+        );
+        let packet = Packet::from_data(None, &tx).unwrap();
+        DeserializedPacket::new_with_priority_details(
+            packet,
+            TransactionPriorityDetails {
+                priority,
+                compute_unit_limit,
+            },
+        )
+        .unwrap()
+    }
+
+    #[test]
+    fn test_try_add_to_forward_batch() {
+        // set the test batch limit to be one millionth of the regular block limit
+        let limit_ratio = 1_000_000u32;
+        // set requested_cu equal to the per-batch account limit
+        let requested_cu =
+            block_cost_limits::MAX_WRITABLE_ACCOUNT_UNITS.saturating_div(limit_ratio as u64);
+
+        let mut forward_batch = ForwardBatch::new(limit_ratio);
+
+        let write_lock_accounts = vec![Pubkey::new_unique(), Pubkey::new_unique()];
+        let packet = build_deserialized_packet_for_test(10, requested_cu);
+        // the first packet is added successfully
+        assert!(forward_batch
+            .try_add(
+                &write_lock_accounts,
+                requested_cu,
+                packet.immutable_section().clone()
+            )
+            .is_ok());
+        assert_eq!(1, forward_batch.forwardable_packets.len());
+        // the second packet hits the account limit, therefore it is not added
+        assert!(forward_batch
+            .try_add(
+                &write_lock_accounts,
+                requested_cu,
+                packet.immutable_section().clone()
+            )
+            .is_err());
+        assert_eq!(1, forward_batch.forwardable_packets.len());
+    }
+
+    #[test]
+    fn test_add_packet_to_batches() {
+        solana_logger::setup();
+        // set the test batch limit to be one millionth of the regular block limit
+        let limit_ratio = 1_000_000u32;
+        let number_of_batches = 2;
+        // set requested_cu equal to the per-batch account limit
+        let requested_cu =
+            block_cost_limits::MAX_WRITABLE_ACCOUNT_UNITS.saturating_div(limit_ratio as u64);
+
+        let mut forward_packet_batches_by_accounts = ForwardPacketBatchesByAccounts::new(
+            build_bank_forks_for_test().read().unwrap().root_bank(),
+            limit_ratio,
+            number_of_batches,
+        );
+
+        // initially both batches are empty
+        {
+            let mut batches = forward_packet_batches_by_accounts.iter_batches();
+            assert_eq!(0, batches.next().unwrap().len());
+            assert_eq!(0, batches.next().unwrap().len());
+            assert!(batches.next().is_none());
+        }
+
+        let hot_account = solana_sdk::pubkey::new_rand();
+        let other_account = solana_sdk::pubkey::new_rand();
+        let packet_high_priority = build_deserialized_packet_for_test(10, requested_cu);
+        let packet_low_priority = build_deserialized_packet_for_test(0, requested_cu);
+        // with 4 packets, the first 3 write to the same hot_account with higher priority,
+        // the 4th writes to other_account with lower priority;
+        // assert that the 1st and 4th fit in the first batch, the 2nd goes to the 2nd batch,
+        // and the 3rd is dropped.
+
+        // 1st high-priority packet added to 1st batch
+        {
+            forward_packet_batches_by_accounts.add_packet_to_batches(
+                &[hot_account],
+                requested_cu,
+                packet_high_priority.immutable_section().clone(),
+            );
+            let mut batches = forward_packet_batches_by_accounts.iter_batches();
+            assert_eq!(1, batches.next().unwrap().len());
+            assert_eq!(0, batches.next().unwrap().len());
+            assert!(batches.next().is_none());
+        }
+
+        // 2nd high-priority packet added to 2nd batch
+        {
+            forward_packet_batches_by_accounts.add_packet_to_batches(
+                &[hot_account],
+                requested_cu,
+                packet_high_priority.immutable_section().clone(),
+            );
+            let mut batches = forward_packet_batches_by_accounts.iter_batches();
+            assert_eq!(1, batches.next().unwrap().len());
+            assert_eq!(1, batches.next().unwrap().len());
+        }
+
+        // 3rd high-priority packet not included in forwarding
+        {
+            forward_packet_batches_by_accounts.add_packet_to_batches(
+                &[hot_account],
+                requested_cu,
+                packet_high_priority.immutable_section().clone(),
+            );
+            let mut batches = forward_packet_batches_by_accounts.iter_batches();
+            assert_eq!(1, batches.next().unwrap().len());
+            assert_eq!(1, batches.next().unwrap().len());
+            assert!(batches.next().is_none());
+        }
+
+        // 4th lower-priority packet added to 1st batch via the non-contended other_account
+        {
+            forward_packet_batches_by_accounts.add_packet_to_batches(
+                &[other_account],
+                requested_cu,
+                packet_low_priority.immutable_section().clone(),
+            );
+            let mut batches = forward_packet_batches_by_accounts.iter_batches();
+            assert_eq!(2, batches.next().unwrap().len());
+            assert_eq!(1, batches.next().unwrap().len());
+            assert!(batches.next().is_none());
+        }
+    }
+}
diff --git a/core/src/leader_slot_banking_stage_metrics.rs b/core/src/leader_slot_banking_stage_metrics.rs
index 2307b2f0e5f4d6..06e767ed358ae5 100644
--- a/core/src/leader_slot_banking_stage_metrics.rs
+++ b/core/src/leader_slot_banking_stage_metrics.rs
@@ -122,8 +122,9 @@ struct LeaderSlotPacketCountMetrics {
     // total number of valid unprocessed packets in the buffer that were removed after being forwarded
     cleared_from_buffer_after_forward_count: u64,
 
-    // total number of packets removed at the end of the slot due to being too old, duplicate, etc.
-    end_of_slot_filtered_invalid_count: u64,
+    // total number of forwardable batches that were attempted for forwarding. A forwardable batch
A forwardable batch + // is defined in `ForwardPacketBatchesByAccounts` in `forward_packet_batches_by_accounts.rs` + forwardable_batches_count: u64, } impl LeaderSlotPacketCountMetrics { @@ -222,8 +223,8 @@ impl LeaderSlotPacketCountMetrics { i64 ), ( - "end_of_slot_filtered_invalid_count", - self.end_of_slot_filtered_invalid_count as i64, + "forwardable_batches_count", + self.forwardable_batches_count as i64, i64 ), ( @@ -573,23 +574,23 @@ impl LeaderSlotMetricsTracker { } } - pub(crate) fn increment_retryable_packets_count(&mut self, count: u64) { + pub(crate) fn increment_forwardable_batches_count(&mut self, count: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { saturating_add_assign!( leader_slot_metrics .packet_count_metrics - .retryable_packets_count, + .forwardable_batches_count, count ); } } - pub(crate) fn increment_end_of_slot_filtered_invalid_count(&mut self, count: u64) { + pub(crate) fn increment_retryable_packets_count(&mut self, count: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { saturating_add_assign!( leader_slot_metrics .packet_count_metrics - .end_of_slot_filtered_invalid_count, + .retryable_packets_count, count ); } @@ -684,19 +685,6 @@ impl LeaderSlotMetricsTracker { } } - // Consuming buffered packets timing metrics - pub(crate) fn increment_end_of_slot_filtering_us(&mut self, us: u64) { - if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .consume_buffered_packets_timings - .end_of_slot_filtering_us, - us - ); - } - } - pub(crate) fn increment_consume_buffered_packets_poh_recorder_lock_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { saturating_add_assign!( diff --git a/core/src/leader_slot_banking_stage_timing_metrics.rs b/core/src/leader_slot_banking_stage_timing_metrics.rs index a56f8a754a3364..2005bc2df05f04 100644 --- a/core/src/leader_slot_banking_stage_timing_metrics.rs +++ b/core/src/leader_slot_banking_stage_timing_metrics.rs @@ -227,9 +227,6 @@ pub(crate) struct ConsumeBufferedPacketsTimings { // Time spent grabbing poh recorder lock pub poh_recorder_lock_us: u64, - // Time spent filtering invalid packets after leader slot has ended - pub end_of_slot_filtering_us: u64, - // Time spent processing transactions pub process_packets_transactions_us: u64, } @@ -245,11 +242,6 @@ impl ConsumeBufferedPacketsTimings { self.poh_recorder_lock_us as i64, i64 ), - ( - "end_of_slot_filtering_us", - self.end_of_slot_filtering_us as i64, - i64 - ), ( "process_packets_transactions_us", self.process_packets_transactions_us as i64, diff --git a/core/src/lib.rs b/core/src/lib.rs index b47a5f125e89a9..53d580a03aaf7c 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -27,6 +27,7 @@ pub mod duplicate_repair_status; pub mod fetch_stage; pub mod find_packet_sender_stake_stage; pub mod fork_choice; +pub mod forward_packet_batches_by_accounts; pub mod gen_keys; pub mod heaviest_subtree_fork_choice; pub mod latest_validator_votes_for_frozen_banks; diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 72acd127db0baa..41b3f434e3a1b1 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -230,6 +230,7 @@ impl Tpu { replay_vote_sender, cost_model.clone(), connection_cache.clone(), + bank_forks.clone(), ); let broadcast_stage = broadcast_type.new_broadcast_stage( diff --git a/core/src/unprocessed_packet_batches.rs b/core/src/unprocessed_packet_batches.rs index 22e8d9cce929a5..e5e14e346df5db 100644 --- 
a/core/src/unprocessed_packet_batches.rs
+++ b/core/src/unprocessed_packet_batches.rs
@@ -3,18 +3,23 @@ use {
     solana_perf::packet::{Packet, PacketBatch},
     solana_program_runtime::compute_budget::ComputeBudget,
     solana_sdk::{
+        feature_set,
         hash::Hash,
         message::{Message, SanitizedVersionedMessage},
         sanitize::SanitizeError,
         short_vec::decode_shortu16_len,
         signature::Signature,
-        transaction::{SanitizedVersionedTransaction, Transaction, VersionedTransaction},
+        transaction::{
+            AddressLoader, SanitizedTransaction, SanitizedVersionedTransaction, Transaction,
+            VersionedTransaction,
+        },
     },
     std::{
         cmp::Ordering,
         collections::{hash_map::Entry, HashMap},
         mem::size_of,
         rc::Rc,
+        sync::Arc,
    },
     thiserror::Error,
 };
@@ -36,8 +41,8 @@ pub enum DeserializedPacketError {
 
 #[derive(Debug, PartialEq, Eq)]
 pub struct TransactionPriorityDetails {
-    priority: u64,
-    compute_unit_limit: u64,
+    pub priority: u64,
+    pub compute_unit_limit: u64,
 }
 
 #[derive(Debug, PartialEq, Eq)]
@@ -93,7 +98,7 @@ impl DeserializedPacket {
     }
 
     #[cfg(test)]
-    fn new_with_priority_details(
+    pub fn new_with_priority_details(
        packet: Packet,
        priority_details: TransactionPriorityDetails,
    ) -> Result<Self, DeserializedPacketError> {
@@ -254,12 +259,40 @@ impl UnprocessedPacketBatches {
         self.message_hash_to_transaction.iter_mut().map(|(_k, v)| v)
     }
 
+    /// Iterates DeserializedPackets in descending priority (max-first) order,
+    /// calls FnMut for each DeserializedPacket.
+    pub fn iter_desc<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&mut DeserializedPacket) -> bool,
+    {
+        let mut packet_priority_queue_clone = self.packet_priority_queue.clone();
+
+        for immutable_packet in packet_priority_queue_clone.drain_desc() {
+            match self
+                .message_hash_to_transaction
+                .entry(*immutable_packet.message_hash())
+            {
+                Entry::Vacant(_vacant_entry) => {
+                    panic!(
+                        "entry {} must exist to be consistent with `packet_priority_queue`",
+                        immutable_packet.message_hash()
+                    );
+                }
+                Entry::Occupied(mut occupied_entry) => {
+                    if !f(occupied_entry.get_mut()) {
+                        return;
+                    }
+                }
+            }
+        }
+    }
+
     pub fn retain<F>(&mut self, mut f: F)
     where
         F: FnMut(&mut DeserializedPacket) -> bool,
     {
         // TODO: optimize this only when number of packets
-        // with oudated blockhash is high
+        // with outdated blockhash is high
         let new_packet_priority_queue: MinMaxHeap<Rc<ImmutableDeserializedPacket>> = self
             .packet_priority_queue
             .drain()
@@ -415,14 +448,45 @@ pub fn transactions_to_deserialized_packets(
         .collect()
 }
 
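An aside on the new `iter_desc` above: it visits buffered packets in max-priority-first order and stops as soon as the closure returns `false`, which is what lets the forwarder fill its account-limited batches in fee order. A usage sketch, assuming the `UnprocessedPacketBatches` API as added in this hunk (`budget` and the helper name are illustrative):

```rust
use solana_core::unprocessed_packet_batches::UnprocessedPacketBatches;

// Count the highest-priority packets not yet forwarded, stopping the
// descending walk once a hypothetical budget is reached.
fn count_top_unforwarded(buffer: &mut UnprocessedPacketBatches, budget: usize) -> usize {
    let mut taken = 0;
    buffer.iter_desc(|packet| {
        if !packet.forwarded {
            taken += 1;
        }
        taken < budget // returning false ends the iteration early
    });
    taken
}
```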
+// This function deserializes packets into transactions, computes the blake3 hash of the
+// transaction messages, and verifies secp256k1 instructions. A sanitized transaction is
+// returned on success.
+#[allow(clippy::needless_collect)]
+pub fn transaction_from_deserialized_packet(
+    deserialized_packet: &ImmutableDeserializedPacket,
+    feature_set: &Arc<feature_set::FeatureSet>,
+    votes_only: bool,
+    address_loader: impl AddressLoader,
+) -> Option<SanitizedTransaction> {
+    if votes_only && !deserialized_packet.is_simple_vote() {
+        return None;
+    }
+
+    let tx = SanitizedTransaction::try_new(
+        deserialized_packet.transaction().clone(),
+        *deserialized_packet.message_hash(),
+        deserialized_packet.is_simple_vote(),
+        address_loader,
+    )
+    .ok()?;
+    tx.verify_precompiles(feature_set).ok()?;
+    Some(tx)
+}
+
 #[cfg(test)]
 mod tests {
     use {
         super::*,
+        solana_perf::packet::PacketFlags,
         solana_sdk::{
-            compute_budget::ComputeBudgetInstruction, message::VersionedMessage, pubkey::Pubkey,
-            signature::Keypair, system_instruction, system_transaction,
+            compute_budget::ComputeBudgetInstruction,
+            message::VersionedMessage,
+            pubkey::Pubkey,
+            signature::{Keypair, Signer},
+            system_instruction, system_transaction,
+            transaction::{SimpleAddressLoader, Transaction},
         },
+        solana_vote_program::vote_transaction,
         std::net::IpAddr,
     };
 
@@ -622,4 +686,132 @@ mod tests {
             })
         );
     }
+
+    #[cfg(test)]
+    fn make_test_packets(
+        transactions: Vec<Transaction>,
+        vote_indexes: Vec<usize>,
+    ) -> Vec<DeserializedPacket> {
+        let capacity = transactions.len();
+        let mut packet_vector = Vec::with_capacity(capacity);
+        for tx in transactions.iter() {
+            packet_vector.push(Packet::from_data(None, &tx).unwrap());
+        }
+        for index in vote_indexes.iter() {
+            packet_vector[*index].meta.flags |= PacketFlags::SIMPLE_VOTE_TX;
+        }
+
+        packet_vector
+            .into_iter()
+            .map(|p| DeserializedPacket::new(p).unwrap())
+            .collect()
+    }
+
+    #[test]
+    fn test_transaction_from_deserialized_packet() {
+        use solana_sdk::feature_set::FeatureSet;
+        let keypair = Keypair::new();
+        let transfer_tx =
+            system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default());
+        let vote_tx = vote_transaction::new_vote_transaction(
+            vec![42],
+            Hash::default(),
+            Hash::default(),
+            &keypair,
+            &keypair,
+            &keypair,
+            None,
+        );
+
+        // packets with no votes
+        {
+            let vote_indexes = vec![];
+            let packet_vector =
+                make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes);
+
+            let mut votes_only = false;
+            let txs = packet_vector.iter().filter_map(|tx| {
+                transaction_from_deserialized_packet(
+                    tx.immutable_section(),
+                    &Arc::new(FeatureSet::default()),
+                    votes_only,
+                    SimpleAddressLoader::Disabled,
+                )
+            });
+            assert_eq!(2, txs.count());
+
+            votes_only = true;
+            let txs = packet_vector.iter().filter_map(|tx| {
+                transaction_from_deserialized_packet(
+                    tx.immutable_section(),
+                    &Arc::new(FeatureSet::default()),
+                    votes_only,
+                    SimpleAddressLoader::Disabled,
+                )
+            });
+            assert_eq!(0, txs.count());
+        }
+
+        // packets with some votes
+        {
+            let vote_indexes = vec![0, 2];
+            let packet_vector = make_test_packets(
+                vec![vote_tx.clone(), transfer_tx, vote_tx.clone()],
+                vote_indexes,
+            );
+
+            let mut votes_only = false;
+            let txs = packet_vector.iter().filter_map(|tx| {
+                transaction_from_deserialized_packet(
+                    tx.immutable_section(),
+                    &Arc::new(FeatureSet::default()),
+                    votes_only,
+                    SimpleAddressLoader::Disabled,
+                )
+            });
+            assert_eq!(3, txs.count());
+
+            votes_only = true;
+            let txs = packet_vector.iter().filter_map(|tx| {
+                transaction_from_deserialized_packet(
+                    tx.immutable_section(),
+                    &Arc::new(FeatureSet::default()),
+                    votes_only,
+                    SimpleAddressLoader::Disabled,
+                )
+            });
+            assert_eq!(2, txs.count());
+        }
+
+        // packets with all votes
+        {
+            let vote_indexes = vec![0, 1, 2];
+            let packet_vector =
make_test_packets( + vec![vote_tx.clone(), vote_tx.clone(), vote_tx], + vote_indexes, + ); + + let mut votes_only = false; + let txs = packet_vector.iter().filter_map(|tx| { + transaction_from_deserialized_packet( + tx.immutable_section(), + &Arc::new(FeatureSet::default()), + votes_only, + SimpleAddressLoader::Disabled, + ) + }); + assert_eq!(3, txs.count()); + + votes_only = true; + let txs = packet_vector.iter().filter_map(|tx| { + transaction_from_deserialized_packet( + tx.immutable_section(), + &Arc::new(FeatureSet::default()), + votes_only, + SimpleAddressLoader::Disabled, + ) + }); + assert_eq!(3, txs.count()); + } + } } diff --git a/runtime/src/cost_tracker.rs b/runtime/src/cost_tracker.rs index 05f35ce9d1943d..a1d779a8a581b3 100644 --- a/runtime/src/cost_tracker.rs +++ b/runtime/src/cost_tracker.rs @@ -77,7 +77,7 @@ impl CostTracker { } } - // bench tests needs to reset limits + /// allows to adjust limits initiated during construction pub fn set_limits( &mut self, account_cost_limit: u64, @@ -95,6 +95,18 @@ impl CostTracker { Ok(self.block_cost) } + /// Using user requested compute-units to track cost. + pub fn try_add_requested_cus( + &mut self, + write_lock_accounts: &[Pubkey], + requested_cus: u64, + is_vote: bool, + ) -> Result { + self.would_fit_internal(write_lock_accounts.iter(), requested_cus, is_vote, 0)?; + self.add_transaction_cost_internal(write_lock_accounts.iter(), requested_cus, is_vote, 0); + Ok(self.block_cost) + } + pub fn update_execution_cost( &mut self, estimated_tx_cost: &TransactionCost, @@ -165,9 +177,22 @@ impl CostTracker { } fn would_fit(&self, tx_cost: &TransactionCost) -> Result<(), CostTrackerError> { - let writable_accounts = &tx_cost.writable_accounts; - let cost = tx_cost.sum(); - let vote_cost = if tx_cost.is_simple_vote { cost } else { 0 }; + self.would_fit_internal( + tx_cost.writable_accounts.iter(), + tx_cost.sum(), + tx_cost.is_simple_vote, + tx_cost.account_data_size, + ) + } + + fn would_fit_internal<'a>( + &self, + write_lock_accounts: impl Iterator, + cost: u64, + is_vote: bool, + account_data_size: u64, + ) -> Result<(), CostTrackerError> { + let vote_cost = if is_vote { cost } else { 0 }; // check against the total package cost if self.block_cost.saturating_add(cost) > self.block_cost_limit { @@ -186,9 +211,7 @@ impl CostTracker { // NOTE: Check if the total accounts data size is exceeded *before* the block accounts data // size. This way, transactions are not unnecessarily retried. 
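Stepping outside the hunk for a moment: the `try_add_requested_cus` entry point added above is what `ForwardBatch::try_add` calls, charging a packet's requested compute units rather than an estimated `TransactionCost`. A usage sketch, assuming the `CostTracker` API exactly as added in this patch:

```rust
use solana_runtime::cost_tracker::CostTracker;
use solana_sdk::pubkey::Pubkey;

// Reserve a packet's requested CUs against per-account and block limits;
// the cost is recorded only if every limit check passes.
fn try_reserve(tracker: &mut CostTracker, writables: &[Pubkey], requested_cus: u64) -> bool {
    tracker
        .try_add_requested_cus(writables, requested_cus, /* is_vote */ false)
        .is_ok()
}
```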
- let account_data_size = self - .account_data_size - .saturating_add(tx_cost.account_data_size); + let account_data_size = self.account_data_size.saturating_add(account_data_size); if let Some(account_data_size_limit) = self.account_data_size_limit { if account_data_size > account_data_size_limit { return Err(CostTrackerError::WouldExceedAccountDataTotalLimit); @@ -200,7 +223,7 @@ impl CostTracker { } // check each account against account_cost_limit, - for account_key in writable_accounts.iter() { + for account_key in write_lock_accounts { match self.cost_by_writable_accounts.get(account_key) { Some(chained_cost) => { if chained_cost.saturating_add(cost) > self.account_cost_limit { @@ -217,9 +240,23 @@ impl CostTracker { } fn add_transaction_cost(&mut self, tx_cost: &TransactionCost) { - let cost = tx_cost.sum(); - self.add_transaction_execution_cost(tx_cost, cost); - saturating_add_assign!(self.account_data_size, tx_cost.account_data_size); + self.add_transaction_cost_internal( + tx_cost.writable_accounts.iter(), + tx_cost.sum(), + tx_cost.is_simple_vote, + tx_cost.account_data_size, + ) + } + + fn add_transaction_cost_internal<'a>( + &mut self, + write_lock_accounts: impl Iterator, + cost: u64, + is_vote: bool, + account_data_size: u64, + ) { + self.add_transaction_execution_cost_internal(write_lock_accounts, is_vote, cost); + saturating_add_assign!(self.account_data_size, account_data_size); saturating_add_assign!(self.transaction_count, 1); } @@ -234,7 +271,20 @@ impl CostTracker { /// Apply additional actual execution units to cost_tracker fn add_transaction_execution_cost(&mut self, tx_cost: &TransactionCost, adjustment: u64) { - for account_key in tx_cost.writable_accounts.iter() { + self.add_transaction_execution_cost_internal( + tx_cost.writable_accounts.iter(), + tx_cost.is_simple_vote, + adjustment, + ) + } + + fn add_transaction_execution_cost_internal<'a>( + &mut self, + write_lock_accounts: impl Iterator, + is_vote: bool, + adjustment: u64, + ) { + for account_key in write_lock_accounts { let account_cost = self .cost_by_writable_accounts .entry(*account_key) @@ -242,7 +292,7 @@ impl CostTracker { *account_cost = account_cost.saturating_add(adjustment); } self.block_cost = self.block_cost.saturating_add(adjustment); - if tx_cost.is_simple_vote { + if is_vote { self.vote_cost = self.vote_cost.saturating_add(adjustment); } } From 37f4621c064b0f2dbc596618b4c31f38bbc78afe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Jul 2022 23:18:08 -0600 Subject: [PATCH 035/100] chore: bump serde from 1.0.137 to 1.0.138 (#26421) * chore: bump serde from 1.0.137 to 1.0.138 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.137 to 1.0.138. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.137...v1.0.138) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- account-decoder/Cargo.toml | 2 +- banks-interface/Cargo.toml | 2 +- bloom/Cargo.toml | 2 +- cli-config/Cargo.toml | 2 +- cli-output/Cargo.toml | 2 +- cli/Cargo.toml | 2 +- client/Cargo.toml | 2 +- core/Cargo.toml | 2 +- dos/Cargo.toml | 2 +- entry/Cargo.toml | 2 +- faucet/Cargo.toml | 2 +- frozen-abi/Cargo.toml | 2 +- genesis/Cargo.toml | 2 +- gossip/Cargo.toml | 2 +- install/Cargo.toml | 2 +- ledger/Cargo.toml | 2 +- log-analyzer/Cargo.toml | 2 +- net-shaper/Cargo.toml | 2 +- net-utils/Cargo.toml | 2 +- perf/Cargo.toml | 2 +- program-test/Cargo.toml | 2 +- programs/address-lookup-table/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 8 ++++---- programs/config/Cargo.toml | 2 +- programs/stake/Cargo.toml | 2 +- programs/vote/Cargo.toml | 2 +- rbpf-cli/Cargo.toml | 2 +- rpc-test/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- runtime/Cargo.toml | 2 +- sdk/Cargo.toml | 2 +- storage-bigtable/Cargo.toml | 2 +- storage-proto/Cargo.toml | 2 +- transaction-status/Cargo.toml | 2 +- validator/Cargo.toml | 2 +- version/Cargo.toml | 2 +- 37 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75c1422eafc61a..2dcd70b2e017ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4112,9 +4112,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" dependencies = [ "serde_derive", ] @@ -4140,9 +4140,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" dependencies = [ "proc-macro2 1.0.38", "quote 1.0.18", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 493d0aa286a99f..7abc3631ed7fc5 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -16,7 +16,7 @@ bincode = "1.3.3" bs58 = "0.4.0" bv = "0.11.1" lazy_static = "1.4.0" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" solana-config-program = { path = "../programs/config", version = "=1.11.2" } diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index 79e97ae66fee65..6c770e1f46437d 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-banks-interface" edition = "2021" [dependencies] -serde = { version = "1.0.137", features = ["derive"] } +serde = { version = "1.0.138", features = ["derive"] } solana-sdk = { path = "../sdk", version = "=1.11.2" } tarpc = { version = "0.29.0", features = ["full"] } diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index b2443046b73ec4..e730fbcfd42d29 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -15,7 +15,7 @@ fnv = "1.0.7" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -serde = { version = "1.0.137", features = ["rc"] } +serde = { version = "1.0.138", features = ["rc"] } serde_derive = "1.0.103" solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } solana-frozen-abi-macro = { 
path = "../frozen-abi/macro", version = "=1.11.2" } diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index a23eb2d10af849..d4c27b4b967799 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ -12,7 +12,7 @@ documentation = "https://docs.rs/solana-cli-config" [dependencies] dirs-next = "2.0.0" lazy_static = "1.4.0" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" serde_yaml = "0.8.24" solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 54c6d6c6ed2e70..667fec840cd73a 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -19,7 +19,7 @@ humantime = "2.0.1" indicatif = "0.16.2" pretty-hex = "0.3.0" semver = "1.0.10" -serde = "1.0.137" +serde = "1.0.138" serde_json = "1.0.81" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 8478b0a8e47cb6..e8978e3f74f7a5 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -24,7 +24,7 @@ num-traits = "0.2" pretty-hex = "0.3.0" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } semver = "1.0.10" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } diff --git a/client/Cargo.toml b/client/Cargo.toml index e6dc8bfa5d3b72..44eed684a0f8ac 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -35,7 +35,7 @@ rayon = "1.5.3" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } rustls = { version = "0.20.6", features = ["dangerous_configuration"] } semver = "1.0.10" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } diff --git a/core/Cargo.toml b/core/Cargo.toml index 0cd8959106f91f..328b3dee265043 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -33,7 +33,7 @@ min-max-heap = "1.3.0" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.2" } solana-bloom = { path = "../bloom", version = "=1.11.2" } diff --git a/dos/Cargo.toml b/dos/Cargo.toml index 0bcc7712912a3a..8db04713aa633f 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -15,7 +15,7 @@ clap = { version = "3.1.5", features = ["derive", "cargo"] } itertools = "0.10.3" log = "0.4.17" rand = "0.7.0" -serde = "1.0.137" +serde = "1.0.138" solana-bench-tps = { path = "../bench-tps", version = "=1.11.2" } solana-client = { path = "../client", version = "=1.11.2" } solana-core = { path = "../core", version = "=1.11.2" } diff --git a/entry/Cargo.toml b/entry/Cargo.toml index 431b5a7a48bd85..a825cf6a5ed194 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -18,7 +18,7 @@ lazy_static = "1.4.0" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -serde = "1.0.137" +serde = "1.0.138" solana-measure = { path = "../measure", version = "=1.11.2" } solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.2" } solana-metrics = { path = "../metrics", version = "=1.11.2" } diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 148070b42d9fb1..32974f83ac9061 100644 --- a/faucet/Cargo.toml +++ 
b/faucet/Cargo.toml @@ -15,7 +15,7 @@ byteorder = "1.4.3" clap = "2.33" crossbeam-channel = "0.5" log = "0.4.17" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } solana-cli-config = { path = "../cli-config", version = "=1.11.2" } diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index b2a9f191e1c58b..be0b1226e11318 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -15,7 +15,7 @@ bv = { version = "0.11.1", features = ["serde"] } lazy_static = "1.4.0" log = "0.4.17" once_cell = "1.12.0" -serde = "1.0.137" +serde = "1.0.138" serde_bytes = "0.11" serde_derive = "1.0.103" sha2 = "0.10.2" diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index bcf96050359483..a1427fc5d28b03 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -12,7 +12,7 @@ documentation = "https://docs.rs/solana-genesis" [dependencies] base64 = "0.13.0" clap = "2.33.1" -serde = "1.0.137" +serde = "1.0.138" serde_json = "1.0.81" serde_yaml = "0.8.24" solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 2e4d0cdc0ffa2e..dbae09b54c6e62 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -24,7 +24,7 @@ num-traits = "0.2" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" -serde = "1.0.137" +serde = "1.0.138" serde_bytes = "0.11" serde_derive = "1.0.103" solana-bloom = { path = "../bloom", version = "=1.11.2" } diff --git a/install/Cargo.toml b/install/Cargo.toml index b769c7eb00e874..b59eeee734eeeb 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -24,7 +24,7 @@ lazy_static = "1.4.0" nix = "0.24.0" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } semver = "1.0.10" -serde = { version = "1.0.137", features = ["derive"] } +serde = { version = "1.0.138", features = ["derive"] } serde_yaml = "0.8.24" solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } solana-client = { path = "../client", version = "=1.11.2" } diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index d118ce81b6e429..4b03a6e8614014 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -31,7 +31,7 @@ rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" reed-solomon-erasure = { version = "5.0.2", features = ["simd-accel"] } -serde = "1.0.137" +serde = "1.0.138" serde_bytes = "0.11.6" sha2 = "0.10.2" solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" } diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml index d5b18fdca99d78..b01c9f90dfa8c4 100644 --- a/log-analyzer/Cargo.toml +++ b/log-analyzer/Cargo.toml @@ -12,7 +12,7 @@ publish = false [dependencies] byte-unit = "4.0.14" clap = { version = "3.1.5", features = ["cargo"] } -serde = "1.0.137" +serde = "1.0.138" serde_json = "1.0.81" solana-logger = { path = "../logger", version = "=1.11.2" } solana-version = { path = "../version", version = "=1.11.2" } diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml index 74f1c77963ca3f..d9cbb8b2b95f22 100644 --- a/net-shaper/Cargo.toml +++ b/net-shaper/Cargo.toml @@ -12,7 +12,7 @@ publish = false [dependencies] clap = { version = "3.1.5", features = ["cargo"] } rand = "0.7.0" -serde = { version = "1.0.137", features = ["derive"] } +serde = { version = "1.0.138", features = ["derive"] } serde_json = "1.0.81" solana-logger = { path = "../logger", version = "=1.11.2" } diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index 
979ed2bb314707..e3035812986cdf 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -16,7 +16,7 @@ crossbeam-channel = "0.5" log = "0.4.17" nix = "0.24.0" rand = "0.7.0" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" socket2 = "0.4.4" solana-logger = { path = "../logger", version = "=1.11.2" } diff --git a/perf/Cargo.toml b/perf/Cargo.toml index ba355ed12a6558..dd2c26fd5bcd8b 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -21,7 +21,7 @@ lazy_static = "1.4.0" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -serde = "1.0.137" +serde = "1.0.138" solana-metrics = { path = "../metrics", version = "=1.11.2" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" } solana-sdk = { path = "../sdk", version = "=1.11.2" } diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 3960e9a523cdbd..3c531d069bf420 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -14,7 +14,7 @@ base64 = "0.13.0" bincode = "1.3.3" chrono-humanize = "0.2.1" log = "0.4.17" -serde = "1.0.137" +serde = "1.0.138" solana-banks-client = { path = "../banks-client", version = "=1.11.2" } solana-banks-server = { path = "../banks-server", version = "=1.11.2" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" } diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index 9c26e31c875f25..a784c1555277f9 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -15,7 +15,7 @@ bytemuck = "1.9.1" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" -serde = { version = "1.0.137", features = ["derive"] } +serde = { version = "1.0.138", features = ["derive"] } solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.2" } solana-program = { path = "../../sdk/program", version = "=1.11.2" } diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 9470c597b55090..35bc3a35a173e5 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -3654,9 +3654,9 @@ checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" [[package]] name = "serde" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" dependencies = [ "serde_derive", ] @@ -3672,9 +3672,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" dependencies = [ "proc-macro2 1.0.38", "quote 1.0.18", diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml index 6359b44f3164b8..7d293686795be6 100644 --- a/programs/config/Cargo.toml +++ b/programs/config/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] bincode = "1.3.3" chrono = { version = "0.4.11", features = ["serde"] } -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" } solana-sdk = { path = "../../sdk", version = "=1.11.2" } diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 
59ed2a502bfc40..74838f3763b338 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" solana-config-program = { path = "../config", version = "=1.11.2" } solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" } diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index ffc2a3d2fa3fd8..9d6ed359cba0f7 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.2" } diff --git a/rbpf-cli/Cargo.toml b/rbpf-cli/Cargo.toml index e238c262c9d4ac..4cfca09317f871 100644 --- a/rbpf-cli/Cargo.toml +++ b/rbpf-cli/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] clap = { version = "3.1.5", features = ["cargo"] } -serde = "1.0.137" +serde = "1.0.138" serde_json = "1.0.81" solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" } solana-logger = { path = "../logger", version = "=1.11.2" } diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index 7dd55e84e40179..deb0aa8d1273f5 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -17,7 +17,7 @@ crossbeam-channel = "0.5" futures-util = "0.3.21" log = "0.4.17" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } -serde = "1.0.137" +serde = "1.0.138" serde_json = "1.0.81" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } solana-client = { path = "../client", version = "=1.11.2" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 5ca0d1b1aed800..1944ad3c11b154 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -25,7 +25,7 @@ libc = "0.2.126" log = "0.4.17" rayon = "1.5.3" regex = "1.5.6" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" soketto = "0.7" diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index cacaa531be72f2..7f149b06c9813c 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -37,7 +37,7 @@ ouroboros = "0.15.0" rand = "0.7.0" rayon = "1.5.3" regex = "1.5.6" -serde = { version = "1.0.137", features = ["rc"] } +serde = { version = "1.0.138", features = ["rc"] } serde_derive = "1.0.103" solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.2" } solana-bucket-map = { path = "../bucket_map", version = "=1.11.2" } diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 27b3db48e3e256..e826a14917c250 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -65,7 +65,7 @@ qstring = "0.7.2" rand = { version = "0.7.0", optional = true } rand_chacha = { version = "0.2.2", optional = true } rustversion = "1.0.6" -serde = "1.0.137" +serde = "1.0.138" serde_bytes = "0.11" serde_derive = "1.0.103" serde_json = { version = "1.0.81", optional = true } diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index ec8ede5487ee7d..6de50a80cb37ce 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -24,7 +24,7 @@ hyper-proxy = "0.9.1" log = "0.4.17" prost = "0.10.4" prost-types = "0.10.0" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" smpl_jwt = "0.7.1" solana-metrics = { path = 
"../metrics", version = "=1.11.2" } diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml index a8e5e0c2abfa58..f3d941de85504b 100644 --- a/storage-proto/Cargo.toml +++ b/storage-proto/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" bincode = "1.3.3" bs58 = "0.4.0" prost = "0.10.4" -serde = "1.0.137" +serde = "1.0.138" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } solana-sdk = { path = "../sdk", version = "=1.11.2" } solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 378157fc22f330..23834f77cc469d 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -17,7 +17,7 @@ borsh = "0.9.1" bs58 = "0.4.0" lazy_static = "1.4.0" log = "0.4.17" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index cc9a5795acd9df..da99a5fc8f7399 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -26,7 +26,7 @@ jsonrpc-server-utils = "18.0.0" log = "0.4.17" num_cpus = "1.13.1" rand = "0.7.0" -serde = "1.0.137" +serde = "1.0.138" serde_json = "1.0.81" solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } solana-cli-config = { path = "../cli-config", version = "=1.11.2" } diff --git a/version/Cargo.toml b/version/Cargo.toml index a4eb255d23a6a4..fd5b46062fb7ae 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] log = "0.4.17" semver = "1.0.10" -serde = "1.0.137" +serde = "1.0.138" serde_derive = "1.0.103" solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } From d33c5486609a1dc7a6d4f30433f1f9d36ca11eff Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 6 Jul 2022 11:49:58 +0000 Subject: [PATCH 036/100] bypasses window-service stage before retransmitting shreds (#26291) With recent patches, window-service recv-window does not do much other than redirecting packets/shreds to downstream channels. The commit removes window-service recv-window and instead sends packets/shreds directly from sigverify to retransmit-stage and window-service insert thread. 
--- core/src/retransmit_stage.rs | 126 +++---------- core/src/sigverify_shreds.rs | 36 +++- core/src/tvu.rs | 73 +++++--- core/src/window_service.rs | 340 ++++++++++------------------------- 4 files changed, 208 insertions(+), 367 deletions(-) diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index 94fab21bc956db..c6d3855f72b640 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -3,17 +3,10 @@ use { crate::{ - ancestor_hashes_service::AncestorHashesReplayUpdateReceiver, - cluster_info_vote_listener::VerifiedVoteReceiver, cluster_nodes::{ClusterNodes, ClusterNodesCache}, - cluster_slots::ClusterSlots, - cluster_slots_service::{ClusterSlotsService, ClusterSlotsUpdateReceiver}, - completed_data_sets_service::CompletedDataSetsSender, packet_hasher::PacketHasher, - repair_service::{DuplicateSlotsResetSender, RepairInfo}, - window_service::WindowService, }, - crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, + crossbeam_channel::{Receiver, RecvTimeoutError}, itertools::{izip, Itertools}, lru::LruCache, rayon::{prelude::*, ThreadPool, ThreadPoolBuilder}, @@ -23,27 +16,25 @@ use { contact_info::ContactInfo, }, solana_ledger::{ - blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::{self, ShredId}, }, solana_measure::measure::Measure, - solana_perf::packet::PacketBatch, solana_rayon_threadlimit::get_thread_count, solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions}, solana_runtime::{bank::Bank, bank_forks::BankForks}, - solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp}, + solana_sdk::{clock::Slot, pubkey::Pubkey, timing::timestamp}, solana_streamer::{ sendmmsg::{multi_target_send, SendPktsError}, socket::SocketAddrSpace, }, std::{ - collections::{HashMap, HashSet}, + collections::HashMap, iter::repeat, net::UdpSocket, ops::AddAssign, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, RwLock, }, thread::{self, Builder, JoinHandle}, @@ -378,122 +369,61 @@ pub fn retransmitter( .unwrap(); Builder::new() .name("solana-retransmitter".to_string()) - .spawn(move || { - trace!("retransmitter started"); - loop { - match retransmit( - &thread_pool, - &bank_forks, - &leader_schedule_cache, - &cluster_info, - &shreds_receiver, - &sockets, - &mut stats, - &cluster_nodes_cache, - &mut hasher_reset_ts, - &mut shreds_received, - &mut packet_hasher, - &max_slots, - rpc_subscriptions.as_deref(), - ) { - Ok(()) => (), - Err(RecvTimeoutError::Timeout) => (), - Err(RecvTimeoutError::Disconnected) => break, - } + .spawn(move || loop { + match retransmit( + &thread_pool, + &bank_forks, + &leader_schedule_cache, + &cluster_info, + &shreds_receiver, + &sockets, + &mut stats, + &cluster_nodes_cache, + &mut hasher_reset_ts, + &mut shreds_received, + &mut packet_hasher, + &max_slots, + rpc_subscriptions.as_deref(), + ) { + Ok(()) => (), + Err(RecvTimeoutError::Timeout) => (), + Err(RecvTimeoutError::Disconnected) => break, } - trace!("exiting retransmitter"); }) .unwrap() } pub struct RetransmitStage { retransmit_thread_handle: JoinHandle<()>, - window_service: WindowService, - cluster_slots_service: ClusterSlotsService, } impl RetransmitStage { - #[allow(clippy::new_ret_no_self)] - #[allow(clippy::too_many_arguments)] pub(crate) fn new( bank_forks: Arc>, leader_schedule_cache: Arc, - blockstore: Arc, cluster_info: Arc, retransmit_sockets: Arc>, - repair_socket: Arc, - ancestor_hashes_socket: Arc, - 
verified_receiver: Receiver>, - exit: Arc, - cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, - epoch_schedule: EpochSchedule, - turbine_disabled: Arc, - cluster_slots: Arc, - duplicate_slots_reset_sender: DuplicateSlotsResetSender, - verified_vote_receiver: VerifiedVoteReceiver, - repair_validators: Option>, - completed_data_sets_sender: CompletedDataSetsSender, + retransmit_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, - duplicate_slots_sender: Sender, - ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver, ) -> Self { - let (retransmit_sender, retransmit_receiver) = unbounded(); - let retransmit_thread_handle = retransmitter( retransmit_sockets, - bank_forks.clone(), - leader_schedule_cache.clone(), - cluster_info.clone(), + bank_forks, + leader_schedule_cache, + cluster_info, retransmit_receiver, max_slots, rpc_subscriptions, ); - let cluster_slots_service = ClusterSlotsService::new( - blockstore.clone(), - cluster_slots.clone(), - bank_forks.clone(), - cluster_info.clone(), - cluster_slots_update_receiver, - exit.clone(), - ); - - let repair_info = RepairInfo { - bank_forks, - epoch_schedule, - duplicate_slots_reset_sender, - repair_validators, - cluster_info, - cluster_slots, - }; - let window_service = WindowService::new( - blockstore, - verified_receiver, - retransmit_sender, - repair_socket, - ancestor_hashes_socket, - exit, - repair_info, - leader_schedule_cache, - turbine_disabled, - verified_vote_receiver, - completed_data_sets_sender, - duplicate_slots_sender, - ancestor_hashes_replay_update_receiver, - ); - Self { retransmit_thread_handle, - window_service, - cluster_slots_service, } } pub(crate) fn join(self) -> thread::Result<()> { - self.retransmit_thread_handle.join()?; - self.window_service.join()?; - self.cluster_slots_service.join() + self.retransmit_thread_handle.join() } } diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index 2da6e428cdb54c..b32d045bc39682 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -14,7 +14,10 @@ use { solana_sdk::{clock::Slot, pubkey::Pubkey}, std::{ collections::HashMap, - sync::{Arc, RwLock}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, }, }; @@ -24,7 +27,9 @@ pub struct ShredSigVerifier { bank_forks: Arc>, leader_schedule_cache: Arc, recycler_cache: RecyclerCache, + retransmit_sender: Sender>>, packet_sender: Sender>, + turbine_disabled: Arc, } impl ShredSigVerifier { @@ -32,7 +37,9 @@ impl ShredSigVerifier { pubkey: Pubkey, bank_forks: Arc>, leader_schedule_cache: Arc, + retransmit_sender: Sender>>, packet_sender: Sender>, + turbine_disabled: Arc, ) -> Self { sigverify::init(); Self { @@ -40,7 +47,9 @@ impl ShredSigVerifier { bank_forks, leader_schedule_cache, recycler_cache: RecyclerCache::warmed(), + retransmit_sender, packet_sender, + turbine_disabled, } } } @@ -52,6 +61,20 @@ impl SigVerifier for ShredSigVerifier { &mut self, packet_batches: Vec, ) -> Result<(), SigVerifyServiceError> { + if self.turbine_disabled.load(Ordering::Relaxed) { + return Ok(()); + } + // Exclude repair packets from retransmit. + // TODO: return the error here! 
+ let _ = self.retransmit_sender.send( + packet_batches + .iter() + .flat_map(PacketBatch::iter) + .filter(|packet| !packet.meta.discard() && !packet.meta.repair()) + .filter_map(shred::layout::get_shred) + .map(<[u8]>::to_vec) + .collect(), + ); self.packet_sender.send(packet_batches)?; Ok(()) } @@ -140,8 +163,15 @@ pub mod tests { let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let bf = Arc::new(RwLock::new(BankForks::new(bank))); let (sender, receiver) = unbounded(); - let mut verifier = ShredSigVerifier::new(Pubkey::new_unique(), bf, cache, sender); - + let (retransmit_sender, _retransmit_receiver) = unbounded(); + let mut verifier = ShredSigVerifier::new( + Pubkey::new_unique(), + bf, + cache, + retransmit_sender, + sender, + Arc::::default(), // turbine_disabled + ); let batch_size = 2; let mut batch = PacketBatch::with_capacity(batch_size); batch.resize(batch_size, Packet::default()); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 3418bf84c76e7a..fe7d1d94b0f6b6 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -10,10 +10,12 @@ use { VerifiedVoteReceiver, VoteTracker, }, cluster_slots::ClusterSlots, + cluster_slots_service::ClusterSlotsService, completed_data_sets_service::CompletedDataSetsSender, cost_update_service::CostUpdateService, drop_bank_service::DropBankService, ledger_cleanup_service::LedgerCleanupService, + repair_service::RepairInfo, replay_stage::{ReplayStage, ReplayStageConfig}, retransmit_stage::RetransmitStage, rewards_recorder_service::RewardsRecorderSender, @@ -24,6 +26,7 @@ use { validator::ProcessBlockStore, voting_service::VotingService, warm_quic_cache_service::WarmQuicCacheService, + window_service::WindowService, }, crossbeam_channel::{unbounded, Receiver}, solana_client::connection_cache::ConnectionCache, @@ -61,6 +64,8 @@ pub struct Tvu { fetch_stage: ShredFetchStage, sigverify_stage: SigVerifyStage, retransmit_stage: RetransmitStage, + window_service: WindowService, + cluster_slots_service: ClusterSlotsService, replay_stage: ReplayStage, ledger_cleanup_service: Option, cost_update_service: CostUpdateService, @@ -157,45 +162,69 @@ impl Tvu { ); let (verified_sender, verified_receiver) = unbounded(); + let (retransmit_sender, retransmit_receiver) = unbounded(); let sigverify_stage = SigVerifyStage::new( fetch_receiver, ShredSigVerifier::new( cluster_info.id(), bank_forks.clone(), leader_schedule_cache.clone(), + retransmit_sender.clone(), verified_sender, + turbine_disabled, ), "shred-verifier", ); + let retransmit_stage = RetransmitStage::new( + bank_forks.clone(), + leader_schedule_cache.clone(), + cluster_info.clone(), + Arc::new(retransmit_sockets), + retransmit_receiver, + max_slots.clone(), + Some(rpc_subscriptions.clone()), + ); + let cluster_slots = Arc::new(ClusterSlots::default()); let (duplicate_slots_reset_sender, duplicate_slots_reset_receiver) = unbounded(); let (duplicate_slots_sender, duplicate_slots_receiver) = unbounded(); - let (cluster_slots_update_sender, cluster_slots_update_receiver) = unbounded(); let (ancestor_hashes_replay_update_sender, ancestor_hashes_replay_update_receiver) = unbounded(); - let retransmit_stage = RetransmitStage::new( - bank_forks.clone(), - leader_schedule_cache.clone(), + let window_service = { + let epoch_schedule = *bank_forks.read().unwrap().working_bank().epoch_schedule(); + let repair_info = RepairInfo { + bank_forks: bank_forks.clone(), + epoch_schedule, + duplicate_slots_reset_sender, + repair_validators: tvu_config.repair_validators, + cluster_info: cluster_info.clone(), + 
cluster_slots: cluster_slots.clone(), + }; + WindowService::new( + blockstore.clone(), + verified_receiver, + retransmit_sender, + repair_socket, + ancestor_hashes_socket, + exit.clone(), + repair_info, + leader_schedule_cache.clone(), + verified_vote_receiver, + completed_data_sets_sender, + duplicate_slots_sender, + ancestor_hashes_replay_update_receiver, + ) + }; + + let (cluster_slots_update_sender, cluster_slots_update_receiver) = unbounded(); + let cluster_slots_service = ClusterSlotsService::new( blockstore.clone(), + cluster_slots.clone(), + bank_forks.clone(), cluster_info.clone(), - Arc::new(retransmit_sockets), - repair_socket, - ancestor_hashes_socket, - verified_receiver, - exit.clone(), cluster_slots_update_receiver, - *bank_forks.read().unwrap().working_bank().epoch_schedule(), - turbine_disabled, - cluster_slots.clone(), - duplicate_slots_reset_sender, - verified_vote_receiver, - tvu_config.repair_validators, - completed_data_sets_sender, - max_slots.clone(), - Some(rpc_subscriptions.clone()), - duplicate_slots_sender, - ancestor_hashes_replay_update_receiver, + exit.clone(), ); let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = unbounded(); @@ -292,6 +321,8 @@ impl Tvu { fetch_stage, sigverify_stage, retransmit_stage, + window_service, + cluster_slots_service, replay_stage, ledger_cleanup_service, cost_update_service, @@ -304,6 +335,8 @@ impl Tvu { pub fn join(self) -> thread::Result<()> { self.retransmit_stage.join()?; + self.window_service.join()?; + self.cluster_slots_service.join()?; self.fetch_stage.join()?; self.sigverify_stage.join()?; if self.ledger_cleanup_service.is_some() { diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 55e448df4ff196..da4cbcb4452b11 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -22,7 +22,7 @@ use { solana_metrics::inc_new_counter_error, solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, - solana_sdk::{clock::Slot, pubkey::Pubkey}, + solana_sdk::clock::Slot, std::{ cmp::Reverse, collections::{HashMap, HashSet}, @@ -43,7 +43,10 @@ pub(crate) type DuplicateSlotReceiver = Receiver; #[derive(Default)] struct WindowServiceMetrics { run_insert_count: u64, - num_shreds_received: u64, + num_packets: usize, + num_repairs: usize, + num_shreds_received: usize, + handle_packets_elapsed_us: u64, shred_receiver_elapsed_us: u64, prune_shreds_elapsed_us: u64, num_shreds_pruned_invalid_repair: usize, @@ -52,14 +55,23 @@ struct WindowServiceMetrics { num_errors_cross_beam_recv_timeout: u64, num_errors_other: u64, num_errors_try_crossbeam_send: u64, + addrs: HashMap, } impl WindowServiceMetrics { fn report_metrics(&self, metric_name: &'static str) { + const MAX_NUM_ADDRS: usize = 5; datapoint_info!( metric_name, + ( + "handle_packets_elapsed_us", + self.handle_packets_elapsed_us, + i64 + ), ("run_insert_count", self.run_insert_count as i64, i64), - ("num_shreds_received", self.num_shreds_received as i64, i64), + ("num_packets", self.num_packets, i64), + ("num_repairs", self.num_repairs, i64), + ("num_shreds_received", self.num_shreds_received, i64), ( "shred_receiver_elapsed_us", self.shred_receiver_elapsed_us as i64, @@ -89,6 +101,19 @@ impl WindowServiceMetrics { i64 ), ); + + let mut addrs: Vec<_> = self.addrs.iter().collect(); + let reverse_count = |(_addr, count): &_| Reverse(*count); + if addrs.len() > MAX_NUM_ADDRS { + addrs.select_nth_unstable_by_key(MAX_NUM_ADDRS, reverse_count); + addrs.truncate(MAX_NUM_ADDRS); + } + 
addrs.sort_unstable_by_key(reverse_count); + info!( + "num addresses: {}, top packets by source: {:?}", + self.addrs.len(), + addrs + ); } fn record_error(&mut self, err: &Error) { @@ -105,52 +130,6 @@ impl WindowServiceMetrics { } } -#[derive(Default)] -struct ReceiveWindowStats { - num_iters: usize, - num_packets: usize, - num_repairs: usize, - num_shreds: usize, // num_discards: num_packets - num_shreds - elapsed: Duration, // excludes waiting time on the receiver channel. - addrs: HashMap, - since: Option, -} - -impl ReceiveWindowStats { - fn maybe_submit(&mut self) { - const MAX_NUM_ADDRS: usize = 5; - const SUBMIT_CADENCE: Duration = Duration::from_secs(2); - let elapsed = self.since.as_ref().map(Instant::elapsed); - if elapsed.unwrap_or(Duration::MAX) < SUBMIT_CADENCE { - return; - } - datapoint_info!( - "receive_window_stats", - ("num_iters", self.num_iters, i64), - ("num_packets", self.num_packets, i64), - ("num_shreds", self.num_shreds, i64), - ("num_repairs", self.num_repairs, i64), - ("elapsed_micros", self.elapsed.as_micros(), i64), - ); - let mut addrs: Vec<_> = std::mem::take(&mut self.addrs).into_iter().collect(); - let reverse_count = |(_addr, count): &_| Reverse(*count); - if addrs.len() > MAX_NUM_ADDRS { - addrs.select_nth_unstable_by_key(MAX_NUM_ADDRS, reverse_count); - addrs.truncate(MAX_NUM_ADDRS); - } - addrs.sort_unstable_by_key(reverse_count); - info!( - "num addresses: {}, top packets by source: {:?}", - self.addrs.len(), - addrs - ); - *self = Self { - since: Some(Instant::now()), - ..Self::default() - }; - } -} - fn run_check_duplicate( cluster_info: &ClusterInfo, blockstore: &Blockstore, @@ -229,8 +208,10 @@ fn prune_shreds_invalid_repair( assert_eq!(shreds.len(), repair_infos.len()); } +#[allow(clippy::too_many_arguments)] fn run_insert( - shred_receiver: &Receiver<(Vec, Vec>)>, + thread_pool: &ThreadPool, + verified_receiver: &Receiver>, blockstore: &Blockstore, leader_schedule_cache: &LeaderScheduleCache, handle_duplicate: F, @@ -243,26 +224,46 @@ fn run_insert( where F: Fn(Shred), { - ws_metrics.run_insert_count += 1; + const RECV_TIMEOUT: Duration = Duration::from_millis(200); let mut shred_receiver_elapsed = Measure::start("shred_receiver_elapsed"); - let timer = Duration::from_millis(200); - let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?; - while let Ok((more_shreds, more_repair_infos)) = shred_receiver.try_recv() { - shreds.extend(more_shreds); - repair_infos.extend(more_repair_infos); - } + let mut packets = verified_receiver.recv_timeout(RECV_TIMEOUT)?; + packets.extend(verified_receiver.try_iter().flatten()); shred_receiver_elapsed.stop(); ws_metrics.shred_receiver_elapsed_us += shred_receiver_elapsed.as_us(); - ws_metrics.num_shreds_received += shreds.len() as u64; - // TODO: Consider using thread-pool here instead of recv_window. - let (mut shreds, mut repair_infos): (Vec<_>, Vec<_>) = shreds - .into_iter() - .zip(repair_infos) - .filter_map(|(shred, repair_info)| { - let shred = Shred::new_from_serialized_shred(shred).ok()?; - Some((shred, repair_info)) - }) - .unzip(); + ws_metrics.run_insert_count += 1; + let handle_packet = |packet: &Packet| { + if packet.meta.discard() { + return None; + } + let shred = shred::layout::get_shred(packet)?; + let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?; + if packet.meta.repair() { + let repair_info = RepairMeta { + _from_addr: packet.meta.socket_addr(), + // If can't parse the nonce, dump the packet. 
+ nonce: repair_response::nonce(packet)?, + }; + Some((shred, Some(repair_info))) + } else { + Some((shred, None)) + } + }; + let now = Instant::now(); + let (mut shreds, mut repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| { + packets + .par_iter() + .flat_map_iter(|packets| packets.iter().filter_map(handle_packet)) + .unzip() + }); + ws_metrics.handle_packets_elapsed_us += now.elapsed().as_micros() as u64; + ws_metrics.num_packets += packets.iter().map(PacketBatch::len).sum::(); + ws_metrics.num_repairs += repair_infos.iter().filter(|r| r.is_some()).count(); + ws_metrics.num_shreds_received += shreds.len(); + for packet in packets.iter().flat_map(PacketBatch::iter) { + let addr = packet.meta.socket_addr(); + *ws_metrics.addrs.entry(addr).or_default() += 1; + } + let mut prune_shreds_elapsed = Measure::start("prune_shreds_elapsed"); let num_shreds = shreds.len(); prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, outstanding_requests); @@ -293,90 +294,12 @@ where Ok(()) } -fn recv_window( - insert_shred_sender: &Sender<(Vec, Vec>)>, - verified_receiver: &Receiver>, - retransmit_sender: &Sender>, - turbine_disabled: &AtomicBool, - thread_pool: &ThreadPool, - stats: &mut ReceiveWindowStats, -) -> Result<()> { - const RECV_TIMEOUT: Duration = Duration::from_millis(200); - let mut packet_batches = verified_receiver.recv_timeout(RECV_TIMEOUT)?; - packet_batches.extend(verified_receiver.try_iter().flatten()); - let now = Instant::now(); - let turbine_disabled = turbine_disabled.load(Ordering::Relaxed); - let handle_packet = |packet: &Packet| { - if turbine_disabled || packet.meta.discard() { - return None; - } - let shred = shred::layout::get_shred(packet)?; - if packet.meta.repair() { - let repair_info = RepairMeta { - _from_addr: packet.meta.socket_addr(), - // If can't parse the nonce, dump the packet. - nonce: repair_response::nonce(packet)?, - }; - Some((shred.to_vec(), Some(repair_info))) - } else { - Some((shred.to_vec(), None)) - } - }; - let (shreds, repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| { - packet_batches - .par_iter() - .flat_map_iter(|packet_batch| packet_batch.iter().filter_map(handle_packet)) - .unzip() - }); - // Exclude repair packets from retransmit. - let _ = retransmit_sender.send( - shreds - .iter() - .zip(&repair_infos) - .filter(|(_, repair_info)| repair_info.is_none()) - .map(|(shred, _)| shred) - .cloned() - .collect(), - ); - stats.num_repairs += repair_infos.iter().filter(|r| r.is_some()).count(); - stats.num_shreds += shreds.len(); - insert_shred_sender.send((shreds, repair_infos))?; - - stats.num_iters += 1; - stats.num_packets += packet_batches.iter().map(PacketBatch::len).sum::(); - for packet in packet_batches.iter().flat_map(PacketBatch::iter) { - let addr = packet.meta.socket_addr(); - *stats.addrs.entry(addr).or_default() += 1; - } - stats.elapsed += now.elapsed(); - Ok(()) -} - struct RepairMeta { _from_addr: SocketAddr, nonce: Nonce, } -// Implement a destructor for the window_service thread to signal it exited -// even on panics -struct Finalizer { - exit_sender: Arc, -} - -impl Finalizer { - fn new(exit_sender: Arc) -> Self { - Finalizer { exit_sender } - } -} -// Implement a destructor for Finalizer. 
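// [Editorial sketch] With recv-window gone, run_insert above deserializes
// shreds itself on a rayon pool: par_iter over the packet batches,
// flat_map_iter to filter-map each packet into a (shred, repair-info) pair,
// then unzip into two parallel vectors. A minimal sketch of that shape under
// simplified assumptions (u64 payloads standing in for shreds, Option<u32>
// for repair nonces) — not the actual solana-core code:
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;

fn parse_batches(batches: &[Vec<u64>]) -> (Vec<u64>, Vec<Option<u32>>) {
    // The patch caps the pool at min(get_thread_count(), 8); a fixed size
    // stands in for that here.
    let thread_pool = ThreadPoolBuilder::new().num_threads(4).build().unwrap();
    thread_pool.install(|| {
        batches
            .par_iter()
            .flat_map_iter(|batch| {
                batch.iter().filter_map(|&raw| -> Option<(u64, Option<u32>)> {
                    // Stand-in for the discard check plus
                    // Shred::new_from_serialized_shred / nonce parsing:
                    // drop odd payloads, keep even ones with no repair nonce.
                    if raw % 2 == 0 {
                        Some((raw, None))
                    } else {
                        None
                    }
                })
            })
            .unzip()
    })
}

// e.g. parse_batches(&[vec![1, 2, 3, 4]]) yields (vec![2, 4], vec![None, None]).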
-impl Drop for Finalizer { - fn drop(&mut self) { - self.exit_sender.clone().store(true, Ordering::Relaxed); - } -} - pub(crate) struct WindowService { - t_window: JoinHandle<()>, t_insert: JoinHandle<()>, t_check_duplicate: JoinHandle<()>, repair_service: RepairService, @@ -393,7 +316,6 @@ impl WindowService { exit: Arc, repair_info: RepairInfo, leader_schedule_cache: Arc, - turbine_disabled: Arc, verified_vote_receiver: VerifiedVoteReceiver, completed_data_sets_sender: CompletedDataSetsSender, duplicate_slots_sender: DuplicateSlotSender, @@ -402,7 +324,6 @@ impl WindowService { let outstanding_requests = Arc::>::default(); let cluster_info = repair_info.cluster_info.clone(); - let id = cluster_info.id(); let repair_service = RepairService::new( blockstore.clone(), @@ -415,7 +336,6 @@ impl WindowService { ancestor_hashes_replay_update_receiver, ); - let (insert_sender, insert_receiver) = unbounded(); let (duplicate_sender, duplicate_receiver) = unbounded(); let t_check_duplicate = Self::start_check_duplicate_thread( @@ -427,27 +347,17 @@ impl WindowService { ); let t_insert = Self::start_window_insert_thread( - exit.clone(), + exit, blockstore, leader_schedule_cache, - insert_receiver, + verified_receiver, duplicate_sender, completed_data_sets_sender, - retransmit_sender.clone(), - outstanding_requests, - ); - - let t_window = Self::start_recv_window_thread( - id, - exit, - insert_sender, - verified_receiver, - turbine_disabled, retransmit_sender, + outstanding_requests, ); WindowService { - t_window, t_insert, t_check_duplicate, repair_service, @@ -466,20 +376,17 @@ impl WindowService { }; Builder::new() .name("solana-check-duplicate".to_string()) - .spawn(move || loop { - if exit.load(Ordering::Relaxed) { - break; - } - - let mut noop = || {}; - if let Err(e) = run_check_duplicate( - &cluster_info, - &blockstore, - &duplicate_receiver, - &duplicate_slots_sender, - ) { - if Self::should_exit_on_error(e, &mut noop, &handle_error) { - break; + .spawn(move || { + while !exit.load(Ordering::Relaxed) { + if let Err(e) = run_check_duplicate( + &cluster_info, + &blockstore, + &duplicate_receiver, + &duplicate_slots_sender, + ) { + if Self::should_exit_on_error(e, &handle_error) { + break; + } } } }) @@ -490,17 +397,20 @@ impl WindowService { exit: Arc, blockstore: Arc, leader_schedule_cache: Arc, - insert_receiver: Receiver<(Vec, Vec>)>, + verified_receiver: Receiver>, check_duplicate_sender: Sender, completed_data_sets_sender: CompletedDataSetsSender, retransmit_sender: Sender>, outstanding_requests: Arc>, ) -> JoinHandle<()> { - let mut handle_timeout = || {}; let handle_error = || { inc_new_counter_error!("solana-window-insert-error", 1, 1); }; - + let thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(get_thread_count().min(8)) + .thread_name(|i| format!("window-insert-{}", i)) + .build() + .unwrap(); Builder::new() .name("solana-window-insert".to_string()) .spawn(move || { @@ -510,13 +420,10 @@ impl WindowService { let mut metrics = BlockstoreInsertionMetrics::default(); let mut ws_metrics = WindowServiceMetrics::default(); let mut last_print = Instant::now(); - loop { - if exit.load(Ordering::Relaxed) { - break; - } - + while !exit.load(Ordering::Relaxed) { if let Err(e) = run_insert( - &insert_receiver, + &thread_pool, + &verified_receiver, &blockstore, &leader_schedule_cache, &handle_duplicate, @@ -527,7 +434,7 @@ impl WindowService { &outstanding_requests, ) { ws_metrics.record_error(&e); - if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) { + if 
Self::should_exit_on_error(e, &handle_error) { break; } } @@ -544,71 +451,13 @@ impl WindowService { .unwrap() } - #[allow(clippy::too_many_arguments)] - fn start_recv_window_thread( - id: Pubkey, - exit: Arc, - insert_sender: Sender<(Vec, Vec>)>, - verified_receiver: Receiver>, - turbine_disabled: Arc, - retransmit_sender: Sender>, - ) -> JoinHandle<()> { - let mut stats = ReceiveWindowStats::default(); - Builder::new() - .name("solana-window".to_string()) - .spawn(move || { - let _exit = Finalizer::new(exit.clone()); - trace!("{}: RECV_WINDOW started", id); - let thread_pool = rayon::ThreadPoolBuilder::new() - .num_threads(get_thread_count()) - .build() - .unwrap(); - let mut now = Instant::now(); - let handle_error = || { - inc_new_counter_error!("solana-window-error", 1, 1); - }; - - while !exit.load(Ordering::Relaxed) { - let mut handle_timeout = || { - if now.elapsed() > Duration::from_secs(30) { - warn!( - "Window does not seem to be receiving data. \ - Ensure port configuration is correct..." - ); - now = Instant::now(); - } - }; - if let Err(e) = recv_window( - &insert_sender, - &verified_receiver, - &retransmit_sender, - &turbine_disabled, - &thread_pool, - &mut stats, - ) { - if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) { - break; - } - } else { - now = Instant::now(); - } - stats.maybe_submit(); - } - }) - .unwrap() - } - - fn should_exit_on_error(e: Error, handle_timeout: &mut F, handle_error: &H) -> bool + fn should_exit_on_error(e: Error, handle_error: &H) -> bool where - F: FnMut(), H: Fn(), { match e { Error::RecvTimeout(RecvTimeoutError::Disconnected) => true, - Error::RecvTimeout(RecvTimeoutError::Timeout) => { - handle_timeout(); - false - } + Error::RecvTimeout(RecvTimeoutError::Timeout) => false, Error::Send => true, _ => { handle_error(); @@ -619,7 +468,6 @@ impl WindowService { } pub(crate) fn join(self) -> thread::Result<()> { - self.t_window.join()?; self.t_insert.join()?; self.t_check_duplicate.join()?; self.repair_service.join() From 6f5857a5dba8bb75f5a56bf6210142b4db86f182 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 6 Jul 2022 12:03:13 +0000 Subject: [PATCH 037/100] removes feature gate code separating durable nonce from blockhash domain (#26055) --- account-decoder/src/parse_account_data.rs | 5 +- account-decoder/src/parse_nonce.rs | 5 +- cli/src/nonce.rs | 68 +++----- client/src/blockhash_query.rs | 8 +- rpc/src/rpc.rs | 9 +- rpc/src/transaction_status_service.rs | 13 +- runtime/src/accounts.rs | 145 ++++++------------ runtime/src/bank.rs | 77 +++------- runtime/src/nonce_keyed_account.rs | 54 +++---- runtime/src/system_instruction_processor.rs | 63 ++------ sdk/program/src/nonce/state/current.rs | 13 +- sdk/program/src/nonce/state/mod.rs | 139 +++++------------ sdk/src/nonce_account.rs | 109 +++---------- .../src/send_transaction_service.rs | 35 ++--- 14 files changed, 220 insertions(+), 523 deletions(-) diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs index 417a1e30604705..89d256dce7c28c 100644 --- a/account-decoder/src/parse_account_data.rs +++ b/account-decoder/src/parse_account_data.rs @@ -145,10 +145,7 @@ mod test { assert_eq!(parsed.program, "vote".to_string()); assert_eq!(parsed.space, VoteState::size_of() as u64); - let nonce_data = Versions::new( - State::Initialized(Data::default()), - true, // separate_domains - ); + let nonce_data = Versions::new(State::Initialized(Data::default())); let nonce_account_data = bincode::serialize(&nonce_data).unwrap(); let parsed = 
parse_account_data( &account_pubkey, diff --git a/account-decoder/src/parse_nonce.rs b/account-decoder/src/parse_nonce.rs index 5e541bd6db6938..0b589b0cd04cb4 100644 --- a/account-decoder/src/parse_nonce.rs +++ b/account-decoder/src/parse_nonce.rs @@ -57,10 +57,7 @@ mod test { #[test] fn test_parse_nonce() { - let nonce_data = Versions::new( - State::Initialized(Data::default()), - true, // separate_domains - ); + let nonce_data = Versions::new(State::Initialized(Data::default())); let nonce_account_data = bincode::serialize(&nonce_data).unwrap(); assert_eq!( parse_nonce(&nonce_account_data).unwrap(), diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 97aca199d9ba12..3047afa3784689 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -1006,14 +1006,14 @@ mod tests { #[test] fn test_check_nonce_account() { - let durable_nonce = - DurableNonce::from_blockhash(&Hash::default(), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::default()); let blockhash = *durable_nonce.as_hash(); let nonce_pubkey = solana_sdk::pubkey::new_rand(); - let data = Versions::new( - State::Initialized(nonce::state::Data::new(nonce_pubkey, durable_nonce, 0)), - true, // separate_domains - ); + let data = Versions::new(State::Initialized(nonce::state::Data::new( + nonce_pubkey, + durable_nonce, + 0, + ))); let valid = Account::new_data(1, &data, &system_program::ID); assert!(check_nonce_account(&valid.unwrap(), &nonce_pubkey, &blockhash).is_ok()); @@ -1031,16 +1031,12 @@ mod tests { assert_eq!(err, Error::InvalidAccountData,); } - let invalid_durable_nonce = - DurableNonce::from_blockhash(&hash(b"invalid"), /*separate_domains:*/ true); - let data = Versions::new( - State::Initialized(nonce::state::Data::new( - nonce_pubkey, - invalid_durable_nonce, - 0, - )), - true, // separate_domains - ); + let invalid_durable_nonce = DurableNonce::from_blockhash(&hash(b"invalid")); + let data = Versions::new(State::Initialized(nonce::state::Data::new( + nonce_pubkey, + invalid_durable_nonce, + 0, + ))); let invalid_hash = Account::new_data(1, &data, &system_program::ID).unwrap(); if let CliError::InvalidNonce(err) = check_nonce_account(&invalid_hash, &nonce_pubkey, &blockhash).unwrap_err() @@ -1055,14 +1051,11 @@ mod tests { } let new_nonce_authority = solana_sdk::pubkey::new_rand(); - let data = Versions::new( - State::Initialized(nonce::state::Data::new( - new_nonce_authority, - durable_nonce, - 0, - )), - true, // separate_domains - ); + let data = Versions::new(State::Initialized(nonce::state::Data::new( + new_nonce_authority, + durable_nonce, + 0, + ))); let invalid_authority = Account::new_data(1, &data, &system_program::ID); if let CliError::InvalidNonce(err) = check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash).unwrap_err() @@ -1076,7 +1069,7 @@ mod tests { ); } - let data = Versions::new(State::Uninitialized, /*separate_domains:*/ true); + let data = Versions::new(State::Uninitialized); let invalid_state = Account::new_data(1, &data, &system_program::ID); if let CliError::InvalidNonce(err) = check_nonce_account(&invalid_state.unwrap(), &nonce_pubkey, &blockhash).unwrap_err() @@ -1087,8 +1080,7 @@ mod tests { #[test] fn test_account_identity_ok() { - let nonce_account = - nonce_account::create_account(1, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1).into_inner(); assert_eq!(account_identity_ok(&nonce_account), Ok(())); let system_account = Account::new(1, 0, &system_program::id()); @@ -1107,18 +1099,13 @@ 
mod tests { #[test] fn test_state_from_account() { - let mut nonce_account = - nonce_account::create_account(1, /*separate_domains:*/ true).into_inner(); + let mut nonce_account = nonce_account::create_account(1).into_inner(); assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized)); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new(&[42u8; 32]), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[42u8; 32])); let data = nonce::state::Data::new(Pubkey::new(&[1u8; 32]), durable_nonce, 42); nonce_account - .set_state(&Versions::new( - State::Initialized(data.clone()), - true, // separate_domains - )) + .set_state(&Versions::new(State::Initialized(data.clone()))) .unwrap(); assert_eq!( state_from_account(&nonce_account), @@ -1134,8 +1121,7 @@ mod tests { #[test] fn test_data_from_helpers() { - let mut nonce_account = - nonce_account::create_account(1, /*separate_domains:*/ true).into_inner(); + let mut nonce_account = nonce_account::create_account(1).into_inner(); let state = state_from_account(&nonce_account).unwrap(); assert_eq!( data_from_state(&state), @@ -1146,14 +1132,10 @@ mod tests { Err(Error::InvalidStateForOperation) ); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new(&[42u8; 32]), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[42u8; 32])); let data = nonce::state::Data::new(Pubkey::new(&[1u8; 32]), durable_nonce, 42); nonce_account - .set_state(&Versions::new( - State::Initialized(data.clone()), - true, // separate_domains - )) + .set_state(&Versions::new(State::Initialized(data.clone()))) .unwrap(); let state = state_from_account(&nonce_account).unwrap(); assert_eq!(data_from_state(&state), Ok(&data)); diff --git a/client/src/blockhash_query.rs b/client/src/blockhash_query.rs index 638c51910314ed..1b630351c94471 100644 --- a/client/src/blockhash_query.rs +++ b/client/src/blockhash_query.rs @@ -416,8 +416,7 @@ mod tests { .get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default()) .is_err()); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new(&[2u8; 32]), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[2u8; 32])); let nonce_blockhash = *durable_nonce.as_hash(); let nonce_fee_calc = FeeCalculator::new(4242); let data = nonce::state::Data { @@ -427,10 +426,7 @@ mod tests { }; let nonce_account = Account::new_data_with_space( 42, - &nonce::state::Versions::new( - nonce::State::Initialized(data), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(data)), nonce::State::size(), &system_program::id(), ) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 86245401d5f3a6..7d4b3235b82748 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -5582,10 +5582,11 @@ pub mod tests { let authority = Pubkey::new_unique(); let account = AccountSharedData::new_data( 42, - &nonce::state::Versions::new( - nonce::State::new_initialized(&authority, DurableNonce::default(), 1000), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::new_initialized( + &authority, + DurableNonce::default(), + 1000, + )), &system_program::id(), ) .unwrap(); diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index abdb96b1754f08..53493780345a3f 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -347,16 +347,13 @@ pub(crate) mod tests { let expected_transaction = 
transaction.clone(); let pubkey = Pubkey::new_unique(); - let mut nonce_account = - nonce_account::create_account(1, /*separate_domains:*/ true).into_inner(); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new(&[42u8; 32]), /*separate_domains:*/ true); + let mut nonce_account = nonce_account::create_account(1).into_inner(); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[42u8; 32])); let data = nonce::state::Data::new(Pubkey::new(&[1u8; 32]), durable_nonce, 42); nonce_account - .set_state(&nonce::state::Versions::new( - nonce::State::Initialized(data), - true, // separate_domains - )) + .set_state(&nonce::state::Versions::new(nonce::State::Initialized( + data, + ))) .unwrap(); let message = build_message(); diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 2b9040930a7f9e..cbe07fd6a45ea7 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1191,7 +1191,7 @@ impl Accounts { res: &'a [TransactionExecutionResult], loaded: &'a mut [TransactionLoadResult], rent_collector: &RentCollector, - durable_nonce: &(DurableNonce, /*separate_domains:*/ bool), + durable_nonce: &DurableNonce, lamports_per_signature: u64, leave_nonce_on_success: bool, ) { @@ -1227,7 +1227,7 @@ impl Accounts { execution_results: &'a [TransactionExecutionResult], load_results: &'a mut [TransactionLoadResult], rent_collector: &RentCollector, - durable_nonce: &(DurableNonce, /*separate_domains:*/ bool), + durable_nonce: &DurableNonce, lamports_per_signature: u64, leave_nonce_on_success: bool, ) -> ( @@ -1319,7 +1319,7 @@ fn prepare_if_nonce_account<'a>( execution_result: &Result<()>, is_fee_payer: bool, maybe_nonce: Option<(&'a NonceFull, bool)>, - &(durable_nonce, separate_domains): &(DurableNonce, bool), + &durable_nonce: &DurableNonce, lamports_per_signature: u64, ) -> bool { if let Some((nonce, rollback)) = maybe_nonce { @@ -1345,7 +1345,7 @@ fn prepare_if_nonce_account<'a>( durable_nonce, lamports_per_signature, ); - let nonce_versions = NonceVersions::new(nonce_state, separate_domains); + let nonce_versions = NonceVersions::new(nonce_state); account.set_state(&nonce_versions).unwrap(); } true @@ -1728,10 +1728,7 @@ mod tests { nonce.pubkey(), AccountSharedData::new_data( min_balance + lamports_per_signature, - &NonceVersions::new( - NonceState::Initialized(nonce::state::Data::default()), - true, // separate_domains - ), + &NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())), &system_program::id(), ) .unwrap(), @@ -3038,7 +3035,7 @@ mod tests { &execution_results, loaded.as_mut_slice(), &rent_collector, - &(DurableNonce::default(), /*separate_domains:*/ true), + &DurableNonce::default(), 0, true, // leave_nonce_on_success ); @@ -3192,24 +3189,20 @@ mod tests { Pubkey, AccountSharedData, AccountSharedData, - (DurableNonce, /*separate_domains:*/ bool), + DurableNonce, u64, Option, ) { - let data = NonceVersions::new( - NonceState::Initialized(nonce::state::Data::default()), - true, // separate_domains - ); + let data = NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())); let account = AccountSharedData::new_data(42, &data, &system_program::id()).unwrap(); let mut pre_account = account.clone(); pre_account.set_lamports(43); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new(&[1u8; 32]), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[1u8; 32])); ( Pubkey::default(), pre_account, account, - (durable_nonce, /*separate_domains:*/ true), + durable_nonce, 1234, 
None, ) @@ -3221,7 +3214,7 @@ mod tests { tx_result: &Result<()>, is_fee_payer: bool, maybe_nonce: Option<(&NonceFull, bool)>, - durable_nonce: &(DurableNonce, /*separate_domains:*/ bool), + durable_nonce: &DurableNonce, lamports_per_signature: u64, expect_account: &AccountSharedData, ) -> bool { @@ -3267,14 +3260,9 @@ mod tests { let mut expect_account = pre_account; expect_account - .set_state(&NonceVersions::new( - NonceState::Initialized(nonce::state::Data::new( - Pubkey::default(), - blockhash.0, - lamports_per_signature, - )), - true, // separate_domains - )) + .set_state(&NonceVersions::new(NonceState::Initialized( + nonce::state::Data::new(Pubkey::default(), blockhash, lamports_per_signature), + ))) .unwrap(); assert!(run_prepare_if_nonce_account_test( @@ -3358,14 +3346,9 @@ mod tests { let nonce = NonceFull::new(pre_account_address, pre_account, maybe_fee_payer_account); expect_account - .set_state(&NonceVersions::new( - NonceState::Initialized(nonce::state::Data::new( - Pubkey::default(), - blockhash.0, - lamports_per_signature, - )), - true, // separate_domains - )) + .set_state(&NonceVersions::new(NonceState::Initialized( + nonce::state::Data::new(Pubkey::default(), blockhash, lamports_per_signature), + ))) .unwrap(); assert!(run_prepare_if_nonce_account_test( @@ -3405,7 +3388,7 @@ mod tests { )), false, Some((&nonce, true)), - &(DurableNonce::default(), /*separate_domains:*/ true), + &DurableNonce::default(), 1, &post_fee_payer_account.clone(), )); @@ -3416,7 +3399,7 @@ mod tests { &Ok(()), true, Some((&nonce, true)), - &(DurableNonce::default(), /*separate_domains:*/ true), + &DurableNonce::default(), 1, &post_fee_payer_account.clone(), )); @@ -3430,7 +3413,7 @@ mod tests { )), true, None, - &(DurableNonce::default(), /*separate_domains:*/ true), + &DurableNonce::default(), 1, &post_fee_payer_account.clone(), )); @@ -3444,7 +3427,7 @@ mod tests { )), true, Some((&nonce, true)), - &(DurableNonce::default(), /*separate_domains:*/ true), + &DurableNonce::default(), 1, &pre_fee_payer_account, )); @@ -3459,16 +3442,12 @@ mod tests { let from = keypair_from_seed(&[1; 32]).unwrap(); let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); - let nonce_state = NonceVersions::new( - NonceState::Initialized(nonce::state::Data::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - )), - true, // separate_domains - ); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); let nonce_account_post = AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default()); @@ -3491,16 +3470,12 @@ mod tests { ]; let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); - let nonce_state = NonceVersions::new( - NonceState::Initialized(nonce::state::Data::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - )), - true, // separate_domains - ); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); let nonce_account_pre = AccountSharedData::new_data(42, 
&nonce_state, &system_program::id()).unwrap(); let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); @@ -3523,8 +3498,7 @@ mod tests { let mut loaded = vec![loaded]; - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let accounts = Accounts::new_with_config_for_tests( Vec::new(), &ClusterType::Development, @@ -3545,7 +3519,7 @@ mod tests { &execution_results, loaded.as_mut_slice(), &rent_collector, - &(durable_nonce, /*separate_domains:*/ true), + &durable_nonce, 0, true, // leave_nonce_on_success ); @@ -3570,11 +3544,7 @@ mod tests { nonce_account_pre.lamports(), ); assert_matches!( - nonce_account::verify_nonce_account( - &collected_nonce_account, - durable_nonce.as_hash(), - true, // separate_domins - ), + nonce_account::verify_nonce_account(&collected_nonce_account, durable_nonce.as_hash()), Some(_) ); } @@ -3588,16 +3558,12 @@ mod tests { let from = keypair_from_seed(&[1; 32]).unwrap(); let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); - let nonce_state = NonceVersions::new( - NonceState::Initialized(nonce::state::Data::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - )), - true, // separate_domains - ); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); let nonce_account_post = AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); let from_account_post = AccountSharedData::new(4200, 0, &Pubkey::default()); @@ -3620,16 +3586,12 @@ mod tests { ]; let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); - let nonce_state = NonceVersions::new( - NonceState::Initialized(nonce::state::Data::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - )), - true, // separate_domains - ); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(nonce::state::Data::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); let nonce_account_pre = AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); @@ -3651,8 +3613,7 @@ mod tests { let mut loaded = vec![loaded]; - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let accounts = Accounts::new_with_config_for_tests( Vec::new(), &ClusterType::Development, @@ -3673,7 +3634,7 @@ mod tests { &execution_results, loaded.as_mut_slice(), &rent_collector, - &(durable_nonce, /*separate_domains:*/ true), + &durable_nonce, 0, true, // leave_nonce_on_success ); @@ -3689,11 +3650,7 @@ mod tests { nonce_account_pre.lamports() ); assert_matches!( - nonce_account::verify_nonce_account( - &collected_nonce_account, - durable_nonce.as_hash(), - true, // separate_domins - ), + nonce_account::verify_nonce_account(&collected_nonce_account, durable_nonce.as_hash()), Some(_) ); } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4395cd272a4862..82c0149e930c89 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4061,11 +4061,6 @@ impl Bank { 
self.rc.accounts.accounts_db.set_shrink_paths(paths); } - pub fn separate_nonce_from_blockhash(&self) -> bool { - self.feature_set - .is_active(&feature_set::separate_nonce_from_blockhash::id()) - } - fn check_age<'a>( &self, txs: impl Iterator, @@ -4073,15 +4068,12 @@ impl Bank { max_age: usize, error_counters: &mut TransactionErrorMetrics, ) -> Vec { - let separate_nonce_from_blockhash = self.separate_nonce_from_blockhash(); - let enable_durable_nonce = separate_nonce_from_blockhash - && self - .feature_set - .is_active(&feature_set::enable_durable_nonce::id()); + let enable_durable_nonce = self + .feature_set + .is_active(&feature_set::enable_durable_nonce::id()); let hash_queue = self.blockhash_queue.read().unwrap(); let last_blockhash = hash_queue.last_hash(); - let next_durable_nonce = - DurableNonce::from_blockhash(&last_blockhash, separate_nonce_from_blockhash); + let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); txs.zip(lock_results) .map(|(tx, lock_res)| match lock_res { @@ -4155,11 +4147,8 @@ impl Bank { let nonce_address = message.get_durable_nonce(self.feature_set.is_active(&nonce_must_be_writable::id()))?; let nonce_account = self.get_account_with_fixed_root(nonce_address)?; - let nonce_data = nonce_account::verify_nonce_account( - &nonce_account, - message.recent_blockhash(), - self.separate_nonce_from_blockhash(), - )?; + let nonce_data = + nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?; if self .feature_set @@ -4946,12 +4935,7 @@ impl Bank { } let mut write_time = Measure::start("write_time"); - let durable_nonce = { - let separate_nonce_from_blockhash = self.separate_nonce_from_blockhash(); - let durable_nonce = - DurableNonce::from_blockhash(&last_blockhash, separate_nonce_from_blockhash); - (durable_nonce, separate_nonce_from_blockhash) - }; + let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); self.rc.accounts.store_cached( self.slot(), sanitized_txs, @@ -7893,18 +7877,14 @@ pub(crate) mod tests { let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let nonce_account = AccountSharedData::new_data( 43, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::new( - Pubkey::default(), - durable_nonce, - lamports_per_signature, - )), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::new( + Pubkey::default(), + durable_nonce, + lamports_per_signature, + ))), &system_program::id(), ) .unwrap(); @@ -10516,10 +10496,7 @@ pub(crate) mod tests { let nonce = Keypair::new(); let nonce_account = AccountSharedData::new_data( min_balance + 42, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::default()), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::default())), &system_program::id(), ) .unwrap(); @@ -12844,12 +12821,9 @@ pub(crate) mod tests { impl Bank { fn next_durable_nonce(&self) -> DurableNonce { - let separate_nonce_from_blockhash = self - .feature_set - .is_active(&feature_set::separate_nonce_from_blockhash::id()); let hash_queue = self.blockhash_queue.read().unwrap(); let last_blockhash = hash_queue.last_hash(); - DurableNonce::from_blockhash(&last_blockhash, separate_nonce_from_blockhash) + DurableNonce::from_blockhash(&last_blockhash) } 
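
Seen together, the `check_age` and `check_transaction_for_nonce` hunks reduce durable-nonce validation to two steps: derive the expected value from the transaction's `recent_blockhash` field, then ask the nonce account to confirm it. A hedged sketch of that path, assuming it runs inside an Option-returning helper with `bank` and `message` in scope (names taken from the hunks above):

    let nonce_address =
        message.get_durable_nonce(/* nonce_must_be_writable: */ true)?;
    let nonce_account = bank.get_account_with_fixed_root(nonce_address)?;
    // Some(..) only for a Current, Initialized nonce whose stored durable
    // nonce equals the transaction's recent_blockhash field.
    let nonce_data =
        nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?;
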
} @@ -13029,10 +13003,7 @@ pub(crate) mod tests { let nonce = Keypair::new(); let nonce_account = AccountSharedData::new_data( 42_424_242, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::default()), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::default())), &system_program::id(), ) .unwrap(); @@ -13058,18 +13029,14 @@ pub(crate) mod tests { let bank = Arc::new(bank); let nonce_keypair = Keypair::new(); let nonce_authority = nonce_keypair.pubkey(); - let durable_nonce = - DurableNonce::from_blockhash(&bank.last_blockhash(), true /* separate domains */); + let durable_nonce = DurableNonce::from_blockhash(&bank.last_blockhash()); let nonce_account = AccountSharedData::new_data( 42_424_242, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::new( - nonce_authority, - durable_nonce, - 5000, - )), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::new( + nonce_authority, + durable_nonce, + 5000, + ))), &system_program::id(), ) .unwrap(); diff --git a/runtime/src/nonce_keyed_account.rs b/runtime/src/nonce_keyed_account.rs index fb0468278d43e6..2e0ae14893d6de 100644 --- a/runtime/src/nonce_keyed_account.rs +++ b/runtime/src/nonce_keyed_account.rs @@ -16,15 +16,6 @@ use { std::collections::HashSet, }; -fn get_durable_nonce(invoke_context: &InvokeContext) -> (DurableNonce, /*separate_domains:*/ bool) { - let separate_nonce_from_blockhash = invoke_context - .feature_set - .is_active(&feature_set::separate_nonce_from_blockhash::id()); - let durable_nonce = - DurableNonce::from_blockhash(&invoke_context.blockhash, separate_nonce_from_blockhash); - (durable_nonce, separate_nonce_from_blockhash) -} - pub fn advance_nonce_account( account: &mut BorrowedAccount, signers: &HashSet, @@ -58,7 +49,7 @@ pub fn advance_nonce_account( ); return Err(InstructionError::MissingRequiredSignature); } - let (next_durable_nonce, separate_domains) = get_durable_nonce(invoke_context); + let next_durable_nonce = DurableNonce::from_blockhash(&invoke_context.blockhash); if data.durable_nonce == next_durable_nonce { ic_msg!( invoke_context, @@ -75,10 +66,7 @@ pub fn advance_nonce_account( next_durable_nonce, invoke_context.lamports_per_signature, ); - account.set_state(&Versions::new( - State::Initialized(new_data), - separate_domains, - )) + account.set_state(&Versions::new(State::Initialized(new_data))) } State::Uninitialized => { ic_msg!( @@ -139,7 +127,7 @@ pub fn withdraw_nonce_account( } State::Initialized(ref data) => { if lamports == from.get_lamports() { - let (durable_nonce, separate_domains) = get_durable_nonce(invoke_context); + let durable_nonce = DurableNonce::from_blockhash(&invoke_context.blockhash); if data.durable_nonce == durable_nonce { ic_msg!( invoke_context, @@ -150,7 +138,7 @@ pub fn withdraw_nonce_account( merge_nonce_error_into_system_error, )); } - from.set_state(&Versions::new(State::Uninitialized, separate_domains))?; + from.set_state(&Versions::new(State::Uninitialized))?; } else { let min_balance = rent.minimum_balance(from.get_data().len()); let amount = checked_add(lamports, min_balance)?; @@ -221,14 +209,14 @@ pub fn initialize_nonce_account( ); return Err(InstructionError::InsufficientFunds); } - let (durable_nonce, separate_domains) = get_durable_nonce(invoke_context); + let durable_nonce = DurableNonce::from_blockhash(&invoke_context.blockhash); let data = nonce::state::Data::new( *nonce_authority, 
durable_nonce, invoke_context.lamports_per_signature, ); let state = State::Initialized(data); - account.set_state(&Versions::new(state, separate_domains)) + account.set_state(&Versions::new(state)) } State::Initialized(_) => { ic_msg!( @@ -335,12 +323,9 @@ mod test { let accounts = vec![ ( Pubkey::new_unique(), - create_account(from_lamports, /*separate_domains:*/ true).into_inner(), - ), - ( - Pubkey::new_unique(), - create_account(42, /*separate_domains:*/ true).into_inner(), + create_account(from_lamports).into_inner(), ), + (Pubkey::new_unique(), create_account(42).into_inner()), (system_program::id(), AccountSharedData::default()), ]; let $instruction_accounts = vec![ @@ -403,7 +388,7 @@ mod test { let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( data.authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); // First nonce instruction drives state from Uninitialized to Initialized @@ -413,7 +398,7 @@ mod test { let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( data.authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); // Second nonce instruction consumes and replaces stored nonce @@ -423,7 +408,7 @@ mod test { let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( data.authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); // Third nonce instruction for fun and profit @@ -482,7 +467,7 @@ mod test { let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); assert_eq!(versions.state(), &State::Initialized(data)); @@ -794,7 +779,7 @@ mod test { let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); assert_eq!(versions.state(), &State::Initialized(data.clone())); @@ -823,7 +808,7 @@ mod test { let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( data.authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); assert_eq!(versions.state(), &State::Initialized(data)); @@ -1015,7 +1000,7 @@ mod test { initialize_nonce_account(&mut nonce_account, &authorized, &rent, &invoke_context); let data = nonce::state::Data::new( authorized, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); assert_eq!(result, Ok(())); @@ -1084,7 +1069,7 @@ mod test { let authority = Pubkey::default(); let data = nonce::state::Data::new( authority, - get_durable_nonce(&invoke_context).0, + DurableNonce::from_blockhash(&invoke_context.blockhash), invoke_context.lamports_per_signature, ); authorize_nonce_account(&mut nonce_account, &authority, &signers, &invoke_context).unwrap(); @@ -1164,8 +1149,7 @@ mod test { .get_account_at_index(NONCE_ACCOUNT_INDEX) .unwrap() .borrow(), - get_durable_nonce(&invoke_context).0.as_hash(), - true, // separate_domins + 
DurableNonce::from_blockhash(&invoke_context.blockhash).as_hash(), ), Some(_) ); @@ -1187,7 +1171,6 @@ mod test { .unwrap() .borrow(), &Hash::default(), - true, // separate_domins ), None ); @@ -1227,8 +1210,7 @@ mod test { .get_account_at_index(NONCE_ACCOUNT_INDEX) .unwrap() .borrow(), - get_durable_nonce(&invoke_context).0.as_hash(), - true, // separate_domins + DurableNonce::from_blockhash(&invoke_context.blockhash).as_hash(), ), None ); diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index e69c8680c515a7..2eb7621cf59164 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -488,12 +488,6 @@ pub fn process_instruction( authorize_nonce_account(&mut me, &nonce_authority, &signers, invoke_context) } SystemInstruction::UpgradeNonceAccount => { - let separate_nonce_from_blockhash = invoke_context - .feature_set - .is_active(&feature_set::separate_nonce_from_blockhash::id()); - if !separate_nonce_from_blockhash { - return Err(InstructionError::InvalidInstructionData); - } instruction_context.check_number_of_instruction_accounts(1)?; let mut nonce_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; @@ -1224,10 +1218,7 @@ mod tests { let nonce = Pubkey::new_unique(); let nonce_account = AccountSharedData::new_data( 42, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::default()), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::default())), &system_program::id(), ) .unwrap(); @@ -1516,13 +1507,10 @@ mod tests { let from = Pubkey::new_unique(); let from_account = AccountSharedData::new_data( 100, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data { - authority: from, - ..nonce::state::Data::default() - }), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data { + authority: from, + ..nonce::state::Data::default() + })), &system_program::id(), ) .unwrap(); @@ -1819,8 +1807,7 @@ mod tests { #[test] fn test_process_nonce_ix_ok() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); let accounts = process_instruction( @@ -1927,8 +1914,7 @@ mod tests { #[test] fn test_process_withdraw_ix_ok() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); let pubkey = Pubkey::new_unique(); #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); @@ -1981,8 +1967,7 @@ mod tests { #[test] fn test_process_initialize_ix_only_nonce_acc_fail() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); process_instruction( &serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(), vec![(nonce_address, nonce_account)], @@ -1999,8 +1984,7 @@ mod tests { #[test] fn test_process_initialize_ix_ok() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - 
nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); process_instruction( @@ -2035,8 +2019,7 @@ mod tests { #[test] fn test_process_authorize_ix_ok() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); let accounts = process_instruction( @@ -2105,10 +2088,7 @@ mod tests { fn test_get_system_account_kind_nonce_ok() { let nonce_account = AccountSharedData::new_data( 42, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::default()), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::default())), &system_program::id(), ) .unwrap(); @@ -2121,9 +2101,7 @@ mod tests { #[test] fn test_get_system_account_kind_uninitialized_nonce_account_fail() { assert_eq!( - get_system_account_kind( - &nonce_account::create_account(42, /*separate_domains:*/ true).borrow() - ), + get_system_account_kind(&nonce_account::create_account(42).borrow()), None ); } @@ -2139,10 +2117,7 @@ mod tests { fn test_get_system_account_kind_nonsystem_owner_with_nonce_data_fail() { let nonce_account = AccountSharedData::new_data( 42, - &nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::default()), - true, // separate_domains - ), + &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::default())), &Pubkey::new_unique(), ) .unwrap(); @@ -2152,8 +2127,7 @@ mod tests { #[test] fn test_nonce_initialize_with_empty_recent_blockhashes_fail() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); #[allow(deprecated)] @@ -2193,8 +2167,7 @@ mod tests { #[test] fn test_nonce_advance_with_empty_recent_blockhashes_fail() { let nonce_address = Pubkey::new_unique(); - let nonce_account = - nonce_account::create_account(1_000_000, /*separate_domains:*/ true).into_inner(); + let nonce_account = nonce_account::create_account(1_000_000).into_inner(); #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); let accounts = process_instruction( @@ -2328,8 +2301,7 @@ mod tests { assert_eq!(accounts.len(), 1); assert_eq!(accounts[0], nonce_account); let blockhash = Hash::from([171; 32]); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ false); + let durable_nonce = DurableNonce::from_blockhash(&blockhash); let data = NonceData { authority: Pubkey::new_unique(), durable_nonce, @@ -2365,8 +2337,7 @@ mod tests { ); assert_eq!(accounts.len(), 1); let nonce_account = accounts.remove(0); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(durable_nonce.as_hash()); assert_ne!(data.durable_nonce, durable_nonce); let data = NonceData { durable_nonce, diff --git a/sdk/program/src/nonce/state/current.rs b/sdk/program/src/nonce/state/current.rs index 0839ff9fa298a1..a688cfbe9d1f26 100644 --- 
a/sdk/program/src/nonce/state/current.rs +++ b/sdk/program/src/nonce/state/current.rs @@ -53,12 +53,8 @@ impl Data { } impl DurableNonce { - pub fn from_blockhash(blockhash: &Hash, separate_domains: bool) -> Self { - Self(if separate_domains { - hashv(&[DURABLE_NONCE_HASH_PREFIX, blockhash.as_ref()]) - } else { - *blockhash - }) + pub fn from_blockhash(blockhash: &Hash) -> Self { + Self(hashv(&[DURABLE_NONCE_HASH_PREFIX, blockhash.as_ref()])) } /// Hash value used as recent_blockhash field in Transactions. @@ -110,10 +106,7 @@ mod test { #[test] fn test_nonce_state_size() { - let data = Versions::new( - State::Initialized(Data::default()), - true, // separate_domains - ); + let data = Versions::new(State::Initialized(Data::default())); let size = bincode::serialized_size(&data).unwrap(); assert_eq!(State::size() as u64, size); } diff --git a/sdk/program/src/nonce/state/mod.rs b/sdk/program/src/nonce/state/mod.rs index cb34e467d5f4fe..a4a850b93c1cdc 100644 --- a/sdk/program/src/nonce/state/mod.rs +++ b/sdk/program/src/nonce/state/mod.rs @@ -22,12 +22,8 @@ pub enum AuthorizeNonceError { } impl Versions { - pub fn new(state: State, separate_domains: bool) -> Self { - if separate_domains { - Self::Current(Box::new(state)) - } else { - Self::Legacy(Box::new(state)) - } + pub fn new(state: State) -> Self { + Self::Current(Box::new(state)) } pub fn state(&self) -> &State { @@ -42,23 +38,17 @@ impl Versions { pub fn verify_recent_blockhash( &self, recent_blockhash: &Hash, // Transaction.message.recent_blockhash - separate_domains: bool, ) -> Option<&Data> { - let state = match self { - Self::Legacy(state) => { - if separate_domains { - // Legacy durable nonces are invalid and should not - // allow durable transactions. - return None; - } else { - state + match self { + // Legacy durable nonces are invalid and should not + // allow durable transactions. + Self::Legacy(_) => None, + Self::Current(state) => match **state { + State::Uninitialized => None, + State::Initialized(ref data) => { + (recent_blockhash == &data.blockhash()).then(|| data) } - } - Self::Current(state) => state, - }; - match **state { - State::Uninitialized => None, - State::Initialized(ref data) => (recent_blockhash == &data.blockhash()).then(|| data), + }, } } @@ -73,10 +63,7 @@ impl Versions { // upgrade Uninitialized legacy nonces. 
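
The `from_blockhash` hunk above is the crux of the series: a durable nonce is now always a prefixed hash of the blockhash, so nonce values and live blockhashes occupy separate domains. A minimal sketch of the property, assuming a value for the prefix constant (only its name appears in the diff):

    use solana_program::hash::{hashv, Hash};

    // Assumed literal; the hunk above references the constant by name only.
    const DURABLE_NONCE_HASH_PREFIX: &[u8] = b"DURABLE_NONCE";

    fn durable_nonce_hash(blockhash: &Hash) -> Hash {
        hashv(&[DURABLE_NONCE_HASH_PREFIX, blockhash.as_ref()])
    }

    fn main() {
        let blockhash = Hash::new_unique();
        // Separate domains: the derived nonce is (cryptographically) never the
        // raw blockhash, which is why legacy nonce values stop verifying below.
        assert_ne!(durable_nonce_hash(&blockhash), blockhash);
    }
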
State::Uninitialized => None, State::Initialized(ref mut data) => { - data.durable_nonce = DurableNonce::from_blockhash( - &data.blockhash(), - true, // separate_domains - ); + data.durable_nonce = DurableNonce::from_blockhash(&data.blockhash()); Some(Self::Current(state)) } } @@ -136,29 +123,12 @@ mod tests { fn test_verify_recent_blockhash() { let blockhash = Hash::from([171; 32]); let versions = Versions::Legacy(Box::new(State::Uninitialized)); - for separate_domains in [false, true] { - assert_eq!( - versions.verify_recent_blockhash(&blockhash, separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&Hash::default(), separate_domains), - None - ); - } + assert_eq!(versions.verify_recent_blockhash(&blockhash), None); + assert_eq!(versions.verify_recent_blockhash(&Hash::default()), None); let versions = Versions::Current(Box::new(State::Uninitialized)); - for separate_domains in [false, true] { - assert_eq!( - versions.verify_recent_blockhash(&blockhash, separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&Hash::default(), separate_domains), - None - ); - } - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ false); + assert_eq!(versions.verify_recent_blockhash(&blockhash), None); + assert_eq!(versions.verify_recent_blockhash(&Hash::default()), None); + let durable_nonce = DurableNonce::from_blockhash(&blockhash); let data = Data { authority: Pubkey::new_unique(), durable_nonce, @@ -167,66 +137,30 @@ mod tests { }, }; let versions = Versions::Legacy(Box::new(State::Initialized(data.clone()))); - let separate_domains = false; - assert_eq!( - versions.verify_recent_blockhash(&Hash::default(), separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&blockhash, separate_domains), - Some(&data) - ); - assert_eq!( - versions.verify_recent_blockhash(&data.blockhash(), separate_domains), - Some(&data) - ); - assert_eq!( - versions.verify_recent_blockhash(durable_nonce.as_hash(), separate_domains), - Some(&data) - ); - let separate_domains = true; - assert_eq!( - versions.verify_recent_blockhash(&Hash::default(), separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&blockhash, separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&data.blockhash(), separate_domains), - None - ); + assert_eq!(versions.verify_recent_blockhash(&Hash::default()), None); + assert_eq!(versions.verify_recent_blockhash(&blockhash), None); + assert_eq!(versions.verify_recent_blockhash(&data.blockhash()), None); assert_eq!( - versions.verify_recent_blockhash(durable_nonce.as_hash(), separate_domains), + versions.verify_recent_blockhash(durable_nonce.as_hash()), None ); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(durable_nonce.as_hash()); assert_ne!(data.durable_nonce, durable_nonce); let data = Data { durable_nonce, ..data }; let versions = Versions::Current(Box::new(State::Initialized(data.clone()))); - for separate_domains in [false, true] { - assert_eq!( - versions.verify_recent_blockhash(&blockhash, separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&Hash::default(), separate_domains), - None - ); - assert_eq!( - versions.verify_recent_blockhash(&data.blockhash(), separate_domains), - Some(&data) - ); - assert_eq!( - versions.verify_recent_blockhash(durable_nonce.as_hash(), separate_domains), - Some(&data) - ); - } + 
assert_eq!(versions.verify_recent_blockhash(&blockhash), None); + assert_eq!(versions.verify_recent_blockhash(&Hash::default()), None); + assert_eq!( + versions.verify_recent_blockhash(&data.blockhash()), + Some(&data) + ); + assert_eq!( + versions.verify_recent_blockhash(durable_nonce.as_hash()), + Some(&data) + ); } #[test] @@ -236,8 +170,7 @@ mod tests { assert_eq!(versions.upgrade(), None); // Initialized let blockhash = Hash::from([171; 32]); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ false); + let durable_nonce = DurableNonce::from_blockhash(&blockhash); let data = Data { authority: Pubkey::new_unique(), durable_nonce, @@ -246,8 +179,7 @@ mod tests { }, }; let versions = Versions::Legacy(Box::new(State::Initialized(data.clone()))); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(durable_nonce.as_hash()); assert_ne!(data.durable_nonce, durable_nonce); let data = Data { durable_nonce, @@ -277,8 +209,7 @@ mod tests { ); // Initialized, Legacy let blockhash = Hash::from([171; 32]); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ false); + let durable_nonce = DurableNonce::from_blockhash(&blockhash); let data = Data { authority: Pubkey::new_unique(), durable_nonce, diff --git a/sdk/src/nonce_account.rs b/sdk/src/nonce_account.rs index 4b64b6d15daa0c..19cbca73b5ab5e 100644 --- a/sdk/src/nonce_account.rs +++ b/sdk/src/nonce_account.rs @@ -11,11 +11,11 @@ use { std::cell::RefCell, }; -pub fn create_account(lamports: u64, separate_domains: bool) -> RefCell { +pub fn create_account(lamports: u64) -> RefCell { RefCell::new( AccountSharedData::new_data_with_space( lamports, - &Versions::new(State::Uninitialized, separate_domains), + &Versions::new(State::Uninitialized), State::size(), &crate::system_program::id(), ) @@ -28,13 +28,12 @@ pub fn create_account(lamports: u64, separate_domains: bool) -> RefCell Option { (account.owner() == &crate::system_program::id()) .then(|| { StateMut::::state(account) .ok()? 
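
The net behavior these assertions pin down: `Legacy` versions never verify, `Current` plus `Uninitialized` never verifies, and `Current` plus `Initialized` verifies only against the stored domain-separated value. A compact usage sketch, assuming the imports of this file's test module and the `new_nonce_account` helper defined further down:

    let blockhash = Hash::from([171; 32]);
    let durable_nonce = DurableNonce::from_blockhash(&blockhash);
    let data = Data {
        durable_nonce,
        ..Data::default()
    };
    let account =
        new_nonce_account(Versions::Current(Box::new(State::Initialized(data.clone()))));

    // Only the domain-separated value verifies; the raw blockhash never does.
    assert_eq!(
        verify_nonce_account(&account, durable_nonce.as_hash()),
        Some(data)
    );
    assert_eq!(verify_nonce_account(&account, &blockhash), None);
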
- .verify_recent_blockhash(recent_blockhash, separate_domains) + .verify_recent_blockhash(recent_blockhash) .cloned() }) .flatten() @@ -65,17 +64,12 @@ mod tests { assert_ne!(program_id, crate::system_program::id()); let account = AccountSharedData::new_data_with_space( 42, - &Versions::new(State::Uninitialized, /*separate_domains:*/ true), + &Versions::new(State::Uninitialized), State::size(), &program_id, ) .expect("nonce_account"); - for separate_domains in [false, true] { - assert_eq!( - verify_nonce_account(&account, &Hash::default(), separate_domains), - None - ); - } + assert_eq!(verify_nonce_account(&account, &Hash::default()), None); } fn new_nonce_account(versions: Versions) -> AccountSharedData { @@ -92,30 +86,13 @@ mod tests { let blockhash = Hash::from([171; 32]); let versions = Versions::Legacy(Box::new(State::Uninitialized)); let account = new_nonce_account(versions); - for separate_domains in [false, true] { - assert_eq!( - verify_nonce_account(&account, &blockhash, separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, &Hash::default(), separate_domains), - None - ); - } + assert_eq!(verify_nonce_account(&account, &blockhash), None); + assert_eq!(verify_nonce_account(&account, &Hash::default()), None); let versions = Versions::Current(Box::new(State::Uninitialized)); let account = new_nonce_account(versions); - for separate_domains in [false, true] { - assert_eq!( - verify_nonce_account(&account, &blockhash, separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, &Hash::default(), separate_domains), - None - ); - } - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ false); + assert_eq!(verify_nonce_account(&account, &blockhash), None); + assert_eq!(verify_nonce_account(&account, &Hash::default()), None); + let durable_nonce = DurableNonce::from_blockhash(&blockhash); let data = Data { authority: Pubkey::new_unique(), durable_nonce, @@ -125,42 +102,14 @@ mod tests { }; let versions = Versions::Legacy(Box::new(State::Initialized(data.clone()))); let account = new_nonce_account(versions); - let separate_domains = false; + assert_eq!(verify_nonce_account(&account, &blockhash), None); + assert_eq!(verify_nonce_account(&account, &Hash::default()), None); + assert_eq!(verify_nonce_account(&account, &data.blockhash()), None); assert_eq!( - verify_nonce_account(&account, &blockhash, separate_domains), - Some(data.clone()) - ); - assert_eq!( - verify_nonce_account(&account, &Hash::default(), separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, &data.blockhash(), separate_domains), - Some(data.clone()) - ); - assert_eq!( - verify_nonce_account(&account, durable_nonce.as_hash(), separate_domains), - Some(data.clone()) - ); - let separate_domains = true; - assert_eq!( - verify_nonce_account(&account, &blockhash, separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, &Hash::default(), separate_domains), + verify_nonce_account(&account, durable_nonce.as_hash()), None ); - assert_eq!( - verify_nonce_account(&account, &data.blockhash(), separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, durable_nonce.as_hash(), separate_domains), - None - ); - let durable_nonce = - DurableNonce::from_blockhash(&blockhash, /*separate_domains:*/ true); + let durable_nonce = DurableNonce::from_blockhash(durable_nonce.as_hash()); assert_ne!(data.durable_nonce, durable_nonce); let data = Data { durable_nonce, @@ -168,23 +117,15 @@ mod tests { }; let versions 
= Versions::Current(Box::new(State::Initialized(data.clone()))); let account = new_nonce_account(versions); - for separate_domains in [false, true] { - assert_eq!( - verify_nonce_account(&account, &blockhash, separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, &Hash::default(), separate_domains), - None - ); - assert_eq!( - verify_nonce_account(&account, &data.blockhash(), separate_domains), - Some(data.clone()) - ); - assert_eq!( - verify_nonce_account(&account, durable_nonce.as_hash(), separate_domains), - Some(data.clone()) - ); - } + assert_eq!(verify_nonce_account(&account, &blockhash), None); + assert_eq!(verify_nonce_account(&account, &Hash::default()), None); + assert_eq!( + verify_nonce_account(&account, &data.blockhash()), + Some(data.clone()) + ); + assert_eq!( + verify_nonce_account(&account, durable_nonce.as_hash()), + Some(data) + ); } } diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 2c2648fc03f0c2..9195aeb0a7b4b7 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -605,11 +605,8 @@ impl SendTransactionService { .last_sent_time .map(|last| now.duration_since(last) >= retry_rate) .unwrap_or(false); - let verify_nonce_account = nonce_account::verify_nonce_account( - &nonce_account, - &durable_nonce, - working_bank.separate_nonce_from_blockhash(), - ); + let verify_nonce_account = + nonce_account::verify_nonce_account(&nonce_account, &durable_nonce); if verify_nonce_account.is_none() && signature_status.is_none() && expired { info!("Dropping expired durable-nonce transaction: {}", signature); result.expired += 1; @@ -1094,16 +1091,10 @@ mod test { .unwrap(); let nonce_address = Pubkey::new_unique(); - let durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); - let nonce_state = nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::new( - Pubkey::default(), - durable_nonce, - 42, - )), - true, // separate_domains - ); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = nonce::state::Versions::new(nonce::State::Initialized( + nonce::state::Data::new(Pubkey::default(), durable_nonce, 42), + )); let nonce_account = AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); root_bank.store_account(&nonce_address, &nonce_account); @@ -1351,16 +1342,10 @@ mod test { for mut transaction in transactions.values_mut() { transaction.last_sent_time = Some(Instant::now().sub(Duration::from_millis(4000))); } - let new_durable_nonce = - DurableNonce::from_blockhash(&Hash::new_unique(), /*separate_domains:*/ true); - let new_nonce_state = nonce::state::Versions::new( - nonce::State::Initialized(nonce::state::Data::new( - Pubkey::default(), - new_durable_nonce, - 42, - )), - true, // separate_domains - ); + let new_durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let new_nonce_state = nonce::state::Versions::new(nonce::State::Initialized( + nonce::state::Data::new(Pubkey::default(), new_durable_nonce, 42), + )); let nonce_account = AccountSharedData::new_data(43, &new_nonce_state, &system_program::id()).unwrap(); working_bank.store_account(&nonce_address, &nonce_account); From 71c7b4f9cbdb63e6fe9781fc1a8b92204a9465b4 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 6 Jul 2022 08:49:35 -0500 Subject: [PATCH 038/100] use append vec iter in 
construct_candidate_clean_keys (#26426) --- runtime/src/accounts_db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 685fca08875b86..f3dad072c3eb9a 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2422,9 +2422,9 @@ impl AccountsDb { let dirty_stores_len = dirty_stores.len(); let pubkeys = DashSet::new(); for (_slot, store) in dirty_stores { - for account in store.accounts.accounts(0) { + AppendVecAccountsIter::new(&store.accounts).for_each(|account| { pubkeys.insert(account.meta.pubkey); - } + }); } trace!( "dirty_stores.len: {} pubkeys.len: {}", From 5afe4d938d6305f3b27617c7fbb640b800deb2b8 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 6 Jul 2022 08:50:14 -0500 Subject: [PATCH 039/100] combine lookup in calc_delete_dependencies (#26429) --- runtime/src/accounts_db.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index f3dad072c3eb9a..d37c7711711509 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2240,11 +2240,10 @@ impl AccountsDb { while !pending_store_ids.is_empty() { let id = pending_store_ids.iter().next().cloned().unwrap(); pending_store_ids.remove(&id); - if already_counted.contains(&id) { + if !already_counted.insert(id) { continue; } store_counts.get_mut(&id).unwrap().0 += 1; - already_counted.insert(id); let affected_pubkeys = &store_counts.get(&id).unwrap().1; for key in affected_pubkeys { From f8dccd46021968647cb29ad72905db85d0352e4f Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 6 Jul 2022 17:06:03 +0200 Subject: [PATCH 040/100] Clean up `max_tx_account_locks` feature (#26440) Clean up max_tx_account_locks feature --- runtime/src/accounts.rs | 58 +++++++------------------------- runtime/src/bank.rs | 26 +++++--------- sdk/src/transaction/sanitized.rs | 9 ++--- 3 files changed, 23 insertions(+), 70 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index cbe07fd6a45ea7..8dac900c79cb63 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1108,10 +1108,9 @@ impl Accounts { pub fn lock_accounts<'a>( &self, txs: impl Iterator, - feature_set: &FeatureSet, ) -> Vec> { let tx_account_locks_results: Vec> = - txs.map(|tx| tx.get_account_locks(feature_set)).collect(); + txs.map(|tx| tx.get_account_locks()).collect(); self.lock_accounts_inner(tx_account_locks_results) } @@ -1121,12 +1120,11 @@ impl Accounts { &self, txs: impl Iterator, results: impl Iterator>, - feature_set: &FeatureSet, ) -> Vec> { let tx_account_locks_results: Vec> = txs .zip(results) .map(|(tx, result)| match result { - Ok(()) => tx.get_account_locks(feature_set), + Ok(()) => tx.get_account_locks(), Err(err) => Err(err.clone()), }) .collect(); @@ -2499,7 +2497,7 @@ mod tests { }; let tx = new_sanitized_tx(&[&keypair], message, Hash::default()); - let results = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled()); + let results = accounts.lock_accounts([tx].iter()); assert_eq!(results[0], Err(TransactionError::AccountLoadedTwice)); } @@ -2532,34 +2530,12 @@ mod tests { }; let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; - let results = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled()); - assert_eq!(results[0], Ok(())); - accounts.unlock_accounts(txs.iter(), &results); - } - - // Allow over MAX_TX_ACCOUNT_LOCKS before feature activation - { - let num_account_keys = 
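
On the `calc_delete_dependencies` change above: `HashSet::insert` returns `false` when the value was already present, so the separate `contains()` probe and the trailing `insert()` fold into a single hash lookup. A self-contained sketch of the idiom:

    use std::collections::HashSet;

    fn main() {
        let mut already_counted: HashSet<u32> = HashSet::new();
        let mut visits = 0;
        for id in [7, 8, 7] {
            // insert() reports whether `id` was newly added, replacing the
            // old contains()-then-insert() pair with one lookup.
            if !already_counted.insert(id) {
                continue; // the second 7 takes this branch
            }
            visits += 1;
        }
        assert_eq!(visits, 2);
    }
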
MAX_TX_ACCOUNT_LOCKS + 1; - let mut account_keys: Vec<_> = (0..num_account_keys) - .map(|_| Pubkey::new_unique()) - .collect(); - account_keys[0] = keypair.pubkey(); - let message = Message { - header: MessageHeader { - num_required_signatures: 1, - ..MessageHeader::default() - }, - account_keys, - ..Message::default() - }; - - let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; - let results = accounts.lock_accounts(txs.iter(), &FeatureSet::default()); + let results = accounts.lock_accounts(txs.iter()); assert_eq!(results[0], Ok(())); accounts.unlock_accounts(txs.iter(), &results); } - // Disallow over MAX_TX_ACCOUNT_LOCKS after feature activation + // Disallow over MAX_TX_ACCOUNT_LOCKS { let num_account_keys = MAX_TX_ACCOUNT_LOCKS + 1; let mut account_keys: Vec<_> = (0..num_account_keys) @@ -2576,7 +2552,7 @@ mod tests { }; let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; - let results = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled()); + let results = accounts.lock_accounts(txs.iter()); assert_eq!(results[0], Err(TransactionError::TooManyAccountLocks)); } } @@ -2615,7 +2591,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx.clone()].iter(), &FeatureSet::all_enabled()); + let results0 = accounts.lock_accounts([tx.clone()].iter()); assert!(results0[0].is_ok()); assert_eq!( @@ -2650,7 +2626,7 @@ mod tests { ); let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default()); let txs = vec![tx0, tx1]; - let results1 = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled()); + let results1 = accounts.lock_accounts(txs.iter()); assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable @@ -2677,7 +2653,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair1], message, Hash::default()); - let results2 = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled()); + let results2 = accounts.lock_accounts([tx].iter()); assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable // Check that read-only lock with zero references is deleted @@ -2746,9 +2722,7 @@ mod tests { let exit_clone = exit_clone.clone(); loop { let txs = vec![writable_tx.clone()]; - let results = accounts_clone - .clone() - .lock_accounts(txs.iter(), &FeatureSet::all_enabled()); + let results = accounts_clone.clone().lock_accounts(txs.iter()); for result in results.iter() { if result.is_ok() { counter_clone.clone().fetch_add(1, Ordering::SeqCst); @@ -2763,9 +2737,7 @@ mod tests { let counter_clone = counter; for _ in 0..5 { let txs = vec![readonly_tx.clone()]; - let results = accounts_arc - .clone() - .lock_accounts(txs.iter(), &FeatureSet::all_enabled()); + let results = accounts_arc.clone().lock_accounts(txs.iter()); if results[0].is_ok() { let counter_value = counter_clone.clone().load(Ordering::SeqCst); thread::sleep(time::Duration::from_millis(50)); @@ -2811,7 +2783,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled()); + let results0 = accounts.lock_accounts([tx].iter()); assert!(results0[0].is_ok()); // Instruction program-id account demoted to readonly @@ -2902,11 +2874,7 @@ mod tests { Ok(()), ]; - let results = accounts.lock_accounts_with_results( - txs.iter(), - 
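
With the feature cleaned up, `get_account_locks` takes no `FeatureSet` and both failure modes apply unconditionally. A hedged sketch of what a caller sees now; `tx` is an assumed `SanitizedTransaction`, and the field names on the `Ok` value follow `TransactionAccountLocks`:

    match tx.get_account_locks() {
        Err(TransactionError::AccountLoadedTwice) => { /* duplicate account keys */ }
        Err(TransactionError::TooManyAccountLocks) => { /* > MAX_TX_ACCOUNT_LOCKS keys */ }
        Err(_other) => { /* other sanitization failures */ }
        Ok(locks) => {
            // locks.writable / locks.readonly feed the account-lock scheduler.
            let _ = (locks.writable.len(), locks.readonly.len());
        }
    }
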
qos_results.iter(), - &FeatureSet::all_enabled(), - ); + let results = accounts.lock_accounts_with_results(txs.iter(), qos_results.iter()); assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times assert!(results[1].is_err()); // is not locked due to !qos_results[1].is_ok() diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 82c0149e930c89..d5265e6f212995 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3866,10 +3866,7 @@ impl Bank { .into_iter() .map(SanitizedTransaction::from_transaction_for_tests) .collect::>(); - let lock_results = self - .rc - .accounts - .lock_accounts(sanitized_txs.iter(), &FeatureSet::all_enabled()); + let lock_results = self.rc.accounts.lock_accounts(sanitized_txs.iter()); TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) } @@ -3889,10 +3886,7 @@ impl Bank { ) }) .collect::>>()?; - let lock_results = self - .rc - .accounts - .lock_accounts(sanitized_txs.iter(), &FeatureSet::all_enabled()); + let lock_results = self.rc.accounts.lock_accounts(sanitized_txs.iter()); Ok(TransactionBatch::new( lock_results, self, @@ -3905,10 +3899,7 @@ impl Bank { &'a self, txs: &'b [SanitizedTransaction], ) -> TransactionBatch<'a, 'b> { - let lock_results = self - .rc - .accounts - .lock_accounts(txs.iter(), &self.feature_set); + let lock_results = self.rc.accounts.lock_accounts(txs.iter()); TransactionBatch::new(lock_results, self, Cow::Borrowed(txs)) } @@ -3920,11 +3911,10 @@ impl Bank { transaction_results: impl Iterator>, ) -> TransactionBatch<'a, 'b> { // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit - let lock_results = self.rc.accounts.lock_accounts_with_results( - transactions.iter(), - transaction_results, - &self.feature_set, - ); + let lock_results = self + .rc + .accounts + .lock_accounts_with_results(transactions.iter(), transaction_results); TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) } @@ -3933,7 +3923,7 @@ impl Bank { &'a self, transaction: SanitizedTransaction, ) -> TransactionBatch<'a, '_> { - let lock_result = transaction.get_account_locks(&self.feature_set).map(|_| ()); + let lock_result = transaction.get_account_locks().map(|_| ()); let mut batch = TransactionBatch::new(vec![lock_result], self, Cow::Owned(vec![transaction])); batch.set_needs_unlock(false); diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 208dc03f7c841e..4c813e6b76a4fe 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -208,15 +208,10 @@ impl SanitizedTransaction { } /// Validate and return the account keys locked by this transaction - pub fn get_account_locks( - &self, - feature_set: &feature_set::FeatureSet, - ) -> Result { + pub fn get_account_locks(&self) -> Result { if self.message.has_duplicates() { Err(TransactionError::AccountLoadedTwice) - } else if feature_set.is_active(&feature_set::max_tx_account_locks::id()) - && self.message.account_keys().len() > MAX_TX_ACCOUNT_LOCKS - { + } else if self.message.account_keys().len() > MAX_TX_ACCOUNT_LOCKS { Err(TransactionError::TooManyAccountLocks) } else { Ok(self.get_account_locks_unchecked()) From ab164fc975089fcf2108c20cad9870518894680a Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 6 Jul 2022 11:22:26 -0500 Subject: [PATCH 041/100] simplify code (#26443) --- runtime/src/accounts_db.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/runtime/src/accounts_db.rs 
b/runtime/src/accounts_db.rs index d37c7711711509..feca3bd597a1f6 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2518,7 +2518,6 @@ impl AccountsDb { let total_keys_count = pubkeys.len(); let mut accounts_scan = Measure::start("accounts_scan"); let uncleaned_roots = self.accounts_index.clone_uncleaned_roots(); - let uncleaned_roots_len = self.accounts_index.uncleaned_roots_len(); let found_not_zero_accum = AtomicU64::new(0); let not_found_on_fork_accum = AtomicU64::new(0); let missing_accum = AtomicU64::new(0); @@ -2569,13 +2568,11 @@ impl AccountsDb { } else { found_not_zero += 1; } - let slot = *slot; - - if uncleaned_roots.contains(&slot) { + if uncleaned_roots.contains(slot) { // Assertion enforced by `accounts_index.get()`, the latest slot // will not be greater than the given `max_clean_root` if let Some(max_clean_root) = max_clean_root { - assert!(slot <= max_clean_root); + assert!(slot <= &max_clean_root); } purges_old_accounts.push(*pubkey); useless = false; @@ -2776,7 +2773,7 @@ impl AccountsDb { i64 ), ("scan_missing", missing_accum.load(Ordering::Relaxed), i64), - ("uncleaned_roots_len", uncleaned_roots_len, i64), + ("uncleaned_roots_len", uncleaned_roots.len(), i64), ( "clean_old_root_us", self.clean_accounts_stats From 17a99d98dd014d9a1ac690368e2f71cbd946047b Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 6 Jul 2022 11:32:45 -0500 Subject: [PATCH 042/100] =?UTF-8?q?Revert=20"avoid=20adding=20to=20'unclea?= =?UTF-8?q?ned=5Froots'=20when=20generating=20index=20and=20c=E2=80=A6=20(?= =?UTF-8?q?#26441)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert "avoid adding to 'uncleaned_roots' when generating index and caller passes accounts-db-skip-shrink (#25936)" This reverts commit e24cc537a4b577dcd001f008e3795c1040a7d8dd. --- runtime/src/accounts_db.rs | 8 +------- runtime/src/bank.rs | 18 +++--------------- runtime/src/serde_snapshot.rs | 6 ------ runtime/src/serde_snapshot/tests.rs | 4 ---- runtime/src/snapshot_utils.rs | 8 +------- 5 files changed, 5 insertions(+), 39 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index feca3bd597a1f6..3f4e7dc17b385c 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -8142,7 +8142,6 @@ impl AccountsDb { limit_load_slot_count_from_snapshot: Option, verify: bool, genesis_config: &GenesisConfig, - accounts_db_skip_shrink: bool, ) -> IndexGenerationInfo { let mut slots = self.storage.all_slots(); #[allow(clippy::stable_sort_primitive)] @@ -8345,12 +8344,7 @@ impl AccountsDb { if pass == 0 { // Need to add these last, otherwise older updates will be cleaned for slot in &slots { - // passing 'false' to 'add_root' causes all slots to be added to 'uncleaned_slots' - // passing 'true' to 'add_root' does NOT add all slots to 'uncleaned_slots' - // if we are skipping shrink, this potentially massive amount of work is never processed at startup, when all threads can be used. - // This causes failures such as oom during the first bg clean, which is expecting to work in 'normal' operating circumstances. - // So, don't add all slots to 'uncleaned_slots' here since by requesting to skip clean and shrink, caller is expecting the starting snapshot to be reasonable. 
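
The `scan` signature above is cut short, but the PATCH 043 message and the reworked call sites fix its shape: the callback now receives `(&'a Pubkey, Option<(&SlotList<T>, RefCount)>)`, folding the old `exists` flag into the `Option`. A hedged sketch of a caller in the new shape; `missing` and the surrounding counters are assumed context from the clean-keys call site earlier in this patch:

    // Old shape: FnMut(bool, &SlotList<T>, &'a Pubkey, RefCount) -> AccountsIndexScanResult
    // New shape: FnMut(&'a Pubkey, Option<(&SlotList<T>, RefCount)>) -> AccountsIndexScanResult
    accounts_index.scan(pubkeys.iter(), |pubkey, slots_refs| {
        if let Some((slot_list, ref_count)) = slots_refs {
            // Entry exists in the index: same information the old callback
            // received when its bool was true.
            let _ = (pubkey, slot_list.len(), ref_count);
        } else {
            // Entry missing from the index: the old `exists == false` case.
            missing += 1;
        }
        AccountsIndexScanResult::KeepInMemory
    });

Collapsing the flag into an `Option` also makes the previously representable but invalid state, an `exists == false` call paired with a populated slot list, unrepresentable at the type level.
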
- self.accounts_index.add_root(*slot, accounts_db_skip_shrink); + self.accounts_index.add_root(*slot, false); } self.set_storage_count_and_alive_bytes(storage_info, &mut timings); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d5265e6f212995..4930d16fe0d28a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6921,21 +6921,9 @@ impl Bank { last_full_snapshot_slot: Option, ) -> bool { let mut clean_time = Measure::start("clean"); - if !accounts_db_skip_shrink { - if self.slot() > 0 { - info!("cleaning.."); - self.clean_accounts(true, true, last_full_snapshot_slot); - } - } else { - // if we are skipping shrink, there should be no uncleaned_roots deferred to later - assert_eq!( - self.rc - .accounts - .accounts_db - .accounts_index - .uncleaned_roots_len(), - 0 - ); + if !accounts_db_skip_shrink && self.slot() > 0 { + info!("cleaning.."); + self.clean_accounts(true, true, last_full_snapshot_slot); } clean_time.stop(); diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index b66eb9c5e745e2..b2586307ec5c84 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -242,7 +242,6 @@ pub(crate) fn bank_from_streams( verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, - accounts_db_skip_shrink: bool, ) -> std::result::Result where R: Read, @@ -294,7 +293,6 @@ where verify_index, accounts_db_config, accounts_update_notifier, - accounts_db_skip_shrink, ) } @@ -475,7 +473,6 @@ fn reconstruct_bank_from_fields( verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, - accounts_db_skip_shrink: bool, ) -> Result where E: SerializableStorage + std::marker::Sync, @@ -492,7 +489,6 @@ where verify_index, accounts_db_config, accounts_update_notifier, - accounts_db_skip_shrink, )?; let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot); @@ -552,7 +548,6 @@ fn reconstruct_accountsdb_from_fields( verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, - accounts_db_skip_shrink: bool, ) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error> where E: SerializableStorage + std::marker::Sync, @@ -702,7 +697,6 @@ where limit_load_slot_count_from_snapshot, verify_index, genesis_config, - accounts_db_skip_shrink, ); accounts_db.maybe_add_filler_accounts( diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index ab80c166c3be98..39a7fe7fc4b6c1 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -90,7 +90,6 @@ where false, Some(crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, - false, ) .map(|(accounts_db, _)| accounts_db) } @@ -303,7 +302,6 @@ fn test_bank_serialize_style( false, Some(crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, - false, ) .unwrap(); dbank.status_cache = Arc::new(RwLock::new(status_cache)); @@ -419,7 +417,6 @@ fn test_extra_fields_eof() { false, Some(crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, - false, ) .unwrap(); @@ -541,7 +538,6 @@ fn test_blank_extra_fields() { false, Some(crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, - false, ) .unwrap(); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index e25840376833af..e8bbcb13addf55 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -821,9 +821,6 @@ pub fn bank_from_snapshot_archives( incremental_snapshot_archive_info, )?; - let accounts_db_skip_shrink = - accounts_db_skip_shrink || 
!full_snapshot_archive_info.is_remote(); - let parallel_divisions = std::cmp::min( PARALLEL_UNTAR_READERS_DEFAULT, std::cmp::max(1, num_cpus::get() / 4), @@ -882,7 +879,6 @@ pub fn bank_from_snapshot_archives( verify_index, accounts_db_config, accounts_update_notifier, - accounts_db_skip_shrink, )?; measure_rebuild.stop(); info!("{}", measure_rebuild); @@ -890,7 +886,7 @@ pub fn bank_from_snapshot_archives( let mut measure_verify = Measure::start("verify"); if !bank.verify_snapshot_bank( test_hash_calculation, - accounts_db_skip_shrink, + accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(), Some(full_snapshot_archive_info.slot()), ) && limit_load_slot_count_from_snapshot.is_none() { @@ -1579,7 +1575,6 @@ fn rebuild_bank_from_snapshots( verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, - accounts_db_skip_shrink: bool, ) -> Result { let (full_snapshot_version, full_snapshot_root_paths) = verify_unpacked_snapshots_dir_and_version( @@ -1628,7 +1623,6 @@ fn rebuild_bank_from_snapshots( verify_index, accounts_db_config, accounts_update_notifier, - accounts_db_skip_shrink, ), }?, ) From 44f499cff33c6feaba023f5ec7e1859b2fa7af0e Mon Sep 17 00:00:00 2001 From: Xiang Zhu Date: Wed, 6 Jul 2022 09:49:11 -0700 Subject: [PATCH 043/100] Refactor account index scan function parameters (#26428) * Refactor account index scan function parameters F: FnMut(bool, &SlotList, &'a Pubkey, RefCount) -> AccountsIndexScanResult, Refactor to take (&'a Pubkey, Option<(&SlotList, RefCount)) * Update comments based on review feedback * cargo fmt --all fixes --- runtime/src/accounts_db.rs | 18 ++++++++---------- runtime/src/accounts_index.rs | 14 +++++++------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 3f4e7dc17b385c..d525d263909c09 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2535,13 +2535,10 @@ impl AccountsDb { let mut not_found_on_fork = 0; let mut missing = 0; let mut useful = 0; - self.accounts_index.scan( - pubkeys.iter(), - |exists, slot_list, pubkey, ref_count| { + self.accounts_index + .scan(pubkeys.iter(), |pubkey, slots_refs| { let mut useless = true; - if !exists { - missing += 1; - } else { + if let Some((slot_list, ref_count)) = slots_refs { let index_in_slot_list = self.accounts_index.latest_slot( None, slot_list, @@ -2590,6 +2587,8 @@ impl AccountsDb { purges_old_accounts.push(*pubkey); } } + } else { + missing += 1; } if !useless { useful += 1; @@ -2599,8 +2598,7 @@ impl AccountsDb { } else { AccountsIndexScanResult::KeepInMemory } - }, - ); + }); found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed); not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed); missing_accum.fetch_add(missing, Ordering::Relaxed); @@ -3035,9 +3033,9 @@ impl AccountsDb { accounts[..std::cmp::min(accounts.len(), count)] .iter() .map(|(key, _)| key), - |exists, slot_list, pubkey, _ref_count| { + |pubkey, slots_refs| { let mut result = AccountsIndexScanResult::None; - if exists { + if let Some((slot_list, _ref_count)) = slots_refs { let pair = &accounts[index]; let stored_account = &pair.1; let is_alive = slot_list.iter().any(|(_slot, acct_info)| { diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index cc6650af52d5d0..3a61585d340fdd 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -1312,14 +1312,14 @@ impl AccountsIndex { pub(crate) fn scan<'a, F, I>(&'a self, pubkeys: I, mut callback: 
F) where // params: - // exists: false if not in index at all - // index in slot list where best slot was found or None if nothing found by root criteria // pubkey looked up - // refcount of entry in index - F: FnMut(bool, &SlotList<T>, &'a Pubkey, RefCount) -> AccountsIndexScanResult, + // slots_refs is Option<(slot_list, ref_count)> + // None if 'pubkey' is not in accounts index. + // slot_list: comes from accounts index for 'pubkey' + // ref_count: refcount of entry in index + F: FnMut(&'a Pubkey, Option<(&SlotList<T>, RefCount)>) -> AccountsIndexScanResult, I: IntoIterator<Item = &'a Pubkey>, { - let empty_slot_list = vec![]; let mut lock = None; let mut last_bin = self.bins(); // too big, won't match pubkeys.into_iter().for_each(|pubkey| { @@ -1334,7 +1334,7 @@ impl<T: IndexValue> AccountsIndex<T> { match entry { Some(locked_entry) => { let slot_list = &locked_entry.slot_list.read().unwrap(); - let result = callback(true, slot_list, pubkey, locked_entry.ref_count()); + let result = callback(pubkey, Some((slot_list, locked_entry.ref_count()))); cache = match result { AccountsIndexScanResult::Unref => { locked_entry.add_un_ref(false); @@ -1345,7 +1345,7 @@ impl<T: IndexValue> AccountsIndex<T> { }; } None => { - callback(false, &empty_slot_list, pubkey, RefCount::MAX); + callback(pubkey, None); } } (cache, ()) From 611ac3371874f1df8d88c8460d55b36a65661c46 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 6 Jul 2022 19:17:31 +0200 Subject: [PATCH 044/100] Clean up `nonce_must_be_authorized` feature (#26445) Clean up nonce_must_be_authorized feature --- runtime/src/bank.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4930d16fe0d28a..0e536f72427539 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4140,16 +4140,11 @@ impl Bank { let nonce_data = nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?; - if self - .feature_set - .is_active(&feature_set::nonce_must_be_authorized::ID) - { - let nonce_is_authorized = message - .get_ix_signers(NONCED_TX_MARKER_IX_INDEX as usize) - .any(|signer| signer == &nonce_data.authority); - if !nonce_is_authorized { - return None; - } + let nonce_is_authorized = message + .get_ix_signers(NONCED_TX_MARKER_IX_INDEX as usize) + .any(|signer| signer == &nonce_data.authority); + if !nonce_is_authorized { + return None; + } Some((*nonce_address, nonce_account)) From 06ebfa1eb246cc20324aa885c3635cd42b2ad60b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 6 Jul 2022 19:27:42 +0200 Subject: [PATCH 045/100] Replicates `AccountsDataMeter` in `TransactionContext` (#26438) Replicates AccountsDataMeter in TransactionContext.
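The mechanism being replicated is a transaction-wide budget for account-data growth: a fixed byte limit plus a signed running delta of every account resize, from which the remaining allowance is derived. A minimal, self-contained sketch of that bookkeeping (`ResizeTracker` and its method names are illustrative only, not the actual `TransactionContext` API):

use std::cell::RefCell;

// Illustrative sketch of transaction-wide resize accounting; not the solana-sdk API.
struct ResizeTracker {
    limit: u64,          // total bytes accounts may grow by in one transaction
    delta: RefCell<i64>, // signed running sum of (new_len - old_len)
}

impl ResizeTracker {
    fn record_resize(&self, old_len: usize, new_len: usize) {
        let mut delta = self.delta.borrow_mut();
        *delta = delta.saturating_add((new_len as i64).saturating_sub(old_len as i64));
    }

    // Mirrors the shape of get_total_resize_remaining() introduced below.
    fn remaining(&self) -> u64 {
        let delta = *self.delta.borrow();
        if delta >= 0 {
            self.limit.saturating_sub(delta as u64)
        } else {
            self.limit.saturating_add(delta.saturating_neg() as u64)
        }
    }
}

fn main() {
    let tracker = ResizeTracker { limit: 100, delta: RefCell::new(0) };
    tracker.record_resize(0, 40);  // grow one account by 40 bytes
    tracker.record_resize(20, 10); // shrink another by 10 bytes
    assert_eq!(tracker.remaining(), 70); // 100 - (40 - 10)
}

Keeping the delta signed means that shrinking one account refunds headroom that a later instruction may spend growing another.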
--- cli/src/program.rs | 2 +- program-runtime/src/invoke_context.rs | 54 +++++++++++++++----- programs/bpf_loader/benches/serialization.rs | 2 +- programs/bpf_loader/src/serialization.rs | 2 +- programs/bpf_loader/src/syscalls.rs | 19 +++---- programs/stake/src/stake_state.rs | 9 ++-- programs/vote/benches/process_vote.rs | 3 +- rbpf-cli/src/main.rs | 3 +- runtime/src/bank.rs | 6 ++- runtime/src/message_processor.rs | 6 +-- runtime/src/nonce_keyed_account.rs | 2 +- runtime/src/system_instruction_processor.rs | 2 +- sdk/src/transaction_context.rs | 30 ++++++++++- 13 files changed, 99 insertions(+), 41 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 9089ba412ace94..531e2352473590 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -2074,7 +2074,7 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box InvokeContext<'a> { feature_set: Arc, blockhash: Hash, lamports_per_signature: u64, - initial_accounts_data_len: u64, + prev_accounts_data_len: u64, ) -> Self { Self { transaction_context, @@ -265,7 +265,7 @@ impl<'a> InvokeContext<'a> { current_compute_budget: compute_budget, compute_budget, compute_meter: ComputeMeter::new_ref(compute_budget.compute_unit_limit), - accounts_data_meter: AccountsDataMeter::new(initial_accounts_data_len), + accounts_data_meter: AccountsDataMeter::new(prev_accounts_data_len), executors, feature_set, timings: ExecuteDetailsTimings::default(), @@ -1152,6 +1152,7 @@ pub fn with_mock_invoke_context R>( preparation.transaction_accounts, ComputeBudget::default().max_invoke_depth.saturating_add(1), 1, + MAX_ACCOUNTS_DATA_LEN, ); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context @@ -1182,6 +1183,7 @@ pub fn mock_process_instruction( preparation.transaction_accounts, ComputeBudget::default().max_invoke_depth.saturating_add(1), 1, + MAX_ACCOUNTS_DATA_LEN, ); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); if let Some(sysvar_cache) = sysvar_cache_override { @@ -1226,7 +1228,7 @@ mod tests { desired_result: Result<(), InstructionError>, }, Resize { - new_len: usize, + new_len: u64, }, } @@ -1312,7 +1314,7 @@ mod tests { } MockInstruction::Resize { new_len } => instruction_context .try_borrow_instruction_account(transaction_context, 0)? 
- .set_data(&vec![0; new_len]) + .set_data(&vec![0; new_len as usize]) .unwrap(), } } else { @@ -1355,7 +1357,7 @@ mod tests { }); } let mut transaction_context = - TransactionContext::new(accounts, ComputeBudget::default().max_invoke_depth, 1); + TransactionContext::new(accounts, ComputeBudget::default().max_invoke_depth, 1, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); // Check call depth increases and has a limit @@ -1463,7 +1465,7 @@ mod tests { let accounts = vec![(solana_sdk::pubkey::new_rand(), AccountSharedData::default())]; let instruction_accounts = vec![]; let program_indices = vec![0]; - let mut transaction_context = TransactionContext::new(accounts, 1, 1); + let mut transaction_context = TransactionContext::new(accounts, 1, 1, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push(&instruction_accounts, &program_indices, &[]) @@ -1508,7 +1510,7 @@ mod tests { is_writable: instruction_account_index < 2, }) .collect::>(); - let mut transaction_context = TransactionContext::new(accounts, 2, 8); + let mut transaction_context = TransactionContext::new(accounts, 2, 8, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, builtin_programs); @@ -1640,7 +1642,7 @@ mod tests { fn test_invoke_context_compute_budget() { let accounts = vec![(solana_sdk::pubkey::new_rand(), AccountSharedData::default())]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3); + let mut transaction_context = TransactionContext::new(accounts, 1, 3, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context.compute_budget = ComputeBudget::new(compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64); @@ -1658,8 +1660,9 @@ mod tests { solana_logger::setup(); let program_key = Pubkey::new_unique(); - let user_account_data_len = 123; - let user_account = AccountSharedData::new(100, user_account_data_len, &program_key); + let user_account_data_len = 123u64; + let user_account = + AccountSharedData::new(100, user_account_data_len as usize, &program_key); let dummy_account = AccountSharedData::new(10, 0, &program_key); let mut program_account = AccountSharedData::new(500, 500, &native_loader::id()); program_account.set_executable(true); @@ -1674,7 +1677,8 @@ mod tests { process_instruction: mock_process_instruction, }]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3); + let mut transaction_context = + TransactionContext::new(accounts, 1, 3, user_account_data_len * 2); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &builtin_programs); @@ -1684,7 +1688,13 @@ mod tests { invoke_context .accounts_data_meter .set_maximum(user_account_data_len as u64 * 3); - let remaining_account_data_len = invoke_context.accounts_data_meter.remaining() as usize; + let remaining_account_data_len = invoke_context + .transaction_context + .get_total_resize_remaining(); + assert_eq!( + remaining_account_data_len, + invoke_context.accounts_data_meter.remaining(), + ); let instruction_accounts = [ InstructionAccount { @@ -1719,6 +1729,12 @@ mod tests { assert!(result.is_ok()); assert_eq!(invoke_context.accounts_data_meter.remaining(), 0); + assert_eq!( + invoke_context + .transaction_context + .get_total_resize_remaining(), + 0 + ); } // Test 2: Resize the account to *the same size*, so not consuming any additional size; this must succeed @@ -1737,6 +1753,12 @@ mod tests { assert!(result.is_ok()); 
assert_eq!(invoke_context.accounts_data_meter.remaining(), 0); + assert_eq!( + invoke_context + .transaction_context + .get_total_resize_remaining(), + 0 + ); } // Test 3: Resize the account to exceed the budget; this must fail @@ -1759,6 +1781,12 @@ mod tests { Err(solana_sdk::instruction::InstructionError::MaxAccountsDataSizeExceeded) )); assert_eq!(invoke_context.accounts_data_meter.remaining(), 0); + assert_eq!( + invoke_context + .transaction_context + .get_total_resize_remaining(), + 0 + ); } } } diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index 2be8073bf06a19..c119d4266c8d51 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -101,7 +101,7 @@ fn create_inputs() -> TransactionContext { }, ) .collect::>(); - let mut transaction_context = TransactionContext::new(transaction_accounts, 1, 1); + let mut transaction_context = TransactionContext::new(transaction_accounts, 1, 1, 0); let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; transaction_context .push(&[0], &instruction_accounts, &instruction_data, true) diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 6f3f0add6c2285..8b32e8c63f26ae 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -454,7 +454,7 @@ mod tests { &program_indices, ); let mut transaction_context = - TransactionContext::new(preparation.transaction_accounts, 1, 1); + TransactionContext::new(preparation.transaction_accounts, 1, 1, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push( diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 3f69281b8c081f..d67c5cc3ed8356 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -3366,17 +3366,14 @@ mod tests { $program_key:ident, $loader_key:expr $(,)?) 
=> { let $program_key = Pubkey::new_unique(); - let mut $transaction_context = TransactionContext::new( - vec![ - ( - $loader_key, - AccountSharedData::new(0, 0, &native_loader::id()), - ), - ($program_key, AccountSharedData::new(0, 0, &$loader_key)), - ], - 1, - 1, - ); + let transaction_accounts = vec![ + ( + $loader_key, + AccountSharedData::new(0, 0, &native_loader::id()), + ), + ($program_key, AccountSharedData::new(0, 0, &$loader_key)), + ]; + let mut $transaction_context = TransactionContext::new(transaction_accounts, 1, 1, 0); let mut $invoke_context = InvokeContext::new_mock(&mut $transaction_context, &[]); $invoke_context.push(&[], &[0, 1], &[]).unwrap(); }; diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 539dc83152417f..f45916935d6c04 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -2785,6 +2785,7 @@ mod tests { )], 1, 1, + 0, ) } @@ -2894,7 +2895,7 @@ mod tests { #[test] fn test_things_can_merge() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let good_stake = Stake { credits_observed: 4242, @@ -2993,7 +2994,7 @@ mod tests { #[test] fn test_metas_can_merge() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); // Identical Metas can merge assert!(MergeKind::metas_can_merge( @@ -3140,7 +3141,7 @@ mod tests { #[test] fn test_merge_kind_get_if_mergeable() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let authority_pubkey = Pubkey::new_unique(); let initial_lamports = 4242424242; @@ -3379,7 +3380,7 @@ mod tests { #[test] fn test_merge_kind_merge() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let clock = Clock::default(); let lamports = 424242; diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index f939c6c26f0abb..43c4a019f9ea79 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -107,7 +107,8 @@ fn bench_process_vote_instruction( instruction_data: Vec, ) { bencher.iter(|| { - let mut transaction_context = TransactionContext::new(transaction_accounts.clone(), 1, 1); + let mut transaction_context = + TransactionContext::new(transaction_accounts.clone(), 1, 1, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push(&instruction_accounts, &[0], &instruction_data) diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 13601e549c0cd2..0dc51d6deb891c 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -216,7 +216,8 @@ native machine code before execting it in the virtual machine.", let program_indices = [0, 1]; let preparation = prepare_mock_invoke_context(transaction_accounts, instruction_accounts, &program_indices); - let mut transaction_context = TransactionContext::new(preparation.transaction_accounts, 1, 1); + let mut 
transaction_context = + TransactionContext::new(preparation.transaction_accounts, 1, 1, 0); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0e536f72427539..5b4cbe0686c95a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4283,12 +4283,14 @@ impl Bank { get_executors_time.as_us() ); + let prev_accounts_data_len = self.load_accounts_data_size(); let mut transaction_accounts = Vec::new(); std::mem::swap(&mut loaded_transaction.accounts, &mut transaction_accounts); let mut transaction_context = TransactionContext::new( transaction_accounts, compute_budget.max_invoke_depth.saturating_add(1), tx.message().instructions().len(), + MAX_ACCOUNTS_DATA_LEN.saturating_sub(prev_accounts_data_len), ); let pre_account_state_info = @@ -4319,7 +4321,7 @@ impl Bank { &*self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, - self.load_accounts_data_size(), + prev_accounts_data_len, &mut executed_units, ); process_message_time.stop(); @@ -4376,6 +4378,7 @@ impl Bank { accounts, instruction_trace, mut return_data, + .. } = transaction_context.into(); loaded_transaction.accounts = accounts; @@ -18574,6 +18577,7 @@ pub(crate) mod tests { loaded_txs[0].0.as_ref().unwrap().accounts.clone(), compute_budget.max_invoke_depth.saturating_add(1), number_of_instructions_at_transaction_level, + 0, ); assert_eq!( diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index d3e760de28bf11..359ad628eda131 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -282,7 +282,7 @@ mod tests { create_loadable_account_for_test("mock_system_program"), ), ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3); + let mut transaction_context = TransactionContext::new(accounts, 1, 3, 0); let program_indices = vec![vec![2]]; let executors = Rc::new(RefCell::new(Executors::default())); let account_keys = transaction_context.get_keys_of_accounts().to_vec(); @@ -502,7 +502,7 @@ mod tests { create_loadable_account_for_test("mock_system_program"), ), ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3); + let mut transaction_context = TransactionContext::new(accounts, 1, 3, 0); let program_indices = vec![vec![2]]; let executors = Rc::new(RefCell::new(Executors::default())); let account_metas = vec![ @@ -661,7 +661,7 @@ mod tests { (secp256k1_program::id(), secp256k1_account), (mock_program_id, mock_program_account), ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 2); + let mut transaction_context = TransactionContext::new(accounts, 1, 2, 0); let message = SanitizedMessage::Legacy(Message::new( &[ diff --git a/runtime/src/nonce_keyed_account.rs b/runtime/src/nonce_keyed_account.rs index 2e0ae14893d6de..f6f02225372d0d 100644 --- a/runtime/src/nonce_keyed_account.rs +++ b/runtime/src/nonce_keyed_account.rs @@ -344,7 +344,7 @@ mod test { is_writable: true, }, ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 2); + let mut transaction_context = TransactionContext::new(accounts, 1, 2, 0); let mut $invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); }; } diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 2eb7621cf59164..a4082c1ccbe349 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -786,7 +786,7 @@ mod tests { #[test] fn 
test_address_create_with_seed_mismatch() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let from = Pubkey::new_unique(); let seed = "dull boy"; diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 4dfd10ef9aee57..d073c918988396 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -48,6 +48,8 @@ pub struct TransactionContext { number_of_instructions_at_transaction_level: usize, instruction_trace: InstructionTrace, return_data: TransactionReturnData, + total_resize_limit: u64, + total_resize_delta: RefCell<i64>, } impl TransactionContext { @@ -56,6 +58,7 @@ impl TransactionContext { transaction_accounts: Vec<TransactionAccount>, instruction_context_capacity: usize, number_of_instructions_at_transaction_level: usize, + total_resize_limit: u64, ) -> Self { let (account_keys, accounts): (Vec<Pubkey>, Vec<RefCell<AccountSharedData>>) = transaction_accounts @@ -70,6 +73,8 @@ impl TransactionContext { number_of_instructions_at_transaction_level, instruction_trace: Vec::with_capacity(number_of_instructions_at_transaction_level), return_data: TransactionReturnData::default(), + total_resize_limit, + total_resize_delta: RefCell::new(0), } } @@ -249,6 +254,18 @@ impl TransactionContext { pub fn get_instruction_trace(&self) -> &InstructionTrace { &self.instruction_trace } + + /// Returns (in bytes) how much data can still be allocated + pub fn get_total_resize_remaining(&self) -> u64 { + let total_resize_delta = *self.total_resize_delta.borrow(); + if total_resize_delta >= 0 { + self.total_resize_limit + .saturating_sub(total_resize_delta as u64) + } else { + self.total_resize_limit + .saturating_add(total_resize_delta.saturating_neg() as u64) + } + } } /// Return data at the end of a transaction @@ -586,6 +603,9 @@ impl<'a> BorrowedAccount<'a> { if data.len() == self.account.data().len() { self.account.data_as_mut_slice().copy_from_slice(data); } else { + let mut total_resize_delta = self.transaction_context.total_resize_delta.borrow_mut(); + *total_resize_delta = total_resize_delta + .saturating_add((data.len() as i64).saturating_sub(self.get_data().len() as i64)); self.account.set_data_from_slice(data); } Ok(()) @@ -594,8 +614,11 @@ impl<'a> BorrowedAccount<'a> { /// Resizes the account data (transaction wide) /// /// Fills it with zeros at the end if is extended or truncates at the end otherwise.
- pub fn set_data_length(&mut self, new_len: usize) -> Result<(), InstructionError> { - self.account.data_mut().resize(new_len, 0); + pub fn set_data_length(&mut self, new_length: usize) -> Result<(), InstructionError> { + let mut total_resize_delta = self.transaction_context.total_resize_delta.borrow_mut(); + *total_resize_delta = total_resize_delta + .saturating_add((new_length as i64).saturating_sub(self.get_data().len() as i64)); + self.account.data_mut().resize(new_length, 0); Ok(()) } @@ -666,7 +689,9 @@ pub struct ExecutionRecord { pub accounts: Vec, pub instruction_trace: InstructionTrace, pub return_data: TransactionReturnData, + pub total_resize_delta: i64, } + /// Used by the bank in the runtime to write back the processed accounts and recorded instructions impl From for ExecutionRecord { fn from(context: TransactionContext) -> Self { @@ -681,6 +706,7 @@ impl From for ExecutionRecord { .collect(), instruction_trace: context.instruction_trace, return_data: context.return_data, + total_resize_delta: RefCell::into_inner(context.total_resize_delta), } } } From ff1e6fcbeda89f0552e7e7276c43dc3a20e783c4 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 6 Jul 2022 19:36:33 +0200 Subject: [PATCH 046/100] Clean up `nonce_must_be_advanceable` feature (#26446) Clean up nonce_must_be_advanceable feature --- runtime/src/bank.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5b4cbe0686c95a..2d8cc99233e562 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4159,11 +4159,8 @@ impl Bank { let durable_nonces_enabled = enable_durable_nonce || self.slot() <= 135986379 || self.cluster_type() != ClusterType::MainnetBeta; - let nonce_must_be_advanceable = self - .feature_set - .is_active(&feature_set::nonce_must_be_advanceable::ID); let nonce_is_advanceable = tx.message().recent_blockhash() != next_durable_nonce.as_hash(); - (durable_nonces_enabled && (nonce_is_advanceable || !nonce_must_be_advanceable)) + (durable_nonces_enabled && nonce_is_advanceable) .then(|| self.check_message_for_nonce(tx.message())) .flatten() } From f4718be22f0c6cbe7553db9e28c97f830eec3d08 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 6 Jul 2022 19:59:34 +0200 Subject: [PATCH 047/100] Clean up `leave_nonce_on_success` feature (#26447) Clean up leave_nonce_on_success feature --- runtime/src/accounts.rs | 15 +-------------- runtime/src/bank.rs | 6 ------ 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 8dac900c79cb63..3ecca73ea0575a 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1191,7 +1191,6 @@ impl Accounts { rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, - leave_nonce_on_success: bool, ) { let (accounts_to_store, txn_signatures) = self.collect_accounts_to_store( txs, @@ -1200,7 +1199,6 @@ impl Accounts { rent_collector, durable_nonce, lamports_per_signature, - leave_nonce_on_success, ); self.accounts_db .store_cached((slot, &accounts_to_store[..]), Some(&txn_signatures)); @@ -1227,7 +1225,6 @@ impl Accounts { rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, - leave_nonce_on_success: bool, ) -> ( Vec<(&'a Pubkey, &'a AccountSharedData)>, Vec>, @@ -1247,17 +1244,10 @@ impl Accounts { }; let maybe_nonce = match (execution_status, &*nonce) { - (Ok(()), Some(nonce)) => { - if leave_nonce_on_success { - None - } else { - Some((nonce, false /* rollback */)) - 
} - } + (Ok(_), _) => None, // Success, don't do any additional nonce processing (Err(_), Some(nonce)) => { Some((nonce, true /* rollback */)) } - (Ok(_), None) => None, // Success, don't do any additional nonce processing (Err(_), None) => { // Fees for failed transactions which don't use durable nonces are // deducted in Bank::filter_program_errors_and_collect_fee @@ -3005,7 +2995,6 @@ mod tests { &rent_collector, &DurableNonce::default(), 0, - true, // leave_nonce_on_success ); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts @@ -3489,7 +3478,6 @@ mod tests { &rent_collector, &durable_nonce, 0, - true, // leave_nonce_on_success ); assert_eq!(collected_accounts.len(), 2); assert_eq!( @@ -3604,7 +3592,6 @@ mod tests { &rent_collector, &durable_nonce, 0, - true, // leave_nonce_on_success ); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2d8cc99233e562..7e22e3bb77631a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4929,7 +4929,6 @@ impl Bank { &self.rent_collector, &durable_nonce, lamports_per_signature, - self.leave_nonce_on_success(), ); let rent_debits = self.collect_rent(&execution_results, loaded_txs); @@ -7267,11 +7266,6 @@ impl Bank { .is_active(&feature_set::credits_auto_rewind::id()) } - pub fn leave_nonce_on_success(&self) -> bool { - self.feature_set - .is_active(&feature_set::leave_nonce_on_success::id()) - } - pub fn send_to_tpu_vote_port_enabled(&self) -> bool { self.feature_set .is_active(&feature_set::send_to_tpu_vote_port::id()) From 83a73532b0f53ededc466bd84a31e7951d716592 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Jul 2022 13:44:59 -0600 Subject: [PATCH 048/100] chore: bump indexmap from 1.8.1 to 1.9.1 (#26435) * chore: bump indexmap from 1.8.1 to 1.9.1 Bumps [indexmap](https://github.com/bluss/indexmap) from 1.8.1 to 1.9.1. - [Release notes](https://github.com/bluss/indexmap/releases) - [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/bluss/indexmap/compare/1.8.1...1.9.1) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 16 +++++++++++----- client/Cargo.toml | 2 +- gossip/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 16 +++++++++++----- storage-bigtable/build-proto/Cargo.lock | 8 ++++---- streamer/Cargo.toml | 2 +- tokens/Cargo.toml | 2 +- 7 files changed, 30 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2dcd70b2e017ac..35a6fcb1df13c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -438,7 +438,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ "borsh-derive", - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -1793,6 +1793,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" + [[package]] name = "headers" version = "0.3.7" @@ -2073,12 +2079,12 @@ checksum = "5a9d968042a4902e08810946fc7cd5851eb75e80301342305af755ca06cb82ce" [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.1", "rayon", ] @@ -2453,7 +2459,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] diff --git a/client/Cargo.toml b/client/Cargo.toml index 44eed684a0f8ac..1ad9bbfa1c62d6 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -21,7 +21,7 @@ crossbeam-channel = "0.5" enum_dispatch = "0.3.8" futures = "0.3" futures-util = "0.3.21" -indexmap = "1.8.1" +indexmap = "1.9.1" indicatif = "0.16.2" itertools = "0.10.2" jsonrpc-core = "18.0.0" diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index dbae09b54c6e62..f72abef55499a1 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -15,7 +15,7 @@ bv = { version = "0.11.1", features = ["serde"] } clap = "2.33.1" crossbeam-channel = "0.5" flate2 = "1.0" -indexmap = { version = "1.8", features = ["rayon"] } +indexmap = { version = "1.9", features = ["rayon"] } itertools = "0.10.3" log = "0.4.17" lru = "0.7.7" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 35bc3a35a173e5..7e3ebceede71c9 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -397,7 +397,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ "borsh-derive", - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -1557,6 +1557,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" + [[package]] name = "headers" version = "0.3.7" @@ -1820,12 +1826,12 @@ checksum = "5a9d968042a4902e08810946fc7cd5851eb75e80301342305af755ca06cb82ce" [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.1", "rayon", ] @@ -2194,7 +2200,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index f13dd33e3fcff0..8d5bf7e4709a07 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -70,9 +70,9 @@ checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" [[package]] name = "heck" @@ -82,9 +82,9 @@ checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", "hashbrown", diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 7a89d077315eda..dd12051ac39776 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" crossbeam-channel = "0.5" futures-util = "0.3.21" histogram = "0.6.9" -indexmap = "1.8.1" +indexmap = "1.9.1" itertools = "0.10.3" libc = "0.2.126" log = "0.4.17" diff --git a/tokens/Cargo.toml b/tokens/Cargo.toml index 5ba574561d9591..5173c9bb2aa4a9 100644 --- a/tokens/Cargo.toml +++ b/tokens/Cargo.toml @@ -15,7 +15,7 @@ clap = "2.33.0" console = "0.15.0" csv = "1.1.6" ctrlc = { version = "3.2.2", features = ["termination"] } -indexmap = "1.8.1" +indexmap = "1.9.1" indicatif = "0.16.2" pickledb = "0.4.1" serde = { version = "1.0", features = ["derive"] } From d7201a8d1aa6acecc64c791c0699f6fb99139fc6 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 6 Jul 2022 20:01:16 +0000 Subject: [PATCH 049/100] names fields in RentResullt::CollectRent enum variant (#26449) Avoiding ambiguous raw tuple: CollectRent((Epoch, u64)) Using named fields instead: CollectRent { new_rent_epoch: Epoch, rent_due: u64, }, --- runtime/src/accounts.rs | 4 +- runtime/src/accounts_db.rs | 7 +- runtime/src/bank.rs | 4 +- runtime/src/expected_rent_collection.rs | 36 +++++---- runtime/src/rent_collector.rs | 101 +++++++++++------------- 5 files changed, 76 insertions(+), 76 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 3ecca73ea0575a..2ed469df9cc49a 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1703,9 +1703,9 @@ mod tests { let mut error_counters = TransactionErrorMetrics::default(); let rent_collector = RentCollector::new( 0, - &EpochSchedule::default(), + EpochSchedule::default(), 500_000.0, - &Rent { + Rent { lamports_per_byte_year: 42, ..Rent::default() }, diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index d525d263909c09..e55a977e97cfa9 100644 --- 
a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6231,7 +6231,8 @@ impl AccountsDb { ) } - pub fn update_accounts_hash_test(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) { + #[cfg(test)] + fn update_accounts_hash_test(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) { self.update_accounts_hash_with_index_option( true, true, @@ -8151,9 +8152,9 @@ impl AccountsDb { let schedule = genesis_config.epoch_schedule; let rent_collector = RentCollector::new( schedule.get_epoch(max_slot), - &schedule, + schedule, genesis_config.slots_per_year(), - &genesis_config.rent, + genesis_config.rent, ); let accounts_data_len = AtomicU64::new(0); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7e22e3bb77631a..2f2f304c1731b1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3536,9 +3536,9 @@ impl Bank { self.rent_collector = RentCollector::new( self.epoch, - self.epoch_schedule(), + *self.epoch_schedule(), self.slots_per_year, - &genesis_config.rent, + genesis_config.rent, ); // Add additional builtin programs specified in the genesis config diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index 537caf8c81bba6..92ad9745118295 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -266,7 +266,7 @@ impl SlotInfoInEpoch { impl ExpectedRentCollection { /// 'account' is being loaded from 'storage_slot' in 'bank_slot' /// adjusts 'account.rent_epoch' if we skipped the last rewrite on this account - pub fn maybe_update_rent_epoch_on_load( + pub(crate) fn maybe_update_rent_epoch_on_load( account: &mut AccountSharedData, storage_slot: &SlotInfoInEpoch, bank_slot: &SlotInfoInEpoch, @@ -302,14 +302,17 @@ impl ExpectedRentCollection { pubkey: &Pubkey, rewrites_skipped_this_slot: &Rewrites, ) -> Option { - if let RentResult::CollectRent((next_epoch, rent_due)) = - rent_collector.calculate_rent_result(pubkey, account, None) + let next_epoch = match rent_collector.calculate_rent_result(pubkey, account, None) { + RentResult::LeaveAloneNoRent => return None, + RentResult::CollectRent { + new_rent_epoch, + rent_due: 0, + } => new_rent_epoch, + // Rent is due on this account in this epoch, + // so we did not skip a rewrite. + RentResult::CollectRent { .. } => return None, + }; { - if rent_due != 0 { - // rent is due on this account in this epoch, so we did not skip a rewrite - return None; - } - // grab epoch infno for bank slot and storage slot let bank_info = bank_slot.get_epoch_info(epoch_schedule); let (current_epoch, partition_from_current_slot) = @@ -533,7 +536,10 @@ impl ExpectedRentCollection { rent_collector.calculate_rent_result(pubkey, loaded_account, filler_account_suffix); let current_rent_epoch = loaded_account.rent_epoch(); let new_rent_epoch = match rent_result { - RentResult::CollectRent((next_epoch, rent_due)) => { + RentResult::CollectRent { + new_rent_epoch: next_epoch, + rent_due, + } => { if next_epoch > current_rent_epoch && rent_due != 0 { // this is an account that would have had rent collected since this storage slot, so just use the hash we have since there must be a newer version of this account already in a newer slot // It would be a waste of time to recalcluate a hash. 
@@ -595,9 +601,9 @@ pub mod tests { let genesis_config = GenesisConfig::default(); let mut rent_collector = RentCollector::new( epoch, - &epoch_schedule, + epoch_schedule, genesis_config.slots_per_year(), - &genesis_config.rent, + genesis_config.rent, ); rent_collector.rent.lamports_per_byte_year = 0; // temporarily disable rent let find_unskipped_slot = Some; @@ -976,9 +982,9 @@ pub mod tests { let genesis_config = GenesisConfig::default(); let mut rent_collector = RentCollector::new( epoch, - &epoch_schedule, + epoch_schedule, genesis_config.slots_per_year(), - &genesis_config.rent, + genesis_config.rent, ); rent_collector.rent.lamports_per_byte_year = 0; // temporarily disable rent @@ -1169,9 +1175,9 @@ pub mod tests { let genesis_config = GenesisConfig::default(); let mut rent_collector = RentCollector::new( epoch, - &epoch_schedule, + epoch_schedule, genesis_config.slots_per_year(), - &genesis_config.rent, + genesis_config.rent, ); rent_collector.rent.lamports_per_byte_year = 0; // temporarily disable rent diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs index 046535c16064e3..6ae978a9a5c7a6 100644 --- a/runtime/src/rent_collector.rs +++ b/runtime/src/rent_collector.rs @@ -34,44 +34,33 @@ impl Default for RentCollector { /// when rent is collected for this account, this is the action to apply to the account #[derive(Debug)] -pub enum RentResult { +pub(crate) enum RentResult { /// maybe collect rent later, leave account alone LeaveAloneNoRent, /// collect rent - /// value is (new rent epoch, lamports of rent_due) - CollectRent((Epoch, u64)), + CollectRent { + new_rent_epoch: Epoch, + rent_due: u64, // lamports + }, } impl RentCollector { - pub fn new( + pub(crate) fn new( epoch: Epoch, - epoch_schedule: &EpochSchedule, + epoch_schedule: EpochSchedule, slots_per_year: f64, - rent: &Rent, + rent: Rent, ) -> Self { Self { epoch, - epoch_schedule: *epoch_schedule, + epoch_schedule, slots_per_year, - rent: *rent, + rent, } } - pub fn clone_with_epoch(&self, epoch: Epoch) -> Self { - self.clone_with_epoch_and_rate(epoch, self.rent.lamports_per_byte_year) - } - - pub fn clone_with_epoch_and_rate(&self, epoch: Epoch, lamports_per_byte_year: u64) -> Self { - let rent = if lamports_per_byte_year != self.rent.lamports_per_byte_year { - Rent { - lamports_per_byte_year, - ..self.rent - } - } else { - self.rent - }; + pub(crate) fn clone_with_epoch(&self, epoch: Epoch) -> Self { Self { - rent, epoch, ..self.clone() } @@ -85,7 +74,7 @@ impl RentCollector { /// given an account that 'should_collect_rent' /// returns (amount rent due, is_exempt_from_rent) - pub fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue { + pub(crate) fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue { if self .rent .is_exempt(account.lamports(), account.data().len()) @@ -127,7 +116,7 @@ impl RentCollector { // This is NOT thread safe at some level. If we try to collect from the same account in // parallel, we may collect twice. 
#[must_use = "add to Bank::collected_rent"] - pub fn collect_from_existing_account( + pub(crate) fn collect_from_existing_account( &self, address: &Pubkey, account: &mut AccountSharedData, @@ -135,28 +124,32 @@ impl RentCollector { ) -> CollectedInfo { match self.calculate_rent_result(address, account, filler_account_suffix) { RentResult::LeaveAloneNoRent => CollectedInfo::default(), - RentResult::CollectRent((next_epoch, rent_due)) => { - account.set_rent_epoch(next_epoch); - - let begin_lamports = account.lamports(); - account.saturating_sub_lamports(rent_due); - let end_lamports = account.lamports(); - let mut account_data_len_reclaimed = 0; - if end_lamports == 0 { - account_data_len_reclaimed = account.data().len() as u64; - *account = AccountSharedData::default(); + RentResult::CollectRent { + new_rent_epoch, + rent_due, + } => match account.lamports().checked_sub(rent_due) { + None | Some(0) => { + let account = std::mem::take(account); + CollectedInfo { + rent_amount: account.lamports(), + account_data_len_reclaimed: account.data().len() as u64, + } } - CollectedInfo { - rent_amount: begin_lamports - end_lamports, - account_data_len_reclaimed, + Some(lamports) => { + account.set_lamports(lamports); + account.set_rent_epoch(new_rent_epoch); + CollectedInfo { + rent_amount: rent_due, + account_data_len_reclaimed: 0u64, + } } - } + }, } } /// determine what should happen to collect rent from this account #[must_use] - pub fn calculate_rent_result( + pub(crate) fn calculate_rent_result( &self, address: &Pubkey, account: &impl ReadableAccount, @@ -165,25 +158,25 @@ impl RentCollector { if self.can_skip_rent_collection(address, account, filler_account_suffix) { return RentResult::LeaveAloneNoRent; } - - let rent_due = self.get_rent_due(account); - if let RentDue::Paying(0) = rent_due { - // maybe collect rent later, leave account alone - return RentResult::LeaveAloneNoRent; + match self.get_rent_due(account) { + // Rent isn't collected for the next epoch. + // Make sure to check exempt status again later in current epoch. + RentDue::Exempt => RentResult::CollectRent { + new_rent_epoch: self.epoch, + rent_due: 0, + }, + // Maybe collect rent later, leave account alone. + RentDue::Paying(0) => RentResult::LeaveAloneNoRent, + // Rent is collected for next epoch. + RentDue::Paying(rent_due) => RentResult::CollectRent { + new_rent_epoch: self.epoch + 1, + rent_due, + }, } - - let new_rent_epoch = match rent_due { - // Rent isn't collected for the next epoch - // Make sure to check exempt status again later in current epoch - RentDue::Exempt => self.epoch, - // Rent is collected for next epoch - RentDue::Paying(_) => self.epoch + 1, - }; - RentResult::CollectRent((new_rent_epoch, rent_due.lamports())) } #[must_use = "add to Bank::collected_rent"] - pub fn collect_from_created_account( + pub(crate) fn collect_from_created_account( &self, address: &Pubkey, account: &mut AccountSharedData, From 0b0549bcd8dc182cfbe19d512bea8a8b4da94392 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Jul 2022 20:02:35 +0000 Subject: [PATCH 050/100] chore:(deps): bump moment from 2.29.2 to 2.29.4 in /explorer (#26452) Bumps [moment](https://github.com/moment/moment) from 2.29.2 to 2.29.4. 
- [Release notes](https://github.com/moment/moment/releases) - [Changelog](https://github.com/moment/moment/blob/develop/CHANGELOG.md) - [Commits](https://github.com/moment/moment/compare/2.29.2...2.29.4) --- updated-dependencies: - dependency-name: moment dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- explorer/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/explorer/package-lock.json b/explorer/package-lock.json index c432cec17348ca..dcdc3a1f0b6bec 100644 --- a/explorer/package-lock.json +++ b/explorer/package-lock.json @@ -18863,9 +18863,9 @@ } }, "node_modules/moment": { - "version": "2.29.2", - "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.2.tgz", - "integrity": "sha512-UgzG4rvxYpN15jgCmVJwac49h9ly9NurikMWGPdVxm8GZD6XjkKPxDTjQQ43gtGgnV3X0cAyWDdP2Wexoquifg==", + "version": "2.29.4", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.4.tgz", + "integrity": "sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==", "engines": { "node": "*" } @@ -41688,9 +41688,9 @@ } }, "moment": { - "version": "2.29.2", - "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.2.tgz", - "integrity": "sha512-UgzG4rvxYpN15jgCmVJwac49h9ly9NurikMWGPdVxm8GZD6XjkKPxDTjQQ43gtGgnV3X0cAyWDdP2Wexoquifg==" + "version": "2.29.4", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.4.tgz", + "integrity": "sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==" }, "move-concurrently": { "version": "1.0.1", From f2fada9f2164141e99844832abf44c4dd8955eee Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Wed, 6 Jul 2022 13:30:33 -0700 Subject: [PATCH 051/100] document using ldb to drop rocksdb column families for downgrade scenarios (#26424) --- .../validator-troubleshoot.md | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/docs/src/running-validator/validator-troubleshoot.md b/docs/src/running-validator/validator-troubleshoot.md index 28e1679b95b713..4fa35fceddc25e 100644 --- a/docs/src/running-validator/validator-troubleshoot.md +++ b/docs/src/running-validator/validator-troubleshoot.md @@ -15,3 +15,37 @@ testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq). - [Core software repo](https://github.com/solana-labs/solana) Can't find what you're looking for? Send an email to ryan@solana.com or reach out to @rshea\#2622 on Discord. + +## Blockstore + +The validator blockstore rocksdb database can be inspected using the `ldb` tool. +`ldb` is part of the `rocksdb` code base and is also available in the `rocksdb-tools` +package. + +[RocksDB Administration and Data Access Tool](https://github.com/facebook/rocksdb/wiki/Administration-and-Data-Access-Tool) + +## Upgrade + +If a new software version introduces a new column family to the blockstore, +that new (empty) column will be automatically created. This is the same logic +that allows a validator to start fresh without the blockstore directory. + +## Downgrade + +If a new column family has been introduced to the validator blockstore, a +subsequent downgrade of the validator to a version that predates the new column +family will cause the validator to fail while opening the blockstore during +startup. 
+ +List column families: +``` +ldb --db=<validator ledger path>/rocksdb/ list_column_families +``` + +**Warning**: Please seek guidance on discord before modifying the validator +blockstore. + +Drop a column family: +``` +ldb --db=<validator ledger path>/rocksdb drop_column_family <column family name> +``` From 16219e300effa5857bbbe8f0bd7fe9cc3e167916 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 6 Jul 2022 16:12:47 -0500 Subject: [PATCH 052/100] generating index sets uncleaned_roots correctly (#26431) * generating index sets uncleaned_roots correctly * fix test failures * rename * update comments --- runtime/src/accounts_db.rs | 77 +++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 27 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index e55a977e97cfa9..07cc9ef3d9bc7c 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -8281,7 +8281,7 @@ impl AccountsDb { m.stop(); index_flush_us = m.as_us(); - // this has to happen before pubkeys_to_duplicate_accounts_data_len below + // this has to happen before get_duplicate_accounts_slots_and_data_len below // get duplicate keys from acct idx. We have to wait until we've finished flushing. for (slot, key) in self .accounts_index @@ -8300,6 +8300,7 @@ impl AccountsDb { // subtract data.len() from accounts_data_len for all old accounts that are in the index twice let mut accounts_data_len_dedup_timer = Measure::start("handle accounts data len duplicates"); + let uncleaned_roots = Mutex::new(HashSet::<Slot>::default()); if pass == 0 { let mut unique_pubkeys = HashSet::<Pubkey>::default(); self.uncleaned_pubkeys.iter().for_each(|entry| { @@ -8311,7 +8312,15 @@ impl AccountsDb { .into_iter() .collect::<Vec<_>>() .par_chunks(4096) - .map(|pubkeys| self.pubkeys_to_duplicate_accounts_data_len(pubkeys)) + .map(|pubkeys| { + let (count, uncleaned_roots_this_group) = + self.get_duplicate_accounts_slots_and_data_len(pubkeys); + let mut uncleaned_roots = uncleaned_roots.lock().unwrap(); + uncleaned_roots_this_group.into_iter().for_each(|slot| { + uncleaned_roots.insert(slot); + }); + count + }) .sum(); accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed); info!( @@ -8341,9 +8350,14 @@ impl AccountsDb { }; if pass == 0 { + let uncleaned_roots = uncleaned_roots.into_inner().unwrap(); // Need to add these last, otherwise older updates will be cleaned - for slot in &slots { - self.accounts_index.add_root(*slot, false); + for root in &slots { + // passing 'false' to 'add_root' causes 'root' to be added to 'accounts_index.roots_tracker.uncleaned_roots' + // passing 'true' to 'add_root' does NOT add 'root' to 'accounts_index.roots_tracker.uncleaned_roots' + // So, don't add all slots to 'uncleaned_roots' here since we know which slots contain duplicate pubkeys. + let uncleaned_root = uncleaned_roots.contains(root); + self.accounts_index.add_root(*root, !uncleaned_root); } self.set_storage_count_and_alive_bytes(storage_info, &mut timings); @@ -8380,10 +8394,17 @@ impl AccountsDb { } } - /// Used during generate_index() to get the _duplicate_ accounts data len from the given pubkeys + /// Used during generate_index() to: + /// 1. get the _duplicate_ accounts data len from the given pubkeys + /// 2. get the slots that contained duplicate pubkeys /// Note this should only be used when ALL entries in the accounts index are roots.
- fn pubkeys_to_duplicate_accounts_data_len(&self, pubkeys: &[Pubkey]) -> u64 { + /// returns (data len sum of all older duplicates, slots that contained duplicate pubkeys) + fn get_duplicate_accounts_slots_and_data_len( + &self, + pubkeys: &[Pubkey], + ) -> (u64, HashSet<Slot>) { let mut accounts_data_len_from_duplicates = 0; + let mut uncleaned_slots = HashSet::<Slot>::default(); pubkeys.iter().for_each(|pubkey| { if let Some(entry) = self.accounts_index.get_account_read_entry(pubkey) { let slot_list = entry.slot_list(); if slot_list.len() < 2 { return; } // Only the account data len in the highest slot should be used, and the rest are - // duplicates. So sort the slot list in descending slot order, skip the first - // item, then sum up the remaining data len, which are the duplicates. - let mut slot_list = slot_list.clone(); - slot_list - .select_nth_unstable_by(0, |a, b| b.0.cmp(&a.0)) - .2 - .iter() - .for_each(|(slot, account_info)| { - let maybe_storage_entry = self - .storage - .get_account_storage_entry(*slot, account_info.store_id()); - let mut accessor = LoadedAccountAccessor::Stored( - maybe_storage_entry.map(|entry| (entry, account_info.offset())), - ); - let loaded_account = accessor.check_and_get_loaded_account(); - accounts_data_len_from_duplicates += loaded_account.data().len(); - }); + // duplicates. So find the max slot to keep. + // Then sum up the remaining data len, which are the duplicates. + // All of the slots need to go in the 'uncleaned_slots' list. For clean to work properly, + // the slot where duplicate accounts are found in the index need to be in 'uncleaned_slots' list, too. + let max = slot_list.iter().map(|(slot, _)| slot).max().unwrap(); + slot_list.iter().for_each(|(slot, account_info)| { + uncleaned_slots.insert(*slot); + if slot == max { + // the info in 'max' is the most recent, current info for this pubkey + return; + } + let maybe_storage_entry = self + .storage + .get_account_storage_entry(*slot, account_info.store_id()); + let mut accessor = LoadedAccountAccessor::Stored( + maybe_storage_entry.map(|entry| (entry, account_info.offset())), + ); + let loaded_account = accessor.check_and_get_loaded_account(); + accounts_data_len_from_duplicates += loaded_account.data().len(); + }); } }); - accounts_data_len_from_duplicates as u64 + (accounts_data_len_from_duplicates as u64, uncleaned_slots) } fn update_storage_info( @@ -10783,9 +10807,8 @@ pub mod tests { fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) { let ancestors = vec![(slot, 0)].into_iter().collect(); - assert!(accounts - .load_without_fixed_root(&ancestors, &pubkey) - .is_none()); + let load = accounts.load_without_fixed_root(&ancestors, &pubkey); + assert!(load.is_none(), "{:?}", load); } fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDb, slot: Slot) -> AccountsDb { From f2fada9f2164141e99844832abf44c4dd8955eee Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Wed, 6 Jul 2022 16:15:01 -0500 Subject: [PATCH 053/100] Cleanup stake_instruction tests (#26393) --- Cargo.lock | 1 + programs/stake/Cargo.toml | 1 + programs/stake/src/stake_instruction.rs | 600 +++++++----------------- 3 files changed, 163 insertions(+), 439 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35a6fcb1df13c4..bb1a467bcba6fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6096,6 +6096,7 @@ dependencies = [ "solana-program-runtime", "solana-sdk 1.11.2", "solana-vote-program", + "test-case", "thiserror", ] diff --git a/programs/stake/Cargo.toml
b/programs/stake/Cargo.toml index 74838f3763b338..02c1a0a5ba97ac 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -29,6 +29,7 @@ thiserror = "1.0" assert_matches = "1.5.0" proptest = "1.0" solana-logger = { path = "../../logger", version = "=1.11.2" } +test-case = "2.1.0" [build-dependencies] rustc_version = "0.4" diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index ae7bcf5e40d513..40a07460795e3d 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -471,8 +471,29 @@ mod tests { }, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{borrow::BorrowMut, collections::HashSet, str::FromStr, sync::Arc}, + test_case::test_case, }; + /// The "new" behavior enables all features + fn feature_set_new_behavior() -> FeatureSet { + FeatureSet::all_enabled() + } + + /// The "old" behavior is before the stake minimum delegation was raised + fn feature_set_old_behavior() -> FeatureSet { + let mut feature_set = feature_set_new_behavior(); + feature_set.deactivate(&feature_set::stake_raise_minimum_delegation_to_1_sol::id()); + feature_set + } + + /// The "old old" behavior is both before the stake minimum delegation was raised *and* before + /// undelegated stake accounts could have zero lamports beyond rent + fn feature_set_old_old_behavior() -> FeatureSet { + let mut feature_set = feature_set_old_behavior(); + feature_set.deactivate(&feature_set::stake_allow_zero_undelegated_amount::id()); + feature_set + } + fn create_default_account() -> AccountSharedData { AccountSharedData::new(0, 0, &Pubkey::new_unique()) } @@ -599,7 +620,9 @@ mod tests { ) } - fn do_test_stake_process_instruction(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_process_instruction(feature_set: FeatureSet) { process_instruction_as_one_arg( &feature_set, &instruction::initialize( @@ -714,7 +737,9 @@ mod tests { ); } - fn do_test_spoofed_stake_accounts(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_spoofed_stake_accounts(feature_set: FeatureSet) { process_instruction_as_one_arg( &feature_set, &instruction::initialize( @@ -830,7 +855,9 @@ mod tests { ); } - fn do_test_stake_process_instruction_decode_bail(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_process_instruction_decode_bail(feature_set: FeatureSet) { // these will not call stake_state, have bogus contents let stake_address = Pubkey::new_unique(); let stake_account = create_default_stake_account(); @@ -1097,7 +1124,9 @@ mod tests { ); } - fn do_test_stake_checked_instructions(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_checked_instructions(feature_set: FeatureSet) { let stake_address = Pubkey::new_unique(); let staker = Pubkey::new_unique(); let staker_account = create_default_account(); @@ -1457,7 +1486,9 @@ mod tests { ); } - fn do_test_stake_initialize(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_initialize(feature_set: FeatureSet) { let rent = Rent::default(); let 
rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let stake_lamports = rent_exempt_reserve; @@ -1563,7 +1594,9 @@ mod tests { ); } - fn do_test_authorize(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_authorize(feature_set: FeatureSet) { let authority_address = solana_sdk::pubkey::new_rand(); let authority_address_2 = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); @@ -1742,7 +1775,9 @@ mod tests { ); } - fn do_test_authorize_override(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_authorize_override(feature_set: FeatureSet) { let authority_address = solana_sdk::pubkey::new_rand(); let mallory_address = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); @@ -1859,7 +1894,9 @@ mod tests { ); } - fn do_test_authorize_with_seed(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_authorize_with_seed(feature_set: FeatureSet) { let authority_base_address = solana_sdk::pubkey::new_rand(); let authority_address = solana_sdk::pubkey::new_rand(); let seed = "42"; @@ -1974,7 +2011,9 @@ mod tests { ); } - fn do_test_authorize_delegated_stake(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_authorize_delegated_stake(feature_set: FeatureSet) { let authority_address = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -2163,7 +2202,9 @@ mod tests { ); } - fn do_test_stake_delegate(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_delegate(feature_set: FeatureSet) { let mut vote_state = VoteState::default(); for i in 0..1000 { vote_state.process_slot_vote_unchecked(i); @@ -2411,7 +2452,9 @@ mod tests { ); } - fn do_test_redelegate_consider_balance_changes(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_redelegate_consider_balance_changes(feature_set: FeatureSet) { let mut clock = Clock::default(); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -2627,7 +2670,9 @@ mod tests { ); } - fn do_test_split(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = minimum_delegation * 2; @@ -2735,7 +2780,9 @@ mod tests { ); } - fn do_test_withdraw_stake(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_withdraw_stake(feature_set: FeatureSet) { let recipient_address = solana_sdk::pubkey::new_rand(); let authority_address = solana_sdk::pubkey::new_rand(); let custodian_address = solana_sdk::pubkey::new_rand(); @@ -3024,7 +3071,9 @@ mod tests { ); } - fn 
do_test_withdraw_stake_before_warmup(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_withdraw_stake_before_warmup(feature_set: FeatureSet) { let recipient_address = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -3155,7 +3204,9 @@ mod tests { ); } - fn do_test_withdraw_lockup(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_withdraw_lockup(feature_set: FeatureSet) { let recipient_address = solana_sdk::pubkey::new_rand(); let custodian_address = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); @@ -3279,7 +3330,9 @@ mod tests { assert_eq!(from(&accounts[0]).unwrap(), StakeState::Uninitialized); } - fn do_test_withdraw_rent_exempt(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_withdraw_rent_exempt(feature_set: FeatureSet) { let recipient_address = solana_sdk::pubkey::new_rand(); let custodian_address = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); @@ -3369,7 +3422,9 @@ mod tests { ); } - fn do_test_deactivate(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_deactivate(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = minimum_delegation; @@ -3491,7 +3546,9 @@ mod tests { ); } - fn do_test_set_lockup(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_set_lockup(feature_set: FeatureSet) { let custodian_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); let stake_address = solana_sdk::pubkey::new_rand(); @@ -3775,7 +3832,9 @@ mod tests { /// Ensure that `initialize()` respects the minimum balance requirements /// - Assert 1: accounts with a balance equal-to the rent exemption initialize OK /// - Assert 2: accounts with a balance less-than the rent exemption do not initialize - fn do_test_initialize_minimum_balance(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_initialize_minimum_balance(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let stake_address = solana_sdk::pubkey::new_rand(); @@ -3831,7 +3890,9 @@ mod tests { /// withdrawing below the minimum delegation, then re-delegating successfully (see /// `test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation()` for /// more information.) 
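Before the next hunk, a minimal sketch of the conversion pattern this patch applies to every test in the module (the function body and names here are illustrative only; the hand-written wrapper modules these attributes replace are deleted at the end of the patch):

#[test_case(feature_set_old_behavior(); "old_behavior")]
#[test_case(feature_set_new_behavior(); "new_behavior")]
fn test_example(feature_set: FeatureSet) {
    // the test-case macro generates one #[test] per attribute, so this body
    // runs twice, once with each FeatureSet the cases supply
    let _minimum_delegation = crate::get_minimum_delegation(&feature_set);
}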
- fn do_test_delegate_minimum_stake_delegation(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_delegate_minimum_stake_delegation(feature_set: FeatureSet) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -3924,7 +3985,9 @@ mod tests { /// EQ | LT | Err /// LT | EQ | Err /// LT | LT | Err - fn do_test_split_minimum_stake_delegation(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_minimum_stake_delegation(feature_set: FeatureSet) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -4017,7 +4080,9 @@ mod tests { /// delegation is OK /// - Assert 2: splitting the full amount from an account that has less than the minimum /// delegation is not OK - fn do_test_split_full_amount_minimum_stake_delegation(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_full_amount_minimum_stake_delegation(feature_set: FeatureSet) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -4088,7 +4153,9 @@ mod tests { /// Ensure that `split()` correctly handles prefunded destination accounts from /// initialized stakes. When a destination account already has funds, ensure /// the minimum split amount reduces accordingly. - fn do_test_initialized_split_destination_minimum_balance(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_initialized_split_destination_minimum_balance(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let source_address = Pubkey::new_unique(); @@ -4183,7 +4250,9 @@ mod tests { /// Ensure that `split()` correctly handles prefunded destination accounts from staked stakes. /// When a destination account already has funds, ensure the minimum split amount reduces /// accordingly. 
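The reduction described above can be sketched as simple arithmetic (a hedged illustration with invented variable names, not the exact code under test): with the minimum-delegation feature enabled, the destination must end up holding its rent-exempt reserve plus the minimum delegation, so whatever it already holds counts toward that target:

// assumed rule, for illustration only
let required_destination_balance = rent_exempt_reserve + minimum_delegation;
let minimum_split_amount = required_destination_balance.saturating_sub(destination_lamports);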
- fn do_test_staked_split_destination_minimum_balance( + #[test_case(feature_set_old_behavior(), &[Ok(()), Ok(())]; "old_behavior")] + #[test_case(feature_set_new_behavior(), &[ Err(InstructionError::InsufficientFunds), Err(InstructionError::InsufficientFunds) ] ; "new_behavior")] + fn test_staked_split_destination_minimum_balance( feature_set: FeatureSet, expected_results: &[Result<(), InstructionError>], ) { @@ -4334,7 +4403,9 @@ mod tests { /// Ensure that `withdraw()` respects the minimum delegation requirements /// - Assert 1: withdrawing so remaining stake is equal-to the minimum is OK /// - Assert 2: withdrawing so remaining stake is less-than the minimum is not OK - fn do_test_withdraw_minimum_stake_delegation(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_withdraw_minimum_stake_delegation(feature_set: FeatureSet) { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -4437,7 +4508,14 @@ mod tests { /// 3. Deactivates the delegation /// 4. Withdraws from the account such that the ending balance is *below* rent + minimum delegation /// 5. Re-delegates, now with less than the minimum delegation, but it still succeeds - fn do_test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation( + // + // The "old old" behavior relies on `validate_delegated_amount()` *not* checking if the + // stake amount meets the minimum delegation. Once the + // `stake_allow_zero_undelegated_amount` feature is activated, the `expected_result` + // parameter can be removed and consolidated. + #[test_case(feature_set_old_old_behavior(), Ok(()); "old_old_behavior")] + #[test_case(feature_set_new_behavior(), Err(StakeError::InsufficientDelegation.into()); "new_behavior")] + fn test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation( feature_set: FeatureSet, expected_result: Result<(), InstructionError>, ) { @@ -4617,7 +4695,9 @@ mod tests { ); } - fn do_test_split_source_uninitialized(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_source_uninitialized(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -4715,7 +4795,9 @@ mod tests { ); } - fn do_test_split_split_not_uninitialized(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_split_not_uninitialized(feature_set: FeatureSet) { let stake_lamports = 42; let stake_address = solana_sdk::pubkey::new_rand(); let stake_account = AccountSharedData::new_data_with_space( @@ -4764,7 +4846,9 @@ mod tests { } } - fn do_test_split_more_than_staked(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_more_than_staked(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -4821,7 +4905,9 @@ mod tests { ); } - fn do_test_split_with_rent(feature_set: FeatureSet) { +
#[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_with_rent(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -4929,7 +5015,9 @@ mod tests { } } - fn do_test_split_to_account_with_rent_exempt_reserve(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_to_account_with_rent_exempt_reserve(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -5052,7 +5140,9 @@ mod tests { } } - fn do_test_split_from_larger_sized_account(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_from_larger_sized_account(feature_set: FeatureSet) { let rent = Rent::default(); let source_larger_rent_exempt_reserve = rent.minimum_balance(StakeState::size_of() + 100); let split_rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -5181,7 +5271,9 @@ mod tests { } } - fn do_test_split_from_smaller_sized_account(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_from_smaller_sized_account(feature_set: FeatureSet) { let rent = Rent::default(); let source_smaller_rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let split_rent_exempt_reserve = rent.minimum_balance(StakeState::size_of() + 100); @@ -5258,7 +5350,9 @@ mod tests { } } - fn do_test_split_100_percent_of_source(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_100_percent_of_source(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -5352,7 +5446,9 @@ mod tests { } } - fn do_test_split_100_percent_of_source_to_account_with_lamports(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_100_percent_of_source_to_account_with_lamports(feature_set: FeatureSet) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); let minimum_delegation = crate::get_minimum_delegation(&feature_set); @@ -5446,7 +5542,9 @@ mod tests { } } - fn do_test_split_rent_exemptness(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_split_rent_exemptness(feature_set: FeatureSet) { let rent = Rent::default(); let source_rent_exempt_reserve = rent.minimum_balance(StakeState::size_of() + 100); let split_rent_exempt_reserve = rent.minimum_balance(StakeState::size_of()); @@ -5582,7 +5680,9 @@ mod tests { } } - fn do_test_merge(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_merge(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let 
merge_from_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); @@ -5710,7 +5810,9 @@ mod tests { } } - fn do_test_merge_self_fails(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_merge_self_fails(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); let rent = Rent::default(); @@ -5785,7 +5887,9 @@ mod tests { ); } - fn do_test_merge_incorrect_authorized_staker(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_merge_incorrect_authorized_staker(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let merge_from_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); @@ -5877,7 +5981,9 @@ mod tests { } } - fn do_test_merge_invalid_account_data(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_merge_invalid_account_data(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let merge_from_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); @@ -5956,7 +6062,9 @@ mod tests { } } - fn do_test_merge_fake_stake_source(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_merge_fake_stake_source(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let merge_from_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); @@ -6025,7 +6133,9 @@ mod tests { ); } - fn do_test_merge_active_stake(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_merge_active_stake(feature_set: FeatureSet) { let stake_address = solana_sdk::pubkey::new_rand(); let merge_from_address = solana_sdk::pubkey::new_rand(); let authorized_address = solana_sdk::pubkey::new_rand(); @@ -6283,7 +6393,9 @@ mod tests { ); } - fn do_test_stake_get_minimum_delegation(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_get_minimum_delegation(feature_set: FeatureSet) { let stake_address = Pubkey::new_unique(); let stake_account = create_default_stake_account(); let instruction_data = serialize(&StakeInstruction::GetMinimumDelegation).unwrap(); @@ -6336,7 +6448,9 @@ mod tests { // disabled | bad | some || Err InvalidInstructionData // disabled | good | none || Err NotEnoughAccountKeys // disabled | bad | none || Err NotEnoughAccountKeys - fn do_test_stake_process_instruction_error_ordering(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); "old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_stake_process_instruction_error_ordering(feature_set: FeatureSet) { let rent = Rent::default(); let rent_address = sysvar::rent::id(); let rent_account = account::create_account_shared_data_for_test(&rent); @@ -6440,7 +6554,9 @@ mod tests { } } - fn do_test_deactivate_delinquent(feature_set: FeatureSet) { + #[test_case(feature_set_old_behavior(); 
"old_behavior")] + #[test_case(feature_set_new_behavior(); "new_behavior")] + fn test_deactivate_delinquent(feature_set: FeatureSet) { let feature_set = Arc::new(feature_set); let mut sysvar_cache_override = SysvarCache::default(); @@ -6707,398 +6823,4 @@ mod tests { Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()), ); } - - mod old_behavior { - use super::*; - - fn new_feature_set() -> FeatureSet { - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&feature_set::stake_raise_minimum_delegation_to_1_sol::id()); - feature_set - } - - #[test] - fn test_stake_process_instruction() { - do_test_stake_process_instruction(new_feature_set()); - } - #[test] - fn test_stake_process_instruction_decode_bail() { - do_test_stake_process_instruction_decode_bail(new_feature_set()); - } - #[test] - fn test_stake_checked_instructions() { - do_test_stake_checked_instructions(new_feature_set()); - } - #[test] - fn test_stake_initialize() { - do_test_stake_initialize(new_feature_set()); - } - #[test] - fn test_authorize() { - do_test_authorize(new_feature_set()); - } - #[test] - fn test_authorize_override() { - do_test_authorize_override(new_feature_set()); - } - #[test] - fn test_authorize_with_seed() { - do_test_authorize_with_seed(new_feature_set()); - } - #[test] - fn test_authorize_delegated_stake() { - do_test_authorize_delegated_stake(new_feature_set()); - } - #[test] - fn test_stake_delegate() { - do_test_stake_delegate(new_feature_set()); - } - #[test] - fn test_redelegate_consider_balance_changes() { - do_test_redelegate_consider_balance_changes(new_feature_set()); - } - #[test] - fn test_split() { - do_test_split(new_feature_set()); - } - #[test] - fn test_withdraw_stake() { - do_test_withdraw_stake(new_feature_set()); - } - #[test] - fn test_withdraw_stake_before_warmup() { - do_test_withdraw_stake_before_warmup(new_feature_set()); - } - #[test] - fn test_withdraw_lockup() { - do_test_withdraw_lockup(new_feature_set()); - } - #[test] - fn test_withdraw_rent_exempt() { - do_test_withdraw_rent_exempt(new_feature_set()); - } - #[test] - fn test_deactivate() { - do_test_deactivate(new_feature_set()); - } - #[test] - fn test_set_lockup() { - do_test_set_lockup(new_feature_set()); - } - #[test] - fn test_initialize_minimum_balance() { - do_test_initialize_minimum_balance(new_feature_set()); - } - #[test] - fn test_delegate_minimum_stake_delegation() { - do_test_delegate_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_split_minimum_stake_delegation() { - do_test_split_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_split_full_amount_minimum_stake_delegation() { - do_test_split_full_amount_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_initialized_split_destination_minimum_balance() { - do_test_initialized_split_destination_minimum_balance(new_feature_set()); - } - #[test] - fn test_staked_split_destination_minimum_balance() { - do_test_staked_split_destination_minimum_balance(new_feature_set(), &[Ok(()), Ok(())]); - } - #[test] - fn test_withdraw_minimum_stake_delegation() { - do_test_withdraw_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation() { - let mut feature_set = new_feature_set(); - // The "old" behavior relies on `validate_delegated_amount()` *not* checking if the - // stake amount meets the minimum delegation. 
Once the - // `stake_allow_zero_undelegated_amount` feature is activated, `the expected_result` - // parameter can be removed and consolidated. - feature_set.deactivate(&feature_set::stake_allow_zero_undelegated_amount::id()); - do_test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation( - feature_set, - Ok(()), - ); - } - #[test] - fn test_split_source_uninitialized() { - do_test_split_source_uninitialized(new_feature_set()); - } - #[test] - fn test_split_split_not_uninitialized() { - do_test_split_split_not_uninitialized(new_feature_set()); - } - #[test] - fn test_split_more_than_staked() { - do_test_split_more_than_staked(new_feature_set()); - } - #[test] - fn test_split_with_rent() { - do_test_split_with_rent(new_feature_set()); - } - #[test] - fn test_split_to_account_with_rent_exempt_reserve() { - do_test_split_to_account_with_rent_exempt_reserve(new_feature_set()); - } - #[test] - fn test_split_from_larger_sized_account() { - do_test_split_from_larger_sized_account(new_feature_set()); - } - #[test] - fn test_split_from_smaller_sized_account() { - do_test_split_from_smaller_sized_account(new_feature_set()); - } - #[test] - fn test_split_100_percent_of_source() { - do_test_split_100_percent_of_source(new_feature_set()); - } - #[test] - fn test_split_100_percent_of_source_to_account_with_lamports() { - do_test_split_100_percent_of_source_to_account_with_lamports(new_feature_set()); - } - #[test] - fn test_split_rent_exemptness() { - do_test_split_rent_exemptness(new_feature_set()); - } - #[test] - fn test_merge() { - do_test_merge(new_feature_set()); - } - #[test] - fn test_merge_self_fails() { - do_test_merge_self_fails(new_feature_set()); - } - #[test] - fn test_merge_incorrect_authorized_staker() { - do_test_merge_incorrect_authorized_staker(new_feature_set()); - } - #[test] - fn test_merge_invalid_account_data() { - do_test_merge_invalid_account_data(new_feature_set()); - } - #[test] - fn test_merge_fake_stake_source() { - do_test_merge_fake_stake_source(new_feature_set()); - } - #[test] - fn test_merge_active_stake() { - do_test_merge_active_stake(new_feature_set()); - } - #[test] - fn test_stake_get_minimum_delegation() { - do_test_stake_get_minimum_delegation(new_feature_set()); - } - #[test] - fn test_stake_process_instruction_error_ordering() { - do_test_stake_process_instruction_error_ordering(new_feature_set()); - } - #[test] - fn test_deactivate_delinquent() { - do_test_deactivate_delinquent(new_feature_set()); - } - } - - mod new_behavior { - use super::*; - - fn new_feature_set() -> FeatureSet { - FeatureSet::all_enabled() - } - - #[test] - fn test_stake_process_instruction() { - do_test_stake_process_instruction(new_feature_set()); - } - #[test] - fn test_spoofed_stake_accounts() { - do_test_spoofed_stake_accounts(new_feature_set()); - } - #[test] - fn test_stake_process_instruction_decode_bail() { - do_test_stake_process_instruction_decode_bail(new_feature_set()); - } - #[test] - fn test_stake_checked_instructions() { - do_test_stake_checked_instructions(new_feature_set()); - } - #[test] - fn test_stake_initialize() { - do_test_stake_initialize(new_feature_set()); - } - #[test] - fn test_authorize() { - do_test_authorize(new_feature_set()); - } - #[test] - fn test_authorize_override() { - do_test_authorize_override(new_feature_set()); - } - #[test] - fn test_authorize_with_seed() { - do_test_authorize_with_seed(new_feature_set()); - } - #[test] - fn test_authorize_delegated_stake() { - do_test_authorize_delegated_stake(new_feature_set()); - } - 
#[test] - fn test_stake_delegate() { - do_test_stake_delegate(new_feature_set()); - } - #[test] - fn test_redelegate_consider_balance_changes() { - do_test_redelegate_consider_balance_changes(new_feature_set()); - } - #[test] - fn test_split() { - do_test_split(new_feature_set()); - } - #[test] - fn test_withdraw_stake() { - do_test_withdraw_stake(new_feature_set()); - } - #[test] - fn test_withdraw_stake_before_warmup() { - do_test_withdraw_stake_before_warmup(new_feature_set()); - } - #[test] - fn test_withdraw_lockup() { - do_test_withdraw_lockup(new_feature_set()); - } - #[test] - fn test_withdraw_rent_exempt() { - do_test_withdraw_rent_exempt(new_feature_set()); - } - #[test] - fn test_deactivate() { - do_test_deactivate(new_feature_set()); - } - #[test] - fn test_set_lockup() { - do_test_set_lockup(new_feature_set()); - } - #[test] - fn test_initialize_minimum_balance() { - do_test_initialize_minimum_balance(new_feature_set()); - } - #[test] - fn test_delegate_minimum_stake_delegation() { - do_test_delegate_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_split_minimum_stake_delegation() { - do_test_split_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_split_full_amount_minimum_stake_delegation() { - do_test_split_full_amount_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_initialized_split_destination_minimum_balance() { - do_test_initialized_split_destination_minimum_balance(new_feature_set()); - } - #[test] - fn test_staked_split_destination_minimum_balance() { - do_test_staked_split_destination_minimum_balance( - new_feature_set(), - &[ - Err(InstructionError::InsufficientFunds), - Err(InstructionError::InsufficientFunds), - ], - ); - } - #[test] - fn test_withdraw_minimum_stake_delegation() { - do_test_withdraw_minimum_stake_delegation(new_feature_set()); - } - #[test] - fn test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation() { - do_test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation( - new_feature_set(), - Err(StakeError::InsufficientDelegation.into()), - ); - } - #[test] - fn test_split_source_uninitialized() { - do_test_split_source_uninitialized(new_feature_set()); - } - #[test] - fn test_split_split_not_uninitialized() { - do_test_split_split_not_uninitialized(new_feature_set()); - } - #[test] - fn test_split_more_than_staked() { - do_test_split_more_than_staked(new_feature_set()); - } - #[test] - fn test_split_with_rent() { - do_test_split_with_rent(new_feature_set()); - } - #[test] - fn test_split_to_account_with_rent_exempt_reserve() { - do_test_split_to_account_with_rent_exempt_reserve(new_feature_set()); - } - #[test] - fn test_split_from_larger_sized_account() { - do_test_split_from_larger_sized_account(new_feature_set()); - } - #[test] - fn test_split_from_smaller_sized_account() { - do_test_split_from_smaller_sized_account(new_feature_set()); - } - #[test] - fn test_split_100_percent_of_source() { - do_test_split_100_percent_of_source(new_feature_set()); - } - #[test] - fn test_split_100_percent_of_source_to_account_with_lamports() { - do_test_split_100_percent_of_source_to_account_with_lamports(new_feature_set()); - } - #[test] - fn test_split_rent_exemptness() { - do_test_split_rent_exemptness(new_feature_set()); - } - #[test] - fn test_merge() { - do_test_merge(new_feature_set()); - } - #[test] - fn test_merge_self_fails() { - do_test_merge_self_fails(new_feature_set()); - } - #[test] - fn test_merge_incorrect_authorized_staker() { - 
do_test_merge_incorrect_authorized_staker(new_feature_set()); - } - #[test] - fn test_merge_invalid_account_data() { - do_test_merge_invalid_account_data(new_feature_set()); - } - #[test] - fn test_merge_fake_stake_source() { - do_test_merge_fake_stake_source(new_feature_set()); - } - #[test] - fn test_merge_active_stake() { - do_test_merge_active_stake(new_feature_set()); - } - #[test] - fn test_stake_get_minimum_delegation() { - do_test_stake_get_minimum_delegation(new_feature_set()); - } - #[test] - fn test_stake_process_instruction_error_ordering() { - do_test_stake_process_instruction_error_ordering(new_feature_set()); - } - #[test] - fn test_deactivate_delinquent() { - do_test_deactivate_delinquent(new_feature_set()); - } - } } From 90ef2cd02ab8b35ed885628bf1450715fae26c4a Mon Sep 17 00:00:00 2001 From: carllin Date: Wed, 6 Jul 2022 17:30:30 -0500 Subject: [PATCH 054/100] Parse snapshot for bank fields (#26016) --- runtime/src/bank.rs | 2 +- runtime/src/serde_snapshot.rs | 57 ++++--- runtime/src/serde_snapshot/newer.rs | 2 +- runtime/src/serde_snapshot/storage.rs | 2 +- runtime/src/snapshot_utils.rs | 212 +++++++++++++++++++++++--- 5 files changed, 233 insertions(+), 42 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2f2f304c1731b1..59219c1bdacf8f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -939,7 +939,7 @@ impl NonceInfo for NonceFull { // Sync fields with BankFieldsToSerialize! This is paired with it. // All members are made public to remain Bank's members private and to make versioned deserializer workable on this #[derive(Clone, Debug, Default, PartialEq)] -pub(crate) struct BankFieldsToDeserialize { +pub struct BankFieldsToDeserialize { pub(crate) blockhash_queue: BlockhashQueue, pub(crate) ancestors: AncestorsForSerialization, pub(crate) hash: Hash, diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index b2586307ec5c84..0be5abf4921c36 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -14,6 +14,7 @@ use { epoch_stakes::EpochStakes, hardened_unpack::UnpackedAppendVecMap, rent_collector::RentCollector, + serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{self, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION}, stakes::Stakes, }, @@ -64,7 +65,7 @@ pub(crate) enum SerdeStyle { const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; #[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)] -struct AccountsDbFields( +pub struct AccountsDbFields( HashMap>, StoredMetaWriteVersion, Slot, @@ -87,7 +88,7 @@ pub struct SnapshotStreams<'a, R> { /// Helper type to wrap AccountsDbFields when reconstructing AccountsDb from either just a full /// snapshot, or both a full and incremental snapshot #[derive(Debug)] -struct SnapshotAccountsDbFields { +pub struct SnapshotAccountsDbFields { full_snapshot_accounts_db_fields: AccountsDbFields, incremental_snapshot_accounts_db_fields: Option>, } @@ -226,23 +227,16 @@ pub(crate) fn compare_two_serialized_banks( Ok(fields1 == fields2) } -#[allow(clippy::too_many_arguments)] -pub(crate) fn bank_from_streams( +pub(crate) fn fields_from_streams( serde_style: SerdeStyle, snapshot_streams: &mut SnapshotStreams, - account_paths: &[PathBuf], - unpacked_append_vec_map: UnpackedAppendVecMap, - genesis_config: &GenesisConfig, - debug_keys: Option>>, - additional_builtins: Option<&Builtins>, - account_secondary_indexes: AccountSecondaryIndexes, - caching_enabled: bool, - limit_load_slot_count_from_snapshot: Option, - shrink_ratio: 
AccountShrinkThreshold, - verify_index: bool, - accounts_db_config: Option, - accounts_update_notifier: Option, -) -> std::result::Result +) -> std::result::Result< + ( + BankFieldsToDeserialize, + SnapshotAccountsDbFields, + ), + Error, +> where R: Read, { @@ -278,9 +272,36 @@ where full_snapshot_accounts_db_fields, incremental_snapshot_accounts_db_fields, }; - reconstruct_bank_from_fields( + Ok(( incremental_snapshot_bank_fields.unwrap_or(full_snapshot_bank_fields), snapshot_accounts_db_fields, + )) +} + +#[allow(clippy::too_many_arguments)] +pub(crate) fn bank_from_streams( + serde_style: SerdeStyle, + snapshot_streams: &mut SnapshotStreams, + account_paths: &[PathBuf], + unpacked_append_vec_map: UnpackedAppendVecMap, + genesis_config: &GenesisConfig, + debug_keys: Option>>, + additional_builtins: Option<&Builtins>, + account_secondary_indexes: AccountSecondaryIndexes, + caching_enabled: bool, + limit_load_slot_count_from_snapshot: Option, + shrink_ratio: AccountShrinkThreshold, + verify_index: bool, + accounts_db_config: Option, + accounts_update_notifier: Option, +) -> std::result::Result +where + R: Read, +{ + let (bank_fields, accounts_db_fields) = fields_from_streams(serde_style, snapshot_streams)?; + reconstruct_bank_from_fields( + bank_fields, + accounts_db_fields, genesis_config, account_paths, unpacked_append_vec_map, diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 6ab9a1b23b1514..6c6f7b63c551f0 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -13,7 +13,7 @@ use { std::{cell::RefCell, collections::HashSet, sync::RwLock}, }; -type AccountsDbFields = super::AccountsDbFields; +pub(super) type AccountsDbFields = super::AccountsDbFields; #[derive(Default, Clone, PartialEq, Eq, Debug, Deserialize, Serialize, AbiExample)] struct UnusedAccounts { diff --git a/runtime/src/serde_snapshot/storage.rs b/runtime/src/serde_snapshot/storage.rs index 2de2987d3118e5..be152d41473867 100644 --- a/runtime/src/serde_snapshot/storage.rs +++ b/runtime/src/serde_snapshot/storage.rs @@ -8,7 +8,7 @@ pub(super) type SerializedAppendVecId = usize; // Serializable version of AccountStorageEntry for snapshot format #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] -pub(super) struct SerializableAccountStorageEntry { +pub struct SerializableAccountStorageEntry { id: SerializedAppendVecId, accounts_current_len: usize, } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index e8bbcb13addf55..d276741c5301e2 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -5,10 +5,12 @@ use { }, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, - bank::{Bank, BankSlotDelta}, + bank::{Bank, BankFieldsToDeserialize, BankSlotDelta}, builtins::Builtins, hardened_unpack::{unpack_snapshot, ParallelSelector, UnpackError, UnpackedAppendVecMap}, - serde_snapshot::{bank_from_streams, bank_to_stream, SerdeStyle, SnapshotStreams}, + serde_snapshot::{ + bank_from_streams, bank_to_stream, fields_from_streams, SerdeStyle, SnapshotStreams, + }, shared_buffer_reader::{SharedBuffer, SharedBufferReader}, snapshot_archive_info::{ FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfoGetter, @@ -795,27 +797,12 @@ pub struct BankFromArchiveTimings { // From testing, 4 seems to be a sweet spot for ranges of 60M-360M accounts and 16-64 cores. This may need to be tuned later. 
const PARALLEL_UNTAR_READERS_DEFAULT: usize = 4; -/// Rebuild bank from snapshot archives. Handles either just a full snapshot, or both a full -/// snapshot and an incremental snapshot. -#[allow(clippy::too_many_arguments)] -pub fn bank_from_snapshot_archives( - account_paths: &[PathBuf], +fn verify_and_unarchive_snapshots( bank_snapshots_dir: impl AsRef, full_snapshot_archive_info: &FullSnapshotArchiveInfo, incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>, - genesis_config: &GenesisConfig, - debug_keys: Option>>, - additional_builtins: Option<&Builtins>, - account_secondary_indexes: AccountSecondaryIndexes, - accounts_db_caching_enabled: bool, - limit_load_slot_count_from_snapshot: Option, - shrink_ratio: AccountShrinkThreshold, - test_hash_calculation: bool, - accounts_db_skip_shrink: bool, - verify_index: bool, - accounts_db_config: Option, - accounts_update_notifier: Option, -) -> Result<(Bank, BankFromArchiveTimings)> { + account_paths: &[PathBuf], +) -> Result<(UnarchivedSnapshot, Option)> { check_are_snapshots_compatible( full_snapshot_archive_info, incremental_snapshot_archive_info, @@ -836,7 +823,7 @@ pub fn bank_from_snapshot_archives( parallel_divisions, )?; - let mut unarchived_incremental_snapshot = + let unarchived_incremental_snapshot = if let Some(incremental_snapshot_archive_info) = incremental_snapshot_archive_info { let unarchived_incremental_snapshot = unarchive_snapshot( &bank_snapshots_dir, @@ -852,6 +839,78 @@ pub fn bank_from_snapshot_archives( None }; + Ok((unarchived_full_snapshot, unarchived_incremental_snapshot)) +} + +/// Utility for parsing out bank specific information from a snapshot archive. This utility can be used +/// to parse out bank specific information like the leader schedule, epoch schedule, etc. +pub fn bank_fields_from_snapshot_archives( + bank_snapshots_dir: impl AsRef, + full_snapshot_archives_dir: impl AsRef, + incremental_snapshot_archives_dir: impl AsRef, +) -> Result { + let full_snapshot_archive_info = + get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + .ok_or(SnapshotError::NoSnapshotArchives)?; + + let incremental_snapshot_archive_info = get_highest_incremental_snapshot_archive_info( + &incremental_snapshot_archives_dir, + full_snapshot_archive_info.slot(), + ); + + let temp_dir = tempfile::Builder::new() + .prefix("dummy-accounts-path") + .tempdir()?; + + let account_paths = vec![temp_dir.path().to_path_buf()]; + + let (unarchived_full_snapshot, unarchived_incremental_snapshot) = + verify_and_unarchive_snapshots( + &bank_snapshots_dir, + &full_snapshot_archive_info, + incremental_snapshot_archive_info.as_ref(), + &account_paths, + )?; + + bank_fields_from_snapshots( + &unarchived_full_snapshot.unpacked_snapshots_dir_and_version, + unarchived_incremental_snapshot + .as_ref() + .map(|unarchive_preparation_result| { + &unarchive_preparation_result.unpacked_snapshots_dir_and_version + }), + ) +} + +/// Rebuild bank from snapshot archives. Handles either just a full snapshot, or both a full +/// snapshot and an incremental snapshot. 
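As a rough usage sketch of the new utility above (directory variables are hypothetical and error handling is elided; only bank metadata is deserialized, with account data unpacked into a throwaway temp dir as the dummy-accounts-path above shows):

let bank_fields = bank_fields_from_snapshot_archives(
    &bank_snapshots_dir,                // scratch dir where archives are unpacked
    &full_snapshot_archives_dir,        // holds the highest full snapshot archive
    &incremental_snapshot_archives_dir, // optional newer incremental archive
)?;
println!("snapshot bank slot: {}", bank_fields.slot);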
+#[allow(clippy::too_many_arguments)] +pub fn bank_from_snapshot_archives( + account_paths: &[PathBuf], + bank_snapshots_dir: impl AsRef, + full_snapshot_archive_info: &FullSnapshotArchiveInfo, + incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>, + genesis_config: &GenesisConfig, + debug_keys: Option>>, + additional_builtins: Option<&Builtins>, + account_secondary_indexes: AccountSecondaryIndexes, + accounts_db_caching_enabled: bool, + limit_load_slot_count_from_snapshot: Option, + shrink_ratio: AccountShrinkThreshold, + test_hash_calculation: bool, + accounts_db_skip_shrink: bool, + verify_index: bool, + accounts_db_config: Option, + accounts_update_notifier: Option, +) -> Result<(Bank, BankFromArchiveTimings)> { + let (unarchived_full_snapshot, mut unarchived_incremental_snapshot) = + verify_and_unarchive_snapshots( + bank_snapshots_dir, + full_snapshot_archive_info, + incremental_snapshot_archive_info, + account_paths, + )?; + let mut unpacked_append_vec_map = unarchived_full_snapshot.unpacked_append_vec_map; if let Some(ref mut unarchive_preparation_result) = unarchived_incremental_snapshot { let incremental_snapshot_unpacked_append_vec_map = @@ -1557,6 +1616,51 @@ fn verify_unpacked_snapshots_dir_and_version( Ok((snapshot_version, root_paths)) } +fn bank_fields_from_snapshots( + full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, + incremental_snapshot_unpacked_snapshots_dir_and_version: Option< + &UnpackedSnapshotsDirAndVersion, + >, +) -> Result { + let (full_snapshot_version, full_snapshot_root_paths) = + verify_unpacked_snapshots_dir_and_version( + full_snapshot_unpacked_snapshots_dir_and_version, + )?; + let (incremental_snapshot_version, incremental_snapshot_root_paths) = + if let Some(snapshot_unpacked_snapshots_dir_and_version) = + incremental_snapshot_unpacked_snapshots_dir_and_version + { + let (snapshot_version, bank_snapshot_info) = verify_unpacked_snapshots_dir_and_version( + snapshot_unpacked_snapshots_dir_and_version, + )?; + (Some(snapshot_version), Some(bank_snapshot_info)) + } else { + (None, None) + }; + info!( + "Loading bank from full snapshot {} and incremental snapshot {:?}", + full_snapshot_root_paths.snapshot_path.display(), + incremental_snapshot_root_paths + .as_ref() + .map(|paths| paths.snapshot_path.display()), + ); + + let snapshot_root_paths = SnapshotRootPaths { + full_snapshot_root_file_path: full_snapshot_root_paths.snapshot_path, + incremental_snapshot_root_file_path: incremental_snapshot_root_paths + .map(|root_paths| root_paths.snapshot_path), + }; + + deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { + Ok( + match incremental_snapshot_version.unwrap_or(full_snapshot_version) { + SnapshotVersion::V1_2_0 => fields_from_streams(SerdeStyle::Newer, snapshot_streams) + .map(|(bank_fields, _accountsdb_fields)| bank_fields), + }?, + ) + }) +} + #[allow(clippy::too_many_arguments)] fn rebuild_bank_from_snapshots( full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, @@ -3599,6 +3703,72 @@ mod tests { ); } + #[test] + fn test_bank_fields_from_snapshot() { + solana_logger::setup(); + let collector = Pubkey::new_unique(); + let key1 = Keypair::new(); + + let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + while !bank0.is_complete() { + bank0.register_tick(&Hash::new_unique()); + } + + let slot = 1; + let bank1 = Arc::new(Bank::new_from_parent(&bank0, 
&collector, slot)); while !bank1.is_complete() { bank1.register_tick(&Hash::new_unique()); } + + let all_snapshots_dir = tempfile::TempDir::new().unwrap(); + let snapshot_archive_format = ArchiveFormat::Tar; + + let full_snapshot_slot = slot; + bank_to_full_snapshot_archive( + &all_snapshots_dir, + &bank1, + None, + &all_snapshots_dir, + &all_snapshots_dir, + snapshot_archive_format, + DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, + DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, + ) + .unwrap(); + + let slot = slot + 1; + let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot)); + bank2 + .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .unwrap(); + while !bank2.is_complete() { + bank2.register_tick(&Hash::new_unique()); + } + + bank_to_incremental_snapshot_archive( + &all_snapshots_dir, + &bank2, + full_snapshot_slot, + None, + &all_snapshots_dir, + &all_snapshots_dir, + snapshot_archive_format, + DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, + DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, + ) + .unwrap(); + + let bank_fields = bank_fields_from_snapshot_archives( + &all_snapshots_dir, + &all_snapshots_dir, + &all_snapshots_dir, + ) + .unwrap(); + assert_eq!(bank_fields.slot, bank2.slot()); + assert_eq!(bank_fields.parent_slot, bank2.parent_slot()); + } + /// All the permutations of `snapshot_type` for the new-and-old accounts packages: /// /// new | old | From 1f2e83039118d910d0bbf9acd6561c3d32c82c3a Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 6 Jul 2022 17:31:10 -0500 Subject: [PATCH 055/100] helpful error message when mmap limit can't change (#26450) --- ledger/src/blockstore.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 64c4a5568e3e4f..a126d7bef66a75 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -4278,12 +4278,13 @@ fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> { } let mut nofile = get_nofile(); - if nofile.rlim_cur < desired_nofile { + let current = nofile.rlim_cur; + if current < desired_nofile { nofile.rlim_cur = desired_nofile; if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 { error!( - "Unable to increase the maximum open file descriptor limit to {}", - desired_nofile + "Unable to increase the maximum open file descriptor limit to {} from {}", + nofile.rlim_cur, current, ); if cfg!(target_os = "macos") { From 134303714bdff50c611b0e3deddb9b84091f174e Mon Sep 17 00:00:00 2001 From: Xiang Zhu Date: Wed, 6 Jul 2022 15:35:32 -0700 Subject: [PATCH 056/100] Fix the order assumption of the pubkeys created by Pubkey::new_unique() (#26451) new_unique() does not guarantee the increment order due to the bytes array storage and its eq-partial trait interpreting the bytes in the big-endian way. --- runtime/src/accounts.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 2ed469df9cc49a..881e96f2b78a47 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -3620,13 +3620,27 @@ mod tests { AccountShrinkThreshold::default(), ); - let pubkey0 = Pubkey::new_unique(); + /* This test assumes pubkey0 < pubkey1 < pubkey2. + * But the keys created with new_unique() do not guarantee this + * order because of the endianness. new_unique() calls add 1 at each + * key generation as the little endian integer.
A pubkey stores its + * value in a 32-byte array bytes, and its eq-partial trait considers + * the lower-address bytes more significant, which is the big-endian + * order. + * So, sort first to ensure the order assumption holds. + */ + let mut keys = vec![]; + for _idx in 0..3 { + keys.push(Pubkey::new_unique()); + } + keys.sort(); + let pubkey2 = keys.pop().unwrap(); + let pubkey1 = keys.pop().unwrap(); + let pubkey0 = keys.pop().unwrap(); let account0 = AccountSharedData::new(42, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey0, &account0); - let pubkey1 = Pubkey::new_unique(); let account1 = AccountSharedData::new(42, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey1, &account1); - let pubkey2 = Pubkey::new_unique(); let account2 = AccountSharedData::new(41, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey2, &account2); From e2e3c31250a7c3bd129491f70cc869adc693aaee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Jul 2022 23:46:51 +0000 Subject: [PATCH 057/100] chore: bump wasm-bindgen from 0.2.80 to 0.2.81 (#26453) * chore: bump wasm-bindgen from 0.2.80 to 0.2.81 Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.80 to 0.2.81. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/compare/0.2.80...0.2.81) --- updated-dependencies: - dependency-name: wasm-bindgen dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 20 ++++++++++---------- programs/bpf/Cargo.lock | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb1a467bcba6fe..e916001545f111 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7580,9 +7580,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7590,9 +7590,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", @@ -7617,9 +7617,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote 1.0.18", "wasm-bindgen-macro-support", @@ -7627,9 +7627,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2 1.0.38", "quote 1.0.18", @@ -7640,9 +7640,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "web-sys" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 7e3ebceede71c9..47981f4062ddaa 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -6724,9 +6724,9 @@ checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -6734,9 +6734,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", @@ -6761,9 +6761,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote 1.0.18", "wasm-bindgen-macro-support", @@ -6771,9 +6771,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2 1.0.38", "quote 1.0.18", @@ -6784,9 +6784,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "web-sys" From f2abbcaf9c0eed4cfe64c77132f4315b03865d7f Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Wed, 6 Jul 2022 22:39:03 -0600 Subject: [PATCH 058/100] Enable base58 and base64 encoding parameters for Memcmp filters (#26437) * Minor refactoring of client RpcProgramAccountsConfig handling * Enable explicit base58/base64 encoding of Memcmp filters, including client backward compatibility with nodes on old software * Deprecate Memcmp::Encoding * Remove deprecation warnings in rpc * Remove deprecation warnings in cli * Update docs * Make variants self-documenting --- cli/src/cluster_query.rs | 30 ++-- cli/src/program.rs | 66 ++++----- client/src/nonblocking/rpc_client.rs | 42 ++++-- client/src/rpc_filter.rs | 95 ++++++++++++ docs/src/developing/clients/jsonrpc-api.md | 4 +- 
rpc/src/rpc.rs | 164 +++++---------------- 6 files changed, 207 insertions(+), 194 deletions(-) diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 0ca373e80cf73c..b64c5fd822fe97 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1776,32 +1776,24 @@ pub fn process_show_stakes( if vote_account_pubkeys.len() == 1 { program_accounts_config.filters = Some(vec![ // Filter by `StakeState::Stake(_, _)` - rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp { - offset: 0, - bytes: rpc_filter::MemcmpEncodedBytes::Base58( - bs58::encode([2, 0, 0, 0]).into_string(), - ), - encoding: Some(rpc_filter::MemcmpEncoding::Binary), - }), + rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp::new_base58_encoded( + 0, + &[2, 0, 0, 0], + )), // Filter by `Delegation::voter_pubkey`, which begins at byte offset 124 - rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp { - offset: 124, - bytes: rpc_filter::MemcmpEncodedBytes::Base58( - vote_account_pubkeys[0].to_string(), - ), - encoding: Some(rpc_filter::MemcmpEncoding::Binary), - }), + rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp::new_base58_encoded( + 124, + vote_account_pubkeys[0].as_ref(), + )), ]); } } if let Some(withdraw_authority_pubkey) = withdraw_authority_pubkey { // withdrawer filter - let withdrawer_filter = rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp { - offset: 44, - bytes: rpc_filter::MemcmpEncodedBytes::Base58(withdraw_authority_pubkey.to_string()), - encoding: Some(rpc_filter::MemcmpEncoding::Binary), - }); + let withdrawer_filter = rpc_filter::RpcFilterType::Memcmp( + rpc_filter::Memcmp::new_base58_encoded(44, withdraw_authority_pubkey.as_ref()), + ); let filters = program_accounts_config.filters.get_or_insert(vec![]); filters.push(withdrawer_filter); diff --git a/cli/src/program.rs b/cli/src/program.rs index 531e2352473590..b3dc020f1268bf 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -22,7 +22,7 @@ use { connection_cache::ConnectionCache, rpc_client::RpcClient, rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSendTransactionConfig}, - rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, + rpc_filter::{Memcmp, RpcFilterType}, tpu_client::{TpuClient, TpuClientConfig}, }, solana_program_runtime::invoke_context::InvokeContext, @@ -1203,24 +1203,19 @@ fn get_buffers( authority_pubkey: Option, use_lamports_unit: bool, ) -> Result> { - let mut filters = vec![RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1, 0, 0, 0]).into_string()), - encoding: None, - })]; + let mut filters = vec![RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + 0, + &[1, 0, 0, 0], + ))]; if let Some(authority_pubkey) = authority_pubkey { - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: ACCOUNT_TYPE_SIZE, - bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1]).into_string()), - encoding: None, - })); - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: ACCOUNT_TYPE_SIZE + OPTION_SIZE, - bytes: MemcmpEncodedBytes::Base58( - bs58::encode(authority_pubkey.as_ref()).into_string(), - ), - encoding: None, - })); + filters.push(RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + ACCOUNT_TYPE_SIZE, + &[1], + ))); + filters.push(RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + ACCOUNT_TYPE_SIZE + OPTION_SIZE, + authority_pubkey.as_ref(), + ))); } let results = get_accounts_with_filter( @@ -1256,24 +1251,19 @@ fn get_programs( authority_pubkey: Option, use_lamports_unit: bool, ) -> Result> { - let mut filters = 
vec![RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![3, 0, 0, 0]).into_string()), - encoding: None, - })]; + let mut filters = vec![RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + 0, + &[3, 0, 0, 0], + ))]; if let Some(authority_pubkey) = authority_pubkey { - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: ACCOUNT_TYPE_SIZE + SLOT_SIZE, - bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1]).into_string()), - encoding: None, - })); - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: ACCOUNT_TYPE_SIZE + SLOT_SIZE + OPTION_SIZE, - bytes: MemcmpEncodedBytes::Base58( - bs58::encode(authority_pubkey.as_ref()).into_string(), - ), - encoding: None, - })); + filters.push(RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + ACCOUNT_TYPE_SIZE + SLOT_SIZE, + &[1], + ))); + filters.push(RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + ACCOUNT_TYPE_SIZE + SLOT_SIZE + OPTION_SIZE, + authority_pubkey.as_ref(), + ))); } let results = get_accounts_with_filter( @@ -1291,11 +1281,7 @@ fn get_programs( { let mut bytes = vec![2, 0, 0, 0]; bytes.extend_from_slice(programdata_address.as_ref()); - let filters = vec![RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Base58(bs58::encode(bytes).into_string()), - encoding: None, - })]; + let filters = vec![RpcFilterType::Memcmp(Memcmp::new_base58_encoded(0, &bytes))]; let results = get_accounts_with_filter(rpc_client, filters, 0)?; if results.len() != 1 { diff --git a/client/src/nonblocking/rpc_client.rs b/client/src/nonblocking/rpc_client.rs index f899f723112d43..e855addaab2568 100644 --- a/client/src/nonblocking/rpc_client.rs +++ b/client/src/nonblocking/rpc_client.rs @@ -19,6 +19,7 @@ use { mock_sender::MockSender, rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClientConfig}, rpc_config::{RpcAccountInfoConfig, *}, + rpc_filter::{MemcmpEncodedBytes, RpcFilterType}, rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter}, rpc_response::*, rpc_sender::*, @@ -580,6 +581,33 @@ impl RpcClient { Ok(request) } + #[allow(deprecated)] + async fn maybe_map_filters( + &self, + mut filters: Vec, + ) -> Result, RpcError> { + let node_version = self.get_node_version().await?; + if node_version < semver::Version::new(1, 11, 2) { + for filter in filters.iter_mut() { + if let RpcFilterType::Memcmp(memcmp) = filter { + match &memcmp.bytes { + MemcmpEncodedBytes::Base58(string) => { + memcmp.bytes = MemcmpEncodedBytes::Binary(string.clone()); + } + MemcmpEncodedBytes::Base64(_) => { + return Err(RpcError::RpcRequestError(format!( + "RPC node on old version {} does not support base64 encoding for memcmp filters", + node_version + ))); + } + _ => {} + } + } + } + } + Ok(filters) + } + /// Submit a transaction and wait for confirmation. 
/// /// Once this function returns successfully, the given transaction is @@ -4490,21 +4518,17 @@ impl RpcClient { pub async fn get_program_accounts_with_config( &self, pubkey: &Pubkey, - config: RpcProgramAccountsConfig, + mut config: RpcProgramAccountsConfig, ) -> ClientResult> { let commitment = config .account_config .commitment .unwrap_or_else(|| self.commitment()); let commitment = self.maybe_map_commitment(commitment).await?; - let account_config = RpcAccountInfoConfig { - commitment: Some(commitment), - ..config.account_config - }; - let config = RpcProgramAccountsConfig { - account_config, - ..config - }; + config.account_config.commitment = Some(commitment); + if let Some(filters) = config.filters { + config.filters = Some(self.maybe_map_filters(filters).await?); + } let accounts: Vec = self .send( RpcRequest::GetProgramAccounts, diff --git a/client/src/rpc_filter.rs b/client/src/rpc_filter.rs index 043984fcefad36..483fba80286ae7 100644 --- a/client/src/rpc_filter.rs +++ b/client/src/rpc_filter.rs @@ -129,16 +129,37 @@ pub enum MemcmpEncodedBytes { } #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(into = "RpcMemcmp", from = "RpcMemcmp")] pub struct Memcmp { /// Data offset to begin match pub offset: usize, /// Bytes, encoded with specified encoding, or default Binary pub bytes: MemcmpEncodedBytes, /// Optional encoding specification + #[deprecated( + since = "1.11.2", + note = "Field has no server-side effect. Specify encoding with `MemcmpEncodedBytes` variant instead." + )] pub encoding: Option, } impl Memcmp { + pub fn new_raw_bytes(offset: usize, bytes: Vec) -> Self { + Self { + offset, + bytes: MemcmpEncodedBytes::Bytes(bytes), + encoding: None, + } + } + + pub fn new_base58_encoded(offset: usize, bytes: &[u8]) -> Self { + Self { + offset, + bytes: MemcmpEncodedBytes::Base58(bs58::encode(bytes).into_string()), + encoding: None, + } + } + pub fn bytes(&self) -> Option>> { use MemcmpEncodedBytes::*; match &self.bytes { @@ -164,6 +185,80 @@ impl Memcmp { } } +// Internal struct to hold Memcmp filter data as either encoded String or raw Bytes +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(untagged)] +enum DataType { + Encoded(String), + Raw(Vec), +} + +// Internal struct used to specify explicit Base58 and Base64 encoding +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +enum RpcMemcmpEncoding { + Base58, + Base64, + // This variant exists only to preserve backward compatibility with generic `Memcmp` serde + #[serde(other)] + Binary, +} + +// Internal struct to enable Memcmp filters with explicit Base58 and Base64 encoding. The From +// implementations emulate `#[serde(tag = "encoding", content = "bytes")]` for +// `MemcmpEncodedBytes`. On the next major version, all these internal elements should be removed +// and replaced with adjacent tagging of `MemcmpEncodedBytes`. 
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+struct RpcMemcmp {
+    offset: usize,
+    bytes: DataType,
+    encoding: Option<RpcMemcmpEncoding>,
+}
+
+impl From<Memcmp> for RpcMemcmp {
+    fn from(memcmp: Memcmp) -> RpcMemcmp {
+        let (bytes, encoding) = match memcmp.bytes {
+            MemcmpEncodedBytes::Binary(string) => {
+                (DataType::Encoded(string), Some(RpcMemcmpEncoding::Binary))
+            }
+            MemcmpEncodedBytes::Base58(string) => {
+                (DataType::Encoded(string), Some(RpcMemcmpEncoding::Base58))
+            }
+            MemcmpEncodedBytes::Base64(string) => {
+                (DataType::Encoded(string), Some(RpcMemcmpEncoding::Base64))
+            }
+            MemcmpEncodedBytes::Bytes(vector) => (DataType::Raw(vector), None),
+        };
+        RpcMemcmp {
+            offset: memcmp.offset,
+            bytes,
+            encoding,
+        }
+    }
+}
+
+impl From<RpcMemcmp> for Memcmp {
+    fn from(memcmp: RpcMemcmp) -> Memcmp {
+        let encoding = memcmp.encoding.unwrap_or(RpcMemcmpEncoding::Binary);
+        let bytes = match (encoding, memcmp.bytes) {
+            (RpcMemcmpEncoding::Binary, DataType::Encoded(string))
+            | (RpcMemcmpEncoding::Base58, DataType::Encoded(string)) => {
+                MemcmpEncodedBytes::Base58(string)
+            }
+            (RpcMemcmpEncoding::Binary, DataType::Raw(vector)) => MemcmpEncodedBytes::Bytes(vector),
+            (RpcMemcmpEncoding::Base64, DataType::Encoded(string)) => {
+                MemcmpEncodedBytes::Base64(string)
+            }
+            _ => unreachable!(),
+        };
+        Memcmp {
+            offset: memcmp.offset,
+            bytes,
+            encoding: None,
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/docs/src/developing/clients/jsonrpc-api.md b/docs/src/developing/clients/jsonrpc-api.md
index 3729b607639023..97e0df262ba4ec 100644
--- a/docs/src/developing/clients/jsonrpc-api.md
+++ b/docs/src/developing/clients/jsonrpc-api.md
@@ -1906,7 +1906,9 @@ Returns all accounts owned by the provided program Pubkey
   - `memcmp: <object>` - compares a provided series of bytes with program account data at a particular offset. Fields:
     - `offset: <usize>` - offset into program account data to start comparison
-    - `bytes: <string>` - data to match, as base-58 encoded string and limited to less than 129 bytes
+    - `bytes: <string>` - data to match, as encoded string
+    - `encoding: <string>` - encoding for filter `bytes` data, either "base58" or "base64". Data is limited in size to 128 or fewer decoded bytes.
+      **NEW: This field, and base64 support generally, is only available in solana-core v1.11.2 or newer.
Please omit when querying nodes on earlier versions** - `dataSize: ` - compares the program account data length with the provided data size diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 7d4b3235b82748..62e163575e49f6 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1899,11 +1899,10 @@ impl JsonRpcRequestProcessor { let mut filters = vec![]; if let Some(mint) = mint { // Optional filter on Mint address - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().into()), - encoding: None, - })); + filters.push(RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + 0, + mint.to_bytes().into(), + ))); } let keyed_accounts = self.get_filtered_spl_token_accounts_by_owner( @@ -1950,17 +1949,12 @@ impl JsonRpcRequestProcessor { let mut filters = vec![ // Filter on Delegate is_some() - RpcFilterType::Memcmp(Memcmp { - offset: 72, - bytes: MemcmpEncodedBytes::Bytes(bincode::serialize(&1u32).unwrap()), - encoding: None, - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + 72, + bincode::serialize(&1u32).unwrap(), + )), // Filter on Delegate address - RpcFilterType::Memcmp(Memcmp { - offset: 76, - bytes: MemcmpEncodedBytes::Bytes(delegate.to_bytes().into()), - encoding: None, - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(76, delegate.to_bytes().into())), ]; // Optional filter on Mint address, uses mint account index for scan let keyed_accounts = if let Some(mint) = mint { @@ -2052,11 +2046,10 @@ impl JsonRpcRequestProcessor { // Filter on Token Account state filters.push(RpcFilterType::TokenAccountState); // Filter on Owner address - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: SPL_TOKEN_ACCOUNT_OWNER_OFFSET, - bytes: MemcmpEncodedBytes::Bytes(owner_key.to_bytes().into()), - encoding: None, - })); + filters.push(RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + SPL_TOKEN_ACCOUNT_OWNER_OFFSET, + owner_key.to_bytes().into(), + ))); if self .config @@ -2104,11 +2097,10 @@ impl JsonRpcRequestProcessor { // Filter on Token Account state filters.push(RpcFilterType::TokenAccountState); // Filter on Mint address - filters.push(RpcFilterType::Memcmp(Memcmp { - offset: SPL_TOKEN_ACCOUNT_MINT_OFFSET, - bytes: MemcmpEncodedBytes::Bytes(mint_key.to_bytes().into()), - encoding: None, - })); + filters.push(RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + SPL_TOKEN_ACCOUNT_MINT_OFFSET, + mint_key.to_bytes().into(), + ))); if self .config .account_indexes @@ -6402,6 +6394,7 @@ pub mod tests { #[test] fn test_rpc_verify_filter() { + #[allow(deprecated)] let filter = RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58( @@ -6411,6 +6404,7 @@ pub mod tests { }); assert_eq!(verify_filter(&filter), Ok(())); // Invalid base-58 + #[allow(deprecated)] let filter = RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58("III".to_string()), @@ -7967,11 +7961,7 @@ pub mod tests { get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::DataSize(165) ], ) @@ -7984,16 +7974,8 @@ pub mod tests { get_spl_token_owner_filter( &Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), - 
RpcFilterType::Memcmp(Memcmp { - offset: 165, - bytes: MemcmpEncodedBytes::Bytes(vec![ACCOUNTTYPE_ACCOUNT]), - encoding: None - }) + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), ], ) .unwrap(), @@ -8005,11 +7987,7 @@ pub mod tests { get_spl_token_owner_filter( &Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::TokenAccountState, ], ) @@ -8021,16 +7999,8 @@ pub mod tests { assert!(get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), - RpcFilterType::Memcmp(Memcmp { - offset: 165, - bytes: MemcmpEncodedBytes::Bytes(vec![ACCOUNTTYPE_ACCOUNT]), - encoding: None - }) + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), ], ) .is_none()); @@ -8039,11 +8009,7 @@ pub mod tests { assert!(get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, owner.to_bytes().to_vec())), RpcFilterType::DataSize(165) ], ) @@ -8053,11 +8019,7 @@ pub mod tests { assert!(get_spl_token_owner_filter( &Pubkey::new_unique(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::DataSize(165) ], ) @@ -8065,16 +8027,8 @@ pub mod tests { assert!(get_spl_token_owner_filter( &Pubkey::new_unique(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), - encoding: None - }), - RpcFilterType::Memcmp(Memcmp { - offset: 165, - bytes: MemcmpEncodedBytes::Bytes(vec![ACCOUNTTYPE_ACCOUNT]), - encoding: None - }) + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), ], ) .is_none()); @@ -8088,11 +8042,7 @@ pub mod tests { get_spl_token_mint_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::DataSize(165) ], ) @@ -8105,16 +8055,8 @@ pub mod tests { get_spl_token_mint_filter( &Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), - RpcFilterType::Memcmp(Memcmp { - offset: 165, - bytes: MemcmpEncodedBytes::Bytes(vec![ACCOUNTTYPE_ACCOUNT]), - encoding: None - }) + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), ], ) .unwrap(), 
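For illustration, a minimal sketch (not part of this patch) of how a caller
might combine the new constructors; the SPL Token layout assumptions (owner
field at byte offset 32, 165-byte accounts) come from the tests above, and the
helper name is hypothetical:

    use solana_client::rpc_filter::{Memcmp, RpcFilterType};
    use solana_sdk::pubkey::Pubkey;

    // Hypothetical helper: builds the same owner filters the tests above
    // exercise, without spelling out MemcmpEncodedBytes or `encoding: None`.
    fn spl_token_owner_filters(owner: &Pubkey) -> Vec<RpcFilterType> {
        vec![
            // Base58-encode the owner pubkey and match it at offset 32.
            RpcFilterType::Memcmp(Memcmp::new_base58_encoded(32, owner.as_ref())),
            // SPL Token accounts are exactly 165 bytes long.
            RpcFilterType::DataSize(165),
        ]
    }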
@@ -8126,11 +8068,7 @@ pub mod tests { get_spl_token_mint_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::TokenAccountState, ], ) @@ -8142,16 +8080,8 @@ pub mod tests { assert!(get_spl_token_mint_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), - RpcFilterType::Memcmp(Memcmp { - offset: 165, - bytes: MemcmpEncodedBytes::Bytes(vec![ACCOUNTTYPE_ACCOUNT]), - encoding: None - }) + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), ], ) .is_none()); @@ -8160,11 +8090,7 @@ pub mod tests { assert!(get_spl_token_mint_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 32, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, mint.to_bytes().to_vec())), RpcFilterType::DataSize(165) ], ) @@ -8174,11 +8100,7 @@ pub mod tests { assert!(get_spl_token_mint_filter( &Pubkey::new_unique(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::DataSize(165) ], ) @@ -8186,16 +8108,8 @@ pub mod tests { assert!(get_spl_token_mint_filter( &Pubkey::new_unique(), &[ - RpcFilterType::Memcmp(Memcmp { - offset: 0, - bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().to_vec()), - encoding: None - }), - RpcFilterType::Memcmp(Memcmp { - offset: 165, - bytes: MemcmpEncodedBytes::Bytes(vec![ACCOUNTTYPE_ACCOUNT]), - encoding: None - }) + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), ], ) .is_none()); From f8bdedb596a4259d81aa2b114f2d7795a998763a Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 7 Jul 2022 00:01:13 -0500 Subject: [PATCH 059/100] Make accounts data size tests more robust (#26466) --- runtime/src/bank.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 59219c1bdacf8f..d6402d12e2e755 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -17747,8 +17747,10 @@ pub(crate) mod tests { let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000_000); let mut bank = Bank::new_for_tests(&genesis_config); - bank.set_accounts_data_size_initial_for_tests(INITIAL_ACCOUNTS_DATA_SIZE); bank.activate_feature(&feature_set::cap_accounts_data_len::id()); + bank.set_accounts_data_size_initial_for_tests( + INITIAL_ACCOUNTS_DATA_SIZE - bank.load_accounts_data_size_delta() as u64, + ); let mut i = 0; let result = loop { @@ -18585,10 +18587,13 @@ pub(crate) mod tests { // Test: Subtraction saturates at 0 { let bank = Bank::new_for_tests(&genesis_config); + let initial_data_size = bank.load_accounts_data_size() as i64; let data_size = 567; bank.accounts_data_size_delta_on_chain .store(data_size, Release); - bank.update_accounts_data_size_delta_on_chain(-(data_size + 
1)); + bank.update_accounts_data_size_delta_on_chain( + (initial_data_size + data_size + 1).saturating_neg(), + ); assert_eq!(bank.load_accounts_data_size(), 0); } @@ -19219,11 +19224,18 @@ pub(crate) mod tests { } // Collect rent for real + let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); bank.collect_rent_eagerly(false); + let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); + + let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent as i64 + - accounts_data_size_delta_before_collecting_rent as i64; + assert!(accounts_data_size_delta_delta < 0); + let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize; // Ensure the account is reclaimed by rent collection - // NOTE: Use `<=` here (instead of `==`) since other accounts could + // NOTE: Use `>=` here (instead of `==`) since other accounts could // also be reclaimed by rent collection. - assert!(bank.load_accounts_data_size_delta() <= -(data_size as i64)); + assert!(reclaimed_data_size >= data_size); } } From 9723a33d2f09cb770883c605ee96ce64666722d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 7 Jul 2022 09:56:15 +0200 Subject: [PATCH 060/100] Uses `BorrowedAccount` instead of `AccountSharedData` in program-test. (#26456) --- program-test/src/lib.rs | 80 ++++++++++++++++++++++++++++------------- 1 file changed, 55 insertions(+), 25 deletions(-) diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 6b9ce41ec96f98..8df3c9ac1b6f90 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -21,7 +21,7 @@ use { genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo}, }, solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, + account::{Account, AccountSharedData, ReadableAccount}, account_info::AccountInfo, clock::Slot, entrypoint::{ProgramResult, SUCCESS}, @@ -174,16 +174,21 @@ pub fn builtin_process_instruction( stable_log::program_success(&log_collector, program_id); // Commit AccountInfo changes back into KeyedAccounts - for (instruction_account_index, (_key, _owner, lamports, data)) in deduplicated_indices + for (instruction_account_index, (_key, owner, lamports, data)) in deduplicated_indices .into_iter() .zip(account_copies.into_iter()) { let mut borrowed_account = instruction_context .try_borrow_instruction_account(transaction_context, instruction_account_index)?; - if borrowed_account.is_writable() { + if borrowed_account.get_lamports() != lamports { borrowed_account.set_lamports(lamports)?; + } + if borrowed_account.get_data() != data { borrowed_account.set_data(&data)?; } + if borrowed_account.get_owner() != &owner { + borrowed_account.set_owner(owner.as_ref())?; + } } Ok(()) @@ -270,10 +275,13 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { .unwrap(); // Copy caller's account_info modifications into invoke_context accounts + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context + .get_current_instruction_context() + .unwrap(); let mut account_indices = Vec::with_capacity(instruction_accounts.len()); for instruction_account in instruction_accounts.iter() { - let account_key = invoke_context - .transaction_context + let account_key = transaction_context .get_key_of_account_at_index(instruction_account.index_in_transaction) .unwrap(); let account_info_index = account_infos @@ -282,19 +290,39 @@ impl 
solana_sdk::program_stubs::SyscallStubs for SyscallStubs { .ok_or(InstructionError::MissingAccount) .unwrap(); let account_info = &account_infos[account_info_index]; - let mut account = invoke_context - .transaction_context + let mut borrowed_account = instruction_context + .try_borrow_instruction_account( + transaction_context, + instruction_account.index_in_caller, + ) + .unwrap(); + if borrowed_account.get_lamports() != account_info.lamports() { + borrowed_account + .set_lamports(account_info.lamports()) + .unwrap(); + } + let account_info_data = account_info.try_borrow_data().unwrap(); + if borrowed_account.get_data() != *account_info_data { + borrowed_account.set_data(&account_info_data).unwrap(); + } + if borrowed_account.is_executable() != account_info.executable { + borrowed_account + .set_executable(account_info.executable) + .unwrap(); + } + if borrowed_account.get_owner() != account_info.owner { + borrowed_account + .set_owner(account_info.owner.as_ref()) + .unwrap(); + } + drop(borrowed_account); + let account = transaction_context .get_account_at_index(instruction_account.index_in_transaction) .unwrap() - .borrow_mut(); - account.copy_into_owner_from_slice(account_info.owner.as_ref()); - account.set_data_from_slice(&account_info.try_borrow_data().unwrap()); - account.set_lamports(account_info.lamports()); - account.set_executable(account_info.executable); - account.set_rent_epoch(account_info.rent_epoch); + .borrow(); + assert_eq!(account.rent_epoch(), account_info.rent_epoch); if instruction_account.is_writable { - account_indices - .push((instruction_account.index_in_transaction, account_info_index)); + account_indices.push((instruction_account.index_in_caller, account_info_index)); } } @@ -310,23 +338,25 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { .map_err(|err| ProgramError::try_from(err).unwrap_or_else(|err| panic!("{}", err)))?; // Copy invoke_context accounts modifications into caller's account_info - for (index_in_transaction, account_info_index) in account_indices.into_iter() { - let account = invoke_context - .transaction_context - .get_account_at_index(index_in_transaction) - .unwrap() - .borrow_mut(); + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context + .get_current_instruction_context() + .unwrap(); + for (index_in_caller, account_info_index) in account_indices.into_iter() { + let borrowed_account = instruction_context + .try_borrow_instruction_account(transaction_context, index_in_caller) + .unwrap(); let account_info = &account_infos[account_info_index]; - **account_info.try_borrow_mut_lamports().unwrap() = account.lamports(); + **account_info.try_borrow_mut_lamports().unwrap() = borrowed_account.get_lamports(); let mut data = account_info.try_borrow_mut_data()?; - let new_data = account.data(); - if account_info.owner != account.owner() { + let new_data = borrowed_account.get_data(); + if account_info.owner != borrowed_account.get_owner() { // TODO Figure out a better way to allow the System Program to set the account owner #[allow(clippy::transmute_ptr_to_ptr)] #[allow(mutable_transmutes)] let account_info_mut = unsafe { transmute::<&Pubkey, &mut Pubkey>(account_info.owner) }; - *account_info_mut = *account.owner(); + *account_info_mut = *borrowed_account.get_owner(); } // TODO: Figure out how to allow the System Program to resize the account data assert!( From 6f4838719b2127b7356b3e91ca1983c4e7e16c08 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 7 Jul 2022 11:13:13 
+0000
Subject: [PATCH 061/100] decouples shreds sig-verify from tpu vote and
 transaction packets (#26300)

Shreds have a different workload and traffic pattern from TPU vote and
transaction packets. Some of the recent changes to SigVerifyStage are not
suitable, or at least not optimal, for shreds sig-verify; e.g. random
discard, dedup with false positives, discard excess by IP-address, ...

The SigVerifier trait is meant to abstract out the distinctions between the
two pipelines, but in practice it has led to more verbose and convoluted
code.

This commit discards the SigVerifier implementation for shreds sig-verify
and instead provides a standalone stage for verifying shreds signatures.
---
 core/src/shred_fetch_stage.rs |  10 +-
 core/src/sigverify_shreds.rs  | 280 +++++++++++++++++++++-------------
 core/src/tvu.rs               |  28 ++--
 3 files changed, 192 insertions(+), 126 deletions(-)

diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs
index a0e836724acdeb..78b5e15b95efa8 100644
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -28,7 +28,7 @@ impl ShredFetchStage {
     // updates packets received on a channel and sends them on another channel
     fn modify_packets(
         recvr: PacketBatchReceiver,
-        sendr: Sender<Vec<PacketBatch>>,
+        sendr: Sender<PacketBatch>,
         bank_forks: &RwLock<BankForks>,
         shred_version: u16,
         name: &'static str,
@@ -46,7 +46,7 @@ impl ShredFetchStage {
         let mut stats = ShredFetchStats::default();
         let mut packet_hasher = PacketHasher::default();

-        while let Some(mut packet_batch) = recvr.iter().next() {
+        for mut packet_batch in recvr {
             if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT {
                 last_updated = Instant::now();
                 packet_hasher.reset();
@@ -79,7 +79,7 @@ impl ShredFetchStage {
                 }
             }
             stats.maybe_submit(name, STATS_SUBMIT_CADENCE);
-            if sendr.send(vec![packet_batch]).is_err() {
+            if sendr.send(packet_batch).is_err() {
                 break;
             }
         }
@@ -88,7 +88,7 @@ impl ShredFetchStage {
     fn packet_modifier(
         sockets: Vec<Arc<UdpSocket>>,
         exit: &Arc<AtomicBool>,
-        sender: Sender<Vec<PacketBatch>>,
+        sender: Sender<PacketBatch>,
         recycler: PacketBatchRecycler,
         bank_forks: Arc<RwLock<BankForks>>,
         shred_version: u16,
@@ -132,7 +132,7 @@ impl ShredFetchStage {
         sockets: Vec<Arc<UdpSocket>>,
         forward_sockets: Vec<Arc<UdpSocket>>,
         repair_socket: Arc<UdpSocket>,
-        sender: Sender<Vec<PacketBatch>>,
+        sender: Sender<PacketBatch>,
         shred_version: u16,
         bank_forks: Arc<RwLock<BankForks>>,
         exit: &Arc<AtomicBool>,
diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs
index b32d045bc39682..f9a50ab8b2a954 100644
--- a/core/src/sigverify_shreds.rs
+++ b/core/src/sigverify_shreds.rs
@@ -1,11 +1,5 @@
-#![allow(clippy::implicit_hasher)]
-
 use {
-    crate::{
-        sigverify,
-        sigverify_stage::{SigVerifier, SigVerifyServiceError},
-    },
-    crossbeam_channel::Sender,
+    crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender},
     solana_ledger::{
         leader_schedule_cache::LeaderScheduleCache, shred, sigverify_shreds::verify_shreds_gpu,
     },
@@ -18,87 +12,115 @@ use {
             atomic::{AtomicBool, Ordering},
             Arc, RwLock,
         },
+        thread::{Builder, JoinHandle},
+        time::{Duration, Instant},
     },
 };

-#[derive(Clone)]
-pub struct ShredSigVerifier {
-    pubkey: Pubkey, // TODO: Hot swap will change pubkey.
+#[allow(clippy::enum_variant_names)]
+enum Error {
+    RecvDisconnected,
+    RecvTimeout,
+    SendError,
+}
+
+pub(crate) fn spawn_shred_sigverify(
+    // TODO: Hot swap will change pubkey.
+ self_pubkey: Pubkey, bank_forks: Arc>, leader_schedule_cache: Arc, - recycler_cache: RecyclerCache, + shred_fetch_receiver: Receiver, retransmit_sender: Sender>>, - packet_sender: Sender>, + verified_sender: Sender>, turbine_disabled: Arc, +) -> JoinHandle<()> { + let recycler_cache = RecyclerCache::warmed(); + let mut stats = ShredSigVerifyStats::new(Instant::now()); + Builder::new() + .name("shred-verifier".to_string()) + .spawn(move || loop { + match run_shred_sigverify( + &self_pubkey, + &bank_forks, + &leader_schedule_cache, + &recycler_cache, + &shred_fetch_receiver, + &retransmit_sender, + &verified_sender, + &turbine_disabled, + &mut stats, + ) { + Ok(()) => (), + Err(Error::RecvTimeout) => (), + Err(Error::RecvDisconnected) => break, + Err(Error::SendError) => break, + } + stats.maybe_submit(); + }) + .unwrap() } -impl ShredSigVerifier { - pub fn new( - pubkey: Pubkey, - bank_forks: Arc>, - leader_schedule_cache: Arc, - retransmit_sender: Sender>>, - packet_sender: Sender>, - turbine_disabled: Arc, - ) -> Self { - sigverify::init(); - Self { - pubkey, - bank_forks, - leader_schedule_cache, - recycler_cache: RecyclerCache::warmed(), - retransmit_sender, - packet_sender, - turbine_disabled, - } +fn run_shred_sigverify( + self_pubkey: &Pubkey, + bank_forks: &RwLock, + leader_schedule_cache: &LeaderScheduleCache, + recycler_cache: &RecyclerCache, + shred_fetch_receiver: &Receiver, + retransmit_sender: &Sender>>, + verified_sender: &Sender>, + turbine_disabled: &AtomicBool, + stats: &mut ShredSigVerifyStats, +) -> Result<(), Error> { + const RECV_TIMEOUT: Duration = Duration::from_secs(1); + let packets = shred_fetch_receiver.recv_timeout(RECV_TIMEOUT)?; + let mut packets: Vec<_> = std::iter::once(packets) + .chain(shred_fetch_receiver.try_iter()) + .collect(); + let now = Instant::now(); + stats.num_iters += 1; + stats.num_packets += packets.iter().map(PacketBatch::len).sum::(); + stats.num_discards_pre += count_discards(&packets); + verify_packets( + self_pubkey, + bank_forks, + leader_schedule_cache, + recycler_cache, + &mut packets, + ); + stats.num_discards_post += count_discards(&packets); + // Exclude repair packets from retransmit. + let shreds: Vec<_> = packets + .iter() + .flat_map(PacketBatch::iter) + .filter(|packet| !packet.meta.discard() && !packet.meta.repair()) + .filter_map(shred::layout::get_shred) + .map(<[u8]>::to_vec) + .collect(); + stats.num_retransmit_shreds += shreds.len(); + if !turbine_disabled.load(Ordering::Relaxed) { + retransmit_sender.send(shreds)?; + verified_sender.send(packets)?; } + stats.elapsed_micros += now.elapsed().as_micros() as u64; + Ok(()) } -impl SigVerifier for ShredSigVerifier { - type SendType = Vec; - - fn send_packets( - &mut self, - packet_batches: Vec, - ) -> Result<(), SigVerifyServiceError> { - if self.turbine_disabled.load(Ordering::Relaxed) { - return Ok(()); - } - // Exclude repair packets from retransmit. - // TODO: return the error here! 
- let _ = self.retransmit_sender.send( - packet_batches - .iter() - .flat_map(PacketBatch::iter) - .filter(|packet| !packet.meta.discard() && !packet.meta.repair()) - .filter_map(shred::layout::get_shred) - .map(<[u8]>::to_vec) - .collect(), - ); - self.packet_sender.send(packet_batches)?; - Ok(()) - } - - fn verify_batches( - &self, - mut batches: Vec, - _valid_packets: usize, - ) -> Vec { - let working_bank = self.bank_forks.read().unwrap().working_bank(); - let leader_slots: HashMap = get_slot_leaders( - &self.pubkey, - &mut batches, - &self.leader_schedule_cache, - &working_bank, - ) - .into_iter() - .filter_map(|(slot, pubkey)| Some((slot, pubkey?.to_bytes()))) - .chain(std::iter::once((Slot::MAX, [0u8; 32]))) - .collect(); - let r = verify_shreds_gpu(&batches, &leader_slots, &self.recycler_cache); - solana_perf::sigverify::mark_disabled(&mut batches, &r); - batches - } +fn verify_packets( + self_pubkey: &Pubkey, + bank_forks: &RwLock, + leader_schedule_cache: &LeaderScheduleCache, + recycler_cache: &RecyclerCache, + packets: &mut [PacketBatch], +) { + let working_bank = bank_forks.read().unwrap().working_bank(); + let leader_slots: HashMap = + get_slot_leaders(self_pubkey, packets, leader_schedule_cache, &working_bank) + .into_iter() + .filter_map(|(slot, pubkey)| Some((slot, pubkey?.to_bytes()))) + .chain(std::iter::once((Slot::MAX, [0u8; 32]))) + .collect(); + let out = verify_shreds_gpu(packets, &leader_slots, recycler_cache); + solana_perf::sigverify::mark_disabled(packets, &out); } // Returns pubkey of leaders for shred slots refrenced in the packets. @@ -139,11 +161,75 @@ fn get_slot_leaders( leaders } +fn count_discards(packets: &[PacketBatch]) -> usize { + packets + .iter() + .flat_map(PacketBatch::iter) + .filter(|packet| packet.meta.discard()) + .count() +} + +impl From for Error { + fn from(err: RecvTimeoutError) -> Self { + match err { + RecvTimeoutError::Timeout => Self::RecvTimeout, + RecvTimeoutError::Disconnected => Self::RecvDisconnected, + } + } +} + +impl From> for Error { + fn from(_: SendError) -> Self { + Self::SendError + } +} + +struct ShredSigVerifyStats { + since: Instant, + num_iters: usize, + num_packets: usize, + num_discards_pre: usize, + num_discards_post: usize, + num_retransmit_shreds: usize, + elapsed_micros: u64, +} + +impl ShredSigVerifyStats { + const METRICS_SUBMIT_CADENCE: Duration = Duration::from_secs(2); + + fn new(now: Instant) -> Self { + Self { + since: now, + num_iters: 0usize, + num_packets: 0usize, + num_discards_pre: 0usize, + num_discards_post: 0usize, + num_retransmit_shreds: 0usize, + elapsed_micros: 0u64, + } + } + + fn maybe_submit(&mut self) { + if self.since.elapsed() <= Self::METRICS_SUBMIT_CADENCE { + return; + } + datapoint_info!( + "shred_sigverify", + ("num_iters", self.num_iters, i64), + ("num_packets", self.num_packets, i64), + ("num_discards_pre", self.num_discards_pre, i64), + ("num_discards_post", self.num_discards_post, i64), + ("num_retransmit_shreds", self.num_retransmit_shreds, i64), + ("elapsed_micros", self.elapsed_micros, i64), + ); + *self = Self::new(Instant::now()); + } +} + #[cfg(test)] -pub mod tests { +mod tests { use { super::*, - crossbeam_channel::unbounded, solana_ledger::{ genesis_utils::create_genesis_config_with_leader, shred::{Shred, ShredFlags}, @@ -160,18 +246,8 @@ pub mod tests { let bank = Bank::new_for_tests( &create_genesis_config_with_leader(100, &leader_pubkey, 10).genesis_config, ); - let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let bf = 
Arc::new(RwLock::new(BankForks::new(bank))); - let (sender, receiver) = unbounded(); - let (retransmit_sender, _retransmit_receiver) = unbounded(); - let mut verifier = ShredSigVerifier::new( - Pubkey::new_unique(), - bf, - cache, - retransmit_sender, - sender, - Arc::::default(), // turbine_disabled - ); + let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank); + let bank_forks = RwLock::new(BankForks::new(bank)); let batch_size = 2; let mut batch = PacketBatch::with_capacity(batch_size); batch.resize(batch_size, Packet::default()); @@ -206,20 +282,14 @@ pub mod tests { batches[0][1].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); batches[0][1].meta.size = shred.payload().len(); - let num_packets = solana_perf::sigverify::count_packets_in_batches(&batches); - let rv = verifier.verify_batches(batches, num_packets); - assert!(!rv[0][0].meta.discard()); - assert!(rv[0][1].meta.discard()); - - verifier.send_packets(rv.clone()).unwrap(); - let received_packets = receiver.recv().unwrap(); - assert_eq!(received_packets.len(), rv.len()); - for (received_packet_batch, original_packet_batch) in received_packets.iter().zip(rv.iter()) - { - assert_eq!( - received_packet_batch.iter().collect::>(), - original_packet_batch.iter().collect::>() - ); - } + verify_packets( + &Pubkey::new_unique(), // self_pubkey + &bank_forks, + &leader_schedule_cache, + &RecyclerCache::warmed(), + &mut batches, + ); + assert!(!batches[0][0].meta.discard()); + assert!(batches[0][1].meta.discard()); } } diff --git a/core/src/tvu.rs b/core/src/tvu.rs index fe7d1d94b0f6b6..e6deed99fe2ac8 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -20,8 +20,7 @@ use { retransmit_stage::RetransmitStage, rewards_recorder_service::RewardsRecorderSender, shred_fetch_stage::ShredFetchStage, - sigverify_shreds::ShredSigVerifier, - sigverify_stage::SigVerifyStage, + sigverify_shreds, tower_storage::TowerStorage, validator::ProcessBlockStore, voting_service::VotingService, @@ -56,13 +55,13 @@ use { collections::HashSet, net::UdpSocket, sync::{atomic::AtomicBool, Arc, RwLock}, - thread, + thread::{self, JoinHandle}, }, }; pub struct Tvu { fetch_stage: ShredFetchStage, - sigverify_stage: SigVerifyStage, + shred_sigverify: JoinHandle<()>, retransmit_stage: RetransmitStage, window_service: WindowService, cluster_slots_service: ClusterSlotsService, @@ -163,17 +162,14 @@ impl Tvu { let (verified_sender, verified_receiver) = unbounded(); let (retransmit_sender, retransmit_receiver) = unbounded(); - let sigverify_stage = SigVerifyStage::new( + let shred_sigverify = sigverify_shreds::spawn_shred_sigverify( + cluster_info.id(), + bank_forks.clone(), + leader_schedule_cache.clone(), fetch_receiver, - ShredSigVerifier::new( - cluster_info.id(), - bank_forks.clone(), - leader_schedule_cache.clone(), - retransmit_sender.clone(), - verified_sender, - turbine_disabled, - ), - "shred-verifier", + retransmit_sender.clone(), + verified_sender, + turbine_disabled, ); let retransmit_stage = RetransmitStage::new( @@ -319,7 +315,7 @@ impl Tvu { Tvu { fetch_stage, - sigverify_stage, + shred_sigverify, retransmit_stage, window_service, cluster_slots_service, @@ -338,7 +334,7 @@ impl Tvu { self.window_service.join()?; self.cluster_slots_service.join()?; self.fetch_stage.join()?; - self.sigverify_stage.join()?; + self.shred_sigverify.join()?; if self.ledger_cleanup_service.is_some() { self.ledger_cleanup_service.unwrap().join()?; } From b3a47de1ce2fae7f185b1014df1685f80c43cd55 Mon Sep 17 00:00:00 2001 From: "Jeff Washington 
(jwash)" Date: Thu, 7 Jul 2022 08:39:57 -0500 Subject: [PATCH 062/100] improve startup stats for rent payers (#26432) --- runtime/src/accounts_db.rs | 121 ++++++++++++++++++++++++++----------- 1 file changed, 86 insertions(+), 35 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 07cc9ef3d9bc7c..efd3eea5267208 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -251,8 +251,9 @@ pub struct IndexGenerationInfo { struct SlotIndexGenerationInfo { insert_time_us: u64, num_accounts: u64, - num_accounts_rent_exempt: u64, + num_accounts_rent_paying: usize, accounts_data_len: u64, + amount_to_top_off_rent: u64, } #[derive(Default, Debug)] @@ -267,7 +268,8 @@ struct GenerateIndexTimings { pub storage_size_storages_us: u64, pub storage_size_accounts_map_flatten_us: u64, pub index_flush_us: u64, - pub rent_exempt: u64, + pub rent_paying: AtomicUsize, + pub amount_to_top_off_rent: AtomicU64, pub total_duplicates: u64, pub accounts_data_len_dedup_time_us: u64, } @@ -306,8 +308,13 @@ impl GenerateIndexTimings { ), ("index_flush_us", self.index_flush_us as i64, i64), ( - "total_rent_paying_with_duplicates", - self.total_duplicates.saturating_sub(self.rent_exempt) as i64, + "total_rent_paying", + self.rent_paying.load(Ordering::Relaxed) as i64, + i64 + ), + ( + "amount_to_top_off_rent", + self.amount_to_top_off_rent.load(Ordering::Relaxed) as i64, i64 ), ( @@ -7894,6 +7901,21 @@ impl AccountsDb { accounts_map } + /// return Some(lamports_to_top_off) if 'account' would collect rent + fn stats_for_rent_payers( + pubkey: &Pubkey, + account: &T, + rent_collector: &RentCollector, + ) -> Option { + (rent_collector.should_collect_rent(pubkey, account) + && !rent_collector.get_rent_due(account).is_exempt()) + .then(|| { + let min_balance = rent_collector.rent.minimum_balance(account.data().len()); + // return lamports required to top off this account to make it rent exempt + min_balance.saturating_sub(account.lamports()) + }) + } + fn generate_index_for_slot<'a>( &self, accounts_map: GenerateIndexAccountsMap<'a>, @@ -7907,8 +7929,9 @@ impl AccountsDb { let secondary = !self.account_indexes.is_empty(); let mut accounts_data_len = 0; - let mut num_accounts_rent_exempt = 0; + let mut num_accounts_rent_paying = 0; let num_accounts = accounts_map.len(); + let mut amount_to_top_off_rent = 0; let items = accounts_map.into_iter().map( |( pubkey, @@ -7929,10 +7952,11 @@ impl AccountsDb { accounts_data_len += stored_account.data().len() as u64; } - if !rent_collector.should_collect_rent(&pubkey, &stored_account) - || rent_collector.get_rent_due(&stored_account).is_exempt() + if let Some(amount_to_top_off_rent_this_account) = + Self::stats_for_rent_payers(&pubkey, &stored_account, rent_collector) { - num_accounts_rent_exempt += 1; + amount_to_top_off_rent += amount_to_top_off_rent_this_account; + num_accounts_rent_paying += 1; } ( @@ -7959,8 +7983,9 @@ impl AccountsDb { SlotIndexGenerationInfo { insert_time_us, num_accounts: num_accounts as u64, - num_accounts_rent_exempt, + num_accounts_rent_paying, accounts_data_len, + amount_to_top_off_rent, } } @@ -8180,7 +8205,8 @@ impl AccountsDb { let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot let mut index_time = Measure::start("index"); let insertion_time_us = AtomicU64::new(0); - let rent_exempt = AtomicU64::new(0); + let rent_paying = AtomicUsize::new(0); + let amount_to_top_off_rent = AtomicU64::new(0); let total_duplicates = 
AtomicU64::new(0); let storage_info_timings = Mutex::new(GenerateIndexTimings::default()); let scan_time: u64 = slots @@ -8214,10 +8240,13 @@ impl AccountsDb { let SlotIndexGenerationInfo { insert_time_us: insert_us, num_accounts: total_this_slot, - num_accounts_rent_exempt: rent_exempt_this_slot, + num_accounts_rent_paying: rent_paying_this_slot, accounts_data_len: accounts_data_len_this_slot, + amount_to_top_off_rent: amount_to_top_off_rent_this_slot, } = self.generate_index_for_slot(accounts_map, slot, &rent_collector); - rent_exempt.fetch_add(rent_exempt_this_slot, Ordering::Relaxed); + rent_paying.fetch_add(rent_paying_this_slot, Ordering::Relaxed); + amount_to_top_off_rent + .fetch_add(amount_to_top_off_rent_this_slot, Ordering::Relaxed); total_duplicates.fetch_add(total_this_slot, Ordering::Relaxed); accounts_data_len .fetch_add(accounts_data_len_this_slot, Ordering::Relaxed); @@ -8281,7 +8310,7 @@ impl AccountsDb { m.stop(); index_flush_us = m.as_us(); - // this has to happen before get_duplicate_accounts_slots_and_data_len below + // this has to happen before visit_duplicate_pubkeys_during_startup below // get duplicate keys from acct idx. We have to wait until we've finished flushing. for (slot, key) in self .accounts_index @@ -8297,6 +8326,25 @@ impl AccountsDb { } } } + + let storage_info_timings = storage_info_timings.into_inner().unwrap(); + let mut timings = GenerateIndexTimings { + index_flush_us, + scan_time, + index_time: index_time.as_us(), + insertion_time_us: insertion_time_us.load(Ordering::Relaxed), + min_bin_size, + max_bin_size, + total_items, + rent_paying, + amount_to_top_off_rent, + total_duplicates: total_duplicates.load(Ordering::Relaxed), + storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us, + storage_size_accounts_map_flatten_us: storage_info_timings + .storage_size_accounts_map_flatten_us, + ..GenerateIndexTimings::default() + }; + // subtract data.len() from accounts_data_len for all old accounts that are in the index twice let mut accounts_data_len_dedup_timer = Measure::start("handle accounts data len duplicates"); @@ -8313,8 +8361,12 @@ impl AccountsDb { .collect::>() .par_chunks(4096) .map(|pubkeys| { - let (count, uncleaned_roots_this_group) = - self.get_duplicate_accounts_slots_and_data_len(pubkeys); + let (count, uncleaned_roots_this_group) = self + .visit_duplicate_pubkeys_during_startup( + pubkeys, + &rent_collector, + &timings, + ); let mut uncleaned_roots = uncleaned_roots.lock().unwrap(); uncleaned_roots_this_group.into_iter().for_each(|slot| { uncleaned_roots.insert(slot); @@ -8329,25 +8381,7 @@ impl AccountsDb { ); } accounts_data_len_dedup_timer.stop(); - - let storage_info_timings = storage_info_timings.into_inner().unwrap(); - - let mut timings = GenerateIndexTimings { - index_flush_us, - scan_time, - index_time: index_time.as_us(), - insertion_time_us: insertion_time_us.load(Ordering::Relaxed), - min_bin_size, - max_bin_size, - total_items, - rent_exempt: rent_exempt.load(Ordering::Relaxed), - total_duplicates: total_duplicates.load(Ordering::Relaxed), - storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us, - storage_size_accounts_map_flatten_us: storage_info_timings - .storage_size_accounts_map_flatten_us, - accounts_data_len_dedup_time_us: accounts_data_len_dedup_timer.as_us(), - ..GenerateIndexTimings::default() - }; + timings.accounts_data_len_dedup_time_us = accounts_data_len_dedup_timer.as_us(); if pass == 0 { let uncleaned_roots = uncleaned_roots.into_inner().unwrap(); @@ 
-8397,14 +8431,19 @@ impl AccountsDb { /// Used during generate_index() to: /// 1. get the _duplicate_ accounts data len from the given pubkeys /// 2. get the slots that contained duplicate pubkeys + /// 3. update rent stats /// Note this should only be used when ALL entries in the accounts index are roots. /// returns (data len sum of all older duplicates, slots that contained duplicate pubkeys) - fn get_duplicate_accounts_slots_and_data_len( + fn visit_duplicate_pubkeys_during_startup( &self, pubkeys: &[Pubkey], + rent_collector: &RentCollector, + timings: &GenerateIndexTimings, ) -> (u64, HashSet) { let mut accounts_data_len_from_duplicates = 0; let mut uncleaned_slots = HashSet::::default(); + let mut removed_rent_paying = 0; + let mut removed_top_off = 0; pubkeys.iter().for_each(|pubkey| { if let Some(entry) = self.accounts_index.get_account_read_entry(pubkey) { let slot_list = entry.slot_list(); @@ -8431,9 +8470,21 @@ impl AccountsDb { ); let loaded_account = accessor.check_and_get_loaded_account(); accounts_data_len_from_duplicates += loaded_account.data().len(); + if let Some(lamports_to_top_off) = + Self::stats_for_rent_payers(pubkey, &loaded_account, rent_collector) + { + removed_rent_paying += 1; + removed_top_off += lamports_to_top_off; + } }); } }); + timings + .rent_paying + .fetch_sub(removed_rent_paying, Ordering::Relaxed); + timings + .amount_to_top_off_rent + .fetch_sub(removed_top_off, Ordering::Relaxed); (accounts_data_len_from_duplicates as u64, uncleaned_slots) } From eb6a722ebac60456b9a030ab803fee5f4e616700 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 7 Jul 2022 15:45:19 +0100 Subject: [PATCH 063/100] Clean up `nonce_must_be_writable` feature (#26444) Clean up nonce_must_be_writable feature --- rpc/src/rpc.rs | 8 ++------ runtime/src/bank.rs | 8 ++------ runtime/src/nonce_keyed_account.rs | 26 +++++--------------------- sdk/program/src/message/sanitized.rs | 4 ++-- sdk/src/transaction/sanitized.rs | 4 ++-- 5 files changed, 13 insertions(+), 37 deletions(-) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 62e163575e49f6..330e8ceef7e84d 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -61,7 +61,7 @@ use { epoch_info::EpochInfo, epoch_schedule::EpochSchedule, exit::Exit, - feature_set::{self, nonce_must_be_writable}, + feature_set, fee_calculator::FeeCalculator, hash::Hash, message::{Message, SanitizedMessage}, @@ -3614,11 +3614,7 @@ pub mod rpc_full { .unwrap_or(0); let durable_nonce_info = transaction - .get_durable_nonce( - preflight_bank - .feature_set - .is_active(&nonce_must_be_writable::id()), - ) + .get_durable_nonce() .map(|&pubkey| (pubkey, *transaction.message().recent_blockhash())); if durable_nonce_info.is_some() { // While it uses a defined constant, this last_valid_block_height value is chosen arbitrarily. 
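For illustration, a minimal sketch (not part of this patch) of the call-site
shape after this cleanup, mirroring the rpc.rs hunk above; the helper name is
hypothetical:

    use solana_sdk::{hash::Hash, pubkey::Pubkey, transaction::SanitizedTransaction};

    // With the feature gate removed, get_durable_nonce() unconditionally
    // requires the nonce account referenced by the marker instruction to be
    // writable, so callers no longer thread a feature-activation flag through.
    fn durable_nonce_info(tx: &SanitizedTransaction) -> Option<(Pubkey, Hash)> {
        tx.get_durable_nonce()
            .map(|&nonce_pubkey| (nonce_pubkey, *tx.message().recent_blockhash()))
    }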
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d6402d12e2e755..15512c23df09a7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -109,7 +109,7 @@ use { feature, feature_set::{ self, add_set_compute_unit_price_ix, default_units_per_instruction, - disable_fee_calculator, nonce_must_be_writable, FeatureSet, + disable_fee_calculator, FeatureSet, }, fee::FeeStructure, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -4134,8 +4134,7 @@ impl Bank { } fn check_message_for_nonce(&self, message: &SanitizedMessage) -> Option { - let nonce_address = - message.get_durable_nonce(self.feature_set.is_active(&nonce_must_be_writable::id()))?; + let nonce_address = message.get_durable_nonce()?; let nonce_account = self.get_account_with_fixed_root(nonce_address)?; let nonce_data = nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?; @@ -13616,9 +13615,6 @@ pub(crate) mod tests { FeatureSet::all_enabled(), ) .unwrap(); - Arc::get_mut(&mut bank) - .unwrap() - .activate_feature(&feature_set::nonce_must_be_writable::id()); let custodian_pubkey = custodian_keypair.pubkey(); let nonce_pubkey = nonce_keypair.pubkey(); diff --git a/runtime/src/nonce_keyed_account.rs b/runtime/src/nonce_keyed_account.rs index f6f02225372d0d..59ce1a808f7ff8 100644 --- a/runtime/src/nonce_keyed_account.rs +++ b/runtime/src/nonce_keyed_account.rs @@ -1,7 +1,7 @@ use { solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, solana_sdk::{ - feature_set::{self, nonce_must_be_writable}, + feature_set, instruction::{checked_add, InstructionError}, nonce::{ self, @@ -25,11 +25,7 @@ pub fn advance_nonce_account( .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); - if invoke_context - .feature_set - .is_active(&nonce_must_be_writable::id()) - && !account.is_writable() - { + if !account.is_writable() { ic_msg!( invoke_context, "Advance nonce account: Account {} must be writeable", @@ -98,11 +94,7 @@ pub fn withdraw_nonce_account( .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); - if invoke_context - .feature_set - .is_active(&nonce_must_be_writable::id()) - && !from.is_writable() - { + if !from.is_writable() { ic_msg!( invoke_context, "Withdraw nonce account: Account {} must be writeable", @@ -184,11 +176,7 @@ pub fn initialize_nonce_account( .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); - if invoke_context - .feature_set - .is_active(&nonce_must_be_writable::id()) - && !account.is_writable() - { + if !account.is_writable() { ic_msg!( invoke_context, "Initialize nonce account: Account {} must be writeable", @@ -242,11 +230,7 @@ pub fn authorize_nonce_account( .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); - if invoke_context - .feature_set - .is_active(&nonce_must_be_writable::id()) - && !account.is_writable() - { + if !account.is_writable() { ic_msg!( invoke_context, "Authorize nonce account: Account {} must be writeable", diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 593a4ac90f08a0..5fc64cd05f8f58 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -241,7 +241,7 @@ impl SanitizedMessage { } /// If the message uses a durable nonce, return the pubkey of the nonce account - pub fn get_durable_nonce(&self, nonce_must_be_writable: bool) -> Option<&Pubkey> { + pub fn get_durable_nonce(&self) -> Option<&Pubkey> { self.instructions() 
.get(NONCED_TX_MARKER_IX_INDEX as usize) .filter( @@ -259,7 +259,7 @@ impl SanitizedMessage { .and_then(|ix| { ix.accounts.first().and_then(|idx| { let idx = *idx as usize; - if nonce_must_be_writable && !self.is_writable(idx) { + if !self.is_writable(idx) { None } else { self.account_keys().get(idx) diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 4c813e6b76a4fe..35a379f724da0a 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -250,8 +250,8 @@ impl SanitizedTransaction { } /// If the transaction uses a durable nonce, return the pubkey of the nonce account - pub fn get_durable_nonce(&self, nonce_must_be_writable: bool) -> Option<&Pubkey> { - self.message.get_durable_nonce(nonce_must_be_writable) + pub fn get_durable_nonce(&self) -> Option<&Pubkey> { + self.message.get_durable_nonce() } /// Return the serialized message data to sign. From 8416090732033f82ead11e0b971e161605da703f Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 7 Jul 2022 17:14:46 +0100 Subject: [PATCH 064/100] Clean up `enable_durable_nonce` feature (#26470) Clean up enable_durable_nonce feature --- runtime/src/bank.rs | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 15512c23df09a7..8dcee99b89ec1d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4058,9 +4058,6 @@ impl Bank { max_age: usize, error_counters: &mut TransactionErrorMetrics, ) -> Vec { - let enable_durable_nonce = self - .feature_set - .is_active(&feature_set::enable_durable_nonce::id()); let hash_queue = self.blockhash_queue.read().unwrap(); let last_blockhash = hash_queue.last_hash(); let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); @@ -4071,11 +4068,9 @@ impl Bank { let recent_blockhash = tx.message().recent_blockhash(); if hash_queue.is_hash_valid_for_age(recent_blockhash, max_age) { (Ok(()), None) - } else if let Some((address, account)) = self.check_transaction_for_nonce( - tx, - enable_durable_nonce, - &next_durable_nonce, - ) { + } else if let Some((address, account)) = + self.check_transaction_for_nonce(tx, &next_durable_nonce) + { (Ok(()), Some(NoncePartial::new(address, account))) } else { error_counters.blockhash_not_found += 1; @@ -4152,16 +4147,14 @@ impl Bank { fn check_transaction_for_nonce( &self, tx: &SanitizedTransaction, - enable_durable_nonce: bool, next_durable_nonce: &DurableNonce, ) -> Option { - let durable_nonces_enabled = enable_durable_nonce - || self.slot() <= 135986379 - || self.cluster_type() != ClusterType::MainnetBeta; let nonce_is_advanceable = tx.message().recent_blockhash() != next_durable_nonce.as_hash(); - (durable_nonces_enabled && nonce_is_advanceable) - .then(|| self.check_message_for_nonce(tx.message())) - .flatten() + if nonce_is_advanceable { + self.check_message_for_nonce(tx.message()) + } else { + None + } } pub fn check_transactions( @@ -12821,7 +12814,6 @@ pub(crate) mod tests { assert_eq!( bank.check_transaction_for_nonce( &SanitizedTransaction::from_transaction_for_tests(tx), - true, // enable_durable_nonce &bank.next_durable_nonce(), ), Some((nonce_pubkey, nonce_account)) @@ -12855,7 +12847,6 @@ pub(crate) mod tests { assert!(bank .check_transaction_for_nonce( &SanitizedTransaction::from_transaction_for_tests(tx,), - true, // enable_durable_nonce &bank.next_durable_nonce(), ) .is_none()); @@ -12889,7 +12880,6 @@ pub(crate) mod tests { assert!(bank .check_transaction_for_nonce( 
&SanitizedTransaction::from_transaction_for_tests(tx), - true, // enable_durable_nonce &bank.next_durable_nonce(), ) .is_none()); @@ -12924,7 +12914,6 @@ pub(crate) mod tests { assert!(bank .check_transaction_for_nonce( &SanitizedTransaction::from_transaction_for_tests(tx), - true, // enable_durable_nonce &bank.next_durable_nonce(), ) .is_none()); @@ -12956,7 +12945,6 @@ pub(crate) mod tests { assert!(bank .check_transaction_for_nonce( &SanitizedTransaction::from_transaction_for_tests(tx), - true, // enable_durable_nonce &bank.next_durable_nonce(), ) .is_none()); @@ -13657,7 +13645,6 @@ pub(crate) mod tests { assert_eq!( bank.check_transaction_for_nonce( &SanitizedTransaction::from_transaction_for_tests(tx), - true, // enable_durable_nonce &bank.next_durable_nonce(), ), None From 938be88ae9d917cddd56fbc6f1537a455f99d9d3 Mon Sep 17 00:00:00 2001 From: Ruud van Asseldonk Date: Thu, 7 Jul 2022 18:22:53 +0200 Subject: [PATCH 065/100] Bump typenum from 1.14.0 to 1.15.0 to fix SIGILL during build (#26471) Bump typenum from 1.14.0 to 1.15.0 Version 1.15.0 fixes an issue where code that runs at build-time would be compiled with the target_cpu setting, and the target CPU might support instructions that the host system does not have, causing a SIGILL during the build. See also [1] and [2]. [1]: https://github.com/paholg/typenum/issues/162 [2]: https://github.com/paholg/typenum/pull/177 --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e916001545f111..1b8fd2ab29dca7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7365,9 +7365,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "ucd-trie" From 5e8e1beeb5947604af7d7aea43c98cb0cf610474 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Jul 2022 10:52:20 -0600 Subject: [PATCH 066/100] chore: bump serial_test from 0.6.0 to 0.8.0 (#26463) Bumps [serial_test](https://github.com/palfrey/serial_test) from 0.6.0 to 0.8.0. - [Release notes](https://github.com/palfrey/serial_test/releases) - [Commits](https://github.com/palfrey/serial_test/compare/v0.6.0...v0.8.0) --- updated-dependencies: - dependency-name: serial_test dependency-type: direct:production update-type: version-update:semver-minor ... 
 Cargo.lock | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index e916001545f111..1b8fd2ab29dca7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7365,9 +7365,9 @@ dependencies = [

 [[package]]
 name = "typenum"
-version = "1.14.0"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec"
+checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"

 [[package]]
 name = "ucd-trie"

From 5e8e1beeb5947604af7d7aea43c98cb0cf610474 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 7 Jul 2022 10:52:20 -0600
Subject: [PATCH 066/100] chore: bump serial_test from 0.6.0 to 0.8.0 (#26463)

Bumps [serial_test](https://github.com/palfrey/serial_test) from 0.6.0 to 0.8.0.
- [Release notes](https://github.com/palfrey/serial_test/releases)
- [Commits](https://github.com/palfrey/serial_test/compare/v0.6.0...v0.8.0)

---
updated-dependencies:
- dependency-name: serial_test
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock               | 12 +++++++-----
 bench-tps/Cargo.toml     | 2 +-
 client-test/Cargo.toml   | 2 +-
 core/Cargo.toml          | 2 +-
 dos/Cargo.toml           | 2 +-
 gossip/Cargo.toml        | 2 +-
 local-cluster/Cargo.toml | 2 +-
 metrics/Cargo.toml       | 2 +-
 rpc/Cargo.toml           | 2 +-
 9 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 1b8fd2ab29dca7..0e9b3a25bc8056 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4192,20 +4192,22 @@ dependencies = [

 [[package]]
 name = "serial_test"
-version = "0.6.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5bcc41d18f7a1d50525d080fd3e953be87c4f9f1a974f3c21798ca00d54ec15"
+checksum = "7eec42e7232e5ca56aa59d63af3c7f991fe71ee6a3ddd2d3480834cf3902b007"
 dependencies = [
+ "futures 0.3.21",
  "lazy_static",
- "parking_lot 0.11.2",
+ "log",
+ "parking_lot 0.12.0",
  "serial_test_derive",
 ]

 [[package]]
 name = "serial_test_derive"
-version = "0.6.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2881bccd7d60fb32dfa3d7b3136385312f8ad75e2674aab2852867a09790cae8"
+checksum = "f1b95bb2f4f624565e8fe8140c789af7e2082c0e0561b5a82a1b678baa9703dc"
 dependencies = [
  "proc-macro-error",
  "proc-macro2 1.0.38",
diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml
index a4e4e7c0d42456..0d5bc6aad772fe 100644
--- a/bench-tps/Cargo.toml
+++ b/bench-tps/Cargo.toml
@@ -34,7 +34,7 @@ solana-version = { path = "../version", version = "=1.11.2" }
 thiserror = "1.0"

 [dev-dependencies]
-serial_test = "0.6.0"
+serial_test = "0.8.0"
 solana-local-cluster = { path = "../local-cluster", version = "=1.11.2" }
 solana-test-validator = { path = "../test-validator", version = "=1.11.2" }

diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml
index 19a50a7790d702..83ce8f1b936d08 100644
--- a/client-test/Cargo.toml
+++ b/client-test/Cargo.toml
@@ -13,7 +13,7 @@ publish = false
 [dependencies]
 futures-util = "0.3.21"
 serde_json = "1.0.81"
-serial_test = "0.6.0"
+serial_test = "0.8.0"
 solana-client = { path = "../client", version = "=1.11.2" }
 solana-ledger = { path = "../ledger", version = "=1.11.2" }
 solana-measure = { path = "../measure", version = "=1.11.2" }
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 328b3dee265043..aefdbdbc6ed2a8 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -69,7 +69,7 @@ trees = "0.4.2"
 matches = "0.1.9"
 raptorq = "1.7.0"
 serde_json = "1.0.81"
-serial_test = "0.6.0"
+serial_test = "0.8.0"
 solana-logger = { path = "../logger", version = "=1.11.2" }
 solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" }
 solana-stake-program = { path = "../programs/stake", version = "=1.11.2" }
diff --git a/dos/Cargo.toml b/dos/Cargo.toml
index 8db04713aa633f..4459a82d6ec31c 100644
--- a/dos/Cargo.toml
+++ b/dos/Cargo.toml
@@ -33,5 +33,5 @@ solana-version = { path = "../version", version = "=1.11.2" }
 targets = ["x86_64-unknown-linux-gnu"]

 [dev-dependencies]
-serial_test = "0.6.0"
+serial_test = "0.8.0"
 solana-local-cluster = { path = "../local-cluster", version = "=1.11.2" }
diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml
index f72abef55499a1..7f1ce383bbd44b 100644
--- a/gossip/Cargo.toml
+++ b/gossip/Cargo.toml
@@ -50,7 +50,7 @@ thiserror = "1.0"
 [dev-dependencies]
 num_cpus = "1.13.1"
 regex = "1"
-serial_test = "0.6.0"
+serial_test = "0.8.0"

 [build-dependencies]
 rustc_version = "0.4"
diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml
index b62fc3ee37e998..e769dc886b4479 100644
--- a/local-cluster/Cargo.toml
+++ b/local-cluster/Cargo.toml
@@ -32,7 +32,7 @@ tempfile = "3.3.0"
 [dev-dependencies]
 assert_matches = "1.5.0"
 gag = "1.0.0"
-serial_test = "0.6.0"
+serial_test = "0.8.0"
 solana-download-utils = { path = "../download-utils", version = "=1.11.2" }
 solana-logger = { path = "../logger", version = "=1.11.2" }
diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml
index c1aecc89eaf1a2..2cfd3618b164d3 100644
--- a/metrics/Cargo.toml
+++ b/metrics/Cargo.toml
@@ -20,7 +20,7 @@ solana-sdk = { path = "../sdk", version = "=1.11.2" }
 [dev-dependencies]
 env_logger = "0.9.0"
 rand = "0.7.0"
-serial_test = "0.6.0"
+serial_test = "0.8.0"

 [lib]
 name = "solana_metrics"
diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml
index 1944ad3c11b154..5ca79073166303 100644
--- a/rpc/Cargo.toml
+++ b/rpc/Cargo.toml
@@ -56,7 +56,7 @@ tokio = { version = "~1.14.1", features = ["full"] }
 tokio-util = { version = "0.6", features = ["codec", "compat"] }

 [dev-dependencies]
-serial_test = "0.6.0"
+serial_test = "0.8.0"
 solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.2" }
 solana-net-utils = { path = "../net-utils", version = "=1.11.2" }
 solana-stake-program = { path = "../programs/stake", version = "=1.11.2" }

From 72256ac54d49023ba9bf0fec0bbd65a8c9b49685 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Thu, 7 Jul 2022 13:13:47 -0500
Subject: [PATCH 067/100] use iter for append vecs (#26459)

---
 runtime/src/accounts_db.rs | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index efd3eea5267208..699991de0c4cb4 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -3095,14 +3095,13 @@ impl AccountsDb {
         let mut original_bytes = 0;
         let mut num_stores = 0;
         for store in stores {
-            let mut start = 0;
             original_bytes += store.total_bytes();
             let store_id = store.append_vec_id();
-            while let Some((account, next)) = store.accounts.get_account(start) {
+            AppendVecAccountsIter::new(&store.accounts).for_each(|account| {
                 let new_entry = FoundStoredAccount {
+                    account_size: account.stored_size,
                     account,
                     store_id,
-                    account_size: next - start,
                 };
                 match stored_accounts.entry(new_entry.account.meta.pubkey) {
                     Entry::Occupied(mut occupied_entry) => {
@@ -3116,8 +3115,7 @@ impl AccountsDb {
                         vacant_entry.insert(new_entry);
                     }
                 }
-                start = next;
-            }
+            });
             num_stores += 1;
         }
         (stored_accounts, num_stores, original_bytes)

From 8e64b5883e2bb18c6bd4de97bac38b345abdb350 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Thu, 7 Jul 2022 13:19:23 -0500
Subject: [PATCH 068/100] restore population of 'accounts' metric in rent collection (#26454)

---
 runtime/src/bank.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 8dcee99b89ec1d..0c128c6cd1b0e9 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -208,7 +208,7 @@ struct RentMetrics {
     collect_us: AtomicU64,
     hash_us: AtomicU64,
     store_us: AtomicU64,
-    count: AtomicU64,
+    count: AtomicUsize,
 }

 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -5377,6 +5377,7 @@ impl Bank {
             time_collecting_rent_us,
             time_hashing_skipped_rewrites_us,
             time_storing_accounts_us,
+            num_accounts: accounts.len(),
         }
     }

@@ -5482,6 +5483,7 @@ impl Bank {
                 metrics
                     .store_us
                     .fetch_add(results.time_storing_accounts_us, Relaxed);
+                metrics.count.fetch_add(results.num_accounts, Relaxed);
             });
     }
@@ -7619,6 +7621,7 @@ struct CollectRentFromAccountsInfo {
     time_collecting_rent_us: u64,
     time_hashing_skipped_rewrites_us: u64,
     time_storing_accounts_us: u64,
+    num_accounts: usize,
 }

 /// Return the computed values—of each iteration in the parallel loop inside
@@ -7633,6 +7636,7 @@ struct CollectRentInPartitionInfo {
     time_collecting_rent_us: u64,
     time_hashing_skipped_rewrites_us: u64,
     time_storing_accounts_us: u64,
+    num_accounts: usize,
 }

 impl CollectRentInPartitionInfo {
@@ -7649,6 +7653,7 @@ impl CollectRentInPartitionInfo {
             time_collecting_rent_us: info.time_collecting_rent_us,
             time_hashing_skipped_rewrites_us: info.time_hashing_skipped_rewrites_us,
             time_storing_accounts_us: info.time_storing_accounts_us,
+            num_accounts: info.num_accounts,
         }
     }

@@ -7677,6 +7682,7 @@ impl CollectRentInPartitionInfo {
             time_storing_accounts_us: lhs
                 .time_storing_accounts_us
                 .saturating_add(rhs.time_storing_accounts_us),
+            num_accounts: lhs.num_accounts.saturating_add(rhs.num_accounts),
         }
     }
 }

From c9f1d01e794fb27131a159786dc5b18409b61af6 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Thu, 7 Jul 2022 13:49:33 -0500
Subject: [PATCH 069/100] refactor, add GetUniqueAccountsResult (#26457)

---
 runtime/src/accounts_db.rs        | 28 ++++++++++++++++++++++------
 runtime/src/snapshot_minimizer.rs |  6 ++++--
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 699991de0c4cb4..4760b6bd3dc00a 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -163,6 +163,12 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig
 pub type BinnedHashData = Vec<Vec<CalculateHashIntermediate>>;

+pub struct GetUniqueAccountsResult<'a> {
+    pub stored_accounts: HashMap<Pubkey, FoundStoredAccount<'a>>,
+    pub num_stores: usize,
+    pub original_bytes: u64,
+}
+
 pub struct AccountsAddRootTiming {
     pub index_us: u64,
     pub cache_us: u64,
@@ -3087,7 +3093,7 @@ impl AccountsDb {
     pub(crate) fn get_unique_accounts_from_storages<'a, I>(
         &'a self,
         stores: I,
-    ) -> (HashMap<Pubkey, FoundStoredAccount<'a>>, usize, u64)
+    ) -> GetUniqueAccountsResult<'a>
     where
         I: Iterator<Item = &'a Arc<AccountStorageEntry>>,
     {
@@ -3118,7 +3124,11 @@ impl AccountsDb {
             });
             num_stores += 1;
         }
-        (stored_accounts, num_stores, original_bytes)
+        GetUniqueAccountsResult {
+            stored_accounts,
+            num_stores,
+            original_bytes,
+        }
     }

     fn do_shrink_slot_stores<'a, I>(&'a self, slot: Slot, stores: I) -> usize
@@ -3126,8 +3136,11 @@ impl AccountsDb {
         I: Iterator<Item = &'a Arc<AccountStorageEntry>>,
     {
         debug!("do_shrink_slot_stores: slot: {}", slot);
-        let (stored_accounts, num_stores, original_bytes) =
-            self.get_unique_accounts_from_storages(stores);
+        let GetUniqueAccountsResult {
+            stored_accounts,
+            num_stores,
+            original_bytes,
+        } = self.get_unique_accounts_from_storages(stores);

         // sort by pubkey to keep account index lookups close
         let mut stored_accounts = stored_accounts.into_iter().collect::<Vec<_>>();
@@ -3683,8 +3696,11 @@ impl AccountsDb {
         }

         // this code is copied from shrink. I would like to combine it into a helper function, but the borrow checker has defeated my efforts so far.
-        let (stored_accounts, _num_stores, original_bytes) =
-            self.get_unique_accounts_from_storages(old_storages.iter());
+        let GetUniqueAccountsResult {
+            stored_accounts,
+            num_stores: _num_stores,
+            original_bytes,
+        } = self.get_unique_accounts_from_storages(old_storages.iter());

         // sort by pubkey to keep account index lookups close
         let mut stored_accounts = stored_accounts.into_iter().collect::<Vec<_>>();
diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs
index fcf1341afcd5b6..8aa908339d55fc 100644
--- a/runtime/src/snapshot_minimizer.rs
+++ b/runtime/src/snapshot_minimizer.rs
@@ -2,7 +2,7 @@
 use {
     crate::{
-        accounts_db::{AccountStorageEntry, AccountsDb, PurgeStats},
+        accounts_db::{AccountStorageEntry, AccountsDb, GetUniqueAccountsResult, PurgeStats},
         bank::Bank,
         builtins, static_ids,
     },
@@ -302,7 +302,9 @@ impl<'a> SnapshotMinimizer<'a> {
         dead_storages: &Mutex<Vec<Arc<AccountStorageEntry>>>,
     ) {
         let slot = storages.first().unwrap().slot();
-        let (stored_accounts, _, _) = self
+        let GetUniqueAccountsResult {
+            stored_accounts, ..
+        } = self
             .accounts_db()
             .get_unique_accounts_from_storages(storages.iter());
         let mut stored_accounts = stored_accounts.into_iter().collect::<Vec<_>>();

From 9d937fb8a043edb2a23440ccca3121fdb01feda1 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Thu, 7 Jul 2022 14:39:46 -0500
Subject: [PATCH 070/100] Bump Version to 1.11.3 (#26481)

Co-authored-by: willhickey
---
 Cargo.lock                                    | 480 ++++++++---------
 account-decoder/Cargo.toml                    | 8 +-
 accounts-bench/Cargo.toml                     | 12 +-
 accounts-cluster-bench/Cargo.toml             | 34 +-
 banking-bench/Cargo.toml                      | 26 +-
 banks-client/Cargo.toml                       | 12 +-
 banks-interface/Cargo.toml                    | 4 +-
 banks-server/Cargo.toml                       | 12 +-
 bench-streamer/Cargo.toml                     | 8 +-
 bench-tps/Cargo.toml                          | 38 +-
 bloom/Cargo.toml                              | 8 +-
 bucket_map/Cargo.toml                         | 8 +-
 clap-utils/Cargo.toml                         | 8 +-
 clap-v3-utils/Cargo.toml                      | 8 +-
 cli-config/Cargo.toml                         | 6 +-
 cli-output/Cargo.toml                         | 16 +-
 cli/Cargo.toml                                | 36 +-
 client-test/Cargo.toml                        | 32 +-
 client/Cargo.toml                             | 28 +-
 core/Cargo.toml                               | 56 +-
 docs/src/developing/clients/jsonrpc-api.md    | 4 +-
 dos/Cargo.toml                                | 28 +-
 download-utils/Cargo.toml                     | 6 +-
 entry/Cargo.toml                              | 16 +-
 faucet/Cargo.toml                             | 14 +-
 frozen-abi/Cargo.toml                         | 6 +-
 frozen-abi/macro/Cargo.toml                   | 2 +-
 genesis-utils/Cargo.toml                      | 8 +-
 genesis/Cargo.toml                            | 22 +-
 geyser-plugin-interface/Cargo.toml            | 6 +-
 geyser-plugin-manager/Cargo.toml              | 16 +-
 gossip/Cargo.toml                             | 38 +-
 install/Cargo.toml                            | 14 +-
 keygen/Cargo.toml                             | 12 +-
 ledger-tool/Cargo.toml                        | 30 +-
 ledger/Cargo.toml                             | 38 +-
 local-cluster/Cargo.toml                      | 28 +-
 log-analyzer/Cargo.toml                       | 6 +-
 logger/Cargo.toml                             | 2 +-
 measure/Cargo.toml                            | 4 +-
 merkle-root-bench/Cargo.toml                  | 12 +-
 merkle-tree/Cargo.toml                        | 4 +-
 metrics/Cargo.toml                            | 4 +-
 net-shaper/Cargo.toml                         | 4 +-
 net-utils/Cargo.toml                          | 8 +-
 notifier/Cargo.toml                           | 2 +-
 perf/Cargo.toml                               | 12 +-
 poh-bench/Cargo.toml                          | 14 +-
 poh/Cargo.toml                                | 20 +-
 program-runtime/Cargo.toml                    | 14 +-
 program-test/Cargo.toml                       | 18 +-
 .../address-lookup-table-tests/Cargo.toml     | 8 +-
 programs/address-lookup-table/Cargo.toml      | 12 +-
 programs/bpf/Cargo.lock                       | 490 +++++++++---------
 programs/bpf/Cargo.toml                       | 26 +-
 programs/bpf/rust/128bit/Cargo.toml           | 6 +-
 programs/bpf/rust/128bit_dep/Cargo.toml       | 4 +-
 programs/bpf/rust/alloc/Cargo.toml            | 4 +-
 programs/bpf/rust/call_depth/Cargo.toml       | 4 +-
 programs/bpf/rust/caller_access/Cargo.toml    | 4 +-
 programs/bpf/rust/curve25519/Cargo.toml       | 6 +-
 programs/bpf/rust/custom_heap/Cargo.toml      | 4 +-
 programs/bpf/rust/dep_crate/Cargo.toml        | 6 +-
 .../bpf/rust/deprecated_loader/Cargo.toml     | 4 +-
 programs/bpf/rust/dup_accounts/Cargo.toml     | 4 +-
 programs/bpf/rust/error_handling/Cargo.toml   | 4 +-
 programs/bpf/rust/external_spend/Cargo.toml   | 4 +-
 programs/bpf/rust/finalize/Cargo.toml         | 4 +-
 .../rust/get_minimum_delegation/Cargo.toml    | 4 +-
 .../Cargo.toml                                | 4 +-
 .../rust/instruction_introspection/Cargo.toml | 4 +-
 programs/bpf/rust/invoke/Cargo.toml           | 4 +-
 programs/bpf/rust/invoke_and_error/Cargo.toml | 4 +-
 programs/bpf/rust/invoke_and_ok/Cargo.toml    | 4 +-
 .../bpf/rust/invoke_and_return/Cargo.toml     | 4 +-
 programs/bpf/rust/invoked/Cargo.toml          | 4 +-
 programs/bpf/rust/iter/Cargo.toml             | 4 +-
 programs/bpf/rust/log_data/Cargo.toml         | 4 +-
 programs/bpf/rust/many_args/Cargo.toml        | 6 +-
 programs/bpf/rust/many_args_dep/Cargo.toml    | 4 +-
 programs/bpf/rust/mem/Cargo.toml              | 10 +-
 programs/bpf/rust/membuiltins/Cargo.toml      | 6 +-
 programs/bpf/rust/noop/Cargo.toml             | 4 +-
 programs/bpf/rust/panic/Cargo.toml            | 4 +-
 programs/bpf/rust/param_passing/Cargo.toml    | 6 +-
 .../bpf/rust/param_passing_dep/Cargo.toml     | 4 +-
 programs/bpf/rust/rand/Cargo.toml             | 4 +-
 programs/bpf/rust/realloc/Cargo.toml          | 4 +-
 programs/bpf/rust/realloc_invoke/Cargo.toml   | 6 +-
 .../bpf/rust/ro_account_modify/Cargo.toml     | 4 +-
 programs/bpf/rust/ro_modify/Cargo.toml        | 4 +-
 programs/bpf/rust/sanity/Cargo.toml           | 10 +-
 .../bpf/rust/secp256k1_recover/Cargo.toml     | 4 +-
 programs/bpf/rust/sha/Cargo.toml              | 4 +-
 .../rust/sibling_inner_instruction/Cargo.toml | 4 +-
 .../bpf/rust/sibling_instruction/Cargo.toml   | 4 +-
 programs/bpf/rust/simulation/Cargo.toml       | 12 +-
 programs/bpf/rust/spoof1/Cargo.toml           | 4 +-
 programs/bpf/rust/spoof1_system/Cargo.toml    | 4 +-
 programs/bpf/rust/sysvar/Cargo.toml           | 10 +-
 programs/bpf/rust/upgradeable/Cargo.toml      | 4 +-
 programs/bpf/rust/upgraded/Cargo.toml         | 4 +-
 programs/bpf_loader/Cargo.toml                | 14 +-
 .../bpf_loader/gen-syscall-list/Cargo.toml    | 2 +-
 programs/compute-budget/Cargo.toml            | 6 +-
 programs/config/Cargo.toml                    | 8 +-
 programs/ed25519-tests/Cargo.toml             | 6 +-
 programs/stake/Cargo.toml                     | 18 +-
 programs/vote/Cargo.toml                      | 14 +-
 programs/zk-token-proof/Cargo.toml            | 8 +-
 rayon-threadlimit/Cargo.toml                  | 2 +-
 rbpf-cli/Cargo.toml                           | 10 +-
 remote-wallet/Cargo.toml                      | 4 +-
 rpc-test/Cargo.toml                           | 18 +-
 rpc/Cargo.toml                                | 46 +-
 runtime/Cargo.toml                            | 34 +-
 runtime/store-tool/Cargo.toml                 | 8 +-
 sdk/Cargo.toml                                | 12 +-
 sdk/cargo-build-bpf/Cargo.toml                | 4 +-
 sdk/cargo-build-sbf/Cargo.toml                | 8 +-
 .../tests/crates/fail/Cargo.toml              | 4 +-
 .../tests/crates/noop/Cargo.toml              | 4 +-
 sdk/cargo-test-bpf/Cargo.toml                 | 2 +-
 sdk/cargo-test-sbf/Cargo.toml                 | 2 +-
 sdk/gen-headers/Cargo.toml                    | 2 +-
 sdk/macro/Cargo.toml                          | 2 +-
 sdk/program/Cargo.toml                        | 10 +-
 send-transaction-service/Cargo.toml           | 14 +-
 stake-accounts/Cargo.toml                     | 16 +-
 storage-bigtable/Cargo.toml                   | 10 +-
 storage-bigtable/build-proto/Cargo.lock       | 2 +-
 storage-bigtable/build-proto/Cargo.toml       | 2 +-
 storage-proto/Cargo.toml                      | 8 +-
 streamer/Cargo.toml                           | 10 +-
 sys-tuner/Cargo.toml                          | 6 +-
 test-validator/Cargo.toml                     | 28 +-
 tokens/Cargo.toml                             | 24 +-
 transaction-dos/Cargo.toml                    | 32 +-
 transaction-status/Cargo.toml                 | 14 +-
 upload-perf/Cargo.toml                        | 4 +-
 validator/Cargo.toml                          | 50 +-
 version/Cargo.toml                            | 8 +-
 watchtower/Cargo.toml                         | 20 +-
 zk-token-sdk/Cargo.toml                       | 6 +-
 144 files changed, 1259 insertions(+), 1259 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 0e9b3a25bc8056..9ab4f482d4b3df 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1634,7 +1634,7 @@ dependencies = [

 [[package]]
 name = "gen-headers"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "log",
  "regex",
@@ -1642,7 +1642,7 @@ dependencies = [

 [[package]]
 name = "gen-syscall-list"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "regex",
 ]
@@ -3699,15 +3699,15 @@ dependencies = [

 [[package]]
 name = "rbpf-cli"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 3.1.8",
  "serde",
  "serde_json",
  "solana-bpf-loader-program",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-program-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana_rbpf",
 ]
@@ -4409,7 +4409,7 @@ dependencies = [

 [[package]]
 name = "solana-account-decoder"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "Inflector",
  "base64 0.13.0",
@@ -4421,7 +4421,7 @@ dependencies = [
  "serde_derive",
  "serde_json",
  "solana-config-program",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-vote-program",
  "spl-token",
  "spl-token-2022",
@@ -4431,21 +4431,21 @@ dependencies = [

 [[package]]
 name = "solana-accounts-bench"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 2.33.3",
  "log",
  "rayon",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-version",
 ]

 [[package]]
 name = "solana-accounts-cluster-bench"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 2.33.3",
  "log",
@@ -4458,11 +4458,11 @@ dependencies = [
  "solana-faucet",
  "solana-gossip",
  "solana-local-cluster",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-net-utils",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-test-validator",
  "solana-transaction-status",
@@ -4472,7 +4472,7 @@ dependencies = [

 [[package]]
 name = "solana-address-lookup-table-program"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "bytemuck",
@@ -4481,28 +4481,28 @@ dependencies = [
  "num-traits",
  "rustc_version 0.4.0",
  "serde",
- "solana-frozen-abi 1.11.2",
- "solana-frozen-abi-macro 1.11.2",
- "solana-program 1.11.2",
+ "solana-frozen-abi 1.11.3",
+ "solana-frozen-abi-macro 1.11.3",
+ "solana-program 1.11.3",
  "solana-program-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "thiserror",
 ]

 [[package]]
 name = "solana-address-lookup-table-program-tests"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "assert_matches",
  "bincode",
  "solana-address-lookup-table-program",
  "solana-program-test",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-banking-bench"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 3.1.8",
  "crossbeam-channel",
@@ -4513,27 +4513,27 @@ dependencies = [
  "solana-core",
  "solana-gossip",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-perf",
  "solana-poh",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-version",
 ]

 [[package]]
 name = "solana-banks-client"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "borsh",
  "futures 0.3.21",
  "solana-banks-interface",
  "solana-banks-server",
- "solana-program 1.11.2",
+ "solana-program 1.11.3",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "tarpc",
  "thiserror",
  "tokio",
@@ -4542,16 +4542,16 @@ dependencies = [

 [[package]]
 name = "solana-banks-interface"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "serde",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "tarpc",
 ]

 [[package]]
 name = "solana-banks-server"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "crossbeam-channel",
@@ -4559,7 +4559,7 @@ dependencies = [
  "solana-banks-interface",
  "solana-client",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-send-transaction-service",
  "tarpc",
  "tokio",
@@ -4569,7 +4569,7 @@ dependencies = [

 [[package]]
 name = "solana-bench-streamer"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 3.1.8",
  "crossbeam-channel",
@@ -4580,7 +4580,7 @@ dependencies = [

 [[package]]
 name = "solana-bench-tps"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 2.33.3",
  "crossbeam-channel",
@@ -4597,13 +4597,13 @@ dependencies = [
  "solana-genesis",
  "solana-gossip",
  "solana-local-cluster",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-metrics",
  "solana-net-utils",
  "solana-rpc",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-test-validator",
  "solana-version",
@@ -4612,7 +4612,7 @@ dependencies = [

 [[package]]
 name = "solana-bloom"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bv",
  "fnv",
@@ -4622,14 +4622,14 @@ dependencies = [
  "rustc_version 0.4.0",
  "serde",
  "serde_derive",
- "solana-frozen-abi 1.11.2",
- "solana-frozen-abi-macro 1.11.2",
- "solana-sdk 1.11.2",
+ "solana-frozen-abi 1.11.3",
+ "solana-frozen-abi-macro 1.11.3",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-bpf-loader-program"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "byteorder",
@@ -4640,15 +4640,15 @@ dependencies = [
  "solana-metrics",
  "solana-program-runtime",
  "solana-runtime",
- "solana-sdk 1.11.2",
- "solana-zk-token-sdk 1.11.2",
+ "solana-sdk 1.11.3",
+ "solana-zk-token-sdk 1.11.3",
  "solana_rbpf",
  "thiserror",
 ]

 [[package]]
 name = "solana-bucket-map"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "fs_extra",
  "log",
@@ -4656,24 +4656,24 @@ dependencies = [
  "modular-bitfield",
  "rand 0.7.3",
  "rayon",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "tempfile",
 ]

 [[package]]
 name = "solana-cargo-build-bpf"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "cargo_metadata",
  "clap 3.1.8",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-cargo-build-sbf"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bzip2",
  "cargo_metadata",
@@ -4682,14 +4682,14 @@ dependencies = [
  "regex",
  "serial_test",
  "solana-download-utils",
- "solana-logger 1.11.2",
- "solana-sdk 1.11.2",
+ "solana-logger 1.11.3",
+ "solana-sdk 1.11.3",
  "tar",
 ]

 [[package]]
 name = "solana-cargo-test-bpf"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "cargo_metadata",
  "clap 3.1.8",
 ]

 [[package]]
 name = "solana-cargo-test-sbf"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "cargo_metadata",
  "clap 3.1.8",
 ]

 [[package]]
 name = "solana-clap-utils"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "chrono",
  "clap 2.33.3",
  "rpassword",
  "solana-perf",
  "solana-remote-wallet",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "tempfile",
  "thiserror",
  "tiny-bip39",
@@ -4722,14 +4722,14 @@ dependencies = [

 [[package]]
 name = "solana-clap-v3-utils"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "chrono",
  "clap 3.1.8",
  "rpassword",
  "solana-perf",
  "solana-remote-wallet",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "tempfile",
  "thiserror",
  "tiny-bip39",
@@ -4739,7 +4739,7 @@ dependencies = [

 [[package]]
 name = "solana-cli"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "bs58",
@@ -4766,10 +4766,10 @@ dependencies = [
  "solana-client",
  "solana-config-program",
  "solana-faucet",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-program-runtime",
  "solana-remote-wallet",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-test-validator",
  "solana-transaction-status",
@@ -4784,7 +4784,7 @@ dependencies = [

 [[package]]
 name = "solana-cli-config"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "anyhow",
  "dirs-next",
@@ -4793,13 +4793,13 @@ dependencies = [
  "serde_derive",
  "serde_yaml",
  "solana-clap-utils",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "url 2.2.2",
 ]

 [[package]]
 name = "solana-cli-output"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "Inflector",
  "base64 0.13.0",
@@ -4817,7 +4817,7 @@ dependencies = [
  "solana-clap-utils",
  "solana-cli-config",
  "solana-client",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-transaction-status",
  "solana-vote-program",
  "spl-memo",
 ]

 [[package]]
 name = "solana-client"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "anyhow",
  "assert_matches",
@@ -4861,12 +4861,12 @@ dependencies = [
  "solana-account-decoder",
  "solana-clap-utils",
  "solana-faucet",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-metrics",
  "solana-net-utils",
  "solana-perf",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-transaction-status",
  "solana-version",
@@ -4882,14 +4882,14 @@ dependencies = [

 [[package]]
 name = "solana-client-test"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "futures-util",
  "serde_json",
  "serial_test",
  "solana-client",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-merkle-tree",
  "solana-metrics",
@@ -4897,7 +4897,7 @@ dependencies = [
  "solana-rayon-threadlimit",
  "solana-rpc",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-test-validator",
  "solana-transaction-status",
@@ -4908,28 +4908,28 @@ dependencies = [

 [[package]]
 name = "solana-compute-budget-program"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "solana-program-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-config-program"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "chrono",
  "serde",
  "serde_derive",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-program-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-core"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "ahash",
  "base64 0.13.0",
@@ -4961,12 +4961,12 @@ dependencies = [
  "solana-bloom",
  "solana-client",
  "solana-entry",
- "solana-frozen-abi 1.11.2",
- "solana-frozen-abi-macro 1.11.2",
+ "solana-frozen-abi 1.11.3",
+ "solana-frozen-abi-macro 1.11.3",
  "solana-geyser-plugin-manager",
  "solana-gossip",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-metrics",
  "solana-net-utils",
@@ -4976,7 +4976,7 @@ dependencies = [
  "solana-rayon-threadlimit",
  "solana-rpc",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-send-transaction-service",
  "solana-stake-program",
  "solana-streamer",
@@ -4996,7 +4996,7 @@ dependencies = [

 [[package]]
 name = "solana-dos"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "clap 3.1.8",
@@ -5011,41 +5011,41 @@ dependencies = [
  "solana-faucet",
  "solana-gossip",
  "solana-local-cluster",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-net-utils",
  "solana-perf",
  "solana-rpc",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-version",
 ]

 [[package]]
 name = "solana-download-utils"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "console",
  "indicatif",
  "log",
  "reqwest",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-ed25519-program-tests"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "assert_matches",
  "ed25519-dalek",
  "rand 0.7.3",
  "solana-program-test",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-entry"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "crossbeam-channel",
@@ -5057,18 +5057,18 @@ dependencies = [
  "rand 0.7.3",
  "rayon",
  "serde",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-merkle-tree",
  "solana-metrics",
  "solana-perf",
  "solana-rayon-threadlimit",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-faucet"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "byteorder",
@@ -5079,9 +5079,9 @@ dependencies = [
  "serde_derive",
  "solana-clap-utils",
  "solana-cli-config",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-metrics",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-version",
  "spl-memo",
  "thiserror",
@@ -5112,7 +5112,7 @@ dependencies = [

 [[package]]
 name = "solana-frozen-abi"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bs58",
  "bv",
@@ -5127,8 +5127,8 @@ dependencies = [
  "serde_bytes",
  "serde_derive",
  "sha2 0.10.2",
- "solana-frozen-abi-macro 1.11.2",
- "solana-logger 1.11.2",
+ "solana-frozen-abi-macro 1.11.3",
+ "solana-logger 1.11.3",
  "thiserror",
 ]
@@ -5146,7 +5146,7 @@ dependencies = [

 [[package]]
 name = "solana-frozen-abi-macro"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "proc-macro2 1.0.38",
  "quote 1.0.18",
@@ -5156,7 +5156,7 @@ dependencies = [

 [[package]]
 name = "solana-genesis"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "base64 0.13.0",
  "clap 2.33.3",
@@ -5167,9 +5167,9 @@ dependencies = [
  "solana-cli-config",
  "solana-entry",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-stake-program",
  "solana-version",
  "solana-vote-program",
@@ -5178,26 +5178,26 @@ dependencies = [

 [[package]]
 name = "solana-genesis-utils"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "solana-download-utils",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-geyser-plugin-interface"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "log",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-transaction-status",
  "thiserror",
 ]

 [[package]]
 name = "solana-geyser-plugin-manager"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bs58",
  "crossbeam-channel",
@@ -5210,14 +5210,14 @@ dependencies = [
  "solana-metrics",
  "solana-rpc",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-transaction-status",
  "thiserror",
 ]

 [[package]]
 name = "solana-gossip"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bincode",
  "bv",
@@ -5244,17 +5244,17 @@ dependencies = [
  "solana-clap-utils",
  "solana-client",
  "solana-entry",
- "solana-frozen-abi 1.11.2",
- "solana-frozen-abi-macro 1.11.2",
+ "solana-frozen-abi 1.11.3",
+ "solana-frozen-abi-macro 1.11.3",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-metrics",
  "solana-net-utils",
  "solana-perf",
  "solana-rayon-threadlimit",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-streamer",
  "solana-version",
  "solana-vote-program",
@@ -5263,7 +5263,7 @@ dependencies = [

 [[package]]
 name = "solana-install"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "atty",
  "bincode",
@@ -5284,8 +5284,8 @@ dependencies = [
  "solana-clap-utils",
  "solana-client",
  "solana-config-program",
- "solana-logger 1.11.2",
- "solana-sdk 1.11.2",
+ "solana-logger 1.11.3",
+ "solana-sdk 1.11.3",
  "solana-version",
  "tar",
  "tempfile",
@@ -5296,7 +5296,7 @@ dependencies = [

 [[package]]
 name = "solana-keygen"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "bs58",
  "clap 3.1.8",
@@ -5305,14 +5305,14 @@ dependencies = [
  "solana-clap-v3-utils",
  "solana-cli-config",
  "solana-remote-wallet",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-version",
  "tiny-bip39",
 ]

 [[package]]
 name = "solana-ledger"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "assert_matches",
  "bincode",
@@ -5346,16 +5346,16 @@ dependencies = [
  "solana-account-decoder",
  "solana-bpf-loader-program",
  "solana-entry",
- "solana-frozen-abi 1.11.2",
- "solana-frozen-abi-macro 1.11.2",
- "solana-logger 1.11.2",
+ "solana-frozen-abi 1.11.3",
+ "solana-frozen-abi-macro 1.11.3",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-metrics",
  "solana-perf",
  "solana-program-runtime",
  "solana-rayon-threadlimit",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-stake-program",
  "solana-storage-bigtable",
  "solana-storage-proto",
@@ -5371,7 +5371,7 @@ dependencies = [

 [[package]]
 name = "solana-ledger-tool"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "assert_cmd",
  "bs58",
@@ -5393,10 +5393,10 @@ dependencies = [
  "solana-core",
  "solana-entry",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-stake-program",
  "solana-storage-bigtable",
  "solana-transaction-status",
@@ -5408,7 +5408,7 @@ dependencies = [

 [[package]]
 name = "solana-local-cluster"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "assert_matches",
  "crossbeam-channel",
@@ -5426,9 +5426,9 @@ dependencies = [
  "solana-entry",
  "solana-gossip",
  "solana-ledger",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-stake-program",
  "solana-streamer",
  "solana-vote-program",
@@ -5437,13 +5437,13 @@ dependencies = [

 [[package]]
 name = "solana-log-analyzer"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "byte-unit",
  "clap 3.1.8",
  "serde",
  "serde_json",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-version",
 ]
@@ -5460,7 +5460,7 @@ dependencies = [

 [[package]]
 name = "solana-logger"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "env_logger",
  "lazy_static",
@@ -5469,38 +5469,38 @@ dependencies = [

 [[package]]
 name = "solana-measure"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "log",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
 ]

 [[package]]
 name = "solana-merkle-root-bench"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "clap 2.33.3",
  "log",
- "solana-logger 1.11.2",
+ "solana-logger 1.11.3",
  "solana-measure",
  "solana-runtime",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
  "solana-version",
 ]

 [[package]]
 name = "solana-merkle-tree"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "fast-math",
  "hex",
  "matches",
- "solana-program 1.11.2",
1.11.3", ] [[package]] name = "solana-metrics" -version = "1.11.2" +version = "1.11.3" dependencies = [ "crossbeam-channel", "env_logger", @@ -5510,23 +5510,23 @@ dependencies = [ "rand 0.7.3", "reqwest", "serial_test", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-net-shaper" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 3.1.8", "rand 0.7.3", "serde", "serde_json", - "solana-logger 1.11.2", + "solana-logger 1.11.3", ] [[package]] name = "solana-net-utils" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "clap 3.1.8", @@ -5537,8 +5537,8 @@ dependencies = [ "serde", "serde_derive", "socket2", - "solana-logger 1.11.2", - "solana-sdk 1.11.2", + "solana-logger 1.11.3", + "solana-sdk 1.11.3", "solana-version", "tokio", "url 2.2.2", @@ -5546,7 +5546,7 @@ dependencies = [ [[package]] name = "solana-notifier" -version = "1.11.2" +version = "1.11.3" dependencies = [ "log", "reqwest", @@ -5555,7 +5555,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.11.2" +version = "1.11.3" dependencies = [ "ahash", "bincode", @@ -5573,16 +5573,16 @@ dependencies = [ "rand 0.7.3", "rayon", "serde", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-metrics", "solana-rayon-threadlimit", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", ] [[package]] name = "solana-poh" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "core_affinity", @@ -5592,29 +5592,29 @@ dependencies = [ "rand 0.7.3", "solana-entry", "solana-ledger", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-measure", "solana-metrics", "solana-perf", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-sys-tuner", "thiserror", ] [[package]] name = "solana-poh-bench" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 3.1.8", "log", "rand 0.7.3", "rayon", "solana-entry", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-measure", "solana-perf", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-version", ] @@ -5662,7 +5662,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "anyhow", "assert_matches", @@ -5697,10 +5697,10 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", - "solana-sdk-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", + "solana-sdk-macro 1.11.3", "static_assertions", "thiserror", "wasm-bindgen", @@ -5708,7 +5708,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "bincode", @@ -5722,18 +5722,18 @@ dependencies = [ "num-traits", "rustc_version 0.4.0", "serde", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", "solana-measure", "solana-metrics", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", ] [[package]] name = "solana-program-test" -version = "1.11.2" +version = "1.11.3" dependencies = [ "assert_matches", "async-trait", @@ -5745,10 +5745,10 @@ dependencies = [ "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-program-runtime", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", "thiserror", 
"tokio", @@ -5756,7 +5756,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.11.2" +version = "1.11.3" dependencies = [ "lazy_static", "num_cpus", @@ -5764,7 +5764,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.11.2" +version = "1.11.3" dependencies = [ "console", "dialoguer", @@ -5775,14 +5775,14 @@ dependencies = [ "parking_lot 0.12.0", "qstring", "semver 1.0.10", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", "uriparse", ] [[package]] name = "solana-rpc" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "bincode", @@ -5818,7 +5818,7 @@ dependencies = [ "solana-poh", "solana-rayon-threadlimit", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-send-transaction-service", "solana-stake-program", "solana-storage-bigtable", @@ -5837,7 +5837,7 @@ dependencies = [ [[package]] name = "solana-rpc-test" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "bs58", @@ -5849,9 +5849,9 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-client", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-rpc", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-streamer", "solana-test-validator", "solana-transaction-status", @@ -5860,7 +5860,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.11.2" +version = "1.11.3" dependencies = [ "arrayref", "assert_matches", @@ -5900,18 +5900,18 @@ dependencies = [ "solana-bucket-map", "solana-compute-budget-program", "solana-config-program", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", "solana-measure", "solana-metrics", "solana-program-runtime", "solana-rayon-threadlimit", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-stake-program", "solana-vote-program", "solana-zk-token-proof-program", - "solana-zk-token-sdk 1.11.2", + "solana-zk-token-sdk 1.11.3", "strum", "strum_macros", "symlink", @@ -5974,7 +5974,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.11.2" +version = "1.11.3" dependencies = [ "anyhow", "assert_matches", @@ -6013,11 +6013,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", - "solana-program 1.11.2", - "solana-sdk-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", + "solana-program 1.11.3", + "solana-sdk-macro 1.11.3", "static_assertions", "thiserror", "tiny-bip39", @@ -6040,7 +6040,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bs58", "proc-macro2 1.0.38", @@ -6051,21 +6051,21 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "1.11.2" +version = "1.11.3" dependencies = [ "crossbeam-channel", "log", "solana-client", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-measure", "solana-metrics", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-stake-accounts" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -6073,13 +6073,13 @@ dependencies = [ "solana-client", "solana-remote-wallet", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-stake-program", ] [[package]] name = "solana-stake-program" -version = "1.11.2" +version = 
"1.11.3" dependencies = [ "assert_matches", "bincode", @@ -6091,12 +6091,12 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", "solana-metrics", "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", "test-case", "thiserror", @@ -6104,7 +6104,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.11.2" +version = "1.11.3" dependencies = [ "backoff", "bincode", @@ -6125,7 +6125,7 @@ dependencies = [ "serde_derive", "smpl_jwt", "solana-metrics", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-storage-proto", "solana-transaction-status", "thiserror", @@ -6136,7 +6136,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "bs58", @@ -6144,25 +6144,25 @@ dependencies = [ "prost 0.10.4", "serde", "solana-account-decoder", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-transaction-status", "tonic-build 0.7.2", ] [[package]] name = "solana-store-tool" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 2.33.3", "log", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-runtime", "solana-version", ] [[package]] name = "solana-streamer" -version = "1.11.2" +version = "1.11.3" dependencies = [ "crossbeam-channel", "futures-util", @@ -6179,23 +6179,23 @@ dependencies = [ "rand 0.7.3", "rcgen", "rustls 0.20.6", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-metrics", "solana-perf", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", "tokio", ] [[package]] name = "solana-sys-tuner" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 2.33.3", "libc", "log", "nix", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-version", "sysctl", "unix_socket2", @@ -6204,7 +6204,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "log", @@ -6215,20 +6215,20 @@ dependencies = [ "solana-core", "solana-gossip", "solana-ledger", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-net-utils", "solana-program-runtime", "solana-program-test", "solana-rpc", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-streamer", "tokio", ] [[package]] name = "solana-tokens" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "chrono", @@ -6244,9 +6244,9 @@ dependencies = [ "solana-clap-utils", "solana-cli-config", "solana-client", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-remote-wallet", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-streamer", "solana-test-validator", "solana-transaction-status", @@ -6259,7 +6259,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "clap 2.33.3", @@ -6273,11 +6273,11 @@ dependencies = [ "solana-faucet", "solana-gossip", "solana-local-cluster", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-measure", "solana-net-utils", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-streamer", "solana-transaction-status", "solana-version", @@ -6285,7 +6285,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.11.2" +version = "1.11.3" dependencies = [ "Inflector", "base64 0.13.0", @@ -6301,7 +6301,7 @@ 
dependencies = [ "solana-measure", "solana-metrics", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", "spl-associated-token-account", "spl-memo", @@ -6312,7 +6312,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "1.11.2" +version = "1.11.3" dependencies = [ "serde_json", "solana-metrics", @@ -6320,7 +6320,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.11.2" +version = "1.11.3" dependencies = [ "chrono", "clap 2.33.3", @@ -6351,14 +6351,14 @@ dependencies = [ "solana-genesis-utils", "solana-gossip", "solana-ledger", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-metrics", "solana-net-utils", "solana-perf", "solana-poh", "solana-rpc", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", @@ -6371,21 +6371,21 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.11.2" +version = "1.11.3" dependencies = [ "log", "rustc_version 0.4.0", "semver 1.0.10", "serde", "serde_derive", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-sdk 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-sdk 1.11.3", ] [[package]] name = "solana-vote-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "log", @@ -6394,18 +6394,18 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", "solana-metrics", "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", ] [[package]] name = "solana-watchtower" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 2.33.3", "humantime", @@ -6414,24 +6414,24 @@ dependencies = [ "solana-cli-config", "solana-cli-output", "solana-client", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-metrics", "solana-notifier", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-version", ] [[package]] name = "solana-zk-token-proof-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bytemuck", "getrandom 0.1.16", "num-derive", "num-traits", "solana-program-runtime", - "solana-sdk 1.11.2", - "solana-zk-token-sdk 1.11.2", + "solana-sdk 1.11.3", + "solana-zk-token-sdk 1.11.3", ] [[package]] @@ -6466,7 +6466,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.11.2" +version = "1.11.3" dependencies = [ "aes-gcm-siv", "arrayref", @@ -6485,8 +6485,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.11.2", - "solana-sdk 1.11.2", + "solana-program 1.11.3", + "solana-sdk 1.11.3", "subtle", "thiserror", "zeroize", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 7abc3631ed7fc5..9b8860f178b0a4 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-account-decoder" -version = "1.11.2" +version = "1.11.3" description = "Solana account decoder" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,9 +19,9 @@ lazy_static = "1.4.0" serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" -solana-config-program = { path = "../programs/config", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } 
+solana-config-program = { path = "../programs/config", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.3" }
 spl-token = { version = "=3.3.0", features = ["no-entrypoint"] }
 spl-token-2022 = { version = "=0.3.0", features = ["no-entrypoint"] }
 thiserror = "1.0"
diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml
index 05cddc8938940b..1863033a8bf9ba 100644
--- a/accounts-bench/Cargo.toml
+++ b/accounts-bench/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-accounts-bench"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,11 +12,11 @@ publish = false
 clap = "2.33.1"
 log = "0.4.17"
 rayon = "1.5.3"
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml
index c11f1a47a91431..aed4b6a9afc50f 100644
--- a/accounts-cluster-bench/Cargo.toml
+++ b/accounts-cluster-bench/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-accounts-cluster-bench"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
 log = "0.4.17"
 rand = "0.7.0"
 rayon = "1.5.3"
-solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" }
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-faucet = { path = "../faucet", version = "=1.11.2" }
-solana-gossip = { path = "../gossip", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-net-utils = { path = "../net-utils", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-streamer = { path = "../streamer", version = "=1.11.2" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-faucet = { path = "../faucet", version = "=1.11.3" }
+solana-gossip = { path = "../gossip", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-net-utils = { path = "../net-utils", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-streamer = { path = "../streamer", version = "=1.11.3" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }
 spl-token = { version = "=3.3.0", features = ["no-entrypoint"] }

 [dev-dependencies]
-solana-core = { path = "../core", version = "=1.11.2" }
-solana-local-cluster = { path = "../local-cluster", version = "=1.11.2" }
-solana-test-validator = { path = "../test-validator", version = "=1.11.2" }
+solana-core = { path = "../core", version = "=1.11.3" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.11.3" }
+solana-test-validator = { path = "../test-validator", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml
index dccf92cb669284..4766b2043b4a1d 100644
--- a/banking-bench/Cargo.toml
+++ b/banking-bench/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-banking-bench"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,18 +14,18 @@ crossbeam-channel = "0.5"
 log = "0.4.17"
 rand = "0.7.0"
 rayon = "1.5.3"
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-core = { path = "../core", version = "=1.11.2" }
-solana-gossip = { path = "../gossip", version = "=1.11.2" }
-solana-ledger = { path = "../ledger", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-perf = { path = "../perf", version = "=1.11.2" }
-solana-poh = { path = "../poh", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-streamer = { path = "../streamer", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-core = { path = "../core", version = "=1.11.3" }
+solana-gossip = { path = "../gossip", version = "=1.11.3" }
+solana-ledger = { path = "../ledger", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-perf = { path = "../perf", version = "=1.11.3" }
+solana-poh = { path = "../poh", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-streamer = { path = "../streamer", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/banks-client/Cargo.toml b/banks-client/Cargo.toml
index 69c9b6daf7cb29..c8b783e13318cd 100644
--- a/banks-client/Cargo.toml
+++ b/banks-client/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-client"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana banks client"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -12,17 +12,17 @@ edition = "2021"
 [dependencies]
 borsh = "0.9.3"
 futures = "0.3"
-solana-banks-interface = { path = "../banks-interface", version = "=1.11.2" }
-solana-program = { path = "../sdk/program", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
"../banks-interface", version = "=1.11.3" } +solana-program = { path = "../sdk/program", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } tarpc = { version = "0.29.0", features = ["full"] } thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } tokio-serde = { version = "0.8", features = ["bincode"] } [dev-dependencies] -solana-banks-server = { path = "../banks-server", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } +solana-banks-server = { path = "../banks-server", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index 6c770e1f46437d..00fffa65433cc9 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-interface" -version = "1.11.2" +version = "1.11.3" description = "Solana banks RPC interface" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2021" [dependencies] serde = { version = "1.0.138", features = ["derive"] } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } tarpc = { version = "0.29.0", features = ["full"] } [lib] diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml index 2f6bc3a0e307e8..ab56fde52d1aba 100644 --- a/banks-server/Cargo.toml +++ b/banks-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-server" -version = "1.11.2" +version = "1.11.3" description = "Solana banks server" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,11 +13,11 @@ edition = "2021" bincode = "1.3.3" crossbeam-channel = "0.5" futures = "0.3" -solana-banks-interface = { path = "../banks-interface", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.2" } +solana-banks-interface = { path = "../banks-interface", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.3" } tarpc = { version = "0.29.0", features = ["full"] } tokio = { version = "~1.14.1", features = ["full"] } tokio-serde = { version = "0.8", features = ["bincode"] } diff --git a/bench-streamer/Cargo.toml b/bench-streamer/Cargo.toml index f1e4c8a5c3cadd..9b6c111662b4bc 100644 --- a/bench-streamer/Cargo.toml +++ b/bench-streamer/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-bench-streamer" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,9 +11,9 @@ publish = false [dependencies] clap = { version = "3.1.5", features = ["cargo"] } crossbeam-channel = "0.5" -solana-net-utils = { path = "../net-utils", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-streamer = { path = "../streamer", 
version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 0d5bc6aad772fe..ba63a5350016f1 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-bench-tps" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -15,28 +15,28 @@ log = "0.4.17" rayon = "1.5.3" serde_json = "1.0.81" serde_yaml = "0.8.24" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-core = { path = "../core", version = "=1.11.2" } -solana-faucet = { path = "../faucet", version = "=1.11.2" } -solana-genesis = { path = "../genesis", version = "=1.11.2" } -solana-gossip = { path = "../gossip", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-net-utils = { path = "../net-utils", version = "=1.11.2" } -solana-rpc = { path = "../rpc", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-core = { path = "../core", version = "=1.11.3" } +solana-faucet = { path = "../faucet", version = "=1.11.3" } +solana-genesis = { path = "../genesis", version = "=1.11.3" } +solana-gossip = { path = "../gossip", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } thiserror = "1.0" [dev-dependencies] serial_test = "0.8.0" -solana-local-cluster = { path = "../local-cluster", version = "=1.11.2" } -solana-test-validator = { path = "../test-validator", version = "=1.11.2" } +solana-local-cluster = { path = "../local-cluster", version = "=1.11.3" } +solana-test-validator = { path = "../test-validator", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index e730fbcfd42d29..40fb66b7cdbf3e 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bloom" -version = "1.11.2" +version = "1.11.3" description = "Solana bloom filter" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -17,9 +17,9 @@ rand = "0.7.0" rayon = "1.5.3" serde = { version = "1.0.138", features = ["rc"] } serde_derive = "1.0.103" 
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/bucket_map/Cargo.toml b/bucket_map/Cargo.toml index 686bdb8946e154..89343b7d12c269 100644 --- a/bucket_map/Cargo.toml +++ b/bucket_map/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bucket-map" -version = "1.11.2" +version = "1.11.3" description = "solana-bucket-map" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-bucket-map" @@ -15,14 +15,14 @@ log = { version = "0.4.17" } memmap2 = "0.5.3" modular-bitfield = "0.11.2" rand = "0.7.0" -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } tempfile = "3.3.0" [dev-dependencies] fs_extra = "1.2.0" rayon = "1.5.3" -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/clap-utils/Cargo.toml b/clap-utils/Cargo.toml index c120e8cc25339f..f2dcf2678c153b 100644 --- a/clap-utils/Cargo.toml +++ b/clap-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-clap-utils" -version = "1.11.2" +version = "1.11.3" description = "Solana utilities for the clap" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,9 +13,9 @@ edition = "2021" chrono = "0.4" clap = "2.33.0" rpassword = "6.0" -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.2", default-features = false } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.3", default-features = false } +solana-sdk = { path = "../sdk", version = "=1.11.3" } thiserror = "1.0.31" tiny-bip39 = "0.8.2" uriparse = "0.6.4" diff --git a/clap-v3-utils/Cargo.toml b/clap-v3-utils/Cargo.toml index 8585efb47789b4..81ea397d9be4dd 100644 --- a/clap-v3-utils/Cargo.toml +++ b/clap-v3-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-clap-v3-utils" -version = "1.11.2" +version = "1.11.3" description = "Solana utilities for the clap v3" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,9 +13,9 @@ edition = "2021" chrono = "0.4" clap = { version = "3.1.5", features = ["cargo"] } rpassword = "6.0" -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.2", default-features = false } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.3", default-features = false } +solana-sdk = { path = "../sdk", version = "=1.11.3" } thiserror = "1.0.31" tiny-bip39 = "0.8.2" uriparse = "0.6.4" diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index d4c27b4b967799..2231160b4eb29c 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] 
edition = "2021" name = "solana-cli-config" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -15,8 +15,8 @@ lazy_static = "1.4.0" serde = "1.0.138" serde_derive = "1.0.103" serde_yaml = "0.8.24" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } url = "2.2.2" [dev-dependencies] diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 667fec840cd73a..138f311f5d5999 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-cli-output" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -21,13 +21,13 @@ pretty-hex = "0.3.0" semver = "1.0.10" serde = "1.0.138" serde_json = "1.0.81" -solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } [dev-dependencies] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index e8978e3f74f7a5..fb4ebf0768a768 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-cli" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -27,29 +27,29 @@ semver = "1.0.10" serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" -solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" } -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-cli-output = { path = "../cli-output", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-config-program = { path = "../programs/config", version = "=1.11.2" } -solana-faucet = { path = "../faucet", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.2" } -solana-sdk 
= { path = "../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.3" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-cli-output = { path = "../cli-output", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-config-program = { path = "../programs/config", version = "=1.11.3" } +solana-faucet = { path = "../faucet", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } solana_rbpf = "=0.2.31" spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0.31" tiny-bip39 = "0.8.2" [dev-dependencies] -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-test-validator = { path = "../test-validator", version = "=1.11.2" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-test-validator = { path = "../test-validator", version = "=1.11.3" } tempfile = "3.3.0" [[bin]] diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 83ce8f1b936d08..2c436a53a62c92 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-client-test" -version = "1.11.2" +version = "1.11.3" description = "Solana RPC Test" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,25 +14,25 @@ publish = false futures-util = "0.3.21" serde_json = "1.0.81" serial_test = "0.8.0" -solana-client = { path = "../client", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" } -solana-rpc = { path = "../rpc", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-test-validator = { path = "../test-validator", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-perf = { path = "../perf", version = 
"=1.11.3" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-test-validator = { path = "../test-validator", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } systemstat = "0.1.11" tokio = { version = "~1.14.1", features = ["full"] } [dev-dependencies] -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/Cargo.toml b/client/Cargo.toml index 1ad9bbfa1c62d6..3d6bcc8d2a8f8c 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-client" -version = "1.11.2" +version = "1.11.3" description = "Solana Client" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -38,17 +38,17 @@ semver = "1.0.10" serde = "1.0.138" serde_derive = "1.0.103" serde_json = "1.0.81" -solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-faucet = { path = "../faucet", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-net-utils = { path = "../net-utils", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-faucet = { path = "../faucet", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } spl-token-2022 = { version = "=0.3.0", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } @@ -61,8 +61,8 @@ url = "2.2.2" anyhow = "1.0.57" assert_matches = "1.5.0" jsonrpc-http-server = "18.0.0" -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/core/Cargo.toml b/core/Cargo.toml index aefdbdbc6ed2a8..ef51be7531d784 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-core" description = "Blockchain, Rebuilt for 
Scale" -version = "1.11.2" +version = "1.11.3" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-core" readme = "../README.md" @@ -35,30 +35,30 @@ rand_chacha = "0.2.2" rayon = "1.5.3" serde = "1.0.138" serde_derive = "1.0.103" -solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.2" } -solana-bloom = { path = "../bloom", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-entry = { path = "../entry", version = "=1.11.2" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } -solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.11.2" } -solana-gossip = { path = "../gossip", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-net-utils = { path = "../net-utils", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-poh = { path = "../poh", version = "=1.11.2" } -solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" } -solana-rpc = { path = "../rpc", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.3" } +solana-bloom = { path = "../bloom", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" } +solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.11.3" } +solana-gossip = { path = "../gossip", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-poh = { path = "../poh", version = "=1.11.3" } +solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = 
"../programs/vote", version = "=1.11.3" } sys-info = "0.9.1" tempfile = "3.3.0" thiserror = "1.0" @@ -70,9 +70,9 @@ matches = "0.1.9" raptorq = "1.7.0" serde_json = "1.0.81" serial_test = "0.8.0" -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" } -solana-stake-program = { path = "../programs/stake", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" } +solana-stake-program = { path = "../programs/stake", version = "=1.11.3" } static_assertions = "1.1.0" systemstat = "0.1.11" test-case = "2.1.0" diff --git a/docs/src/developing/clients/jsonrpc-api.md b/docs/src/developing/clients/jsonrpc-api.md index 97e0df262ba4ec..5d93d5b93ac9a8 100644 --- a/docs/src/developing/clients/jsonrpc-api.md +++ b/docs/src/developing/clients/jsonrpc-api.md @@ -1908,7 +1908,7 @@ Returns all accounts owned by the provided program Pubkey - `offset: ` - offset into program account data to start comparison - `bytes: ` - data to match, as encoded string - `encoding: ` - encoding for filter `bytes` data, either "base58" or "base64". Data is limited in size to 128 or fewer decoded bytes. - **NEW: This field, and base64 support generally, is only available in solana-core v1.11.2 or newer. Please omit when querying nodes on earlier versions** + **NEW: This field, and base64 support generally, is only available in solana-core v1.11.3 or newer. Please omit when querying nodes on earlier versions** - `dataSize: ` - compares the program account data length with the provided data size @@ -3088,7 +3088,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' Result: ```json -{ "jsonrpc": "2.0", "result": { "solana-core": "1.11.2" }, "id": 1 } +{ "jsonrpc": "2.0", "result": { "solana-core": "1.11.3" }, "id": 1 } ``` ### getVoteAccounts diff --git a/dos/Cargo.toml b/dos/Cargo.toml index 4459a82d6ec31c..95c32333b2ce1a 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-dos" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -16,22 +16,22 @@ itertools = "0.10.3" log = "0.4.17" rand = "0.7.0" serde = "1.0.138" -solana-bench-tps = { path = "../bench-tps", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-core = { path = "../core", version = "=1.11.2" } -solana-faucet = { path = "../faucet", version = "=1.11.2" } -solana-gossip = { path = "../gossip", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-net-utils = { path = "../net-utils", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-rpc = { path = "../rpc", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-bench-tps = { path = "../bench-tps", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-core = { path = "../core", version = "=1.11.3" } +solana-faucet = { path = "../faucet", version = "=1.11.3" } +solana-gossip = { path = "../gossip", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version 
= "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] serial_test = "0.8.0" -solana-local-cluster = { path = "../local-cluster", version = "=1.11.2" } +solana-local-cluster = { path = "../local-cluster", version = "=1.11.3" } diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml index 677581933bac02..396ba07a621f4e 100644 --- a/download-utils/Cargo.toml +++ b/download-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-download-utils" -version = "1.11.2" +version = "1.11.3" description = "Solana Download Utils" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,8 +14,8 @@ console = "0.15.0" indicatif = "0.16.2" log = "0.4.17" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/entry/Cargo.toml b/entry/Cargo.toml index a825cf6a5ed194..fb76ca458989d6 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-entry" -version = "1.11.2" +version = "1.11.3" description = "Solana Entry" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,16 +19,16 @@ log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" serde = "1.0.138" -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } [dev-dependencies] matches = "0.1.9" -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 32974f83ac9061..abc45d406691eb 100644 --- a/faucet/Cargo.toml +++ b/faucet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-faucet" -version = "1.11.2" +version = "1.11.3" description = "Solana Faucet" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -17,12 +17,12 @@ crossbeam-channel = "0.5" log = "0.4.17" serde = "1.0.138" serde_derive = "1.0.103" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } 
-solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index be0b1226e11318..28a964052ca303 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-frozen-abi" -version = "1.11.2" +version = "1.11.3" description = "Solana Frozen ABI" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,7 +19,7 @@ serde = "1.0.138" serde_bytes = "0.11" serde_derive = "1.0.103" sha2 = "0.10.2" -solana-frozen-abi-macro = { path = "macro", version = "=1.11.2" } +solana-frozen-abi-macro = { path = "macro", version = "=1.11.3" } thiserror = "1.0" [target.'cfg(not(target_os = "solana"))'.dependencies] @@ -31,7 +31,7 @@ im = { version = "15.1.0", features = ["rayon", "serde"] } memmap2 = "0.5.3" [target.'cfg(not(target_os = "solana"))'.dev-dependencies] -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } [build-dependencies] rustc_version = "0.4" diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index 41a4902519df66..b2ea0396cda47e 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-frozen-abi-macro" -version = "1.11.2" +version = "1.11.3" description = "Solana Frozen ABI Macro" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/genesis-utils/Cargo.toml b/genesis-utils/Cargo.toml index 606124487876aa..a57e2576692a4c 100644 --- a/genesis-utils/Cargo.toml +++ b/genesis-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-genesis-utils" -version = "1.11.2" +version = "1.11.3" description = "Solana Genesis Utils" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils" edition = "2021" [dependencies] -solana-download-utils = { path = "../download-utils", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-download-utils = { path = "../download-utils", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index a1427fc5d28b03..8cef47bfcfddeb 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-genesis" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -15,16 +15,16 @@ clap = "2.33.1" serde = "1.0.138" serde_json = "1.0.81" serde_yaml = "0.8.24" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } 
-solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-entry = { path = "../entry", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-stake-program = { path = "../programs/stake", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-stake-program = { path = "../programs/stake", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } tempfile = "3.3.0" [[bin]] diff --git a/geyser-plugin-interface/Cargo.toml b/geyser-plugin-interface/Cargo.toml index 8412e86e8b1448..e506363eff5958 100644 --- a/geyser-plugin-interface/Cargo.toml +++ b/geyser-plugin-interface/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-geyser-plugin-interface" description = "The Solana Geyser plugin interface." -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,8 +11,8 @@ documentation = "https://docs.rs/solana-geyser-plugin-interface" [dependencies] log = "0.4.17" -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } thiserror = "1.0.31" [package.metadata.docs.rs] diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index a0213daab4705a..92870716410260 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-geyser-plugin-manager" description = "The Solana Geyser plugin manager." 
-version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -16,13 +16,13 @@ json5 = "0.4.1" libloading = "0.7.3" log = "0.4.17" serde_json = "1.0.81" -solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-rpc = { path = "../rpc", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } +solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } thiserror = "1.0.31" [package.metadata.docs.rs] diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 7f1ce383bbd44b..c457c68ea76c04 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-gossip" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -27,24 +27,24 @@ rayon = "1.5.3" serde = "1.0.138" serde_bytes = "0.11" serde_derive = "1.0.103" -solana-bloom = { path = "../bloom", version = "=1.11.2" } -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-entry = { path = "../entry", version = "=1.11.2" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-net-utils = { path = "../net-utils", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-bloom = { path = "../bloom", version = "=1.11.3" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-metrics = { 
path = "../metrics", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } thiserror = "1.0" [dev-dependencies] diff --git a/install/Cargo.toml b/install/Cargo.toml index b59eeee734eeeb..4ab5e51638b623 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-install" description = "The solana cluster software installer" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -26,12 +26,12 @@ reqwest = { version = "0.11.11", default-features = false, features = ["blocking semver = "1.0.10" serde = { version = "1.0.138", features = ["derive"] } serde_yaml = "0.8.24" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-config-program = { path = "../programs/config", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-config-program = { path = "../programs/config", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } tar = "0.4.38" tempfile = "3.3.0" url = "2.2.2" diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml index 643a780699e569..300fac1237869e 100644 --- a/keygen/Cargo.toml +++ b/keygen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-keygen" -version = "1.11.2" +version = "1.11.3" description = "Solana key generation utility" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,11 +14,11 @@ bs58 = "0.4.0" clap = { version = "3.1.5", features = ["cargo"] } dirs-next = "2.0.0" num_cpus = "1.13.1" -solana-clap-v3-utils = { path = "../clap-v3-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-clap-v3-utils = { path = "../clap-v3-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } tiny-bip39 = "0.8.2" [[bin]] diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 3147087b6946cd..035e6e00d45310 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-ledger-tool" description = "Blockchain, Rebuilt 
for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -22,20 +22,20 @@ log = { version = "0.4.17" } regex = "1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.81" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-output = { path = "../cli-output", version = "=1.11.2" } -solana-core = { path = "../core", version = "=1.11.2" } -solana-entry = { path = "../entry", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-stake-program = { path = "../programs/stake", version = "=1.11.2" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-output = { path = "../cli-output", version = "=1.11.3" } +solana-core = { path = "../core", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-stake-program = { path = "../programs/stake", version = "=1.11.3" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } tokio = { version = "~1.14.1", features = ["full"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 4b03a6e8614014..e3b98d8c952e4a 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-ledger" -version = "1.11.2" +version = "1.11.3" description = "Solana ledger" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -34,22 +34,22 @@ reed-solomon-erasure = { version = "5.0.2", features = ["simd-accel"] } serde = "1.0.138" serde_bytes = "0.11.6" sha2 = "0.10.2" -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" } -solana-entry = { path = "../entry", version = "=1.11.2" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-stake-program = { 
path = "../programs/stake", version = "=1.11.2" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.2" } -solana-storage-proto = { path = "../storage-proto", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" } +solana-measure = { path = "../measure", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-stake-program = { path = "../programs/stake", version = "=1.11.3" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.3" } +solana-storage-proto = { path = "../storage-proto", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } static_assertions = "1.1.0" tempfile = "3.3.0" thiserror = "1.0" @@ -68,8 +68,8 @@ features = ["lz4"] assert_matches = "1.5.0" bs58 = "0.4.0" matches = "0.1.9" -solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } [build-dependencies] rustc_version = "0.4" diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index e769dc886b4479..ca8000b447737b 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-local-cluster" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -16,25 +16,25 @@ itertools = "0.10.3" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -solana-client = { path = "../client", version = "=1.11.2" } -solana-config-program = { path = "../programs/config", version = "=1.11.2" } -solana-core = { path = "../core", version = "=1.11.2" } -solana-entry = { path = "../entry", version = "=1.11.2" } -solana-gossip = { path = "../gossip", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-stake-program = { path = "../programs/stake", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-vote-program = { path = "../programs/vote", version = "=1.11.2" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-config-program = { path = "../programs/config", version = "=1.11.3" } +solana-core = { path = "../core", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-gossip = { path = "../gossip", 
version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-stake-program = { path = "../programs/stake", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } tempfile = "3.3.0" [dev-dependencies] assert_matches = "1.5.0" gag = "1.0.0" serial_test = "0.8.0" -solana-download-utils = { path = "../download-utils", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-download-utils = { path = "../download-utils", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml index b01c9f90dfa8c4..da137279d294e7 100644 --- a/log-analyzer/Cargo.toml +++ b/log-analyzer/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-log-analyzer" description = "The solana cluster network analysis tool" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,8 +14,8 @@ byte-unit = "4.0.14" clap = { version = "3.1.5", features = ["cargo"] } serde = "1.0.138" serde_json = "1.0.81" -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } [[bin]] name = "solana-log-analyzer" diff --git a/logger/Cargo.toml b/logger/Cargo.toml index 40d4b2bb6df6d6..7c529047208ebb 100644 --- a/logger/Cargo.toml +++ b/logger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-logger" -version = "1.11.2" +version = "1.11.3" description = "Solana Logger" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/measure/Cargo.toml b/measure/Cargo.toml index e6161327bf2a00..721f3f8bf03735 100644 --- a/measure/Cargo.toml +++ b/measure/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-measure" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-measure" readme = "../README.md" @@ -12,7 +12,7 @@ edition = "2021" [dependencies] log = "0.4.17" -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/merkle-root-bench/Cargo.toml b/merkle-root-bench/Cargo.toml index 6d53510f2230bd..8003561f7489f9 100644 --- a/merkle-root-bench/Cargo.toml +++ b/merkle-root-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-merkle-root-bench" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,11 +11,11 @@ publish = false [dependencies] clap = "2.33.1" log = "0.4.17" -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-measure = { path = "../measure", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } 
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml
index 14f4ae1b398af6..1965664a2f78d0 100644
--- a/merkle-tree/Cargo.toml
+++ b/merkle-tree/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-merkle-tree"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Merkle Tree"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"

 [dependencies]
 fast-math = "0.1"
-solana-program = { path = "../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../sdk/program", version = "=1.11.3" }

 # This can go once the BPF toolchain target Rust 1.42.0+
 [target.bpfel-unknown-unknown.dependencies]
diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml
index 2cfd3618b164d3..6f783666ee790b 100644
--- a/metrics/Cargo.toml
+++ b/metrics/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-metrics"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Metrics"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -15,7 +15,7 @@ gethostname = "0.2.3"
 lazy_static = "1.4.0"
 log = "0.4.17"
 reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }

 [dev-dependencies]
 env_logger = "0.9.0"
diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml
index d9cbb8b2b95f22..68e24b4eeda98d 100644
--- a/net-shaper/Cargo.toml
+++ b/net-shaper/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-net-shaper"
 description = "The solana cluster network shaping tool"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,7 +14,7 @@ clap = { version = "3.1.5", features = ["cargo"] }
 rand = "0.7.0"
 serde = { version = "1.0.138", features = ["derive"] }
 serde_json = "1.0.81"
-solana-logger = { path = "../logger", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }

 [[bin]]
 name = "solana-net-shaper"
diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml
index e3035812986cdf..754f60b4152c11 100644
--- a/net-utils/Cargo.toml
+++ b/net-utils/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-net-utils"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Network Utilities"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ rand = "0.7.0"
 serde = "1.0.138"
 serde_derive = "1.0.103"
 socket2 = "0.4.4"
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }
 tokio = { version = "~1.14.1", features = ["full"] }
 url = "2.2.2"

diff --git a/notifier/Cargo.toml b/notifier/Cargo.toml
index b64c5dec858404..dcbf91b3d9821a 100644
--- a/notifier/Cargo.toml
+++ b/notifier/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-notifier"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Notifier"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
diff --git a/perf/Cargo.toml b/perf/Cargo.toml
index dd2c26fd5bcd8b..f86b4037fb7495 100644
--- a/perf/Cargo.toml
+++ b/perf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-perf"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Performance APIs"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -22,10 +22,10 @@ log = "0.4.17"
 rand = "0.7.0"
 rayon = "1.5.3"
 serde = "1.0.138"
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-vote-program = { path = "../programs/vote", version = "=1.11.2" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.3" }

 [target."cfg(target_os = \"linux\")".dependencies]
 caps = "0.5.3"
@@ -37,7 +37,7 @@ name = "solana_perf"

 [dev-dependencies]
 matches = "0.1.9"
-solana-logger = { path = "../logger", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }

 [[bench]]
 name = "sigverify"
diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml
index 26cd7bd0da930f..fd75731a995f65 100644
--- a/poh-bench/Cargo.toml
+++ b/poh-bench/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-poh-bench"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,12 +14,12 @@ clap = { version = "3.1.5", features = ["cargo"] }
 log = "0.4.17"
 rand = "0.7.0"
 rayon = "1.5.3"
-solana-entry = { path = "../entry", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-perf = { path = "../perf", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
+solana-entry = { path = "../entry", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-perf = { path = "../perf", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/poh/Cargo.toml b/poh/Cargo.toml
index ebdbc633b174dd..3bb88e57a36e2d 100644
--- a/poh/Cargo.toml
+++ b/poh/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-poh"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana PoH"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -13,21 +13,21 @@ edition = "2021"
 core_affinity = "0.5.10"
 crossbeam-channel = "0.5"
 log = "0.4.17"
-solana-entry = { path = "../entry", version = "=1.11.2" }
-solana-ledger = { path = "../ledger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-sys-tuner = { path = "../sys-tuner", version = "=1.11.2" }
+solana-entry = { path = "../entry", version = "=1.11.3" }
+solana-ledger = { path = "../ledger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-sys-tuner = { path = "../sys-tuner", version = "=1.11.3" }
 thiserror = "1.0"

 [dev-dependencies]
 bincode = "1.3.3"
 matches = "0.1.9"
 rand = "0.7.0"
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-perf = { path = "../perf", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-perf = { path = "../perf", version = "=1.11.3" }

 [lib]
 crate-type = ["lib"]
diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml
index 0ba4b59be62c14..ce4866af9c7689 100644
--- a/program-runtime/Cargo.toml
+++ b/program-runtime/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-program-runtime"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana program runtime"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -20,16 +20,16 @@ log = "0.4.14"
 num-derive = { version = "0.3" }
 num-traits = { version = "0.2" }
 serde = { version = "1.0.129", features = ["derive", "rc"] }
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
+solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
 thiserror = "1.0"
 enum-iterator = "0.8.1"

 [dev-dependencies]
-solana-logger = { path = "../logger", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }

 [lib]
 crate-type = ["lib"]
diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml
index 3c531d069bf420..e1c454d94fa473 100644
--- a/program-test/Cargo.toml
+++ b/program-test/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 license = "Apache-2.0"
 name = "solana-program-test"
 repository = "https://github.com/solana-labs/solana"
-version = "1.11.2"
+version = "1.11.3"

 [dependencies]
 assert_matches = "1.5.0"
@@ -15,13 +15,13 @@ bincode = "1.3.3"
 chrono-humanize = "0.2.1"
 log = "0.4.17"
 serde = "1.0.138"
-solana-banks-client = { path = "../banks-client", version = "=1.11.2" }
-solana-banks-server = { path = "../banks-server", version = "=1.11.2" }
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-vote-program = { path = "../programs/vote", version = "=1.11.2" }
+solana-banks-client = { path = "../banks-client", version = "=1.11.3" }
+solana-banks-server = { path = "../banks-server", version = "=1.11.3" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.3" }
 thiserror = "1.0"
 tokio = { version = "~1.14.1", features = ["full"] }
diff --git a/programs/address-lookup-table-tests/Cargo.toml b/programs/address-lookup-table-tests/Cargo.toml
index 5d9e143a10b9ce..3ab3080a12cadb 100644
--- a/programs/address-lookup-table-tests/Cargo.toml
+++ b/programs/address-lookup-table-tests/Cargo.toml
@@ -3,7 +3,7 @@

 [package]
 name = "solana-address-lookup-table-program-tests"
-version = "1.11.2"
+version = "1.11.3"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
@@ -14,9 +14,9 @@ publish = false
 [dev-dependencies]
 assert_matches = "1.5.0"
 bincode = "1.3.3"
-solana-address-lookup-table-program = { path = "../address-lookup-table", version = "=1.11.2" }
-solana-program-test = { path = "../../program-test", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
+solana-address-lookup-table-program = { path = "../address-lookup-table", version = "=1.11.3" }
+solana-program-test = { path = "../../program-test", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml
index a784c1555277f9..42deafdbb56bf6 100644
--- a/programs/address-lookup-table/Cargo.toml
+++ b/programs/address-lookup-table/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-address-lookup-table-program"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana address lookup table program"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -16,14 +16,14 @@ log = "0.4.17"
 num-derive = "0.3"
 num-traits = "0.2"
 serde = { version = "1.0.138", features = ["derive"] }
-solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.2" }
-solana-program = { path = "../../sdk/program", version = "=1.11.2" }
+solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.3" }
+solana-program = { path = "../../sdk/program", version = "=1.11.3" }
 thiserror = "1.0"

 [target.'cfg(not(target_os = "solana"))'.dependencies]
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }

 [build-dependencies]
 rustc_version = "0.4"
diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock
index 47981f4062ddaa..8b0596eeef6ac3 100644
--- a/programs/bpf/Cargo.lock
+++ b/programs/bpf/Cargo.lock
@@ -3915,7 +3915,7 @@ dependencies = [

 [[package]]
 name = "solana-account-decoder"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "Inflector",
  "base64 0.13.0",
@@ -3927,7 +3927,7 @@ dependencies = [
  "serde_derive",
  "serde_json",
  "solana-config-program",
- "solana-sdk 1.11.2",
+ "solana-sdk 1.11.3",
"solana-vote-program", "spl-token", "spl-token-2022", @@ -3937,7 +3937,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "bytemuck", @@ -3946,23 +3946,23 @@ dependencies = [ "num-traits", "rustc_version", "serde", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-program 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-program 1.11.3", "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", ] [[package]] name = "solana-banks-client" -version = "1.11.2" +version = "1.11.3" dependencies = [ "borsh", "futures 0.3.21", "solana-banks-interface", - "solana-program 1.11.2", - "solana-sdk 1.11.2", + "solana-program 1.11.3", + "solana-sdk 1.11.3", "tarpc", "thiserror", "tokio", @@ -3971,16 +3971,16 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.11.2" +version = "1.11.3" dependencies = [ "serde", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "tarpc", ] [[package]] name = "solana-banks-server" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "crossbeam-channel", @@ -3988,7 +3988,7 @@ dependencies = [ "solana-banks-interface", "solana-client", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-send-transaction-service", "tarpc", "tokio", @@ -3998,7 +3998,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bv", "fnv", @@ -4008,14 +4008,14 @@ dependencies = [ "rustc_version", "serde", "serde_derive", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-sdk 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-sdk 1.11.3", ] [[package]] name = "solana-bpf-loader-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "byteorder 1.4.3", @@ -4024,15 +4024,15 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-program-runtime", - "solana-sdk 1.11.2", - "solana-zk-token-sdk 1.11.2", + "solana-sdk 1.11.3", + "solana-zk-token-sdk 1.11.3", "solana_rbpf", "thiserror", ] [[package]] name = "solana-bpf-programs" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "byteorder 1.4.3", @@ -4047,11 +4047,11 @@ dependencies = [ "solana-bpf-rust-realloc", "solana-bpf-rust-realloc-invoke", "solana-cli-output", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-measure", "solana-program-runtime", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-transaction-status", "solana_rbpf", "walkdir", @@ -4059,384 +4059,384 @@ dependencies = [ [[package]] name = "solana-bpf-rust-128bit" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-bpf-rust-128bit-dep", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-128bit-dep" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-alloc" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-call-depth" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-caller-access" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = 
"solana-bpf-rust-curve25519" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", - "solana-zk-token-sdk 1.11.2", + "solana-program 1.11.3", + "solana-zk-token-sdk 1.11.3", ] [[package]] name = "solana-bpf-rust-custom-heap" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-dep-crate" -version = "1.11.2" +version = "1.11.3" dependencies = [ "byteorder 1.4.3", "solana-address-lookup-table-program", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-deprecated-loader" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-dup-accounts" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-error-handling" -version = "1.11.2" +version = "1.11.3" dependencies = [ "num-derive", "num-traits", - "solana-program 1.11.2", + "solana-program 1.11.3", "thiserror", ] [[package]] name = "solana-bpf-rust-external-spend" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-finalize" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-get-minimum-delegation" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-inner_instruction_alignment_check" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-instruction-introspection" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-invoke" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-bpf-rust-invoked", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-invoke-and-error" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-invoke-and-ok" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-invoke-and-return" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-invoked" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-iter" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-log-data" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-many-args" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-bpf-rust-many-args-dep", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-many-args-dep" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-mem" -version = "1.11.2" 
+version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", "solana-program-runtime", "solana-program-test", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-bpf-rust-membuiltins" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-bpf-rust-mem", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-noop" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-panic" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-param-passing" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-bpf-rust-param-passing-dep", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-param-passing-dep" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-rand" -version = "1.11.2" +version = "1.11.3" dependencies = [ "getrandom 0.1.14", "rand 0.7.3", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-realloc" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-realloc-invoke" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-bpf-rust-realloc", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-ro-account_modify" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-ro-modify" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-sanity" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", "solana-program-runtime", "solana-program-test", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-bpf-rust-secp256k1-recover" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-sha" -version = "1.11.2" +version = "1.11.3" dependencies = [ "blake3", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-sibling-instructions" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-sibling_inner-instructions" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-simulation" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-logger 1.11.2", - "solana-program 1.11.2", + "solana-logger 1.11.3", + "solana-program 1.11.3", "solana-program-test", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-validator", ] [[package]] name = "solana-bpf-rust-spoof1" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-spoof1-system" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-sysvar" -version = "1.11.2" +version = 
"1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", "solana-program-runtime", "solana-program-test", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-bpf-rust-upgradeable" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bpf-rust-upgraded" -version = "1.11.2" +version = "1.11.3" dependencies = [ - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-bucket-map" -version = "1.11.2" +version = "1.11.3" dependencies = [ "log", "memmap2", "modular-bitfield", "rand 0.7.3", "solana-measure", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "tempfile", ] [[package]] name = "solana-clap-utils" -version = "1.11.2" +version = "1.11.3" dependencies = [ "chrono", "clap 2.33.3", "rpassword", "solana-perf", "solana-remote-wallet", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", "tiny-bip39", "uriparse", @@ -4445,7 +4445,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.11.2" +version = "1.11.3" dependencies = [ "dirs-next", "lazy_static", @@ -4453,13 +4453,13 @@ dependencies = [ "serde_derive", "serde_yaml", "solana-clap-utils", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "url 2.2.2", ] [[package]] name = "solana-cli-output" -version = "1.11.2" +version = "1.11.3" dependencies = [ "Inflector", "base64 0.13.0", @@ -4476,7 +4476,7 @@ dependencies = [ "solana-clap-utils", "solana-cli-config", "solana-client", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-transaction-status", "solana-vote-program", "spl-memo", @@ -4484,7 +4484,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.11.2" +version = "1.11.3" dependencies = [ "async-mutex", "async-trait", @@ -4520,7 +4520,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-net-utils", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-streamer", "solana-transaction-status", "solana-version", @@ -4536,27 +4536,27 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-config-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "chrono", "serde", "serde_derive", "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-core" -version = "1.11.2" +version = "1.11.3" dependencies = [ "ahash", "base64 0.13.0", @@ -4584,8 +4584,8 @@ dependencies = [ "solana-bloom", "solana-client", "solana-entry", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", @@ -4598,7 +4598,7 @@ dependencies = [ "solana-rayon-threadlimit", "solana-rpc", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-send-transaction-service", "solana-streamer", "solana-transaction-status", @@ -4614,19 +4614,19 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.11.2" +version = "1.11.3" dependencies = [ "console", "indicatif", "log", "reqwest", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-entry" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "crossbeam-channel", @@ -4642,12 +4642,12 @@ dependencies = [ "solana-metrics", "solana-perf", "solana-rayon-threadlimit", - 
"solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-faucet" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "byteorder 1.4.3", @@ -4658,9 +4658,9 @@ dependencies = [ "serde_derive", "solana-clap-utils", "solana-cli-config", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-metrics", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-version", "spl-memo", "thiserror", @@ -4691,7 +4691,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bs58", "bv", @@ -4706,7 +4706,7 @@ dependencies = [ "serde_bytes", "serde_derive", "sha2 0.10.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi-macro 1.11.3", "thiserror", ] @@ -4724,7 +4724,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.11.2" +version = "1.11.3" dependencies = [ "proc-macro2 1.0.38", "quote 1.0.18", @@ -4734,26 +4734,26 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.11.2" +version = "1.11.3" dependencies = [ "solana-download-utils", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-geyser-plugin-interface" -version = "1.11.2" +version = "1.11.3" dependencies = [ "log", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-transaction-status", "thiserror", ] [[package]] name = "solana-geyser-plugin-manager" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bs58", "crossbeam-channel", @@ -4766,14 +4766,14 @@ dependencies = [ "solana-metrics", "solana-rpc", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-transaction-status", "thiserror", ] [[package]] name = "solana-gossip" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "bv", @@ -4797,17 +4797,17 @@ dependencies = [ "solana-clap-utils", "solana-client", "solana-entry", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-ledger", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-measure", "solana-metrics", "solana-net-utils", "solana-perf", "solana-rayon-threadlimit", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-streamer", "solana-version", "solana-vote-program", @@ -4816,7 +4816,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "bitflags", @@ -4846,15 +4846,15 @@ dependencies = [ "sha2 0.10.2", "solana-bpf-loader-program", "solana-entry", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-measure", "solana-metrics", "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-stake-program", "solana-storage-bigtable", "solana-storage-proto", @@ -4881,7 +4881,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.11.2" +version = "1.11.3" dependencies = [ "env_logger", "lazy_static", @@ -4890,36 +4890,36 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.11.2" +version = "1.11.3" dependencies = [ "log", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-merkle-tree" -version = "1.11.2" +version = "1.11.3" dependencies = [ "fast-math", "matches", - "solana-program 1.11.2", + "solana-program 1.11.3", ] [[package]] name = "solana-metrics" -version = "1.11.2" +version = "1.11.3" dependencies = 
[ "crossbeam-channel", "gethostname", "lazy_static", "log", "reqwest", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-net-utils" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "clap 3.1.6", @@ -4930,8 +4930,8 @@ dependencies = [ "serde", "serde_derive", "socket2", - "solana-logger 1.11.2", - "solana-sdk 1.11.2", + "solana-logger 1.11.3", + "solana-sdk 1.11.3", "solana-version", "tokio", "url 2.2.2", @@ -4939,7 +4939,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.11.2" +version = "1.11.3" dependencies = [ "ahash", "bincode", @@ -4958,13 +4958,13 @@ dependencies = [ "serde", "solana-metrics", "solana-rayon-threadlimit", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", ] [[package]] name = "solana-poh" -version = "1.11.2" +version = "1.11.3" dependencies = [ "core_affinity", "crossbeam-channel", @@ -4974,7 +4974,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-sys-tuner", "thiserror", ] @@ -5023,7 +5023,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "bincode", @@ -5055,16 +5055,16 @@ dependencies = [ "serde_derive", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-sdk-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-sdk-macro 1.11.3", "thiserror", "wasm-bindgen", ] [[package]] name = "solana-program-runtime" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "bincode", @@ -5078,17 +5078,17 @@ dependencies = [ "num-traits", "rustc_version", "serde", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-measure", "solana-metrics", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", ] [[package]] name = "solana-program-test" -version = "1.11.2" +version = "1.11.3" dependencies = [ "assert_matches", "async-trait", @@ -5100,10 +5100,10 @@ dependencies = [ "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-program-runtime", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", "thiserror", "tokio", @@ -5111,7 +5111,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.11.2" +version = "1.11.3" dependencies = [ "lazy_static", "num_cpus", @@ -5119,7 +5119,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.11.2" +version = "1.11.3" dependencies = [ "console", "dialoguer", @@ -5129,14 +5129,14 @@ dependencies = [ "parking_lot 0.12.0", "qstring", "semver", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", "uriparse", ] [[package]] name = "solana-rpc" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "bincode", @@ -5169,7 +5169,7 @@ dependencies = [ "solana-poh", "solana-rayon-threadlimit", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", @@ -5186,7 +5186,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.11.2" +version = "1.11.3" dependencies = [ "arrayref", "bincode", @@ -5222,17 +5222,17 @@ dependencies = [ "solana-bucket-map", "solana-compute-budget-program", "solana-config-program", - "solana-frozen-abi 1.11.2", - 
"solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-measure", "solana-metrics", "solana-program-runtime", "solana-rayon-threadlimit", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-stake-program", "solana-vote-program", "solana-zk-token-proof-program", - "solana-zk-token-sdk 1.11.2", + "solana-zk-token-sdk 1.11.3", "strum", "strum_macros", "symlink", @@ -5295,7 +5295,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.11.2" +version = "1.11.3" dependencies = [ "assert_matches", "base64 0.13.0", @@ -5332,11 +5332,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.1", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-logger 1.11.2", - "solana-program 1.11.2", - "solana-sdk-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-logger 1.11.3", + "solana-program 1.11.3", + "solana-sdk-macro 1.11.3", "thiserror", "uriparse", "wasm-bindgen", @@ -5357,7 +5357,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bs58", "proc-macro2 1.0.38", @@ -5368,7 +5368,7 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "1.11.2" +version = "1.11.3" dependencies = [ "crossbeam-channel", "log", @@ -5376,12 +5376,12 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", ] [[package]] name = "solana-stake-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "log", @@ -5391,18 +5391,18 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-metrics", "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", "thiserror", ] [[package]] name = "solana-storage-bigtable" -version = "1.11.2" +version = "1.11.3" dependencies = [ "backoff", "bincode", @@ -5423,7 +5423,7 @@ dependencies = [ "serde_derive", "smpl_jwt", "solana-metrics", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-storage-proto", "solana-transaction-status", "thiserror", @@ -5434,21 +5434,21 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "bs58", "prost 0.10.4", "serde", "solana-account-decoder", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-transaction-status", "tonic-build 0.7.2", ] [[package]] name = "solana-streamer" -version = "1.11.2" +version = "1.11.3" dependencies = [ "crossbeam-channel", "futures-util", @@ -5467,20 +5467,20 @@ dependencies = [ "rustls 0.20.6", "solana-metrics", "solana-perf", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", "tokio", ] [[package]] name = "solana-sys-tuner" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap 2.33.3", "libc", "log", "nix", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-version", "sysctl", "unix_socket2", @@ -5489,7 +5489,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.11.2" +version = "1.11.3" dependencies = [ "base64 0.13.0", "log", @@ -5500,20 +5500,20 @@ dependencies = [ "solana-core", "solana-gossip", "solana-ledger", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-net-utils", "solana-program-runtime", "solana-program-test", "solana-rpc", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", 
"solana-streamer", "tokio", ] [[package]] name = "solana-transaction-status" -version = "1.11.2" +version = "1.11.3" dependencies = [ "Inflector", "base64 0.13.0", @@ -5529,7 +5529,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-vote-program", "spl-associated-token-account", "spl-memo", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.11.2" +version = "1.11.3" dependencies = [ "chrono", "clap 2.33.3", @@ -5571,14 +5571,14 @@ dependencies = [ "solana-genesis-utils", "solana-gossip", "solana-ledger", - "solana-logger 1.11.2", + "solana-logger 1.11.3", "solana-metrics", "solana-net-utils", "solana-perf", "solana-poh", "solana-rpc", "solana-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", @@ -5591,21 +5591,21 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.11.2" +version = "1.11.3" dependencies = [ "log", "rustc_version", "semver", "serde", "serde_derive", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", - "solana-sdk 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", + "solana-sdk 1.11.3", ] [[package]] name = "solana-vote-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bincode", "log", @@ -5614,25 +5614,25 @@ dependencies = [ "rustc_version", "serde", "serde_derive", - "solana-frozen-abi 1.11.2", - "solana-frozen-abi-macro 1.11.2", + "solana-frozen-abi 1.11.3", + "solana-frozen-abi-macro 1.11.3", "solana-metrics", "solana-program-runtime", - "solana-sdk 1.11.2", + "solana-sdk 1.11.3", "thiserror", ] [[package]] name = "solana-zk-token-proof-program" -version = "1.11.2" +version = "1.11.3" dependencies = [ "bytemuck", "getrandom 0.1.14", "num-derive", "num-traits", "solana-program-runtime", - "solana-sdk 1.11.2", - "solana-zk-token-sdk 1.11.2", + "solana-sdk 1.11.3", + "solana-zk-token-sdk 1.11.3", ] [[package]] @@ -5667,7 +5667,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.11.2" +version = "1.11.3" dependencies = [ "aes-gcm-siv", "arrayref", @@ -5686,8 +5686,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.11.2", - "solana-sdk 1.11.2", + "solana-program 1.11.3", + "solana-sdk 1.11.3", "subtle", "thiserror", "zeroize", diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml index 2f99a38843367e..d9a6ffd22fa484 100644 --- a/programs/bpf/Cargo.toml +++ b/programs/bpf/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-bpf-programs" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" documentation = "https://docs.rs/solana" homepage = "https://solana.com/" readme = "README.md" @@ -26,18 +26,18 @@ itertools = "0.10.1" log = "0.4.11" miow = "0.3.6" net2 = "0.2.37" -solana-account-decoder = { path = "../../account-decoder", version = "=1.11.2" } -solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.11.2" } -solana-bpf-rust-invoke = { path = "rust/invoke", version = "=1.11.2" } -solana-bpf-rust-realloc = { path = "rust/realloc", version = "=1.11.2" } -solana-bpf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.11.2" } -solana-cli-output = { path = "../../cli-output", version = "=1.11.2" } -solana-logger = { path = "../../logger", version = "=1.11.2" } -solana-measure = { path = "../../measure", version = "=1.11.2" } -solana-program-runtime = { path = "../../program-runtime", 
version = "=1.11.2" } -solana-runtime = { path = "../../runtime", version = "=1.11.2" } -solana-sdk = { path = "../../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.11.2" } +solana-account-decoder = { path = "../../account-decoder", version = "=1.11.3" } +solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.11.3" } +solana-bpf-rust-invoke = { path = "rust/invoke", version = "=1.11.3" } +solana-bpf-rust-realloc = { path = "rust/realloc", version = "=1.11.3" } +solana-bpf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.11.3" } +solana-cli-output = { path = "../../cli-output", version = "=1.11.3" } +solana-logger = { path = "../../logger", version = "=1.11.3" } +solana-measure = { path = "../../measure", version = "=1.11.3" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" } +solana-runtime = { path = "../../runtime", version = "=1.11.3" } +solana-sdk = { path = "../../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../../transaction-status", version = "=1.11.3" } solana_rbpf = "=0.2.31" [[bench]] diff --git a/programs/bpf/rust/128bit/Cargo.toml b/programs/bpf/rust/128bit/Cargo.toml index c4ae6451829e05..1086027892fbbd 100644 --- a/programs/bpf/rust/128bit/Cargo.toml +++ b/programs/bpf/rust/128bit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit" edition = "2021" [dependencies] -solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "=1.11.2" } -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } +solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "=1.11.3" } +solana-program = { path = "../../../../sdk/program", version = "=1.11.3" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/128bit_dep/Cargo.toml b/programs/bpf/rust/128bit_dep/Cargo.toml index 9c62b642326484..5e181679268465 100644 --- a/programs/bpf/rust/128bit_dep/Cargo.toml +++ b/programs/bpf/rust/128bit_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit-dep" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit-dep" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } +solana-program = { path = "../../../../sdk/program", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/alloc/Cargo.toml b/programs/bpf/rust/alloc/Cargo.toml index 798223f99ae9a0..289b3255dffe7c 100644 --- a/programs/bpf/rust/alloc/Cargo.toml +++ b/programs/bpf/rust/alloc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-alloc" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-alloc" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } +solana-program = { path = "../../../../sdk/program", 
version = "=1.11.3" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/call_depth/Cargo.toml b/programs/bpf/rust/call_depth/Cargo.toml index 826203a775011a..95aeba28c3bfec 100644 --- a/programs/bpf/rust/call_depth/Cargo.toml +++ b/programs/bpf/rust/call_depth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-call-depth" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-call-depth" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } +solana-program = { path = "../../../../sdk/program", version = "=1.11.3" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/caller_access/Cargo.toml b/programs/bpf/rust/caller_access/Cargo.toml index 8190f342acb7f9..728f1bc328975b 100644 --- a/programs/bpf/rust/caller_access/Cargo.toml +++ b/programs/bpf/rust/caller_access/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-caller-access" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-caller-access" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } +solana-program = { path = "../../../../sdk/program", version = "=1.11.3" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/curve25519/Cargo.toml b/programs/bpf/rust/curve25519/Cargo.toml index 43d9235e6094e1..3007507c35602c 100644 --- a/programs/bpf/rust/curve25519/Cargo.toml +++ b/programs/bpf/rust/curve25519/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-curve25519" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-zktoken_crypto" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } -solana-zk-token-sdk = { path = "../../../../zk-token-sdk", version = "=1.11.2" } +solana-program = { path = "../../../../sdk/program", version = "=1.11.3" } +solana-zk-token-sdk = { path = "../../../../zk-token-sdk", version = "=1.11.3" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/custom_heap/Cargo.toml b/programs/bpf/rust/custom_heap/Cargo.toml index c9d101f26e05ac..364ada79bd323c 100644 --- a/programs/bpf/rust/custom_heap/Cargo.toml +++ b/programs/bpf/rust/custom_heap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-custom-heap" -version = "1.11.2" +version = "1.11.3" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-custom-heap" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.11.2" } +solana-program = { path = "../../../../sdk/program", version = "=1.11.3" } [features] default = ["custom-heap"] diff --git a/programs/bpf/rust/dep_crate/Cargo.toml b/programs/bpf/rust/dep_crate/Cargo.toml index f731361fbcc6df..8251b0c6c1abde 100644 --- a/programs/bpf/rust/dep_crate/Cargo.toml +++ b/programs/bpf/rust/dep_crate/Cargo.toml 
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-dep-crate"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ edition = "2021"
 [dependencies]
 byteorder = { version = "1", default-features = false }
 # list of crates which must be buildable for bpf programs
-solana-address-lookup-table-program = { path = "../../../../programs/address-lookup-table", version = "=1.11.2" }
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-address-lookup-table-program = { path = "../../../../programs/address-lookup-table", version = "=1.11.3" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/deprecated_loader/Cargo.toml b/programs/bpf/rust/deprecated_loader/Cargo.toml
index 6d171ef92e5f27..096ab95f1220d0 100644
--- a/programs/bpf/rust/deprecated_loader/Cargo.toml
+++ b/programs/bpf/rust/deprecated_loader/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-deprecated-loader"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-deprecated-loader"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/dup_accounts/Cargo.toml b/programs/bpf/rust/dup_accounts/Cargo.toml
index 8a10f3480a0ec5..b4b7bf0300ba34 100644
--- a/programs/bpf/rust/dup_accounts/Cargo.toml
+++ b/programs/bpf/rust/dup_accounts/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-dup-accounts"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-dup-accounts"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/error_handling/Cargo.toml b/programs/bpf/rust/error_handling/Cargo.toml
index d77100225526c7..b5d2ca007a779d 100644
--- a/programs/bpf/rust/error_handling/Cargo.toml
+++ b/programs/bpf/rust/error_handling/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-error-handling"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2021"
 [dependencies]
 num-derive = "0.3"
 num-traits = "0.2"
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }
 thiserror = "1.0"

 [lib]
diff --git a/programs/bpf/rust/external_spend/Cargo.toml b/programs/bpf/rust/external_spend/Cargo.toml
index 0e38cbe32d91d4..fe53a6482966be 100644
--- a/programs/bpf/rust/external_spend/Cargo.toml
+++ b/programs/bpf/rust/external_spend/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-external-spend"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-external-spend"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/finalize/Cargo.toml b/programs/bpf/rust/finalize/Cargo.toml
index 5a37518902e88b..8c407cf118997b 100644
--- a/programs/bpf/rust/finalize/Cargo.toml
+++ b/programs/bpf/rust/finalize/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-finalize"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-finalize"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/get_minimum_delegation/Cargo.toml b/programs/bpf/rust/get_minimum_delegation/Cargo.toml
index 722753f29948d1..7f77ac14ccd831 100644
--- a/programs/bpf/rust/get_minimum_delegation/Cargo.toml
+++ b/programs/bpf/rust/get_minimum_delegation/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-get-minimum-delegation"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-get-minimum-delegation"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/inner_instruction_alignment_check/Cargo.toml b/programs/bpf/rust/inner_instruction_alignment_check/Cargo.toml
index ef764c7fe4a51c..0e772517bcd978 100644
--- a/programs/bpf/rust/inner_instruction_alignment_check/Cargo.toml
+++ b/programs/bpf/rust/inner_instruction_alignment_check/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-inner_instruction_alignment_check"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-inner_instruction_alignment_che
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/instruction_introspection/Cargo.toml b/programs/bpf/rust/instruction_introspection/Cargo.toml
index 2e734fd85f06fb..f678008c7a287f 100644
--- a/programs/bpf/rust/instruction_introspection/Cargo.toml
+++ b/programs/bpf/rust/instruction_introspection/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-instruction-introspection"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-instruction-introspection"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/invoke/Cargo.toml b/programs/bpf/rust/invoke/Cargo.toml
index e4cfd09cc6416c..df7a0c070d988d 100644
--- a/programs/bpf/rust/invoke/Cargo.toml
+++ b/programs/bpf/rust/invoke/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-invoke"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -15,7 +15,7 @@ program = []

 [dependencies]
 solana-bpf-rust-invoked = { path = "../invoked", default-features = false }
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["lib", "cdylib"]
diff --git a/programs/bpf/rust/invoke_and_error/Cargo.toml b/programs/bpf/rust/invoke_and_error/Cargo.toml
index a69390c51b68e0..979556cafa6b16 100644
--- a/programs/bpf/rust/invoke_and_error/Cargo.toml
+++ b/programs/bpf/rust/invoke_and_error/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-invoke-and-error"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-error"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/invoke_and_ok/Cargo.toml b/programs/bpf/rust/invoke_and_ok/Cargo.toml
index 2e56934c2a4ded..1a99985c176f0f 100644
--- a/programs/bpf/rust/invoke_and_ok/Cargo.toml
+++ b/programs/bpf/rust/invoke_and_ok/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-invoke-and-ok"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-ok"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/invoke_and_return/Cargo.toml b/programs/bpf/rust/invoke_and_return/Cargo.toml
index 78f2a17ecf4473..3c31f245c05a1e 100644
--- a/programs/bpf/rust/invoke_and_return/Cargo.toml
+++ b/programs/bpf/rust/invoke_and_return/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-invoke-and-return"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-return"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/invoked/Cargo.toml b/programs/bpf/rust/invoked/Cargo.toml
index 4e1b2d3e215fb5..4a828514e5c885 100644
--- a/programs/bpf/rust/invoked/Cargo.toml
+++ b/programs/bpf/rust/invoked/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-invoked"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ default = ["program"]
 program = []

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["lib", "cdylib"]
diff --git a/programs/bpf/rust/iter/Cargo.toml b/programs/bpf/rust/iter/Cargo.toml
index 5d40b925473b97..1ea8db80a2861e 100644
--- a/programs/bpf/rust/iter/Cargo.toml
+++ b/programs/bpf/rust/iter/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-iter"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-iter"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/log_data/Cargo.toml b/programs/bpf/rust/log_data/Cargo.toml
index 3ff88d55b07091..70d5c81af58cdb 100644
--- a/programs/bpf/rust/log_data/Cargo.toml
+++ b/programs/bpf/rust/log_data/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-log-data"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-log-data"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [features]
 default = ["program"]
diff --git a/programs/bpf/rust/many_args/Cargo.toml b/programs/bpf/rust/many_args/Cargo.toml
index 18bbb432a044f8..134841c15cfb0a 100644
--- a/programs/bpf/rust/many_args/Cargo.toml
+++ b/programs/bpf/rust/many_args/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-many-args"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args"
 edition = "2021"

 [dependencies]
-solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.11.2" }
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.11.3" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/many_args_dep/Cargo.toml b/programs/bpf/rust/many_args_dep/Cargo.toml
index fca394659a8fd6..16175a1252daf0 100644
--- a/programs/bpf/rust/many_args_dep/Cargo.toml
+++ b/programs/bpf/rust/many_args_dep/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-many-args-dep"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args-dep"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/programs/bpf/rust/mem/Cargo.toml b/programs/bpf/rust/mem/Cargo.toml
index 0b731440487d29..3869f5e1c43403 100644
--- a/programs/bpf/rust/mem/Cargo.toml
+++ b/programs/bpf/rust/mem/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-mem"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ edition = "2021"
 no-entrypoint = []

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [dev-dependencies]
-solana-program-runtime = { path = "../../../../program-runtime", version = "=1.11.2" }
-solana-program-test = { path = "../../../../program-test", version = "=1.11.2" }
-solana-sdk = { path = "../../../../sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../../../program-runtime", version = "=1.11.3" }
+solana-program-test = { path = "../../../../program-test", version = "=1.11.3" }
+solana-sdk = { path = "../../../../sdk", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib", "lib"]
diff --git a/programs/bpf/rust/membuiltins/Cargo.toml b/programs/bpf/rust/membuiltins/Cargo.toml
index b4376ceadf148a..879780538d7a9c 100644
--- a/programs/bpf/rust/membuiltins/Cargo.toml
+++ b/programs/bpf/rust/membuiltins/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-membuiltins"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-mem"
 edition = "2021"

 [dependencies]
-solana-bpf-rust-mem = { path = "../mem", version = "=1.11.2", features = [ "no-entrypoint" ] }
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-bpf-rust-mem = { path = "../mem", version = "=1.11.3", features = [ "no-entrypoint" ] }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/noop/Cargo.toml b/programs/bpf/rust/noop/Cargo.toml
index 4b237fdfefd5dd..3077693f38b864 100644
--- a/programs/bpf/rust/noop/Cargo.toml
+++ b/programs/bpf/rust/noop/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-noop"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-noop"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/panic/Cargo.toml b/programs/bpf/rust/panic/Cargo.toml
index 31205b0df8585a..6bd1f3380344ad 100644
--- a/programs/bpf/rust/panic/Cargo.toml
+++ b/programs/bpf/rust/panic/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-panic"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-panic"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [features]
 default = ["custom-panic"]
diff --git a/programs/bpf/rust/param_passing/Cargo.toml b/programs/bpf/rust/param_passing/Cargo.toml
index 2adaae3ac2ef4b..5d8be230b0f08d 100644
--- a/programs/bpf/rust/param_passing/Cargo.toml
+++ b/programs/bpf/rust/param_passing/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-param-passing"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing"
 edition = "2021"

 [dependencies]
-solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.11.2" }
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.11.3" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/param_passing_dep/Cargo.toml b/programs/bpf/rust/param_passing_dep/Cargo.toml
index 95ba512db1a4b7..b743bfc0db58b9 100644
--- a/programs/bpf/rust/param_passing_dep/Cargo.toml
+++ b/programs/bpf/rust/param_passing_dep/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-param-passing-dep"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing-dep"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/programs/bpf/rust/rand/Cargo.toml b/programs/bpf/rust/rand/Cargo.toml
index f9f10f4593079e..b2e81a8e6a3ffc 100644
--- a/programs/bpf/rust/rand/Cargo.toml
+++ b/programs/bpf/rust/rand/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-rand"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2021"
 [dependencies]
 getrandom = { version = "0.1.14", features = ["dummy"] }
 rand = "0.7"
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/realloc/Cargo.toml b/programs/bpf/rust/realloc/Cargo.toml
index 946a0cb91578d5..ee84dd3725438d 100644
--- a/programs/bpf/rust/realloc/Cargo.toml
+++ b/programs/bpf/rust/realloc/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-realloc"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ default = ["program"]
 program = []

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["lib", "cdylib"]
diff --git a/programs/bpf/rust/realloc_invoke/Cargo.toml b/programs/bpf/rust/realloc_invoke/Cargo.toml
index 56b734c755f0a3..e7ed80653a320a 100644
--- a/programs/bpf/rust/realloc_invoke/Cargo.toml
+++ b/programs/bpf/rust/realloc_invoke/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-realloc-invoke"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ default = ["program"]
 program = []

 [dependencies]
-solana-bpf-rust-realloc = { path = "../realloc", version = "=1.11.2", default-features = false }
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-bpf-rust-realloc = { path = "../realloc", version = "=1.11.3", default-features = false }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["lib", "cdylib"]
diff --git a/programs/bpf/rust/ro_account_modify/Cargo.toml b/programs/bpf/rust/ro_account_modify/Cargo.toml
index fa39de5ca477c4..071aa4e02134e8 100644
--- a/programs/bpf/rust/ro_account_modify/Cargo.toml
+++ b/programs/bpf/rust/ro_account_modify/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-ro-account_modify"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-ro-modify"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/ro_modify/Cargo.toml b/programs/bpf/rust/ro_modify/Cargo.toml
index 5ec48c109a358b..da72e31d9c0ed6 100644
--- a/programs/bpf/rust/ro_modify/Cargo.toml
+++ b/programs/bpf/rust/ro_modify/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-ro-modify"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-ro-modify"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/sanity/Cargo.toml b/programs/bpf/rust/sanity/Cargo.toml
index 8d847135b26e18..ca0ded330178f7 100644
--- a/programs/bpf/rust/sanity/Cargo.toml
+++ b/programs/bpf/rust/sanity/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-sanity"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ edition = "2021"
 test-bpf = []

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [dev-dependencies]
-solana-program-runtime = { path = "../../../../program-runtime", version = "=1.11.2" }
-solana-program-test = { path = "../../../../program-test", version = "=1.11.2" }
-solana-sdk = { path = "../../../../sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../../../program-runtime", version = "=1.11.3" }
+solana-program-test = { path = "../../../../program-test", version = "=1.11.3" }
+solana-sdk = { path = "../../../../sdk", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib", "lib"]
diff --git a/programs/bpf/rust/secp256k1_recover/Cargo.toml b/programs/bpf/rust/secp256k1_recover/Cargo.toml
index d11d8fd0b4c410..1ea9c64e33eac7 100644
--- a/programs/bpf/rust/secp256k1_recover/Cargo.toml
+++ b/programs/bpf/rust/secp256k1_recover/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-secp256k1-recover"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-secp256k1-recover"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/sha/Cargo.toml b/programs/bpf/rust/sha/Cargo.toml
index 1fce3ad2ee9557..b45fa89a100112 100644
--- a/programs/bpf/rust/sha/Cargo.toml
+++ b/programs/bpf/rust/sha/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-sha"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"

 [dependencies]
 blake3 = "1.0.0"
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/sibling_inner_instruction/Cargo.toml b/programs/bpf/rust/sibling_inner_instruction/Cargo.toml
index 4fd242b653fe40..cf3b5e0aa5d864 100644
--- a/programs/bpf/rust/sibling_inner_instruction/Cargo.toml
+++ b/programs/bpf/rust/sibling_inner_instruction/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-sibling_inner-instructions"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-log-data"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [features]
 default = ["program"]
diff --git a/programs/bpf/rust/sibling_instruction/Cargo.toml b/programs/bpf/rust/sibling_instruction/Cargo.toml
index 6ccd858ea0e252..bc77e63a1494a8 100644
--- a/programs/bpf/rust/sibling_instruction/Cargo.toml
+++ b/programs/bpf/rust/sibling_instruction/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-sibling-instructions"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-log-data"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [features]
 default = ["program"]
diff --git a/programs/bpf/rust/simulation/Cargo.toml b/programs/bpf/rust/simulation/Cargo.toml
index 428bfd8217148e..f2c7a708078135 100644
--- a/programs/bpf/rust/simulation/Cargo.toml
+++ b/programs/bpf/rust/simulation/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-simulation"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF Program Simulation Differences"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -13,13 +13,13 @@ edition = "2021"
 test-bpf = []

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [dev-dependencies]
-solana-logger = { path = "../../../../logger", version = "=1.11.2" }
-solana-program-test = { path = "../../../../program-test", version = "=1.11.2" }
-solana-sdk = { path = "../../../../sdk", version = "=1.11.2" }
-solana-validator = { path = "../../../../validator", version = "=1.11.2" }
+solana-logger = { path = "../../../../logger", version = "=1.11.3" }
+solana-program-test = { path = "../../../../program-test", version = "=1.11.3" }
+solana-sdk = { path = "../../../../sdk", version = "=1.11.3" }
+solana-validator = { path = "../../../../validator", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib", "lib"]
diff --git a/programs/bpf/rust/spoof1/Cargo.toml b/programs/bpf/rust/spoof1/Cargo.toml
index 9bcce42c7892f7..d4472d0a53a32e 100644
--- a/programs/bpf/rust/spoof1/Cargo.toml
+++ b/programs/bpf/rust/spoof1/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-spoof1"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/spoof1_system/Cargo.toml b/programs/bpf/rust/spoof1_system/Cargo.toml
index 1216ac3360d78e..30a1f1cd056f23 100644
--- a/programs/bpf/rust/spoof1_system/Cargo.toml
+++ b/programs/bpf/rust/spoof1_system/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-spoof1-system"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1-system"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/programs/bpf/rust/sysvar/Cargo.toml b/programs/bpf/rust/sysvar/Cargo.toml
index e71f50f168d6f9..10ad382f77ac67 100644
--- a/programs/bpf/rust/sysvar/Cargo.toml
+++ b/programs/bpf/rust/sysvar/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-sysvar"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ documentation = "https://docs.rs/solana-bpf-rust-sysvar"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [dev-dependencies]
-solana-program-runtime = { path = "../../../../program-runtime", version = "=1.11.2" }
-solana-program-test = { path = "../../../../program-test", version = "=1.11.2" }
-solana-sdk = { path = "../../../../sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../../../program-runtime", version = "=1.11.3" }
+solana-program-test = { path = "../../../../program-test", version = "=1.11.3" }
+solana-sdk = { path = "../../../../sdk", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib", "lib"]
diff --git a/programs/bpf/rust/upgradeable/Cargo.toml b/programs/bpf/rust/upgradeable/Cargo.toml
index 4934d89df4fc4a..7b4ed6602b7f20 100644
--- a/programs/bpf/rust/upgradeable/Cargo.toml
+++ b/programs/bpf/rust/upgradeable/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-upgradeable"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgradeable"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 name = "solana_bpf_rust_upgradeable"
diff --git a/programs/bpf/rust/upgraded/Cargo.toml b/programs/bpf/rust/upgraded/Cargo.toml
index aa2a97365324a5..199ff25df0daaf 100644
--- a/programs/bpf/rust/upgraded/Cargo.toml
+++ b/programs/bpf/rust/upgraded/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-rust-upgraded"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgraded"
 edition = "2021"

 [dependencies]
-solana-program = { path = "../../../../sdk/program", version = "=1.11.2" }
+solana-program = { path = "../../../../sdk/program", version = "=1.11.3" }

 [lib]
 name = "solana_bpf_rust_upgraded"
diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml
index ffa81b8776ba20..6831cb818b869a 100644
--- a/programs/bpf_loader/Cargo.toml
+++ b/programs/bpf_loader/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-loader-program"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana BPF loader"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -14,17 +14,17 @@ bincode = "1.3.3"
 byteorder = "1.4.3"
 libsecp256k1 = "0.6.0"
 log = "0.4.17"
-solana-measure = { path = "../../measure", version = "=1.11.2" }
-solana-metrics = { path = "../../metrics", version = "=1.11.2" }
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
-solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.11.2" }
+solana-measure = { path = "../../measure", version = "=1.11.3" }
+solana-metrics = { path = "../../metrics", version = "=1.11.3" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }
+solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.11.3" }
 solana_rbpf = "=0.2.31"
 thiserror = "1.0"

 [dev-dependencies]
 rand = "0.7.3"
-solana-runtime = { path = "../../runtime", version = "=1.11.2" }
+solana-runtime = { path = "../../runtime", version = "=1.11.3" }

 [lib]
 crate-type = ["lib"]
diff --git a/programs/bpf_loader/gen-syscall-list/Cargo.toml b/programs/bpf_loader/gen-syscall-list/Cargo.toml
index e47ef4066cc461..7019aed9a4947f 100644
--- a/programs/bpf_loader/gen-syscall-list/Cargo.toml
+++ b/programs/bpf_loader/gen-syscall-list/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "gen-syscall-list"
-version = "1.11.2"
+version = "1.11.3"
 edition = "2021"
 license = "Apache-2.0"
 publish = false
diff --git a/programs/compute-budget/Cargo.toml b/programs/compute-budget/Cargo.toml
index 990a71eec01f3c..f3ca901ff5830a 100644
--- a/programs/compute-budget/Cargo.toml
+++ b/programs/compute-budget/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "solana-compute-budget-program"
 description = "Solana Compute Budget program"
-version = "1.11.2"
+version = "1.11.3"
 homepage = "https://solana.com/"
 documentation = "https://docs.rs/solana-compute-budget-program"
 repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ license = "Apache-2.0"
 edition = "2021"

 [dependencies]
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }

 [lib]
 crate-type = ["lib"]
diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml
index 7d293686795be6..09924ad207e9d1 100644
--- a/programs/config/Cargo.toml
+++ b/programs/config/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-config-program"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Config program"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -14,11 +14,11 @@ bincode = "1.3.3"
 chrono = { version = "0.4.11", features = ["serde"] }
 serde = "1.0.138"
 serde_derive = "1.0.103"
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }

 [dev-dependencies]
-solana-logger = { path = "../../logger", version = "=1.11.2" }
+solana-logger = { path = "../../logger", version = "=1.11.3" }

 [lib]
 crate-type = ["lib"]
diff --git a/programs/ed25519-tests/Cargo.toml b/programs/ed25519-tests/Cargo.toml
index 59da4d2bb5ee12..e67f1bae833d03 100644
--- a/programs/ed25519-tests/Cargo.toml
+++ b/programs/ed25519-tests/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-ed25519-program-tests"
-version = "1.11.2"
+version = "1.11.3"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
@@ -12,8 +12,8 @@ publish = false
 assert_matches = "1.5.0"
 ed25519-dalek = "=1.0.1"
 rand = "0.7.0"
-solana-program-test = { path = "../../program-test", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
+solana-program-test = { path = "../../program-test", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml
index 02c1a0a5ba97ac..9b2fc31540f6f8 100644
--- a/programs/stake/Cargo.toml
+++ b/programs/stake/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-stake-program"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Stake program"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -16,19 +16,19 @@ num-derive = "0.3"
 num-traits = "0.2"
 serde = "1.0.138"
 serde_derive = "1.0.103"
-solana-config-program = { path = "../config", version = "=1.11.2" }
-solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.2" }
-solana-metrics = { path = "../../metrics", version = "=1.11.2" }
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
-solana-vote-program = { path = "../vote", version = "=1.11.2" }
+solana-config-program = { path = "../config", version = "=1.11.3" }
+solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.3" }
+solana-metrics = { path = "../../metrics", version = "=1.11.3" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }
+solana-vote-program = { path = "../vote", version = "=1.11.3" }
 thiserror = "1.0"

 [dev-dependencies]
 assert_matches = "1.5.0"
 proptest = "1.0"
-solana-logger = { path = "../../logger", version = "=1.11.2" }
+solana-logger = { path = "../../logger", version = "=1.11.3" }
 test-case = "2.1.0"

 [build-dependencies]
diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml
index 9d6ed359cba0f7..bdad1ca4840f3a 100644
--- a/programs/vote/Cargo.toml
+++ b/programs/vote/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-vote-program"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Vote program"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -16,15 +16,15 @@ num-derive = "0.3"
 num-traits = "0.2"
 serde = "1.0.138"
 serde_derive = "1.0.103"
-solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.2" }
-solana-metrics = { path = "../../metrics", version = "=1.11.2" }
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
+solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.3" }
+solana-metrics = { path = "../../metrics", version = "=1.11.3" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }
 thiserror = "1.0"

 [dev-dependencies]
-solana-logger = { path = "../../logger", version = "=1.11.2" }
+solana-logger = { path = "../../logger", version = "=1.11.3" }

 [build-dependencies]
 rustc_version = "0.4"
diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml
index 7bd08d861dc2f9..959e70a81e3923 100644
--- a/programs/zk-token-proof/Cargo.toml
+++ b/programs/zk-token-proof/Cargo.toml
@@ -3,7 +3,7 @@ name = "solana-zk-token-proof-program"
 description = "Solana Zk Token Proof Program"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
-version = "1.11.2"
+version = "1.11.3"
 license = "Apache-2.0"
 edition = "2021"

@@ -12,6 +12,6 @@ bytemuck = { version = "1.9.1", features = ["derive"] }
 getrandom = { version = "0.1", features = ["dummy"] }
 num-derive = "0.3"
 num-traits = "0.2"
-solana-program-runtime = { path = "../../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../../sdk", version = "=1.11.2" }
-solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.11.2" }
+solana-program-runtime = { path = "../../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../../sdk", version = "=1.11.3" }
+solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.11.3" }
diff --git a/rayon-threadlimit/Cargo.toml b/rayon-threadlimit/Cargo.toml
index 054a5d3701b5ac..4e9c9b777fc87b 100644
--- a/rayon-threadlimit/Cargo.toml
+++ b/rayon-threadlimit/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-rayon-threadlimit"
-version = "1.11.2"
+version = "1.11.3"
 description = "solana-rayon-threadlimit"
 homepage = "https://solana.com/"
 documentation = "https://docs.rs/solana-rayon-threadlimit"
diff --git a/rbpf-cli/Cargo.toml b/rbpf-cli/Cargo.toml
index 4cfca09317f871..73b1dca221d712 100644
--- a/rbpf-cli/Cargo.toml
+++ b/rbpf-cli/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "rbpf-cli"
-version = "1.11.2"
+version = "1.11.3"
 description = "CLI to test and analyze eBPF programs"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/rbpf"
@@ -13,8 +13,8 @@ publish = false
 clap = { version = "3.1.5", features = ["cargo"] }
 serde = "1.0.138"
 serde_json = "1.0.81"
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
 solana_rbpf = "=0.2.31"
diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml
index a955af47a71f5e..377c8f92e19c68 100644
--- a/remote-wallet/Cargo.toml
+++ b/remote-wallet/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-remote-wallet"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -19,7 +19,7 @@ num-traits = { version = "0.2" }
 parking_lot = "0.12"
 qstring = "0.7.2"
 semver = "1.0"
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
 thiserror = "1.0"
 uriparse = "0.6.4"
diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml
index deb0aa8d1273f5..18ede1dee19fd8 100644
--- a/rpc-test/Cargo.toml
+++ b/rpc-test/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-rpc-test"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana RPC Test"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -19,17 +19,17 @@ log = "0.4.17"
 reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] }
 serde = "1.0.138"
 serde_json = "1.0.81"
-solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" }
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-rpc = { path = "../rpc", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-streamer = { path = "../streamer", version = "=1.11.2" }
-solana-test-validator = { path = "../test-validator", version = "=1.11.2" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-rpc = { path = "../rpc", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-streamer = { path = "../streamer", version = "=1.11.3" }
+solana-test-validator = { path = "../test-validator", version = "=1.11.3" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" }
 tokio = { version = "~1.14.1", features = ["full"] }

 [dev-dependencies]
-solana-logger = { path = "../logger", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml
index 5ca79073166303..a7c5b9b1796343 100644
--- a/rpc/Cargo.toml
+++ b/rpc/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-rpc"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana RPC"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -29,25 +29,25 @@ serde = "1.0.138"
 serde_derive = "1.0.103"
 serde_json = "1.0.81"
 soketto = "0.7"
-solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" }
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-entry = { path = "../entry", version = "=1.11.2" }
-solana-faucet = { path = "../faucet", version = "=1.11.2" }
-solana-gossip = { path = "../gossip", version = "=1.11.2" }
-solana-ledger = { path = "../ledger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-perf = { path = "../perf", version = "=1.11.2" }
-solana-poh = { path = "../poh", version = "=1.11.2" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.2" }
-solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.2" }
-solana-streamer = { path = "../streamer", version = "=1.11.2" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
-solana-vote-program = { path = "../programs/vote", version = "=1.11.2" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-entry = { path = "../entry", version = "=1.11.3" }
+solana-faucet = { path = "../faucet", version = "=1.11.3" }
+solana-gossip = { path = "../gossip", version = "=1.11.3" }
+solana-ledger = { path = "../ledger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-perf = { path = "../perf", version = "=1.11.3" }
+solana-poh = { path = "../poh", version = "=1.11.3" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.3" }
+solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.3" }
+solana-streamer = { path = "../streamer", version = "=1.11.3" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.3" }
 spl-token = { version = "=3.3.0", features = ["no-entrypoint"] }
 spl-token-2022 = { version = "=0.3.0", features = ["no-entrypoint"] }
 stream-cancel = "0.8.1"
@@ -57,9 +57,9 @@ tokio-util = { version = "0.6", features = ["codec", "compat"] }

 [dev-dependencies]
 serial_test = "0.8.0"
-solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.2" }
-solana-net-utils = { path = "../net-utils", version = "=1.11.2" }
-solana-stake-program = { path = "../programs/stake", version = "=1.11.2" }
+solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.3" }
+solana-net-utils = { path = "../net-utils", version = "=1.11.3" }
+solana-stake-program = { path = "../programs/stake", version = "=1.11.3" }
 symlink = "0.1.0"

 [lib]
diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml
index 7f149b06c9813c..d193c052502ddd 100644
--- a/runtime/Cargo.toml
+++ b/runtime/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-runtime"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana runtime"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -39,21 +39,21 @@ rayon = "1.5.3"
 regex = "1.5.6"
 serde = { version = "1.0.138", features = ["rc"] }
 serde_derive = "1.0.103"
-solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.2" }
-solana-bucket-map = { path = "../bucket_map", version = "=1.11.2" }
-solana-compute-budget-program = { path = "../programs/compute-budget", version = "=1.11.2" }
-solana-config-program = { path = "../programs/config", version = "=1.11.2" }
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-stake-program = { path = "../programs/stake", version = "=1.11.2" }
-solana-vote-program = { path = "../programs/vote", version = "=1.11.2" }
-solana-zk-token-proof-program = { path = "../programs/zk-token-proof", version = "=1.11.2" }
-solana-zk-token-sdk = { path = "../zk-token-sdk", version = "=1.11.2" }
+solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.3" }
+solana-bucket-map = { path = "../bucket_map", version = "=1.11.3" }
+solana-compute-budget-program = { path = "../programs/compute-budget", version = "=1.11.3" }
+solana-config-program = { path = "../programs/config", version = "=1.11.3" }
+solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-stake-program = { path = "../programs/stake", version = "=1.11.3" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.3" }
+solana-zk-token-proof-program = { path = "../programs/zk-token-proof", version = "=1.11.3" }
+solana-zk-token-sdk = { path = "../zk-token-sdk", version = "=1.11.3" }
 strum = { version = "0.24", features = ["derive"] }
 strum_macros = "0.24"
 symlink = "0.1.0"
@@ -71,7 +71,7 @@ assert_matches = "1.5.0"
 ed25519-dalek = "=1.0.1"
 libsecp256k1 = "0.6.0"
 rand_chacha = "0.2.2"
-solana-logger = { path = "../logger", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/runtime/store-tool/Cargo.toml b/runtime/store-tool/Cargo.toml
index 212b64b586cbc1..4bc998ba226419 100644
--- a/runtime/store-tool/Cargo.toml
+++ b/runtime/store-tool/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-store-tool"
 description = "Tool to inspect append vecs"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,9 +12,9 @@ publish = false
 [dependencies]
 clap = "2.33.1"
 log = { version = "0.4.17" }
-solana-logger = { path = "../../logger", version = "=1.11.2" }
-solana-runtime = { path = "..", version = "=1.11.2" }
-solana-version = { path = "../../version", version = "=1.11.2" }
+solana-logger = { path = "../../logger", version = "=1.11.3" }
+solana-runtime = { path = "..", version = "=1.11.3" }
+solana-version = { path = "../../version", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml
index e826a14917c250..cc9b9040524204 100644
--- a/sdk/Cargo.toml
+++ b/sdk/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-sdk"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana SDK"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -71,11 +71,11 @@ serde_derive = "1.0.103"
 serde_json = { version = "1.0.81", optional = true }
 sha2 = "0.10.2"
 sha3 = { version = "0.10.1", optional = true }
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2", optional = true }
-solana-program = { path = "program", version = "=1.11.2" }
-solana-sdk-macro = { path = "macro", version = "=1.11.2" }
+solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3", optional = true }
+solana-program = { path = "program", version = "=1.11.3" }
+solana-sdk-macro = { path = "macro", version = "=1.11.3" }
 thiserror = "1.0"
 uriparse = "0.6.4"
 wasm-bindgen = "0.2"
diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml
index 3428ba3b86c93f..26c62fc8b50df2 100644
--- a/sdk/cargo-build-bpf/Cargo.toml
+++ b/sdk/cargo-build-bpf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-cargo-build-bpf"
-version = "1.11.2"
+version = "1.11.3"
 description = "Compile a local package and all of its dependencies using the Solana SBF SDK"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ publish = false
 [dependencies]
 cargo_metadata = "0.14.2"
 clap = { version = "3.1.5", features = ["cargo", "env"] }
-solana-sdk = { path = "..", version = "=1.11.2" }
+solana-sdk = { path = "..", version = "=1.11.3" }

 [features]
 program = []
diff --git a/sdk/cargo-build-sbf/Cargo.toml b/sdk/cargo-build-sbf/Cargo.toml
index 70c03302df4857..9501fffa6fe9cc 100644
--- a/sdk/cargo-build-sbf/Cargo.toml
+++ b/sdk/cargo-build-sbf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-cargo-build-sbf"
-version = "1.11.2"
+version = "1.11.3"
 description = "Compile a local package and all of its dependencies using the Solana SBF SDK"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -15,9 +15,9 @@ cargo_metadata = "0.14.2"
 clap = { version = "3.1.5", features = ["cargo", "env"] }
 log = { version = "0.4.14", features = ["std"] }
 regex = "1.5.6"
-solana-download-utils = { path = "../../download-utils", version = "=1.11.2" }
-solana-logger = { path = "../../logger", version = "=1.11.2" }
-solana-sdk = { path = "..", version = "=1.11.2" }
+solana-download-utils = { path = "../../download-utils", version = "=1.11.3" }
+solana-logger = { path = "../../logger", version = "=1.11.3" }
+solana-sdk = { path = "..", version = "=1.11.3" }
 tar = "0.4.38"

 [dev-dependencies]
diff --git a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml
index cbeb11de671866..ed199ad594fcd2 100644
--- a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml
+++ b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "fail"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana SBF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ edition = "2021"
 publish = false

 [dependencies]
-solana-program = { path = "../../../../program", version = "=1.11.2" }
+solana-program = { path = "../../../../program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml
index e64e4993c8ee79..e62e308cce69fe 100644
--- a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml
+++ b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "noop"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana SBF test program written in Rust"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ edition = "2021"
 publish = false

 [dependencies]
-solana-program = { path = "../../../../program", version = "=1.11.2" }
+solana-program = { path = "../../../../program", version = "=1.11.3" }

 [lib]
 crate-type = ["cdylib"]
diff --git a/sdk/cargo-test-bpf/Cargo.toml b/sdk/cargo-test-bpf/Cargo.toml
index 4f6a587c9e7e46..9f8028582e5c05 100644
--- a/sdk/cargo-test-bpf/Cargo.toml
+++ b/sdk/cargo-test-bpf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-cargo-test-bpf"
-version = "1.11.2"
+version = "1.11.3"
 description = "Execute all unit and integration tests after building with the Solana SBF SDK"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
diff --git a/sdk/cargo-test-sbf/Cargo.toml b/sdk/cargo-test-sbf/Cargo.toml
index 6a8c301dc17b76..9e055e2dac3ac4 100644
--- a/sdk/cargo-test-sbf/Cargo.toml
+++ b/sdk/cargo-test-sbf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-cargo-test-sbf"
-version = "1.11.2"
+version = "1.11.3"
 description = "Execute all unit and integration tests after building with the Solana SBF SDK"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
diff --git a/sdk/gen-headers/Cargo.toml b/sdk/gen-headers/Cargo.toml
index 3c5c52f67c94e4..a8b162701eb651 100644
--- a/sdk/gen-headers/Cargo.toml
+++ b/sdk/gen-headers/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "gen-headers"
-version = "1.11.2"
+version = "1.11.3"
 edition = "2021"
 license = "Apache-2.0"
 publish = false
diff --git a/sdk/macro/Cargo.toml b/sdk/macro/Cargo.toml
index 7d69f3b9c854b9..4f95d7a1c34b51 100644
--- a/sdk/macro/Cargo.toml
+++ b/sdk/macro/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-sdk-macro"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana SDK Macro"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml
index 960548a6b60009..a7a91bb1081517 100644
--- a/sdk/program/Cargo.toml
+++ b/sdk/program/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-program"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Program"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -30,9 +30,9 @@ serde_bytes = "0.11"
 serde_derive = "1.0.103"
 sha2 = "0.10.0"
 sha3 = "0.10.0"
-solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.2" }
-solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.2" }
-solana-sdk-macro = { path = "../macro", version = "=1.11.2" }
+solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.3" }
+solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.11.3" }
+solana-sdk-macro = { path = "../macro", version = "=1.11.3" }
 thiserror = "1.0"

 [target.'cfg(not(target_os = "solana"))'.dependencies]
@@ -45,7 +45,7 @@ itertools = "0.10.1"
 wasm-bindgen = "0.2"

 [target.'cfg(not(target_os = "solana"))'.dev-dependencies]
-solana-logger = { path = "../../logger", version = "=1.11.2" }
+solana-logger = { path = "../../logger", version = "=1.11.3" }

 [target.'cfg(target_arch = "wasm32")'.dependencies]
 console_error_panic_hook = "0.1.7"
diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml
index 45b39fefb96da9..7e797f22530909 100644
--- a/send-transaction-service/Cargo.toml
+++ b/send-transaction-service/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-send-transaction-service"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana send transaction service"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -12,14 +12,14 @@ edition = "2021"
 [dependencies]
 crossbeam-channel = "0.5"
 log = "0.4.17"
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }

 [dev-dependencies]
-solana-logger = { path = "../logger", version = "=1.11.2" }
+solana-logger = { path = "../logger", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml
index 4327ed4f41eedc..8962f1579a60f7 100644
--- a/stake-accounts/Cargo.toml
+++ b/stake-accounts/Cargo.toml
@@ -3,7 +3,7 @@ name = "solana-stake-accounts"
 description = "Blockchain, Rebuilt for Scale"
 authors = ["Solana Maintainers "]
 edition = "2021"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,15 +11,15 @@ documentation = "https://docs.rs/solana-stake-accounts"

 [dependencies]
 clap = "2.33.1"
-solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" }
-solana-cli-config = { path = "../cli-config", version = "=1.11.2" }
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-stake-program = { path = "../programs/stake", version = "=1.11.2" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" }
+solana-cli-config = { path = "../cli-config", version = "=1.11.3" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-stake-program = { path = "../programs/stake", version = "=1.11.3" }

 [dev-dependencies]
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml
index 6de50a80cb37ce..2901bac6d407a0 100644
--- a/storage-bigtable/Cargo.toml
+++ b/storage-bigtable/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-storage-bigtable"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana Storage BigTable"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -27,10 +27,10 @@ prost-types = "0.10.0"
 serde = "1.0.138"
 serde_derive = "1.0.103"
 smpl_jwt = "0.7.1"
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-storage-proto = { path = "../storage-proto", version = "=1.11.2" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-storage-proto = { path = "../storage-proto", version = "=1.11.3" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" }
 thiserror = "1.0"
 tokio = "~1.14.1"
 tonic = { version = "0.7.2", features = ["tls", "transport"] }
diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock
index 8d5bf7e4709a07..638c8f57099f05 100644
--- a/storage-bigtable/build-proto/Cargo.lock
+++ b/storage-bigtable/build-proto/Cargo.lock
@@ -221,7 +221,7 @@ dependencies = [

 [[package]]
 name = "proto"
-version = "1.11.2"
+version = "1.11.3"
 dependencies = [
  "tonic-build",
 ]
diff --git a/storage-bigtable/build-proto/Cargo.toml b/storage-bigtable/build-proto/Cargo.toml
index a87848e0d0882d..cc5c7ae8f87b04 100644
--- a/storage-bigtable/build-proto/Cargo.toml
+++ b/storage-bigtable/build-proto/Cargo.toml
@@ -7,7 +7,7 @@ license = "Apache-2.0"
 name = "proto"
 publish = false
 repository = "https://github.com/solana-labs/solana"
-version = "1.11.2"
+version = "1.11.3"

 [workspace]

diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml
index f3d941de85504b..c92056233f5de5 100644
--- a/storage-proto/Cargo.toml
+++ b/storage-proto/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-storage-proto"
-version = "1.11.2"
+version = "1.11.3"
description = "Solana Storage Protobuf Definitions" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,9 +14,9 @@ bincode = "1.3.3" bs58 = "0.4.0" prost = "0.10.4" serde = "1.0.138" -solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } +solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } [dev-dependencies] enum-iterator = "0.8.1" diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index dd12051ac39776..d1f3e78db91f78 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-streamer" -version = "1.11.2" +version = "1.11.3" description = "Solana Streamer" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -25,14 +25,14 @@ quinn = "0.8.3" rand = "0.7.0" rcgen = "0.9.2" rustls = { version = "0.20.6", features = ["dangerous_configuration"] } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-perf = { path = "../perf", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } [dev-dependencies] -solana-logger = { path = "../logger", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } [lib] crate-type = ["lib"] diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index b67c1623fdac61..6cd11a45c3efd6 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-sys-tuner" description = "The solana cluster system tuner daemon" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,8 +14,8 @@ publish = true clap = "2.33.1" libc = "0.2.126" log = "0.4.17" -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } [target."cfg(unix)".dependencies] unix_socket2 = "0.5.4" diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 3341a65fc4d904..7192f0fed52624 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-test-validator" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-test-validator" readme = "../README.md" @@ -15,19 +15,19 @@ base64 = "0.13.0" log = "0.4.17" serde_derive = "1.0.103" serde_json = "1.0.81" -solana-cli-output = { path = "../cli-output", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-core = { path = "../core", version = "=1.11.2" } -solana-gossip = { path = "../gossip", version = "=1.11.2" } -solana-ledger = { path = "../ledger", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-net-utils = 
{ path = "../net-utils", version = "=1.11.2" } -solana-program-runtime = { path = "../program-runtime", version = "=1.11.2" } -solana-program-test = { path = "../program-test", version = "=1.11.2" } -solana-rpc = { path = "../rpc", version = "=1.11.2" } -solana-runtime = { path = "../runtime", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } +solana-cli-output = { path = "../cli-output", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-core = { path = "../core", version = "=1.11.3" } +solana-gossip = { path = "../gossip", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-program-runtime = { path = "../program-runtime", version = "=1.11.3" } +solana-program-test = { path = "../program-test", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } tokio = { version = "~1.14.1", features = ["full"] } [package.metadata.docs.rs] diff --git a/tokens/Cargo.toml b/tokens/Cargo.toml index 5173c9bb2aa4a9..a84f101fa15fbf 100644 --- a/tokens/Cargo.toml +++ b/tokens/Cargo.toml @@ -3,7 +3,7 @@ name = "solana-tokens" description = "Blockchain, Rebuilt for Scale" authors = ["Solana Maintainers "] edition = "2021" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -19,14 +19,14 @@ indexmap = "1.9.1" indicatif = "0.16.2" pickledb = "0.4.1" serde = { version = "1.0", features = ["derive"] } -solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" } -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = "../cli-config", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } spl-associated-token-account = { version = "=1.0.5" } spl-token = { version = "=3.3.0", features = ["no-entrypoint"] } tempfile = "3.3.0" @@ -34,6 +34,6 @@ thiserror = "1.0" [dev-dependencies] bincode = "1.3.3" -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-streamer = { path = "../streamer", version = "=1.11.2" } -solana-test-validator = { path = "../test-validator", version = "=1.11.2" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } 
+solana-test-validator = { path = "../test-validator", version = "=1.11.3" }
diff --git a/transaction-dos/Cargo.toml b/transaction-dos/Cargo.toml
index 501730b3f04800..0e2ed0d0967f20 100644
--- a/transaction-dos/Cargo.toml
+++ b/transaction-dos/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-transaction-dos"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,23 +14,23 @@ clap = "2.33.1"
 log = "0.4.17"
 rand = "0.7.0"
 rayon = "1.5.3"
-solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" }
-solana-cli = { path = "../cli", version = "=1.11.2" }
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-core = { path = "../core", version = "=1.11.2" }
-solana-faucet = { path = "../faucet", version = "=1.11.2" }
-solana-gossip = { path = "../gossip", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-net-utils = { path = "../net-utils", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-streamer = { path = "../streamer", version = "=1.11.2" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" }
+solana-cli = { path = "../cli", version = "=1.11.3" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-core = { path = "../core", version = "=1.11.3" }
+solana-faucet = { path = "../faucet", version = "=1.11.3" }
+solana-gossip = { path = "../gossip", version = "=1.11.3" }
+solana-logger = { path = "../logger", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-net-utils = { path = "../net-utils", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-streamer = { path = "../streamer", version = "=1.11.3" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.11.3" }
+solana-version = { path = "../version", version = "=1.11.3" }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "=1.11.2" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.11.3" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml
index 23834f77cc469d..d6b55651d05075 100644
--- a/transaction-status/Cargo.toml
+++ b/transaction-status/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-transaction-status"
-version = "1.11.2"
+version = "1.11.3"
 description = "Solana transaction status types"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -20,12 +20,12 @@ log = "0.4.17"
 serde = "1.0.138"
 serde_derive = "1.0.103"
 serde_json = "1.0.81"
-solana-account-decoder = { path = "../account-decoder", version = "=1.11.2" }
-solana-measure = { path = "../measure", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-vote-program = { path = "../programs/vote", version = "=1.11.2" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.11.3" }
+solana-measure = { path = "../measure", version = "=1.11.3" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }
+solana-runtime = { path = "../runtime", version = "=1.11.3" }
+solana-sdk = { path = "../sdk", version = "=1.11.3" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.3" }
 spl-associated-token-account = { version = "=1.0.5", features = ["no-entrypoint"] }
 spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
 spl-token = { version = "=3.3.0", features = ["no-entrypoint"] }
diff --git a/upload-perf/Cargo.toml b/upload-perf/Cargo.toml
index a8e47799e96e41..0b95a5a67793c7 100644
--- a/upload-perf/Cargo.toml
+++ b/upload-perf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-upload-perf"
-version = "1.11.2"
+version = "1.11.3"
 description = "Metrics Upload Utility"
 authors = ["Solana Maintainers "]
 repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ publish = false

 [dependencies]
 serde_json = "1.0.81"
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
+solana-metrics = { path = "../metrics", version = "=1.11.3" }

 [[bin]]
 name = "solana-upload-perf"
diff --git a/validator/Cargo.toml b/validator/Cargo.toml
index da99a5fc8f7399..8e100a886f13a3 100644
--- a/validator/Cargo.toml
+++ b/validator/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers "]
 edition = "2021"
 name = "solana-validator"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.11.2"
+version = "1.11.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -28,30 +28,30 @@ num_cpus = "1.13.1"
 rand = "0.7.0"
 serde = "1.0.138"
 serde_json = "1.0.81"
-solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" }
-solana-cli-config = { path = "../cli-config", version = "=1.11.2" }
-solana-client = { path = "../client", version = "=1.11.2" }
-solana-core = { path = "../core", version = "=1.11.2" }
-solana-download-utils = { path = "../download-utils", version = "=1.11.2" }
-solana-entry = { path = "../entry", version = "=1.11.2" }
-solana-faucet = { path = "../faucet", version = "=1.11.2" }
-solana-genesis-utils = { path = "../genesis-utils", version = "=1.11.2" }
-solana-gossip = { path = "../gossip", version = "=1.11.2" }
-solana-ledger = { path = "../ledger", version = "=1.11.2" }
-solana-logger = { path = "../logger", version = "=1.11.2" }
-solana-metrics = { path = "../metrics", version = "=1.11.2" }
-solana-net-utils = { path = "../net-utils", version = "=1.11.2" }
-solana-perf = { path = "../perf", version = "=1.11.2" }
-solana-poh = { path = "../poh", version = "=1.11.2" }
-solana-rpc = { path = "../rpc", version = "=1.11.2" }
-solana-runtime = { path = "../runtime", version = "=1.11.2" }
-solana-sdk = { path = "../sdk", version = "=1.11.2" }
-solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.2" }
-solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.2" }
-solana-streamer = { path = "../streamer", version = "=1.11.2" }
-solana-test-validator = { path = "../test-validator", version = "=1.11.2" }
-solana-version = { path = "../version", version = "=1.11.2" }
-solana-vote-program = { path = "../programs/vote", version = "=1.11.2" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" }
+solana-cli-config = { path = "../cli-config", version = "=1.11.3" }
+solana-client = { path = "../client", version = "=1.11.3" }
+solana-core = { path = "../core", version
= "=1.11.3" } +solana-download-utils = { path = "../download-utils", version = "=1.11.3" } +solana-entry = { path = "../entry", version = "=1.11.3" } +solana-faucet = { path = "../faucet", version = "=1.11.3" } +solana-genesis-utils = { path = "../genesis-utils", version = "=1.11.3" } +solana-gossip = { path = "../gossip", version = "=1.11.3" } +solana-ledger = { path = "../ledger", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-net-utils = { path = "../net-utils", version = "=1.11.3" } +solana-perf = { path = "../perf", version = "=1.11.3" } +solana-poh = { path = "../poh", version = "=1.11.3" } +solana-rpc = { path = "../rpc", version = "=1.11.3" } +solana-runtime = { path = "../runtime", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.3" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.3" } +solana-streamer = { path = "../streamer", version = "=1.11.3" } +solana-test-validator = { path = "../test-validator", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } +solana-vote-program = { path = "../programs/vote", version = "=1.11.3" } symlink = "0.1.0" [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/version/Cargo.toml b/version/Cargo.toml index fd5b46062fb7ae..351bf8943f22b1 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-version" -version = "1.11.2" +version = "1.11.3" description = "Solana Version" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,9 +14,9 @@ log = "0.4.17" semver = "1.0.10" serde = "1.0.138" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.2" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.3" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } [lib] name = "solana_version" diff --git a/watchtower/Cargo.toml b/watchtower/Cargo.toml index 07a5ff13cb896a..e9bc06d3a84b6f 100644 --- a/watchtower/Cargo.toml +++ b/watchtower/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-watchtower" description = "Blockchain, Rebuilt for Scale" -version = "1.11.2" +version = "1.11.3" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,15 +13,15 @@ documentation = "https://docs.rs/solana-watchtower" clap = "2.33.1" humantime = "2.0.1" log = "0.4.17" -solana-clap-utils = { path = "../clap-utils", version = "=1.11.2" } -solana-cli-config = { path = "../cli-config", version = "=1.11.2" } -solana-cli-output = { path = "../cli-output", version = "=1.11.2" } -solana-client = { path = "../client", version = "=1.11.2" } -solana-logger = { path = "../logger", version = "=1.11.2" } -solana-metrics = { path = "../metrics", version = "=1.11.2" } -solana-notifier = { path = "../notifier", version = "=1.11.2" } -solana-sdk = { path = "../sdk", version = "=1.11.2" } -solana-version = { path = "../version", version = "=1.11.2" } +solana-clap-utils = { path = "../clap-utils", version = "=1.11.3" } +solana-cli-config = { path = 
"../cli-config", version = "=1.11.3" } +solana-cli-output = { path = "../cli-output", version = "=1.11.3" } +solana-client = { path = "../client", version = "=1.11.3" } +solana-logger = { path = "../logger", version = "=1.11.3" } +solana-metrics = { path = "../metrics", version = "=1.11.3" } +solana-notifier = { path = "../notifier", version = "=1.11.3" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } +solana-version = { path = "../version", version = "=1.11.3" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index fc4466a65998c2..bc8e220b94ae1c 100644 --- a/zk-token-sdk/Cargo.toml +++ b/zk-token-sdk/Cargo.toml @@ -3,7 +3,7 @@ name = "solana-zk-token-sdk" description = "Solana Zk Token SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" -version = "1.11.2" +version = "1.11.3" license = "Apache-2.0" edition = "2021" @@ -12,7 +12,7 @@ base64 = "0.13" bytemuck = { version = "1.9.1", features = ["derive"] } num-derive = "0.3" num-traits = "0.2" -solana-program = { path = "../sdk/program", version = "=1.11.2" } +solana-program = { path = "../sdk/program", version = "=1.11.3" } [target.'cfg(not(target_os = "solana"))'.dependencies] aes-gcm-siv = "0.10.3" @@ -28,7 +28,7 @@ rand = "0.7" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" sha3 = "0.9" -solana-sdk = { path = "../sdk", version = "=1.11.2" } +solana-sdk = { path = "../sdk", version = "=1.11.3" } subtle = "2" thiserror = "1" zeroize = { version = "1.3", default-features = false, features = ["zeroize_derive"] } From 5bffee248cfcea98a6b3c338d39800cb6f5c4fcc Mon Sep 17 00:00:00 2001 From: carllin Date: Thu, 7 Jul 2022 15:02:43 -0500 Subject: [PATCH 071/100] Cleanup repair logging (#26461) --- core/src/repair_service.rs | 4 +-- core/src/serve_repair.rs | 56 ++++++++++++++++++++------------------ 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index d87430423cba68..d1ad6546c32a37 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -387,7 +387,7 @@ impl RepairService { info!("repair_stats: {:?}", slot_to_count); if repair_total > 0 { datapoint_info!( - "serve_repair-repair", + "repair_service-my_requests", ("repair-total", repair_total, i64), ("shred-count", repair_stats.shred.count, i64), ("highest-shred-count", repair_stats.highest_shred.count, i64), @@ -397,7 +397,7 @@ impl RepairService { ); } datapoint_info!( - "serve_repair-repair-timing", + "repair_service-repair_timing", ("set-root-elapsed", repair_timing.set_root_elapsed, i64), ("get-votes-elapsed", repair_timing.get_votes_elapsed, i64), ("add-votes-elapsed", repair_timing.add_votes_elapsed, i64), diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index d17c1978e14df5..56039cb8e6732f 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -142,8 +142,9 @@ impl RequestResponse for AncestorHashesRepairType { #[derive(Default)] pub struct ServeRepairStats { - pub total_packets: usize, - pub dropped_packets: usize, + pub total_requests: usize, + pub dropped_requests: usize, + pub total_response_packets: usize, pub processed: usize, pub self_repair: usize, pub window_index: usize, @@ -327,26 +328,26 @@ impl ServeRepair { //TODO cache connections let timeout = Duration::new(1, 0); let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?]; - let mut total_packets = reqs_v[0].len(); + let mut total_requests = 
reqs_v[0].len(); - let mut dropped_packets = 0; + let mut dropped_requests = 0; while let Ok(more) = requests_receiver.try_recv() { - total_packets += more.len(); - if packet_threshold.should_drop(total_packets) { - dropped_packets += more.len(); + total_requests += more.len(); + if packet_threshold.should_drop(total_requests) { + dropped_requests += more.len(); } else { reqs_v.push(more); } } - stats.dropped_packets += dropped_packets; - stats.total_packets += total_packets; + stats.dropped_requests += dropped_requests; + stats.total_requests += total_requests; let timer = Instant::now(); for reqs in reqs_v { Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats); } - packet_threshold.update(total_packets, timer.elapsed()); + packet_threshold.update(total_requests, timer.elapsed()); Ok(()) } @@ -360,24 +361,26 @@ impl ServeRepair { inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair); } - inc_new_counter_info!("serve_repair-total_packets", stats.total_packets); - inc_new_counter_info!("serve_repair-dropped_packets", stats.dropped_packets); - - debug!( - "repair_listener: total_packets: {} passed: {}", - stats.total_packets, stats.processed + datapoint_info!( + "serve_repair-requests_received", + ("total_requests", stats.total_requests, i64), + ("dropped_requests", stats.dropped_requests, i64), + ("total_response_packets", stats.total_response_packets, i64), + ("self_repair", stats.self_repair, i64), + ("window_index", stats.window_index, i64), + ( + "request-highest-window-index", + stats.highest_window_index, + i64 + ), + ("orphan", stats.orphan, i64), + ( + "serve_repair-request-ancestor-hashes", + stats.ancestor_hashes, + i64 + ), ); - inc_new_counter_debug!("serve_repair-request-window-index", stats.window_index); - inc_new_counter_debug!( - "serve_repair-request-highest-window-index", - stats.highest_window_index - ); - inc_new_counter_debug!("serve_repair-request-orphan", stats.orphan); - inc_new_counter_debug!( - "serve_repair-request-ancestor-hashes", - stats.ancestor_hashes - ); *stats = ServeRepairStats::default(); } @@ -436,6 +439,7 @@ impl ServeRepair { stats.processed += 1; let from_addr = packet.meta.socket_addr(); let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats); + stats.total_response_packets += rsp.as_ref().map(PacketBatch::len).unwrap_or(0); if let Some(rsp) = rsp { let _ignore_disconnect = response_sender.send(rsp); } From b582e4ce0fe8213bd52f10758118d9ced5ee1ecd Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 7 Jul 2022 15:40:17 -0500 Subject: [PATCH 072/100] introduce UpsertReclaim (#26462) --- runtime/benches/accounts_index.rs | 7 ++-- runtime/src/accounts_db.rs | 17 ++++++--- runtime/src/accounts_index.rs | 23 +++++++----- runtime/src/in_mem_accounts_index.rs | 56 ++++++++++++++-------------- 4 files changed, 56 insertions(+), 47 deletions(-) diff --git a/runtime/benches/accounts_index.rs b/runtime/benches/accounts_index.rs index 9a076d4dfc8e48..c7032075ede0ad 100644 --- a/runtime/benches/accounts_index.rs +++ b/runtime/benches/accounts_index.rs @@ -7,7 +7,8 @@ use { solana_runtime::{ account_info::AccountInfo, accounts_index::{ - AccountSecondaryIndexes, AccountsIndex, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, + AccountSecondaryIndexes, AccountsIndex, UpsertReclaim, + ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, }, }, solana_sdk::{account::AccountSharedData, pubkey}, @@ -33,7 +34,7 @@ fn bench_accounts_index(bencher: &mut Bencher) { &AccountSecondaryIndexes::default(), 
AccountInfo::default(), &mut reclaims, - false, + UpsertReclaim::PopulateReclaims, ); } } @@ -51,7 +52,7 @@ fn bench_accounts_index(bencher: &mut Bencher) { &AccountSecondaryIndexes::default(), AccountInfo::default(), &mut reclaims, - false, + UpsertReclaim::PopulateReclaims, ); reclaims.clear(); } diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 4760b6bd3dc00a..757c4c69bd3fd5 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -30,7 +30,7 @@ use { accounts_index::{ AccountIndexGetResult, AccountSecondaryIndexes, AccountsIndex, AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, IndexKey, IndexValue, IsCached, - RefCount, ScanConfig, ScanResult, SlotList, SlotSlice, ZeroLamport, + RefCount, ScanConfig, ScanResult, SlotList, SlotSlice, UpsertReclaim, ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, accounts_index_storage::Startup, @@ -7107,7 +7107,7 @@ impl AccountsDb { &self, infos: Vec, accounts: impl StorableAccounts<'a, T>, - previous_slot_entry_was_cached: bool, + reclaim: UpsertReclaim, ) -> SlotList { let target_slot = accounts.target_slot(); // using a thread pool here results in deadlock panics from bank_hashes.write() @@ -7130,7 +7130,7 @@ impl AccountsDb { &self.account_indexes, info, &mut reclaims, - previous_slot_entry_was_cached, + reclaim, ); }); reclaims @@ -7716,7 +7716,11 @@ impl AccountsDb { .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed); let mut update_index_time = Measure::start("update_index"); - let previous_slot_entry_was_cached = self.caching_enabled && is_cached_store; + let reclaim = if self.caching_enabled && is_cached_store { + UpsertReclaim::PreviousSlotEntryWasCached + } else { + UpsertReclaim::PopulateReclaims + }; // if we are squashing a single slot, then we can expect a single dead slot let expected_single_dead_slot = @@ -7726,7 +7730,7 @@ impl AccountsDb { // after the account are stored by the above `store_accounts_to` // call and all the accounts are stored, all reads after this point // will know to not check the cache anymore - let mut reclaims = self.update_index(infos, accounts, previous_slot_entry_was_cached); + let mut reclaims = self.update_index(infos, accounts, reclaim); // For each updated account, `reclaims` should only have at most one // item (if the account was previously updated in this slot). @@ -12690,7 +12694,8 @@ pub mod tests { ); } - const UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE: bool = false; + const UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE: UpsertReclaim = + UpsertReclaim::PopulateReclaims; #[test] fn test_delete_dependencies() { diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 3a61585d340fdd..fc4e6e8c58227f 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -70,6 +70,16 @@ pub type SlotSlice<'s, T> = &'s [(Slot, T)]; pub type RefCount = u64; pub type AccountMap = Arc>; +#[derive(Debug, Clone, Copy)] +/// how accounts index 'upsert' should handle reclaims +pub enum UpsertReclaim { + /// previous entry for this slot in the index is expected to be cached, so irrelevant to reclaims + PreviousSlotEntryWasCached, + /// previous entry for this slot in the index may need to be reclaimed, so return it. + /// reclaims is the only output of upsert, requiring a synchronous execution + PopulateReclaims, +} + #[derive(Debug, Default)] pub struct ScanConfig { /// checked by the scan. When true, abort scan. 
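
// A minimal, self-contained sketch of how the new enum reads at a call site,
// assuming simplified stand-in types rather than the crate's real ones. The
// two variants make the legal behaviors explicit where a bare `false` used to
// be threaded through as `previous_slot_entry_was_cached`.

type Slot = u64;

#[derive(Debug, Clone, Copy)]
enum UpsertReclaim {
    PreviousSlotEntryWasCached,
    PopulateReclaims,
}

#[derive(Debug, Clone, Copy)]
struct AccountInfo {
    cached: bool,
}

fn update_slot_list(
    slot_list: &mut Vec<(Slot, AccountInfo)>,
    slot: Slot,
    new_info: AccountInfo,
    reclaims: &mut Vec<(Slot, AccountInfo)>,
    reclaim: UpsertReclaim,
) {
    if let Some(index) = slot_list.iter().position(|(s, _)| *s == slot) {
        let old = slot_list.remove(index);
        match reclaim {
            // Caller wants the displaced entry back, synchronously.
            UpsertReclaim::PopulateReclaims => reclaims.push(old),
            // Displaced entry must have been cached; nothing to reclaim.
            UpsertReclaim::PreviousSlotEntryWasCached => assert!(old.1.cached),
        }
    }
    slot_list.push((slot, new_info));
}

fn main() {
    let mut slot_list = vec![(5, AccountInfo { cached: false })];
    let mut reclaims = Vec::new();
    let new_info = AccountInfo { cached: false };
    update_slot_list(&mut slot_list, 5, new_info, &mut reclaims, UpsertReclaim::PopulateReclaims);
    assert_eq!(reclaims.len(), 1);
}
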
@@ -1605,7 +1615,7 @@ impl AccountsIndex { account_indexes: &AccountSecondaryIndexes, account_info: T, reclaims: &mut SlotList, - previous_slot_entry_was_cached: bool, + reclaim: UpsertReclaim, ) { // vast majority of updates are to item already in accounts index, so store as raw to avoid unnecessary allocations let store_raw = true; @@ -1631,13 +1641,7 @@ impl AccountsIndex { { let r_account_maps = map.read().unwrap(); - r_account_maps.upsert( - pubkey, - new_item, - Some(old_slot), - reclaims, - previous_slot_entry_was_cached, - ); + r_account_maps.upsert(pubkey, new_item, Some(old_slot), reclaims, reclaim); } self.update_secondary_indexes(pubkey, account, account_indexes); } @@ -2301,7 +2305,8 @@ pub mod tests { assert!(index.include_key(&pk2)); } - const UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE: bool = false; + const UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE: UpsertReclaim = + UpsertReclaim::PopulateReclaims; #[test] fn test_insert_no_ancestors() { diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index b1fabd4c28d17f..500aa3d186c512 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -2,7 +2,7 @@ use { crate::{ accounts_index::{ AccountMapEntry, AccountMapEntryInner, AccountMapEntryMeta, IndexValue, - PreAllocatedAccountMapEntry, RefCount, SlotList, SlotSlice, ZeroLamport, + PreAllocatedAccountMapEntry, RefCount, SlotList, SlotSlice, UpsertReclaim, ZeroLamport, }, bucket_map_holder::{Age, BucketMapHolder}, bucket_map_holder_stats::BucketMapHolderStats, @@ -364,7 +364,7 @@ impl InMemAccountsIndex { new_value: PreAllocatedAccountMapEntry, other_slot: Option, reclaims: &mut SlotList, - previous_slot_entry_was_cached: bool, + reclaim: UpsertReclaim, ) { let mut updated_in_mem = true; // try to get it just from memory first using only a read lock @@ -375,7 +375,7 @@ impl InMemAccountsIndex { new_value.into(), other_slot, reclaims, - previous_slot_entry_was_cached, + reclaim, ); // age is incremented by caller } else { @@ -392,7 +392,7 @@ impl InMemAccountsIndex { new_value.into(), other_slot, reclaims, - previous_slot_entry_was_cached, + reclaim, ); current.set_age(self.storage.future_age_to_flush()); } @@ -407,13 +407,8 @@ impl InMemAccountsIndex { // We may like this to always run, but it is unclear. // If disk bucket needs to resize, then this call can stall for a long time. // Right now, we know it is safe during startup. 
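
// The `upsert` flow above tries the common case under a read lock and only
// falls back to the map's write lock on a miss. A minimal sketch of that
// read-mostly locking pattern, assuming simplified stand-in types (u64 keys,
// Vec payloads) instead of the crate's real index types.

use std::collections::HashMap;
use std::sync::RwLock;

struct InMemIndex {
    map: RwLock<HashMap<u64, RwLock<Vec<u64>>>>,
}

impl InMemIndex {
    fn upsert(&self, pubkey: u64, slot: u64) {
        {
            // Fast path: entry already in memory, so mutate through its own
            // interior lock while holding only the outer read lock.
            let map = self.map.read().unwrap();
            if let Some(entry) = map.get(&pubkey) {
                entry.write().unwrap().push(slot);
                return;
            }
        }
        // Slow path: take the write lock; `entry` re-checks, since another
        // thread may have inserted between the two lock acquisitions.
        let mut map = self.map.write().unwrap();
        map.entry(pubkey)
            .or_insert_with(|| RwLock::new(Vec::new()))
            .write()
            .unwrap()
            .push(slot);
    }
}

fn main() {
    let index = InMemIndex {
        map: RwLock::new(HashMap::new()),
    };
    index.upsert(1, 10);
    index.upsert(1, 11);
    assert_eq!(index.map.read().unwrap()[&1].read().unwrap().len(), 2);
}
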
- let already_existed = self.upsert_on_disk( - vacant, - new_value, - other_slot, - reclaims, - previous_slot_entry_was_cached, - ); + let already_existed = self + .upsert_on_disk(vacant, new_value, other_slot, reclaims, reclaim); if !already_existed { self.stats().inc_insert(); } @@ -427,7 +422,7 @@ impl InMemAccountsIndex { new_value.into(), other_slot, reclaims, - previous_slot_entry_was_cached, + reclaim, ); disk_entry } else { @@ -471,7 +466,7 @@ impl InMemAccountsIndex { new_value: (Slot, T), other_slot: Option, reclaims: &mut SlotList, - previous_slot_entry_was_cached: bool, + reclaim: UpsertReclaim, ) { let mut slot_list = current.slot_list.write().unwrap(); let (slot, new_entry) = new_value; @@ -481,7 +476,7 @@ impl InMemAccountsIndex { new_entry, other_slot, reclaims, - previous_slot_entry_was_cached, + reclaim, ); if addref { current.add_un_ref(true); @@ -504,7 +499,7 @@ impl InMemAccountsIndex { account_info: T, mut other_slot: Option, reclaims: &mut SlotList, - previous_slot_entry_was_cached: bool, + reclaim: UpsertReclaim, ) -> bool { let mut addref = !account_info.is_cached(); @@ -546,10 +541,13 @@ impl InMemAccountsIndex { // already replaced one entry, so this one has to be removed slot_list.remove(slot_list_index) }; - if previous_slot_entry_was_cached { - assert!(is_cur_account_cached); - } else { - reclaims.push(reclaim_item); + match reclaim { + UpsertReclaim::PopulateReclaims => { + reclaims.push(reclaim_item); + } + UpsertReclaim::PreviousSlotEntryWasCached => { + assert!(is_cur_account_cached); + } } if matched_slot { @@ -618,7 +616,7 @@ impl InMemAccountsIndex { (slot, account_info), None, // should be None because we don't expect a different slot # during index generation &mut Vec::default(), - false, + UpsertReclaim::PopulateReclaims, // this should be ignore? ); ( true, /* found in mem */ @@ -637,7 +635,7 @@ impl InMemAccountsIndex { new_entry, None, // not changing slots here since it doesn't exist in the index at all &mut Vec::default(), - false, + UpsertReclaim::PopulateReclaims, ); (false, already_existed) } else { @@ -652,7 +650,7 @@ impl InMemAccountsIndex { // There can be no 'other' slot in the list. 
None, &mut Vec::default(), - false, + UpsertReclaim::PopulateReclaims, ); vacant.insert(disk_entry); ( @@ -694,7 +692,7 @@ impl InMemAccountsIndex { new_entry: PreAllocatedAccountMapEntry, other_slot: Option, reclaims: &mut SlotList, - previous_slot_entry_was_cached: bool, + reclaim: UpsertReclaim, ) -> bool { if let Some(disk) = self.bucket.as_ref() { let mut existed = false; @@ -709,7 +707,7 @@ impl InMemAccountsIndex { account_info, other_slot, reclaims, - previous_slot_entry_was_cached, + reclaim, ); if addref { ref_count += 1 @@ -1672,7 +1670,7 @@ mod tests { #[test] fn test_update_slot_list_other() { solana_logger::setup(); - let previous_slot_entry_was_cached = false; + let reclaim = UpsertReclaim::PopulateReclaims; let new_slot = 0; let info = 1; let other_value = info + 1; @@ -1689,7 +1687,7 @@ mod tests { info, other_slot, &mut reclaims, - previous_slot_entry_was_cached + reclaim ), "other_slot: {:?}", other_slot @@ -1711,7 +1709,7 @@ mod tests { info, other_slot, &mut reclaims, - previous_slot_entry_was_cached + reclaim ), "other_slot: {:?}", other_slot @@ -1732,7 +1730,7 @@ mod tests { info, other_slot, &mut reclaims, - previous_slot_entry_was_cached + reclaim ), "other_slot: {:?}", other_slot @@ -1805,7 +1803,7 @@ mod tests { info, other_slot, &mut reclaims, - previous_slot_entry_was_cached, + reclaim, ); // calculate expected results From ee0a54ce802c7f94aea738ef684e11ca22b13f03 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 7 Jul 2022 15:50:12 -0500 Subject: [PATCH 073/100] improve clean acct idx calls (#26320) --- runtime/src/accounts_db.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 757c4c69bd3fd5..650fd28f16e1ad 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -7358,15 +7358,19 @@ impl AccountsDb { let mut measure = Measure::start("unref_from_storage"); if let Some(purged_stored_account_slots) = purged_stored_account_slots { let len = purged_stored_account_slots.len(); - // we could build a higher level function in accounts_index to group by bin const BATCH_SIZE: usize = 10_000; let batches = 1 + (len / BATCH_SIZE); self.thread_pool_clean.install(|| { (0..batches).into_par_iter().for_each(|batch| { let skip = batch * BATCH_SIZE; - for (_slot, pubkey) in purged_slot_pubkeys.iter().skip(skip).take(BATCH_SIZE) { - self.accounts_index.unref_from_storage(pubkey); - } + self.accounts_index.scan( + purged_slot_pubkeys + .iter() + .skip(skip) + .take(BATCH_SIZE) + .map(|(_slot, pubkey)| pubkey), + |_pubkey, _slots_refs| AccountsIndexScanResult::Unref, + ) }) }); for (slot, pubkey) in purged_slot_pubkeys { From 8caf0f3d05c22ae577ecbbaf9d26ad995c53745e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Jul 2022 14:58:46 -0600 Subject: [PATCH 074/100] chore: bump parking_lot from 0.12.0 to 0.12.1 (#26476) * chore: bump parking_lot from 0.12.0 to 0.12.1 Bumps [parking_lot](https://github.com/Amanieu/parking_lot) from 0.12.0 to 0.12.1. - [Release notes](https://github.com/Amanieu/parking_lot/releases) - [Changelog](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md) - [Commits](https://github.com/Amanieu/parking_lot/compare/0.12.0...0.12.1) --- updated-dependencies: - dependency-name: parking_lot dependency-type: direct:production update-type: version-update:semver-patch ... 
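
The unref change above replaces a per-pubkey loop with one `scan` call per fixed-size batch. A minimal sketch of the skip/take batching arithmetic follows; it runs sequentially here, where the real code fans the batches out on a rayon thread pool:

fn process_in_batches<T>(items: &[T], mut process_batch: impl FnMut(&[T])) {
    const BATCH_SIZE: usize = 10_000;
    let len = items.len();
    // At least one batch, plus coverage for a trailing partial batch.
    let batches = 1 + len / BATCH_SIZE;
    for batch in 0..batches {
        let skip = batch * BATCH_SIZE;
        let end = (skip + BATCH_SIZE).min(len);
        if skip < end {
            process_batch(&items[skip..end]);
        }
    }
}

fn main() {
    let items: Vec<u32> = (0..25_000).collect();
    let mut seen = 0;
    process_in_batches(&items, |batch| seen += batch.len());
    assert_eq!(seen, 25_000);
}
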
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 12 ++++++------ programs/bpf/Cargo.lock | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ab4f482d4b3df..6c2ab9fea51d97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2982,9 +2982,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core 0.9.1", @@ -4199,7 +4199,7 @@ dependencies = [ "futures 0.3.21", "lazy_static", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "serial_test_derive", ] @@ -5644,7 +5644,7 @@ dependencies = [ "log", "num-derive", "num-traits", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rustc_version 0.4.0", "rustversion", @@ -5687,7 +5687,7 @@ dependencies = [ "memoffset", "num-derive", "num-traits", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rustc_version 0.4.0", "rustversion", @@ -5772,7 +5772,7 @@ dependencies = [ "log", "num-derive", "num-traits", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "qstring", "semver 1.0.10", "solana-sdk 1.11.3", diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 8b0596eeef6ac3..e822771b5745ec 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2710,9 +2710,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core 0.9.1", @@ -5005,7 +5005,7 @@ dependencies = [ "log", "num-derive", "num-traits", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rustc_version", "rustversion", @@ -5046,7 +5046,7 @@ dependencies = [ "memoffset", "num-derive", "num-traits", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rustc_version", "rustversion", @@ -5126,7 +5126,7 @@ dependencies = [ "log", "num-derive", "num-traits", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "qstring", "semver", "solana-sdk 1.11.3", From 4a78dfe240a8b48f4a4f51e7b8ed02be5b52ed0d Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Thu, 7 Jul 2022 16:06:34 -0500 Subject: [PATCH 075/100] Clarify micro-lamports is unit of Compute Unit Price (#26486) * to clarify micro-lamport is the unit of compute-unit price Co-authored-by: Tyera Eulberg --- docs/src/terminology.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/terminology.md b/docs/src/terminology.md index b25f9bbd4f88a2..2c22efb2bbdfa3 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -223,7 +223,7 @@ A stack of proofs, each of which proves that some data existed before the proof An additional fee user can specify in compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). 
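
As a worked illustration of the revised formula in this hunk (numbers chosen for the example, not taken from the patch): a transaction requesting 300,000 compute units at a compute-unit price of 2,500 micro-lamports pays 300,000 × 2,500 = 750,000,000 micro-lamports, i.e. 750 lamports; a request of 100 units at 1 micro-lamport totals only 100 micro-lamports and still rounds up to 1 lamport.
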
-The prioritization fee is calculated from multiplying the number of compute units requested by the compute unit price (0.000001 lamports per compute unit) rounded up to the nearest lamport. +The prioritization fee is calculated by multiplying the requested maximum compute units by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. Transactions should request the minimum amount of compute units required for execution to minimize fees. From 38d53ef8a258ab84988f57e2379678eadf40aeab Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 7 Jul 2022 16:20:32 -0500 Subject: [PATCH 076/100] add metrics for # scans active and scan distance (#26395) --- runtime/src/accounts_db.rs | 12 ++++++++++++ runtime/src/accounts_index.rs | 28 ++++++++++++++++++++++------ 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 650fd28f16e1ad..7287198431d83f 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2837,6 +2837,18 @@ impl AccountsDb { self.accounts_index.roots_removed.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "active_scans", + self.accounts_index.active_scans.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "max_distance_to_min_scan_slot", + self.accounts_index + .max_distance_to_min_scan_slot + .swap(0, Ordering::Relaxed), + i64 + ), ("next_store_id", self.next_id.load(Ordering::Relaxed), i64), ); } diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index fc4e6e8c58227f..55c53e33ca1c20 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -688,6 +688,10 @@ pub struct AccountsIndex { pub roots_added: AtomicUsize, /// # roots removed since last check pub roots_removed: AtomicUsize, + /// # scans active currently + pub active_scans: AtomicUsize, + /// # of slots between latest max and latest scan + pub max_distance_to_min_scan_slot: AtomicU64, } impl AccountsIndex { @@ -719,6 +723,8 @@ impl AccountsIndex { scan_results_limit_bytes, roots_added: AtomicUsize::default(), roots_removed: AtomicUsize::default(), + active_scans: AtomicUsize::default(), + max_distance_to_min_scan_slot: AtomicU64::default(), } } @@ -755,6 +761,10 @@ impl AccountsIndex { self.storage.storage.is_disk_index_enabled() } + fn min_ongoing_scan_root_from_btree(ongoing_scan_roots: &BTreeMap) -> Option { + ongoing_scan_roots.keys().next().cloned() + } + fn do_checked_scan_accounts( &self, metric_name: &'static str, @@ -778,6 +788,7 @@ impl AccountsIndex { } } + self.active_scans.fetch_add(1, Ordering::Relaxed); let max_root = { let mut w_ongoing_scan_roots = self // This lock is also grabbed by clean_accounts(), so clean @@ -792,6 +803,15 @@ impl AccountsIndex { // make sure inverse doesn't happen to avoid // deadlock let max_root_inclusive = self.max_root_inclusive(); + if let Some(min_ongoing_scan_root) = + Self::min_ongoing_scan_root_from_btree(&w_ongoing_scan_roots) + { + if min_ongoing_scan_root < max_root_inclusive { + let current = max_root_inclusive - min_ongoing_scan_root; + self.max_distance_to_min_scan_slot + .fetch_max(current, Ordering::Relaxed); + } + } *w_ongoing_scan_roots.entry(max_root_inclusive).or_default() += 1; max_root_inclusive @@ -950,6 +970,7 @@ impl AccountsIndex { } { + self.active_scans.fetch_sub(1, Ordering::Relaxed); let mut ongoing_scan_roots = self.ongoing_scan_roots.write().unwrap(); let count = ongoing_scan_roots.get_mut(&max_root).unwrap(); *count -= 1; @@ -1252,12 
+1273,7 @@ impl AccountsIndex { } pub fn min_ongoing_scan_root(&self) -> Option { - self.ongoing_scan_roots - .read() - .unwrap() - .keys() - .next() - .cloned() + Self::min_ongoing_scan_root_from_btree(&self.ongoing_scan_roots.read().unwrap()) } // Given a SlotSlice `L`, a list of ancestors and a maximum slot, find the latest element From 3b759bcffbdacdc47fbb22ea1cb99821c609b82e Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 7 Jul 2022 18:08:42 -0500 Subject: [PATCH 077/100] Add fn to get max accounts data size from Bank (#26478) --- runtime/src/bank.rs | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0c128c6cd1b0e9..46a583a83ee33e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1866,7 +1866,11 @@ impl Bank { cost_tracker: RwLock::new(CostTracker::new_with_account_data_size_limit( feature_set .is_active(&feature_set::cap_accounts_data_len::id()) - .then(|| MAX_ACCOUNTS_DATA_LEN.saturating_sub(accounts_data_size_initial)), + .then(|| { + parent + .accounts_data_size_limit() + .saturating_sub(accounts_data_size_initial) + }), )), sysvar_cache: RwLock::new(SysvarCache::default()), accounts_data_size_initial, @@ -4279,7 +4283,8 @@ impl Bank { transaction_accounts, compute_budget.max_invoke_depth.saturating_add(1), tx.message().instructions().len(), - MAX_ACCOUNTS_DATA_LEN.saturating_sub(prev_accounts_data_len), + self.accounts_data_size_limit() + .saturating_sub(prev_accounts_data_len), ); let pre_account_state_info = @@ -4653,6 +4658,11 @@ impl Bank { } } + /// The maximum allowed size, in bytes, of the accounts data + pub fn accounts_data_size_limit(&self) -> u64 { + MAX_ACCOUNTS_DATA_LEN + } + /// Load the accounts data size, in bytes pub fn load_accounts_data_size(&self) -> u64 { // Mixed integer ops currently not stable, so copying the impl. 
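
// The new `accounts_data_size_limit()` accessor above centralizes the cap so
// callers derive the remaining budget as `limit.saturating_sub(initial)`.
// A minimal sketch of why the saturating form matters; the constant here is
// an illustrative stand-in, not the runtime's actual value.

const MAX_ACCOUNTS_DATA_LEN: u64 = 128 * 1024 * 1024 * 1024;

fn remaining_accounts_data_budget(accounts_data_size_initial: u64) -> u64 {
    // Clamps to zero instead of underflowing if the initial size already
    // exceeds the limit.
    MAX_ACCOUNTS_DATA_LEN.saturating_sub(accounts_data_size_initial)
}

fn main() {
    assert_eq!(remaining_accounts_data_budget(0), MAX_ACCOUNTS_DATA_LEN);
    assert_eq!(remaining_accounts_data_budget(u64::MAX), 0);
}
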
@@ -6383,7 +6393,8 @@ impl Bank { .is_active(&feature_set::cap_accounts_data_len::id()) { self.cost_tracker = RwLock::new(CostTracker::new_with_account_data_size_limit(Some( - MAX_ACCOUNTS_DATA_LEN.saturating_sub(self.accounts_data_size_initial), + self.accounts_data_size_limit() + .saturating_sub(self.accounts_data_size_initial), ))); } } @@ -7761,7 +7772,6 @@ pub(crate) mod tests { }, crossbeam_channel::{bounded, unbounded}, solana_program_runtime::{ - accounts_data_meter::MAX_ACCOUNTS_DATA_LEN, compute_budget::MAX_COMPUTE_UNIT_LIMIT, invoke_context::InvokeContext, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, @@ -17731,15 +17741,13 @@ pub(crate) mod tests { const NUM_ACCOUNTS: u64 = 20; const ACCOUNT_SIZE: u64 = MAX_PERMITTED_DATA_LENGTH / (NUM_ACCOUNTS + 1); const REMAINING_ACCOUNTS_DATA_SIZE: u64 = NUM_ACCOUNTS * ACCOUNT_SIZE; - const INITIAL_ACCOUNTS_DATA_SIZE: u64 = - MAX_ACCOUNTS_DATA_LEN - REMAINING_ACCOUNTS_DATA_SIZE; let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000_000); let mut bank = Bank::new_for_tests(&genesis_config); bank.activate_feature(&feature_set::cap_accounts_data_len::id()); - bank.set_accounts_data_size_initial_for_tests( - INITIAL_ACCOUNTS_DATA_SIZE - bank.load_accounts_data_size_delta() as u64, - ); + bank.accounts_data_size_initial = bank.accounts_data_size_limit() + - REMAINING_ACCOUNTS_DATA_SIZE + - bank.load_accounts_data_size_delta() as u64; let mut i = 0; let result = loop { @@ -17757,7 +17765,7 @@ pub(crate) mod tests { let accounts_data_size_before = bank.load_accounts_data_size(); let result = bank.process_transaction(&txn); let accounts_data_size_after = bank.load_accounts_data_size(); - assert!(accounts_data_size_after <= MAX_ACCOUNTS_DATA_LEN); + assert!(accounts_data_size_after <= bank.accounts_data_size_limit()); if result.is_err() { assert_eq!(i, NUM_ACCOUNTS); break result; From d9eee72edb29f90bdd307ffd143739e34eb2bbf0 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 7 Jul 2022 18:37:14 -0500 Subject: [PATCH 078/100] remove FoundStoredAccount.account_size (#26460) --- runtime/src/accounts_db.rs | 9 ++------- runtime/src/ancient_append_vecs.rs | 12 +++--------- runtime/src/snapshot_minimizer.rs | 2 +- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 7287198431d83f..33cc27d2a792d9 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -210,7 +210,6 @@ pub struct AccountsDbConfig { pub struct FoundStoredAccount<'a> { pub account: StoredAccountMeta<'a>, pub store_id: AppendVecId, - pub account_size: usize, } #[cfg(not(test))] @@ -3081,7 +3080,7 @@ impl AccountsDb { dead += 1; } else { alive_accounts.push(pair); - alive_total += stored_account.account_size; + alive_total += stored_account.account.stored_size; alive += 1; } } @@ -3116,11 +3115,7 @@ impl AccountsDb { original_bytes += store.total_bytes(); let store_id = store.append_vec_id(); AppendVecAccountsIter::new(&store.accounts).for_each(|account| { - let new_entry = FoundStoredAccount { - account_size: account.stored_size, - account, - store_id, - }; + let new_entry = FoundStoredAccount { account, store_id }; match stored_accounts.entry(new_entry.account.meta.pubkey) { Entry::Occupied(mut occupied_entry) => { if new_entry.account.meta.write_version diff --git a/runtime/src/ancient_append_vecs.rs b/runtime/src/ancient_append_vecs.rs index 58ad2f5df23539..2e7c994c9a0f5a 100644 --- a/runtime/src/ancient_append_vecs.rs +++ 
b/runtime/src/ancient_append_vecs.rs @@ -46,7 +46,7 @@ impl<'a> AccountsToStore<'a> { // index of the first account that doesn't fit in the current append vec let mut index_first_item_overflow = num_accounts; // assume all fit stored_accounts.iter().for_each(|account| { - let account_size = account.1.account_size as u64; + let account_size = account.1.account.stored_size as u64; if available_bytes >= account_size { available_bytes = available_bytes.saturating_sub(account_size); } else if index_first_item_overflow == num_accounts { @@ -147,7 +147,6 @@ pub mod tests { rent_epoch: 0, }; let offset = 3; - let stored_size = 4; let hash = Hash::new(&[2; 32]); let stored_meta = StoredMeta { /// global write version @@ -162,15 +161,10 @@ pub mod tests { account_meta: &account_meta, data: account.data(), offset, - stored_size, + stored_size: account_size, hash: &hash, }; - // let account = StoredAccountMeta::new(); - let found = FoundStoredAccount { - account, - store_id, - account_size, - }; + let found = FoundStoredAccount { account, store_id }; let item = (pubkey, found); let map = vec![&item]; for (selector, available_bytes) in [ diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 8aa908339d55fc..e92ed871fa792d 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -320,7 +320,7 @@ impl<'a> SnapshotMinimizer<'a> { let mut purge_pubkeys = Vec::with_capacity(CHUNK_SIZE); chunk.iter().for_each(|(pubkey, account)| { if self.minimized_account_set.contains(pubkey) { - chunk_bytes += account.account_size; + chunk_bytes += account.account.stored_size; keep_accounts.push((pubkey, account)); } else if self .accounts_db() From 312748721d2fcb2d1e2352c7fb02853f5ce61e29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Jul 2022 20:54:27 -0600 Subject: [PATCH 079/100] chore: bump rustversion from 1.0.6 to 1.0.7 (#26488) * chore: bump rustversion from 1.0.6 to 1.0.7 Bumps [rustversion](https://github.com/dtolnay/rustversion) from 1.0.6 to 1.0.7. - [Release notes](https://github.com/dtolnay/rustversion/releases) - [Commits](https://github.com/dtolnay/rustversion/compare/1.0.6...1.0.7) --- updated-dependencies: - dependency-name: rustversion dependency-type: direct:production update-type: version-update:semver-patch ... 
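
A note on the `AccountsToStore` loop touched above: it computes a first-fit split point, spending a byte budget until the first account that does not fit, then routing everything from that index to the next append vec. A minimal sketch of that split, assuming plain u64 sizes in place of real stored accounts:

fn first_overflow_index(sizes: &[u64], mut available_bytes: u64) -> usize {
    let mut index_first_item_overflow = sizes.len(); // assume all fit
    for (i, size) in sizes.iter().enumerate() {
        if available_bytes >= *size {
            available_bytes = available_bytes.saturating_sub(*size);
        } else if index_first_item_overflow == sizes.len() {
            // Record only the first overflow point; the account list is
            // split at this index.
            index_first_item_overflow = i;
        }
    }
    index_first_item_overflow
}

fn main() {
    assert_eq!(first_overflow_index(&[10, 10, 10], 25), 2);
    assert_eq!(first_overflow_index(&[10, 10], 100), 2); // everything fits
}
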
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files * Sync rustversions Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite Co-authored-by: Tyera Eulberg --- Cargo.lock | 4 ++-- programs/bpf/Cargo.lock | 4 ++-- sdk/Cargo.toml | 2 +- sdk/macro/Cargo.toml | 2 +- sdk/program/Cargo.toml | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c2ab9fea51d97..6f71163af0a340 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3979,9 +3979,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" [[package]] name = "rusty-fork" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index e822771b5745ec..a171967f1373c0 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -3554,9 +3554,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" [[package]] name = "ryu" diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index cc9b9040524204..48aa985a802a97 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -64,7 +64,7 @@ pbkdf2 = { version = "0.11.0", default-features = false } qstring = "0.7.2" rand = { version = "0.7.0", optional = true } rand_chacha = { version = "0.2.2", optional = true } -rustversion = "1.0.6" +rustversion = "1.0.7" serde = "1.0.138" serde_bytes = "0.11" serde_derive = "1.0.103" diff --git a/sdk/macro/Cargo.toml b/sdk/macro/Cargo.toml index 4f95d7a1c34b51..58dfabec69a0f6 100644 --- a/sdk/macro/Cargo.toml +++ b/sdk/macro/Cargo.toml @@ -17,7 +17,7 @@ bs58 = "0.4.0" proc-macro2 = "1.0.19" quote = "1.0" syn = { version = "1.0", features = ["full", "extra-traits"] } -rustversion = "1.0.3" +rustversion = "1.0.7" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index a7a91bb1081517..db7f6eb35b35fd 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -24,7 +24,7 @@ log = "0.4.14" memoffset = "0.6" num-derive = "0.3" num-traits = "0.2" -rustversion = "1.0.3" +rustversion = "1.0.7" serde = "1.0.112" serde_bytes = "0.11" serde_derive = "1.0.103" From b8b521535c0d1376811089d57cc8115caca5d00b Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Thu, 7 Jul 2022 20:55:18 -0600 Subject: [PATCH 080/100] Add Pubsub getVersion, and support programSubscribe filter mapping (#26482) * Add pubsub getVersion api * Generalize maybe_map_filters * Add filter mapping to blocking PubsubClient * Add version tracking to nonblocking PubsubClient * Add filter mapping to nonblocking PubsubClient --- client/src/nonblocking/pubsub_client.rs | 97 +++++++++++++++++++++++-- client/src/nonblocking/rpc_client.rs | 22 +----- client/src/pubsub_client.rs | 54 +++++++++++++- client/src/rpc_filter.rs | 24 ++++++ rpc/src/rpc_pubsub.rs | 31 +++++++- 5 files changed, 202 insertions(+), 26 deletions(-) diff --git a/client/src/nonblocking/pubsub_client.rs b/client/src/nonblocking/pubsub_client.rs index 56a7817153952f..4f252e4fd89caf 100644 --- a/client/src/nonblocking/pubsub_client.rs +++ 
b/client/src/nonblocking/pubsub_client.rs @@ -6,9 +6,10 @@ use { RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter, }, + rpc_filter::maybe_map_filters, rpc_response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, - RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, + RpcSignatureResult, RpcVersionInfo, RpcVote, SlotInfo, SlotUpdate, }, }, futures_util::{ @@ -25,7 +26,7 @@ use { thiserror::Error, tokio::{ net::TcpStream, - sync::{mpsc, oneshot}, + sync::{mpsc, oneshot, RwLock}, task::JoinHandle, time::{sleep, Duration}, }, @@ -62,6 +63,9 @@ pub enum PubsubClientError { #[error("subscribe failed: {reason}")] SubscribeFailed { reason: String, message: String }, + + #[error("request failed: {reason}")] + RequestFailed { reason: String, message: String }, } type UnsubscribeFn = Box BoxFuture<'static, ()> + Send>; @@ -69,11 +73,18 @@ type SubscribeResponseMsg = Result<(mpsc::UnboundedReceiver, UnsubscribeFn), PubsubClientError>; type SubscribeRequestMsg = (String, Value, oneshot::Sender); type SubscribeResult<'a, T> = PubsubClientResult<(BoxStream<'a, T>, UnsubscribeFn)>; +type RequestMsg = ( + String, + Value, + oneshot::Sender>, +); #[derive(Debug)] pub struct PubsubClient { subscribe_tx: mpsc::UnboundedSender, + request_tx: mpsc::UnboundedSender, shutdown_tx: oneshot::Sender<()>, + node_version: RwLock>, ws: JoinHandle, } @@ -85,12 +96,20 @@ impl PubsubClient { .map_err(PubsubClientError::ConnectionError)?; let (subscribe_tx, subscribe_rx) = mpsc::unbounded_channel(); + let (request_tx, request_rx) = mpsc::unbounded_channel(); let (shutdown_tx, shutdown_rx) = oneshot::channel(); Ok(Self { subscribe_tx, + request_tx, shutdown_tx, - ws: tokio::spawn(PubsubClient::run_ws(ws, subscribe_rx, shutdown_rx)), + node_version: RwLock::new(None), + ws: tokio::spawn(PubsubClient::run_ws( + ws, + subscribe_rx, + request_rx, + shutdown_rx, + )), }) } @@ -99,6 +118,37 @@ impl PubsubClient { self.ws.await.unwrap() // WS future should not be cancelled or panicked } + async fn get_node_version(&self) -> PubsubClientResult { + let r_node_version = self.node_version.read().await; + if let Some(version) = &*r_node_version { + Ok(version.clone()) + } else { + drop(r_node_version); + let mut w_node_version = self.node_version.write().await; + let node_version = self.get_version().await?; + *w_node_version = Some(node_version.clone()); + Ok(node_version) + } + } + + async fn get_version(&self) -> PubsubClientResult { + let (response_tx, response_rx) = oneshot::channel(); + self.request_tx + .send(("getVersion".to_string(), Value::Null, response_tx)) + .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))?; + let result = response_rx + .await + .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))??; + let node_version: RpcVersionInfo = serde_json::from_value(result)?; + let node_version = semver::Version::parse(&node_version.solana_core).map_err(|e| { + PubsubClientError::RequestFailed { + reason: format!("failed to parse cluster version: {}", e), + message: "getVersion".to_string(), + } + })?; + Ok(node_version) + } + async fn subscribe<'a, T>(&self, operation: &str, params: Value) -> SubscribeResult<'a, T> where T: DeserializeOwned + Send + 'a, @@ -147,8 +197,22 @@ impl PubsubClient { pub async fn program_subscribe( &self, pubkey: &Pubkey, - config: Option, + mut config: Option, ) -> SubscribeResult<'_, RpcResponse> { + if let Some(ref mut config) = config { + if let Some(ref mut filters) = 
config.filters { + let node_version = self.get_node_version().await.ok(); + // If node does not support the pubsub `getVersion` method, assume version is old + // and filters should be mapped (node_version.is_none()). + maybe_map_filters(node_version, filters).map_err(|e| { + PubsubClientError::RequestFailed { + reason: e, + message: "maybe_map_filters".to_string(), + } + })?; + } + } + let params = json!([pubkey.to_string(), config]); self.subscribe("program", params).await } @@ -181,12 +245,14 @@ impl PubsubClient { async fn run_ws( mut ws: WebSocketStream>, mut subscribe_rx: mpsc::UnboundedReceiver, + mut request_rx: mpsc::UnboundedReceiver, mut shutdown_rx: oneshot::Receiver<()>, ) -> PubsubClientResult { let mut request_id: u64 = 0; let mut requests_subscribe = BTreeMap::new(); let mut requests_unsubscribe = BTreeMap::>::new(); + let mut other_requests = BTreeMap::new(); let mut subscriptions = BTreeMap::new(); let (unsubscribe_tx, mut unsubscribe_rx) = mpsc::unbounded_channel(); @@ -220,6 +286,13 @@ impl PubsubClient { ws.send(Message::Text(text)).await?; requests_unsubscribe.insert(request_id, response_tx); }, + // Read message for other requests + Some((method, params, response_tx)) = request_rx.recv() => { + request_id += 1; + let text = json!({"jsonrpc":"2.0","id":request_id,"method":method,"params":params}).to_string(); + ws.send(Message::Text(text)).await?; + other_requests.insert(request_id, response_tx); + } // Read incoming WebSocket message next_msg = ws.next() => { let msg = match next_msg { @@ -264,7 +337,21 @@ impl PubsubClient { } }); - if let Some(response_tx) = requests_unsubscribe.remove(&id) { + if let Some(response_tx) = other_requests.remove(&id) { + match err { + Some(reason) => { + let _ = response_tx.send(Err(PubsubClientError::RequestFailed { reason, message: text.clone()})); + }, + None => { + let json_result = json.get("result").ok_or_else(|| { + PubsubClientError::RequestFailed { reason: "missing `result` field".into(), message: text.clone() } + })?; + if response_tx.send(Ok(json_result.clone())).is_err() { + break; + } + } + } + } else if let Some(response_tx) = requests_unsubscribe.remove(&id) { let _ = response_tx.send(()); // do not care if receiver is closed } else if let Some((operation, response_tx)) = requests_subscribe.remove(&id) { match err { diff --git a/client/src/nonblocking/rpc_client.rs b/client/src/nonblocking/rpc_client.rs index e855addaab2568..e4afd7dae939de 100644 --- a/client/src/nonblocking/rpc_client.rs +++ b/client/src/nonblocking/rpc_client.rs @@ -19,7 +19,7 @@ use { mock_sender::MockSender, rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClientConfig}, rpc_config::{RpcAccountInfoConfig, *}, - rpc_filter::{MemcmpEncodedBytes, RpcFilterType}, + rpc_filter::{self, RpcFilterType}, rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter}, rpc_response::*, rpc_sender::*, @@ -587,24 +587,8 @@ impl RpcClient { mut filters: Vec, ) -> Result, RpcError> { let node_version = self.get_node_version().await?; - if node_version < semver::Version::new(1, 11, 2) { - for filter in filters.iter_mut() { - if let RpcFilterType::Memcmp(memcmp) = filter { - match &memcmp.bytes { - MemcmpEncodedBytes::Base58(string) => { - memcmp.bytes = MemcmpEncodedBytes::Binary(string.clone()); - } - MemcmpEncodedBytes::Base64(_) => { - return Err(RpcError::RpcRequestError(format!( - "RPC node on old version {} does not support base64 encoding for memcmp filters", - node_version - ))); - } - _ => {} - } - } - } - } + 
rpc_filter::maybe_map_filters(Some(node_version), &mut filters) + .map_err(RpcError::RpcRequestError)?; Ok(filters) } diff --git a/client/src/pubsub_client.rs b/client/src/pubsub_client.rs index 22d5182ae4f90c..22bf49b6ec4fdb 100644 --- a/client/src/pubsub_client.rs +++ b/client/src/pubsub_client.rs @@ -5,6 +5,7 @@ use { RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter, }, + rpc_filter, rpc_response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, @@ -48,6 +49,9 @@ pub enum PubsubClientError { #[error("unexpected message format: {0}")] UnexpectedMessageError(String), + + #[error("request error: {0}")] + RequestError(String), } pub struct PubsubClientSubscription @@ -123,6 +127,43 @@ where .map_err(|err| err.into()) } + fn get_version( + writable_socket: &Arc>>>, + ) -> Result { + writable_socket + .write() + .unwrap() + .write_message(Message::Text( + json!({ + "jsonrpc":"2.0","id":1,"method":"getVersion", + }) + .to_string(), + ))?; + let message = writable_socket.write().unwrap().read_message()?; + let message_text = &message.into_text()?; + let json_msg: Map = serde_json::from_str(message_text)?; + + if let Some(Object(version_map)) = json_msg.get("result") { + if let Some(node_version) = version_map.get("solana-core") { + let node_version = semver::Version::parse( + node_version.as_str().unwrap_or_default(), + ) + .map_err(|e| { + PubsubClientError::RequestError(format!( + "failed to parse cluster version: {}", + e + )) + })?; + return Ok(node_version); + } + } + // TODO: Add proper JSON RPC response/error handling... + Err(PubsubClientError::UnexpectedMessageError(format!( + "{:?}", + json_msg + ))) + } + fn read_message( writable_socket: &Arc>>>, ) -> Result { @@ -357,7 +398,7 @@ impl PubsubClient { pub fn program_subscribe( url: &str, pubkey: &Pubkey, - config: Option, + mut config: Option, ) -> Result { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; @@ -367,6 +408,17 @@ impl PubsubClient { let socket_clone = socket.clone(); let exit = Arc::new(AtomicBool::new(false)); let exit_clone = exit.clone(); + + if let Some(ref mut config) = config { + if let Some(ref mut filters) = config.filters { + let node_version = PubsubProgramClientSubscription::get_version(&socket_clone).ok(); + // If node does not support the pubsub `getVersion` method, assume version is old + // and filters should be mapped (node_version.is_none()). 
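
// Both pubsub clients above fetch the node's version at most once and reuse
// it to decide whether memcmp filters need the legacy encoding. A minimal
// sketch of that check-read-then-fill-write caching, using std's sync RwLock
// and a tuple version as stand-ins for tokio's async lock, the `getVersion`
// request, and semver::Version.

use std::sync::RwLock;

type Version = (u64, u64, u64);

struct Client {
    node_version: RwLock<Option<Version>>,
}

impl Client {
    fn fetch_version(&self) -> Version {
        // Stand-in for the `getVersion` round trip over the websocket.
        (1, 11, 2)
    }

    fn get_node_version(&self) -> Version {
        if let Some(version) = *self.node_version.read().unwrap() {
            return version;
        }
        // Cache miss: fetch and fill under the write lock. A racing thread
        // may fetch too; last write wins, which is harmless for this value.
        let version = self.fetch_version();
        *self.node_version.write().unwrap() = Some(version);
        version
    }

    fn should_map_filters(&self) -> bool {
        // Nodes older than 1.11.2 need base58 memcmp filters mapped back to
        // the legacy binary encoding.
        self.get_node_version() < (1, 11, 2)
    }
}

fn main() {
    let client = Client {
        node_version: RwLock::new(None),
    };
    assert!(!client.should_map_filters());
    assert!(client.node_version.read().unwrap().is_some());
}
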
+ rpc_filter::maybe_map_filters(node_version, filters) + .map_err(PubsubClientError::RequestError)?; + } + } + let body = json!({ "jsonrpc":"2.0", "id":1, diff --git a/client/src/rpc_filter.rs b/client/src/rpc_filter.rs index 483fba80286ae7..1f6548c80a822c 100644 --- a/client/src/rpc_filter.rs +++ b/client/src/rpc_filter.rs @@ -259,6 +259,30 @@ impl From for Memcmp { } } +pub(crate) fn maybe_map_filters( + node_version: Option, + filters: &mut [RpcFilterType], +) -> Result<(), String> { + if node_version.is_none() || node_version.unwrap() < semver::Version::new(1, 11, 2) { + for filter in filters.iter_mut() { + if let RpcFilterType::Memcmp(memcmp) = filter { + match &memcmp.bytes { + MemcmpEncodedBytes::Base58(string) => { + memcmp.bytes = MemcmpEncodedBytes::Binary(string.clone()); + } + MemcmpEncodedBytes::Base64(_) => { + return Err("RPC node on old version does not support base64 \ + encoding for memcmp filters" + .to_string()); + } + _ => {} + } + } + } + } + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index 0b14f641cb3653..162a8a06ff6cf5 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -24,7 +24,7 @@ use { }, rpc_response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, - RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, + RpcSignatureResult, RpcVersionInfo, RpcVote, SlotInfo, SlotUpdate, }, }, solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, @@ -348,6 +348,10 @@ mod internal { // Unsubscribe from slot notification subscription. #[rpc(name = "rootUnsubscribe")] fn root_unsubscribe(&self, id: SubscriptionId) -> Result; + + // Get the current solana version running on the node + #[rpc(name = "getVersion")] + fn get_version(&self) -> Result; } } @@ -576,6 +580,14 @@ impl RpcSolPubSubInternal for RpcSolPubSubImpl { fn root_unsubscribe(&self, id: SubscriptionId) -> Result { self.unsubscribe(id) } + + fn get_version(&self) -> Result { + let version = solana_version::Version::default(); + Ok(RpcVersionInfo { + solana_core: version.to_string(), + feature_set: Some(version.feature_set), + }) + } } #[cfg(test)] @@ -1370,4 +1382,21 @@ mod tests { assert!(rpc.vote_unsubscribe(42.into()).is_err()); assert!(rpc.vote_unsubscribe(sub_id).is_ok()); } + + #[test] + fn test_get_version() { + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); + let (rpc, _receiver) = rpc_pubsub_service::test_connection(&rpc_subscriptions); + let version = rpc.get_version().unwrap(); + let expected_version = solana_version::Version::default(); + assert_eq!(version.to_string(), expected_version.to_string()); + assert_eq!(version.feature_set.unwrap(), expected_version.feature_set); + } } From 734fedea4c58faba638a15758407d38b11914357 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 7 Jul 2022 22:29:02 -0700 Subject: [PATCH 081/100] Create a more compact vote state update transaction (#26092) * Create a more compact vote state update transaction * pr comments * change root to not be an option and update abi --- core/src/consensus.rs | 7 +- programs/vote/src/vote_state/mod.rs | 311 +++++++++++++++++++++++++++- 2 files changed, 316 insertions(+), 2 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index b6458e4d3f4a39..22e8fb5c9f89aa 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -87,6 +87,11 @@ impl SwitchForkDecision { v, *switch_proof_hash, )), + (SwitchForkDecision::SameFork, VoteTransaction::CompactVoteStateUpdate(_v)) => None, + ( + SwitchForkDecision::SwitchProof(_switch_proof_hash), + VoteTransaction::CompactVoteStateUpdate(_v), + ) => None, } } @@ -154,7 +159,7 @@ impl TowerVersions { } } -#[frozen_abi(digest = "BfeSJNsfQeX6JU7dmezv1s1aSvR5SoyxKRRZ4ubTh2mt")] +#[frozen_abi(digest = "8Y9r3XAwXwmrVGMCyTuy4Kbdotnt1V6N8J6NEniBFD9x")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower { pub node_pubkey: Pubkey, diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 5ffca0324d057d..c9d99ebffa9414 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -16,6 +16,7 @@ use { instruction::InstructionError, pubkey::Pubkey, rent::Rent, + short_vec, slot_hashes::SlotHash, sysvar::clock::Clock, transaction_context::{BorrowedAccount, InstructionContext, TransactionContext}, @@ -41,11 +42,12 @@ pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64; // Offset of VoteState::prior_voters, for determining initialization status without deserialization const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82; -#[frozen_abi(digest = "6LBwH5w3WyAWZhsM3KTG9QZP7nYBhcC61K33kHR6gMAD")] +#[frozen_abi(digest = "EYPXjH9Zn2vLzxyjHejkRkoTh4Tg4sirvb4FX9ye25qF")] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, AbiEnumVisitor, AbiExample)] pub enum VoteTransaction { Vote(Vote), VoteStateUpdate(VoteStateUpdate), + CompactVoteStateUpdate(CompactVoteStateUpdate), } impl VoteTransaction { @@ -53,6 +55,9 @@ impl VoteTransaction { match self { VoteTransaction::Vote(vote) => vote.slots.clone(), VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.slots(), + VoteTransaction::CompactVoteStateUpdate(compact_state_update) => { + compact_state_update.slots() + } } } @@ -62,6 +67,9 @@ impl VoteTransaction { VoteTransaction::VoteStateUpdate(vote_state_update) => { vote_state_update.lockouts[i].slot } + VoteTransaction::CompactVoteStateUpdate(compact_state_update) => { + compact_state_update.slots()[i] + } } } @@ -69,6 +77,11 @@ impl VoteTransaction { match 
self {
             VoteTransaction::Vote(vote) => vote.slots.len(),
             VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.lockouts.len(),
+            VoteTransaction::CompactVoteStateUpdate(compact_state_update) => {
+                1 + compact_state_update.lockouts_32.len()
+                    + compact_state_update.lockouts_16.len()
+                    + compact_state_update.lockouts_8.len()
+            }
         }
     }
@@ -78,6 +91,7 @@
             VoteTransaction::VoteStateUpdate(vote_state_update) => {
                 vote_state_update.lockouts.is_empty()
             }
+            VoteTransaction::CompactVoteStateUpdate(_) => false,
         }
     }
@@ -85,6 +99,9 @@
         match self {
             VoteTransaction::Vote(vote) => vote.hash,
             VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.hash,
+            VoteTransaction::CompactVoteStateUpdate(compact_state_update) => {
+                compact_state_update.hash
+            }
         }
     }
@@ -92,6 +109,9 @@
         match self {
             VoteTransaction::Vote(vote) => vote.timestamp,
             VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.timestamp,
+            VoteTransaction::CompactVoteStateUpdate(compact_state_update) => {
+                compact_state_update.timestamp
+            }
         }
     }
@@ -99,6 +119,9 @@
         match self {
             VoteTransaction::Vote(vote) => vote.timestamp = ts,
             VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.timestamp = ts,
+            VoteTransaction::CompactVoteStateUpdate(compact_state_update) => {
+                compact_state_update.timestamp = ts
+            }
         }
     }
@@ -108,6 +131,9 @@
             VoteTransaction::VoteStateUpdate(vote_state_update) => {
                 Some(vote_state_update.lockouts.back()?.slot)
             }
+            VoteTransaction::CompactVoteStateUpdate(compact_state_update) => {
+                compact_state_update.slots().last().copied()
+            }
         }
     }
@@ -128,6 +154,12 @@ impl From<VoteStateUpdate> for VoteTransaction {
     }
 }
+impl From<CompactVoteStateUpdate> for VoteTransaction {
+    fn from(compact_state_update: CompactVoteStateUpdate) -> Self {
+        VoteTransaction::CompactVoteStateUpdate(compact_state_update)
+    }
+}
+
 #[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")]
 #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)]
 pub struct Vote {
@@ -180,6 +212,28 @@ impl Lockout {
     }
 }
+#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)]
+pub struct CompactLockout<T> {
+    // Offset to the next vote, 0 if this is the last vote in the tower
+    pub offset: T,
+    // Confirmation count, guaranteed to be < 32
+    pub confirmation_count: u8,
+}
+
+impl<T> CompactLockout<T> {
+    pub fn new(offset: T) -> Self {
+        Self {
+            offset,
+            confirmation_count: 1,
+        }
+    }
+
+    // The number of slots for which this vote is locked
+    pub fn lockout(&self) -> u64 {
+        (INITIAL_LOCKOUT as u64).pow(self.confirmation_count.into())
+    }
+}
+
 #[frozen_abi(digest = "BctadFJjUKbvPJzr6TszbX6rBfQUNSRKpKKngkzgXgeY")]
 #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)]
 pub struct VoteStateUpdate {
@@ -226,6 +280,193 @@ impl VoteStateUpdate {
     }
 }
+/// Ignoring overhead, in a full `VoteStateUpdate` the lockouts take up
+/// 31 * (64 + 32) = 2976 bits.
+///
+/// In this schema we split the votes into 3 separate lockout structures
+/// and store offsets rather than slot numbers, allowing us to use smaller fields.
+///
+/// In a full `CompactVoteStateUpdate` the lockouts take up
+/// 64 + (32 + 8) * 16 + (16 + 8) * 8 + (8 + 8) * 6 = 992 bits
+/// allowing us to greatly reduce block size.
+#[frozen_abi(digest = "C8ZrdXqqF3VxgsoCxnqNaYJggV6rr9PC3rtmVudJFmqG")] +#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] +pub struct CompactVoteStateUpdate { + /// The proposed root, u64::MAX if there is no root + pub root: Slot, + /// The offset from the root (or 0 if no root) to the first vote + pub root_to_first_vote_offset: u64, + /// Part of the proposed tower, votes with confirmation_count > 15 + #[serde(with = "short_vec")] + pub lockouts_32: Vec>, + /// Part of the proposed tower, votes with 15 >= confirmation_count > 7 + #[serde(with = "short_vec")] + pub lockouts_16: Vec>, + /// Part of the proposed tower, votes with 7 >= confirmation_count + #[serde(with = "short_vec")] + pub lockouts_8: Vec>, + + /// Signature of the bank's state at the last slot + pub hash: Hash, + /// Processing timestamp of last slot + pub timestamp: Option, +} + +impl From> for CompactVoteStateUpdate { + fn from(recent_slots: Vec<(Slot, u32)>) -> Self { + let lockouts: VecDeque = recent_slots + .into_iter() + .map(|(slot, confirmation_count)| Lockout { + slot, + confirmation_count, + }) + .collect(); + Self::new(lockouts, None, Hash::default()) + } +} + +impl CompactVoteStateUpdate { + pub fn new(mut lockouts: VecDeque, root: Option, hash: Hash) -> Self { + if lockouts.is_empty() { + return Self::default(); + } + let mut cur_slot = root.unwrap_or(0u64); + let mut cur_confirmation_count = 0; + let offset = lockouts + .pop_front() + .map( + |Lockout { + slot, + confirmation_count, + }| { + assert!(confirmation_count < 32); + + let offset = slot - cur_slot; + cur_slot = slot; + cur_confirmation_count = confirmation_count; + offset + }, + ) + .expect("Tower should not be empty"); + let mut lockouts_32 = Vec::new(); + let mut lockouts_16 = Vec::new(); + let mut lockouts_8 = Vec::new(); + + for Lockout { + slot, + confirmation_count, + } in lockouts + { + assert!(confirmation_count < 32); + let offset = slot - cur_slot; + if cur_confirmation_count > 15 { + lockouts_32.push(CompactLockout { + offset: offset.try_into().unwrap(), + confirmation_count: cur_confirmation_count.try_into().unwrap(), + }); + } else if cur_confirmation_count > 7 { + lockouts_16.push(CompactLockout { + offset: offset.try_into().unwrap(), + confirmation_count: cur_confirmation_count.try_into().unwrap(), + }); + } else { + lockouts_8.push(CompactLockout { + offset: offset.try_into().unwrap(), + confirmation_count: cur_confirmation_count.try_into().unwrap(), + }) + } + + cur_slot = slot; + cur_confirmation_count = confirmation_count; + } + // Last vote should be at the top of tower, so we don't have to explicitly store it + assert!(cur_confirmation_count == 1); + Self { + root: root.unwrap_or(u64::MAX), + root_to_first_vote_offset: offset, + lockouts_32, + lockouts_16, + lockouts_8, + hash, + timestamp: None, + } + } + + pub fn root(&self) -> Option { + if self.root == u64::MAX { + None + } else { + Some(self.root) + } + } + + pub fn slots(&self) -> Vec { + std::iter::once(self.root_to_first_vote_offset) + .chain(self.lockouts_32.iter().map(|lockout| lockout.offset.into())) + .chain(self.lockouts_16.iter().map(|lockout| lockout.offset.into())) + .chain(self.lockouts_8.iter().map(|lockout| lockout.offset.into())) + .scan(self.root().unwrap_or(0), |prev_slot, offset| { + let slot = *prev_slot + offset; + *prev_slot = slot; + Some(slot) + }) + .collect() + } +} + +impl From for VoteStateUpdate { + fn from(vote_state_update: CompactVoteStateUpdate) -> Self { + let lockouts = vote_state_update + .lockouts_32 + 
.iter() + .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)) + .chain( + vote_state_update + .lockouts_16 + .iter() + .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)), + ) + .chain( + vote_state_update + .lockouts_8 + .iter() + .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)), + ) + .chain( + // To pick up the last element + std::iter::once((0, 1)), + ) + .scan( + vote_state_update.root().unwrap_or(0) + vote_state_update.root_to_first_vote_offset, + |slot, (offset, confirmation_count): (u64, u8)| { + let cur_slot = *slot; + *slot += offset; + Some(Lockout { + slot: cur_slot, + confirmation_count: confirmation_count.into(), + }) + }, + ) + .collect(); + Self { + lockouts, + root: vote_state_update.root(), + hash: vote_state_update.hash, + timestamp: vote_state_update.timestamp, + } + } +} + +impl From for CompactVoteStateUpdate { + fn from(vote_state_update: VoteStateUpdate) -> Self { + CompactVoteStateUpdate::new( + vote_state_update.lockouts, + vote_state_update.root, + vote_state_update.hash, + ) + } +} + #[derive(Default, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] pub struct VoteInit { pub node_pubkey: Pubkey, @@ -3557,4 +3798,72 @@ mod tests { Err(VoteError::SlotHashMismatch), ); } + + #[test] + fn test_compact_vote_state_update_parity() { + let mut vote_state_update = VoteStateUpdate::from(vec![(2, 4), (4, 3), (6, 2), (7, 1)]); + vote_state_update.hash = Hash::new_unique(); + vote_state_update.root = Some(1); + + let compact_vote_state_update = CompactVoteStateUpdate::from(vote_state_update.clone()); + + assert_eq!(vote_state_update.slots(), compact_vote_state_update.slots()); + assert_eq!(vote_state_update.hash, compact_vote_state_update.hash); + assert_eq!(vote_state_update.root, compact_vote_state_update.root()); + + let vote_state_update_new = VoteStateUpdate::from(compact_vote_state_update); + assert_eq!(vote_state_update, vote_state_update_new); + } + + #[test] + fn test_compact_vote_state_update_large_offsets() { + let vote_state_update = VoteStateUpdate::from(vec![ + (0, 31), + (1, 30), + (2, 29), + (3, 28), + (u64::pow(2, 28), 17), + (u64::pow(2, 28) + u64::pow(2, 16), 1), + ]); + let compact_vote_state_update = CompactVoteStateUpdate::from(vote_state_update.clone()); + + assert_eq!(vote_state_update.slots(), compact_vote_state_update.slots()); + + let vote_state_update_new = VoteStateUpdate::from(compact_vote_state_update); + assert_eq!(vote_state_update, vote_state_update_new); + } + + #[test] + fn test_compact_vote_state_update_border_conditions() { + let two_31 = u64::pow(2, 31); + let two_15 = u64::pow(2, 15); + let vote_state_update = VoteStateUpdate::from(vec![ + (0, 31), + (two_31, 16), + (two_31 + 1, 15), + (two_31 + two_15, 7), + (two_31 + two_15 + 1, 6), + (two_31 + two_15 + 1 + 64, 1), + ]); + let compact_vote_state_update = CompactVoteStateUpdate::from(vote_state_update.clone()); + + assert_eq!(vote_state_update.slots(), compact_vote_state_update.slots()); + + let vote_state_update_new = VoteStateUpdate::from(compact_vote_state_update); + assert_eq!(vote_state_update, vote_state_update_new); + } + + #[test] + fn test_compact_vote_state_update_large_root() { + let two_58 = u64::pow(2, 58); + let two_31 = u64::pow(2, 31); + let mut vote_state_update = VoteStateUpdate::from(vec![(two_58, 31), (two_58 + two_31, 1)]); + vote_state_update.root = Some(two_31); + let compact_vote_state_update = CompactVoteStateUpdate::from(vote_state_update.clone()); + + assert_eq!(vote_state_update.slots(), 
compact_vote_state_update.slots()); + + let vote_state_update_new = VoteStateUpdate::from(compact_vote_state_update); + assert_eq!(vote_state_update, vote_state_update_new); + } } From b4b1f95b6a408a1472646aafe386d2f8daa1f75d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Jul 2022 06:01:31 +0000 Subject: [PATCH 082/100] chore: bump reed-solomon-erasure from 5.0.2 to 5.0.3 (#26492) * chore: bump reed-solomon-erasure from 5.0.2 to 5.0.3 Bumps [reed-solomon-erasure](https://github.com/darrenldl/reed-solomon-erasure) from 5.0.2 to 5.0.3. - [Release notes](https://github.com/darrenldl/reed-solomon-erasure/releases) - [Changelog](https://github.com/rust-rse/reed-solomon-erasure/blob/master/CHANGELOG.md) - [Commits](https://github.com/darrenldl/reed-solomon-erasure/compare/v5.0.2...v5.0.3) --- updated-dependencies: - dependency-name: reed-solomon-erasure dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- ledger/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f71163af0a340..c979d57a04b4b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3753,9 +3753,9 @@ dependencies = [ [[package]] name = "reed-solomon-erasure" -version = "5.0.2" +version = "5.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521342afeca28aec72cefa21941c640549a8d61d9dd13710da63a1c975b78dba" +checksum = "c2fe31452b684b8b33f65f8730c8b8812c3f5a0bb8a096934717edb1ac488641" dependencies = [ "cc", "libc", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index e3b98d8c952e4a..3c3fb44436109d 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -30,7 +30,7 @@ prost = "0.10.4" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" -reed-solomon-erasure = { version = "5.0.2", features = ["simd-accel"] } +reed-solomon-erasure = { version = "5.0.3", features = ["simd-accel"] } serde = "1.0.138" serde_bytes = "0.11.6" sha2 = "0.10.2" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index a171967f1373c0..866a423d505b6e 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -3343,9 +3343,9 @@ dependencies = [ [[package]] name = "reed-solomon-erasure" -version = "5.0.2" +version = "5.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521342afeca28aec72cefa21941c640549a8d61d9dd13710da63a1c975b78dba" +checksum = "c2fe31452b684b8b33f65f8730c8b8812c3f5a0bb8a096934717edb1ac488641" dependencies = [ "cc", "libc", From 87a14043721ff2175ed3efe6d9a9c8760e87c1ca Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Fri, 8 Jul 2022 16:34:38 +0200 Subject: [PATCH 083/100] refactor and extract send batch logic (#26067) * move send batch txs logic to new file * refactor send batch --- bench-tps/src/bench.rs | 274 +----------------------------- bench-tps/src/lib.rs | 1 + bench-tps/src/main.rs | 3 +- bench-tps/src/send_batch.rs | 320 ++++++++++++++++++++++++++++++++++++ 4 files changed, 329 insertions(+), 269 deletions(-) create mode 100644 bench-tps/src/send_batch.rs diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 0d004410c53fb2..584fb045f32993 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -3,31 +3,29 @@ use { 
bench_tps_client::*, cli::Config, perf_utils::{sample_txs, SampleStats}, + send_batch::*, }, log::*, rayon::prelude::*, - solana_core::gen_keys::GenKeys, - solana_measure::measure::Measure, solana_metrics::{self, datapoint_info}, solana_sdk::{ clock::{DEFAULT_MS_PER_SLOT, DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE}, - commitment_config::CommitmentConfig, hash::Hash, instruction::{AccountMeta, Instruction}, message::Message, native_token::Sol, pubkey::Pubkey, signature::{Keypair, Signer}, - system_instruction, system_transaction, + system_transaction, timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp}, transaction::Transaction, }, std::{ - collections::{HashSet, VecDeque}, + collections::VecDeque, process::exit, sync::{ atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -37,22 +35,8 @@ use { // The point at which transactions become "too old", in seconds. const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) as u64; -pub const MAX_SPENDS_PER_TX: u64 = 4; - pub type SharedTransactions = Arc>>>; -fn get_latest_blockhash(client: &T) -> Hash { - loop { - match client.get_latest_blockhash_with_commitment(CommitmentConfig::processed()) { - Ok((blockhash, _)) => return blockhash, - Err(err) => { - info!("Couldn't get last blockhash: {:?}", err); - sleep(Duration::from_secs(1)); - } - }; - } -} - fn wait_for_target_slots_per_epoch(target_slots_per_epoch: u64, client: &Arc) where T: 'static + BenchTpsClient + Send + Sync, @@ -548,236 +532,6 @@ fn do_tx_transfers( } } -fn verify_funding_transfer( - client: &Arc, - tx: &Transaction, - amount: u64, -) -> bool { - for a in &tx.message().account_keys[1..] { - match client.get_balance_with_commitment(a, CommitmentConfig::processed()) { - Ok(balance) => return balance >= amount, - Err(err) => error!("failed to get balance {:?}", err), - } - } - false -} - -trait FundingTransactions<'a> { - fn fund( - &mut self, - client: &Arc, - to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)], - to_lamports: u64, - ); - fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]); - fn sign(&mut self, blockhash: Hash); - fn send(&self, client: &Arc); - fn verify( - &mut self, - client: &Arc, - to_lamports: u64, - ); -} - -impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { - fn fund( - &mut self, - client: &Arc, - to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)], - to_lamports: u64, - ) { - self.make(to_fund); - - let mut tries = 0; - while !self.is_empty() { - info!( - "{} {} each to {} accounts in {} txs", - if tries == 0 { - "transferring" - } else { - " retrying" - }, - to_lamports, - self.len() * MAX_SPENDS_PER_TX as usize, - self.len(), - ); - - let blockhash = get_latest_blockhash(client.as_ref()); - - // re-sign retained to_fund_txes with updated blockhash - self.sign(blockhash); - self.send(client); - - // Sleep a few slots to allow transactions to process - sleep(Duration::from_secs(1)); - - self.verify(client, to_lamports); - - // retry anything that seems to have dropped through cracks - // again since these txs are all or nothing, they're fine to - // retry - tries += 1; - } - info!("transferred"); - } - - fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]) { - let mut make_txs = Measure::start("make_txs"); - let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund - .par_iter() - .map(|(k, t)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), t); - let 
message = Message::new(&instructions, Some(&k.pubkey())); - (*k, Transaction::new_unsigned(message)) - }) - .collect(); - make_txs.stop(); - debug!( - "make {} unsigned txs: {}us", - to_fund_txs.len(), - make_txs.as_us() - ); - self.extend(to_fund_txs); - } - - fn sign(&mut self, blockhash: Hash) { - let mut sign_txs = Measure::start("sign_txs"); - self.par_iter_mut().for_each(|(k, tx)| { - tx.sign(&[*k], blockhash); - }); - sign_txs.stop(); - debug!("sign {} txs: {}us", self.len(), sign_txs.as_us()); - } - - fn send(&self, client: &Arc) { - let mut send_txs = Measure::start("send_and_clone_txs"); - let batch: Vec<_> = self.iter().map(|(_keypair, tx)| tx.clone()).collect(); - client.send_batch(batch).expect("transfer"); - send_txs.stop(); - debug!("send {} {}", self.len(), send_txs); - } - - fn verify( - &mut self, - client: &Arc, - to_lamports: u64, - ) { - let starting_txs = self.len(); - let verified_txs = Arc::new(AtomicUsize::new(0)); - let too_many_failures = Arc::new(AtomicBool::new(false)); - let loops = if starting_txs < 1000 { 3 } else { 1 }; - // Only loop multiple times for small (quick) transaction batches - let time = Arc::new(Mutex::new(Instant::now())); - for _ in 0..loops { - let time = time.clone(); - let failed_verify = Arc::new(AtomicUsize::new(0)); - let client = client.clone(); - let verified_txs = &verified_txs; - let failed_verify = &failed_verify; - let too_many_failures = &too_many_failures; - let verified_set: HashSet = self - .par_iter() - .filter_map(move |(k, tx)| { - if too_many_failures.load(Ordering::Relaxed) { - return None; - } - - let verified = if verify_funding_transfer(&client, tx, to_lamports) { - verified_txs.fetch_add(1, Ordering::Relaxed); - Some(k.pubkey()) - } else { - failed_verify.fetch_add(1, Ordering::Relaxed); - None - }; - - let verified_txs = verified_txs.load(Ordering::Relaxed); - let failed_verify = failed_verify.load(Ordering::Relaxed); - let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify); - if failed_verify > 100 && failed_verify > verified_txs { - too_many_failures.store(true, Ordering::Relaxed); - warn!( - "Too many failed transfers... {} remaining, {} verified, {} failures", - remaining_count, verified_txs, failed_verify - ); - } - if remaining_count > 0 { - let mut time_l = time.lock().unwrap(); - if time_l.elapsed().as_secs() > 2 { - info!( - "Verifying transfers... {} remaining, {} verified, {} failures", - remaining_count, verified_txs, failed_verify - ); - *time_l = Instant::now(); - } - } - - verified - }) - .collect(); - - self.retain(|(k, _)| !verified_set.contains(&k.pubkey())); - if self.is_empty() { - break; - } - info!("Looping verifications"); - - let verified_txs = verified_txs.load(Ordering::Relaxed); - let failed_verify = failed_verify.load(Ordering::Relaxed); - let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify); - info!( - "Verifying transfers... {} remaining, {} verified, {} failures", - remaining_count, verified_txs, failed_verify - ); - sleep(Duration::from_millis(100)); - } - } -} - -/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX -/// on every iteration. 
This allows us to replay the transfers because the source is either empty, -/// or full -pub fn fund_keys( - client: Arc, - source: &Keypair, - dests: &[Keypair], - total: u64, - max_fee: u64, - lamports_per_account: u64, -) { - let mut funded: Vec<&Keypair> = vec![source]; - let mut funded_funds = total; - let mut not_funded: Vec<&Keypair> = dests.iter().collect(); - while !not_funded.is_empty() { - // Build to fund list and prepare funding sources for next iteration - let mut new_funded: Vec<&Keypair> = vec![]; - let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![]; - let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX; - for f in funded { - let start = not_funded.len() - MAX_SPENDS_PER_TX as usize; - let dests: Vec<_> = not_funded.drain(start..).collect(); - let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect(); - to_fund.push((f, spends)); - new_funded.extend(dests.into_iter()); - } - - // try to transfer a "few" at a time with recent blockhash - // assume 4MB network buffers, and 512 byte packets - const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512; - - to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| { - Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund( - &client, - chunk, - to_lamports, - ); - }); - - info!("funded: {} left: {}", new_funded.len(), not_funded.len()); - funded = new_funded; - funded_funds = to_lamports; - } -} - fn compute_and_report_stats( maxes: &Arc>>, sample_period: u64, @@ -845,22 +599,6 @@ fn compute_and_report_stats( ); } -pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec, u64) { - let mut seed = [0u8; 32]; - seed.copy_from_slice(&seed_keypair.to_bytes()[..32]); - let mut rnd = GenKeys::new(seed); - - let mut total_keys = 0; - let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded - let mut delta = 1; - while total_keys < count { - extra += delta; - delta *= MAX_SPENDS_PER_TX; - total_keys += delta; - } - (rnd.gen_n_keypairs(total_keys), extra) -} - pub fn generate_and_fund_keypairs( client: Arc, funding_key: &Keypair, @@ -961,8 +699,8 @@ mod tests { super::*, solana_runtime::{bank::Bank, bank_client::BankClient}, solana_sdk::{ - fee_calculator::FeeRateGovernor, genesis_config::create_genesis_config, - native_token::sol_to_lamports, + commitment_config::CommitmentConfig, fee_calculator::FeeRateGovernor, + genesis_config::create_genesis_config, native_token::sol_to_lamports, }, }; diff --git a/bench-tps/src/lib.rs b/bench-tps/src/lib.rs index 06d5eaa1afa621..5226b4e56f07d5 100644 --- a/bench-tps/src/lib.rs +++ b/bench-tps/src/lib.rs @@ -4,3 +4,4 @@ pub mod bench_tps_client; pub mod cli; pub mod keypairs; mod perf_utils; +pub mod send_batch; diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 5b96ffb029375f..d7c77d03221111 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -3,9 +3,10 @@ use { clap::value_t, log::*, solana_bench_tps::{ - bench::{do_bench_tps, generate_keypairs}, + bench::do_bench_tps, cli::{self, ExternalClientType}, keypairs::get_keypairs, + send_batch::generate_keypairs, }, solana_client::{ connection_cache::ConnectionCache, diff --git a/bench-tps/src/send_batch.rs b/bench-tps/src/send_batch.rs new file mode 100644 index 00000000000000..250e5e2fe9ce97 --- /dev/null +++ b/bench-tps/src/send_batch.rs @@ -0,0 +1,320 @@ +use { + crate::bench_tps_client::*, + log::*, + rayon::prelude::*, + solana_core::gen_keys::GenKeys, + solana_measure::measure::Measure, + solana_sdk::{ + 
commitment_config::CommitmentConfig, + hash::Hash, + message::Message, + pubkey::Pubkey, + signature::{Keypair, Signer}, + signer::signers::Signers, + system_instruction, + transaction::Transaction, + }, + std::{ + collections::HashSet, + marker::Send, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, Mutex, + }, + thread::sleep, + time::{Duration, Instant}, + }, +}; + +pub fn get_latest_blockhash(client: &T) -> Hash { + loop { + match client.get_latest_blockhash_with_commitment(CommitmentConfig::processed()) { + Ok((blockhash, _)) => return blockhash, + Err(err) => { + info!("Couldn't get last blockhash: {:?}", err); + sleep(Duration::from_secs(1)); + } + }; + } +} + +pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec, u64) { + let mut seed = [0u8; 32]; + seed.copy_from_slice(&seed_keypair.to_bytes()[..32]); + let mut rnd = GenKeys::new(seed); + + let mut total_keys = 0; + let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded + let mut delta = 1; + while total_keys < count { + extra += delta; + delta *= MAX_SPENDS_PER_TX; + total_keys += delta; + } + (rnd.gen_n_keypairs(total_keys), extra) +} + +/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX +/// on every iteration. This allows us to replay the transfers because the source is either empty, +/// or full +pub fn fund_keys( + client: Arc, + source: &Keypair, + dests: &[Keypair], + total: u64, + max_fee: u64, + lamports_per_account: u64, +) { + let mut funded: Vec<&Keypair> = vec![source]; + let mut funded_funds = total; + let mut not_funded: Vec<&Keypair> = dests.iter().collect(); + while !not_funded.is_empty() { + // Build to fund list and prepare funding sources for next iteration + let mut new_funded: Vec<&Keypair> = vec![]; + let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![]; + let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX; + for f in funded { + let start = not_funded.len() - MAX_SPENDS_PER_TX as usize; + let dests: Vec<_> = not_funded.drain(start..).collect(); + let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect(); + to_fund.push((f, spends)); + new_funded.extend(dests.into_iter()); + } + + to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| { + Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund( + &client, + chunk, + to_lamports, + ); + }); + + info!("funded: {} left: {}", new_funded.len(), not_funded.len()); + funded = new_funded; + funded_funds = to_lamports; + } +} + +const MAX_SPENDS_PER_TX: u64 = 4; + +// Size of the chunk of transactions +// try to transfer a "few" at a time with recent blockhash +// assume 4MB network buffers, and 512 byte packets +const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512; + +fn verify_funding_transfer( + client: &Arc, + tx: &Transaction, + amount: u64, +) -> bool { + for a in &tx.message().account_keys[1..] 
{ + match client.get_balance_with_commitment(a, CommitmentConfig::processed()) { + Ok(balance) => return balance >= amount, + Err(err) => error!("failed to get balance {:?}", err), + } + } + false +} + +trait SendBatchTransactions<'a, T: Sliceable + Send + Sync> { + fn sign(&mut self, blockhash: Hash); + fn send(&self, client: &Arc); + fn verify( + &mut self, + client: &Arc, + to_lamports: u64, + ); +} + +/// This trait allows reuse SendBatchTransactions to send +/// transactions which require more than one signature +trait Sliceable { + type Slice; + fn as_slice(&self) -> Self::Slice; + // Pubkey used as unique id to identify verified transactions + fn get_pubkey(&self) -> Pubkey; +} + +impl<'a, T: Sliceable + Send + Sync> SendBatchTransactions<'a, T> for Vec<(T, Transaction)> +where + ::Slice: Signers, +{ + fn sign(&mut self, blockhash: Hash) { + let mut sign_txs = Measure::start("sign_txs"); + self.par_iter_mut().for_each(|(k, tx)| { + tx.sign(&k.as_slice(), blockhash); + }); + sign_txs.stop(); + debug!("sign {} txs: {}us", self.len(), sign_txs.as_us()); + } + + fn send(&self, client: &Arc) { + let mut send_txs = Measure::start("send_and_clone_txs"); + let batch: Vec<_> = self.iter().map(|(_keypair, tx)| tx.clone()).collect(); + client.send_batch(batch).expect("transfer"); + send_txs.stop(); + debug!("send {} {}", self.len(), send_txs); + } + + fn verify( + &mut self, + client: &Arc, + to_lamports: u64, + ) { + let starting_txs = self.len(); + let verified_txs = Arc::new(AtomicUsize::new(0)); + let too_many_failures = Arc::new(AtomicBool::new(false)); + let loops = if starting_txs < 1000 { 3 } else { 1 }; + // Only loop multiple times for small (quick) transaction batches + let time = Arc::new(Mutex::new(Instant::now())); + for _ in 0..loops { + let time = time.clone(); + let failed_verify = Arc::new(AtomicUsize::new(0)); + let client = client.clone(); + let verified_txs = &verified_txs; + let failed_verify = &failed_verify; + let too_many_failures = &too_many_failures; + let verified_set: HashSet = self + .par_iter() + .filter_map(move |(k, tx)| { + let pubkey = k.get_pubkey(); + if too_many_failures.load(Ordering::Relaxed) { + return None; + } + + let verified = if verify_funding_transfer(&client, tx, to_lamports) { + verified_txs.fetch_add(1, Ordering::Relaxed); + Some(pubkey) + } else { + failed_verify.fetch_add(1, Ordering::Relaxed); + None + }; + + let verified_txs = verified_txs.load(Ordering::Relaxed); + let failed_verify = failed_verify.load(Ordering::Relaxed); + let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify); + if failed_verify > 100 && failed_verify > verified_txs { + too_many_failures.store(true, Ordering::Relaxed); + warn!( + "Too many failed transfers... {} remaining, {} verified, {} failures", + remaining_count, verified_txs, failed_verify + ); + } + if remaining_count > 0 { + let mut time_l = time.lock().unwrap(); + if time_l.elapsed().as_secs() > 2 { + info!( + "Verifying transfers... {} remaining, {} verified, {} failures", + remaining_count, verified_txs, failed_verify + ); + *time_l = Instant::now(); + } + } + + verified + }) + .collect(); + + self.retain(|(k, _)| !verified_set.contains(&k.get_pubkey())); + if self.is_empty() { + break; + } + info!("Looping verifications"); + + let verified_txs = verified_txs.load(Ordering::Relaxed); + let failed_verify = failed_verify.load(Ordering::Relaxed); + let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify); + info!( + "Verifying transfers... 
{} remaining, {} verified, {} failures", + remaining_count, verified_txs, failed_verify + ); + sleep(Duration::from_millis(100)); + } + } +} + +type FundingSigners<'a> = &'a Keypair; +type FundingChunk<'a> = [(FundingSigners<'a>, Vec<(Pubkey, u64)>)]; +type FundingContainer<'a> = Vec<(FundingSigners<'a>, Transaction)>; + +impl<'a> Sliceable for FundingSigners<'a> { + type Slice = [FundingSigners<'a>; 1]; + fn as_slice(&self) -> Self::Slice { + [self] + } + fn get_pubkey(&self) -> Pubkey { + self.pubkey() + } +} + +trait FundingTransactions<'a>: SendBatchTransactions<'a, FundingSigners<'a>> { + fn fund( + &mut self, + client: &Arc, + to_fund: &FundingChunk<'a>, + to_lamports: u64, + ); + fn make(&mut self, to_fund: &FundingChunk<'a>); +} + +impl<'a> FundingTransactions<'a> for FundingContainer<'a> { + fn fund( + &mut self, + client: &Arc, + to_fund: &FundingChunk<'a>, + to_lamports: u64, + ) { + self.make(to_fund); + + let mut tries = 0; + while !self.is_empty() { + info!( + "{} {} each to {} accounts in {} txs", + if tries == 0 { + "transferring" + } else { + " retrying" + }, + to_lamports, + self.len() * MAX_SPENDS_PER_TX as usize, + self.len(), + ); + + let blockhash = get_latest_blockhash(client.as_ref()); + + // re-sign retained to_fund_txes with updated blockhash + self.sign(blockhash); + self.send(client); + + // Sleep a few slots to allow transactions to process + sleep(Duration::from_secs(1)); + + self.verify(client, to_lamports); + + // retry anything that seems to have dropped through cracks + // again since these txs are all or nothing, they're fine to + // retry + tries += 1; + } + info!("transferred"); + } + + fn make(&mut self, to_fund: &FundingChunk<'a>) { + let mut make_txs = Measure::start("make_txs"); + let to_fund_txs: FundingContainer<'a> = to_fund + .par_iter() + .map(|(k, t)| { + let instructions = system_instruction::transfer_many(&k.pubkey(), t); + let message = Message::new(&instructions, Some(&k.pubkey())); + (*k, Transaction::new_unsigned(message)) + }) + .collect(); + make_txs.stop(); + debug!( + "make {} unsigned txs: {}us", + to_fund_txs.len(), + make_txs.as_us() + ); + self.extend(to_fund_txs); + } +} From d1370f2c7d85884ec5872a2dacb7db9ccf16898a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Jul 2022 10:04:43 -0600 Subject: [PATCH 084/100] chore: bump bytemuck from 1.9.1 to 1.10.0 (#26495) * chore: bump bytemuck from 1.9.1 to 1.10.0 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.9.1 to 1.10.0. - [Release notes](https://github.com/Lokathor/bytemuck/releases) - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.9.1...v1.10.0) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- programs/address-lookup-table/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 4 ++-- programs/zk-token-proof/Cargo.toml | 2 +- runtime/Cargo.toml | 2 +- sdk/Cargo.toml | 2 +- zk-token-sdk/Cargo.toml | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c979d57a04b4b7..475f7104fb9f18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -554,9 +554,9 @@ checksum = "72feb31ffc86498dacdbd0fcebb56138e7177a8cc5cea4516031d15ae85a742e" [[package]] name = "bytemuck" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc" +checksum = "c53dfa917ec274df8ed3c572698f381a24eef2efba9492d797301b72b6db408a" dependencies = [ "bytemuck_derive", ] diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index 42deafdbb56bf6..88398eb8678285 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -11,7 +11,7 @@ edition = "2021" [dependencies] bincode = "1.3.3" -bytemuck = "1.9.1" +bytemuck = "1.10.0" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 866a423d505b6e..5119796b89960e 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -495,9 +495,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc" +checksum = "c53dfa917ec274df8ed3c572698f381a24eef2efba9492d797301b72b6db408a" dependencies = [ "bytemuck_derive", ] diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml index 959e70a81e3923..ba546709b2dc9c 100644 --- a/programs/zk-token-proof/Cargo.toml +++ b/programs/zk-token-proof/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" edition = "2021" [dependencies] -bytemuck = { version = "1.9.1", features = ["derive"] } +bytemuck = { version = "1.10.0", features = ["derive"] } getrandom = { version = "0.1", features = ["dummy"] } num-derive = "0.3" num-traits = "0.2" diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index d193c052502ddd..178dea93f66c86 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -14,7 +14,7 @@ arrayref = "0.3.6" bincode = "1.3.3" blake3 = "1.3.1" bv = { version = "0.11.1", features = ["serde"] } -bytemuck = "1.9.1" +bytemuck = "1.10.0" byteorder = "1.4.3" bzip2 = "0.4.3" crossbeam-channel = "0.5" diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 48aa985a802a97..5e7b813d2b54f6 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -43,7 +43,7 @@ bincode = "1.3.3" bitflags = "1.3.1" borsh = "0.9.3" bs58 = "0.4.0" -bytemuck = { version = "1.9.1", features = ["derive"] } +bytemuck = { version = "1.10.0", features = ["derive"] } byteorder = { version = "1.4.3", optional = true } chrono = { default-features = false, features = ["alloc"], version = "0.4", optional = true } curve25519-dalek = { version = "3.2.1", optional = true } diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index bc8e220b94ae1c..1511f3092aa057 100644 --- a/zk-token-sdk/Cargo.toml +++ 
b/zk-token-sdk/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] base64 = "0.13" -bytemuck = { version = "1.9.1", features = ["derive"] } +bytemuck = { version = "1.10.0", features = ["derive"] } num-derive = "0.3" num-traits = "0.2" solana-program = { path = "../sdk/program", version = "=1.11.3" } From 78b8a8a6dbbd40af96bebcd9e4a0baffa0842d64 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 8 Jul 2022 13:26:54 -0500 Subject: [PATCH 085/100] Remove `get_total_resize_remaining()` and `total_resize_limit` from `TransactionContext` (#26504) --- cli/src/program.rs | 2 +- program-runtime/src/invoke_context.rs | 41 ++++---------------- programs/bpf_loader/benches/serialization.rs | 2 +- programs/bpf_loader/src/serialization.rs | 2 +- programs/bpf_loader/src/syscalls.rs | 2 +- programs/stake/src/stake_state.rs | 9 ++--- programs/vote/benches/process_vote.rs | 3 +- rbpf-cli/src/main.rs | 3 +- runtime/src/bank.rs | 3 -- runtime/src/message_processor.rs | 6 +-- runtime/src/nonce_keyed_account.rs | 2 +- runtime/src/system_instruction_processor.rs | 2 +- sdk/src/transaction_context.rs | 15 ------- 13 files changed, 22 insertions(+), 70 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index b3dc020f1268bf..b2c2135d2a998a 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -2060,7 +2060,7 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box R>( preparation.transaction_accounts, ComputeBudget::default().max_invoke_depth.saturating_add(1), 1, - MAX_ACCOUNTS_DATA_LEN, ); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context @@ -1183,7 +1182,6 @@ pub fn mock_process_instruction( preparation.transaction_accounts, ComputeBudget::default().max_invoke_depth.saturating_add(1), 1, - MAX_ACCOUNTS_DATA_LEN, ); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); if let Some(sysvar_cache) = sysvar_cache_override { @@ -1357,7 +1355,7 @@ mod tests { }); } let mut transaction_context = - TransactionContext::new(accounts, ComputeBudget::default().max_invoke_depth, 1, 0); + TransactionContext::new(accounts, ComputeBudget::default().max_invoke_depth, 1); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); // Check call depth increases and has a limit @@ -1465,7 +1463,7 @@ mod tests { let accounts = vec![(solana_sdk::pubkey::new_rand(), AccountSharedData::default())]; let instruction_accounts = vec![]; let program_indices = vec![0]; - let mut transaction_context = TransactionContext::new(accounts, 1, 1, 0); + let mut transaction_context = TransactionContext::new(accounts, 1, 1); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push(&instruction_accounts, &program_indices, &[]) @@ -1510,7 +1508,7 @@ mod tests { is_writable: instruction_account_index < 2, }) .collect::>(); - let mut transaction_context = TransactionContext::new(accounts, 2, 8, 0); + let mut transaction_context = TransactionContext::new(accounts, 2, 8); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, builtin_programs); @@ -1642,7 +1640,7 @@ mod tests { fn test_invoke_context_compute_budget() { let accounts = vec![(solana_sdk::pubkey::new_rand(), AccountSharedData::default())]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3, 0); + let mut transaction_context = TransactionContext::new(accounts, 1, 3); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context.compute_budget = 
ComputeBudget::new(compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64); @@ -1677,8 +1675,7 @@ mod tests { process_instruction: mock_process_instruction, }]; - let mut transaction_context = - TransactionContext::new(accounts, 1, 3, user_account_data_len * 2); + let mut transaction_context = TransactionContext::new(accounts, 1, 3); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &builtin_programs); @@ -1688,13 +1685,7 @@ mod tests { invoke_context .accounts_data_meter .set_maximum(user_account_data_len as u64 * 3); - let remaining_account_data_len = invoke_context - .transaction_context - .get_total_resize_remaining(); - assert_eq!( - remaining_account_data_len, - invoke_context.accounts_data_meter.remaining(), - ); + let remaining_account_data_len = invoke_context.accounts_data_meter.remaining(); let instruction_accounts = [ InstructionAccount { @@ -1729,12 +1720,6 @@ mod tests { assert!(result.is_ok()); assert_eq!(invoke_context.accounts_data_meter.remaining(), 0); - assert_eq!( - invoke_context - .transaction_context - .get_total_resize_remaining(), - 0 - ); } // Test 2: Resize the account to *the same size*, so not consuming any additional size; this must succeed @@ -1753,12 +1738,6 @@ mod tests { assert!(result.is_ok()); assert_eq!(invoke_context.accounts_data_meter.remaining(), 0); - assert_eq!( - invoke_context - .transaction_context - .get_total_resize_remaining(), - 0 - ); } // Test 3: Resize the account to exceed the budget; this must fail @@ -1781,12 +1760,6 @@ mod tests { Err(solana_sdk::instruction::InstructionError::MaxAccountsDataSizeExceeded) )); assert_eq!(invoke_context.accounts_data_meter.remaining(), 0); - assert_eq!( - invoke_context - .transaction_context - .get_total_resize_remaining(), - 0 - ); } } } diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index c119d4266c8d51..2be8073bf06a19 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -101,7 +101,7 @@ fn create_inputs() -> TransactionContext { }, ) .collect::>(); - let mut transaction_context = TransactionContext::new(transaction_accounts, 1, 1, 0); + let mut transaction_context = TransactionContext::new(transaction_accounts, 1, 1); let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; transaction_context .push(&[0], &instruction_accounts, &instruction_data, true) diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 8b32e8c63f26ae..6f3f0add6c2285 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -454,7 +454,7 @@ mod tests { &program_indices, ); let mut transaction_context = - TransactionContext::new(preparation.transaction_accounts, 1, 1, 0); + TransactionContext::new(preparation.transaction_accounts, 1, 1); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push( diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index d67c5cc3ed8356..38e4afd80a9b97 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -3373,7 +3373,7 @@ mod tests { ), ($program_key, AccountSharedData::new(0, 0, &$loader_key)), ]; - let mut $transaction_context = TransactionContext::new(transaction_accounts, 1, 1, 0); + let mut $transaction_context = TransactionContext::new(transaction_accounts, 1, 1); let mut $invoke_context = InvokeContext::new_mock(&mut 
$transaction_context, &[]); $invoke_context.push(&[], &[0, 1], &[]).unwrap(); }; diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index f45916935d6c04..539dc83152417f 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -2785,7 +2785,6 @@ mod tests { )], 1, 1, - 0, ) } @@ -2895,7 +2894,7 @@ mod tests { #[test] fn test_things_can_merge() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let good_stake = Stake { credits_observed: 4242, @@ -2994,7 +2993,7 @@ mod tests { #[test] fn test_metas_can_merge() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); // Identical Metas can merge assert!(MergeKind::metas_can_merge( @@ -3141,7 +3140,7 @@ mod tests { #[test] fn test_merge_kind_get_if_mergeable() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let authority_pubkey = Pubkey::new_unique(); let initial_lamports = 4242424242; @@ -3380,7 +3379,7 @@ mod tests { #[test] fn test_merge_kind_merge() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let clock = Clock::default(); let lamports = 424242; diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index 43c4a019f9ea79..f939c6c26f0abb 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -107,8 +107,7 @@ fn bench_process_vote_instruction( instruction_data: Vec, ) { bencher.iter(|| { - let mut transaction_context = - TransactionContext::new(transaction_accounts.clone(), 1, 1, 0); + let mut transaction_context = TransactionContext::new(transaction_accounts.clone(), 1, 1); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push(&instruction_accounts, &[0], &instruction_data) diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 0dc51d6deb891c..13601e549c0cd2 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -216,8 +216,7 @@ native machine code before execting it in the virtual machine.", let program_indices = [0, 1]; let preparation = prepare_mock_invoke_context(transaction_accounts, instruction_accounts, &program_indices); - let mut transaction_context = - TransactionContext::new(preparation.transaction_accounts, 1, 1, 0); + let mut transaction_context = TransactionContext::new(preparation.transaction_accounts, 1, 1); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); invoke_context .push( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 46a583a83ee33e..d0d0f2479e1b53 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4283,8 +4283,6 @@ impl Bank { transaction_accounts, compute_budget.max_invoke_depth.saturating_add(1), tx.message().instructions().len(), - self.accounts_data_size_limit() - .saturating_sub(prev_accounts_data_len), ); let pre_account_state_info = @@ 
-18567,7 +18565,6 @@ pub(crate) mod tests { loaded_txs[0].0.as_ref().unwrap().accounts.clone(), compute_budget.max_invoke_depth.saturating_add(1), number_of_instructions_at_transaction_level, - 0, ); assert_eq!( diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index 359ad628eda131..d3e760de28bf11 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -282,7 +282,7 @@ mod tests { create_loadable_account_for_test("mock_system_program"), ), ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3, 0); + let mut transaction_context = TransactionContext::new(accounts, 1, 3); let program_indices = vec![vec![2]]; let executors = Rc::new(RefCell::new(Executors::default())); let account_keys = transaction_context.get_keys_of_accounts().to_vec(); @@ -502,7 +502,7 @@ mod tests { create_loadable_account_for_test("mock_system_program"), ), ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 3, 0); + let mut transaction_context = TransactionContext::new(accounts, 1, 3); let program_indices = vec![vec![2]]; let executors = Rc::new(RefCell::new(Executors::default())); let account_metas = vec![ @@ -661,7 +661,7 @@ mod tests { (secp256k1_program::id(), secp256k1_account), (mock_program_id, mock_program_account), ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 2, 0); + let mut transaction_context = TransactionContext::new(accounts, 1, 2); let message = SanitizedMessage::Legacy(Message::new( &[ diff --git a/runtime/src/nonce_keyed_account.rs b/runtime/src/nonce_keyed_account.rs index 59ce1a808f7ff8..a129b450536194 100644 --- a/runtime/src/nonce_keyed_account.rs +++ b/runtime/src/nonce_keyed_account.rs @@ -328,7 +328,7 @@ mod test { is_writable: true, }, ]; - let mut transaction_context = TransactionContext::new(accounts, 1, 2, 0); + let mut transaction_context = TransactionContext::new(accounts, 1, 2); let mut $invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); }; } diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index a4082c1ccbe349..2eb7621cf59164 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -786,7 +786,7 @@ mod tests { #[test] fn test_address_create_with_seed_mismatch() { - let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1, 0); + let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); let invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let from = Pubkey::new_unique(); let seed = "dull boy"; diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index d073c918988396..51bc4c72676164 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -48,7 +48,6 @@ pub struct TransactionContext { number_of_instructions_at_transaction_level: usize, instruction_trace: InstructionTrace, return_data: TransactionReturnData, - total_resize_limit: u64, total_resize_delta: RefCell, } @@ -58,7 +57,6 @@ impl TransactionContext { transaction_accounts: Vec, instruction_context_capacity: usize, number_of_instructions_at_transaction_level: usize, - total_resize_limit: u64, ) -> Self { let (account_keys, accounts): (Vec, Vec>) = transaction_accounts @@ -73,7 +71,6 @@ impl TransactionContext { number_of_instructions_at_transaction_level, instruction_trace: Vec::with_capacity(number_of_instructions_at_transaction_level), return_data: 
TransactionReturnData::default(), - total_resize_limit, total_resize_delta: RefCell::new(0), } } @@ -254,18 +251,6 @@ impl TransactionContext { pub fn get_instruction_trace(&self) -> &InstructionTrace { &self.instruction_trace } - - /// Returns (in bytes) how much data can still be allocated - pub fn get_total_resize_remaining(&self) -> u64 { - let total_resize_delta = *self.total_resize_delta.borrow(); - if total_resize_delta >= 0 { - self.total_resize_limit - .saturating_sub(total_resize_delta as u64) - } else { - self.total_resize_limit - .saturating_add(total_resize_delta.saturating_neg() as u64) - } - } } /// Return data at the end of a transaction From cdbcf614cac95261f25f39a86d718d2cb3eef33c Mon Sep 17 00:00:00 2001 From: Jack May Date: Fri, 8 Jul 2022 11:50:24 -0700 Subject: [PATCH 086/100] cleanup feature: executables_incur_cpi_data_cost (#26500) --- programs/bpf_loader/src/syscalls.rs | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 38e4afd80a9b97..9485904f547b96 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -23,9 +23,9 @@ use { entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ blake3_syscall_enabled, check_physical_overlapping, check_slice_translation_size, - curve25519_syscall_enabled, disable_fees_sysvar, executables_incur_cpi_data_cost, - libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, - prevent_calling_precompiles_as_programs, quick_bail_on_panic, syscall_saturated_math, + curve25519_syscall_enabled, disable_fees_sysvar, libsecp256k1_0_5_upgrade_enabled, + limit_secp256k1_recovery_id, prevent_calling_precompiles_as_programs, + quick_bail_on_panic, syscall_saturated_math, }, hash::{Hasher, HASH_BYTES}, instruction::{ @@ -2652,15 +2652,11 @@ where .map_err(SyscallError::InstructionError)?; if callee_account.is_executable() { // Use the known account - if invoke_context - .feature_set - .is_active(&executables_incur_cpi_data_cost::id()) - { - invoke_context.get_compute_meter().consume( - (callee_account.get_data().len() as u64) - .saturating_div(invoke_context.get_compute_budget().cpi_bytes_per_unit), - )?; - } + invoke_context.get_compute_meter().consume( + (callee_account.get_data().len() as u64) + .saturating_div(invoke_context.get_compute_budget().cpi_bytes_per_unit), + )?; + accounts.push((instruction_account.index_in_caller, None)); } else if let Some(caller_account_index) = account_info_keys.iter().position(|key| *key == account_key) From 995756a5503adfe3da80de286bf81953d7a50cea Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 8 Jul 2022 13:51:16 -0500 Subject: [PATCH 087/100] Use RefCount type alias instead of u64 (#26472) use RefCount type alias instead of u64 --- bucket_map/src/bucket.rs | 2 +- runtime/src/accounts_db.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 0bbb502a577092..dccd44d08be824 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -240,7 +240,7 @@ impl Bucket { &mut self, key: &Pubkey, data: &[T], - ref_count: u64, + ref_count: RefCount, ) -> Result<(), BucketMapError> { let best_fit_bucket = IndexEntry::data_bucket_from_num_slots(data.len() as u64); if self.data.get(best_fit_bucket as usize).is_none() { diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 33cc27d2a792d9..cac113f6f2fc4c 100644 --- 
a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -2204,7 +2204,7 @@ impl AccountsDb {
     }
 
     fn calc_delete_dependencies(
-        purges: &HashMap, u64)>,
+        purges: &HashMap, RefCount)>,
         store_counts: &mut HashMap)>,
     ) {
         // Another pass to check if there are some filtered accounts which
@@ -2212,7 +2212,7 @@
         // then increment their storage count.
         let mut already_counted = HashSet::new();
         for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() {
-            let no_delete = if account_infos.len() as u64 != *ref_count_from_storage {
+            let no_delete = if account_infos.len() as RefCount != *ref_count_from_storage {
                 debug!(
                     "calc_delete_dependencies(),
                     pubkey: {},

From f3bba9723e99eef15bd0e21032fb5501db49e227 Mon Sep 17 00:00:00 2001
From: Dmitri Makarov
Date: Fri, 1 Jul 2022 12:30:39 -0700
Subject: [PATCH 088/100] Consolidate dep features in Cargo.toml files to minimize rebuilds

Indirect dependencies introduce variations in the feature sets of other
dependencies, which change the fingerprints of previously built packages
such as solana-program and cause redundant rebuilds of the affected
packages. These changes explicitly pin several dependency specifications
to a common set of features. This consolidation improves the reusability
of previously built binary packages across the programs/bpf/rust/
packages when they are built in CI jobs.
---
 Cargo.lock                  | 20 +++++++++++++++++++-
 frozen-abi/Cargo.toml       | 18 +++++++++++++++---
 frozen-abi/macro/Cargo.toml |  2 +-
 programs/bpf/Cargo.lock     | 21 ++++++++++++++++++++-
 sdk/program/Cargo.toml      | 24 +++++++++++++++---------
 zk-token-sdk/Cargo.toml     |  2 +-
 6 files changed, 71 insertions(+), 16 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 475f7104fb9f18..b69758efad5528 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1697,8 +1697,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
 dependencies = [
  "cfg-if 1.0.0",
+ "js-sys",
  "libc",
  "wasi 0.10.2+wasi-snapshot-preview1",
+ "wasm-bindgen",
 ]
 
 [[package]]
@@ -5114,21 +5116,32 @@ dependencies = [
 name = "solana-frozen-abi"
 version = "1.11.3"
 dependencies = [
+ "ahash",
+ "blake3",
+ "block-buffer 0.9.0",
  "bs58",
  "bv",
+ "byteorder",
+ "cc",
+ "either",
  "generic-array 0.14.5",
+ "getrandom 0.1.16",
+ "hashbrown 0.11.2",
  "im",
  "lazy_static",
  "log",
  "memmap2",
  "once_cell",
+ "rand_core 0.6.3",
  "rustc_version 0.4.0",
  "serde",
  "serde_bytes",
  "serde_derive",
+ "serde_json",
  "sha2 0.10.2",
  "solana-frozen-abi-macro 1.11.3",
  "solana-logger 1.11.3",
+ "subtle",
  "thiserror",
 ]
 
diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml
index 28a964052ca303..b97a62a2fa50e1 100644
--- a/frozen-abi/Cargo.toml
+++ b/frozen-abi/Cargo.toml
@@ -13,22 +13,34 @@ edition = "2021"
 bs58 = "0.4.0"
 bv = { version = "0.11.1", features = ["serde"] }
 lazy_static = "1.4.0"
-log = "0.4.17"
+log = { version = 
"0.4.17", features = ["std"] } once_cell = "1.12.0" -serde = "1.0.138" +serde = { version = "1.0", features = ["derive", "rc"] } serde_bytes = "0.11" -serde_derive = "1.0.103" +serde_derive = "1.0" +serde_json = "1.0" sha2 = "0.10.2" solana-frozen-abi-macro = { path = "macro", version = "=1.11.3" } thiserror = "1.0" [target.'cfg(not(target_os = "solana"))'.dependencies] +ahash = { version = "0.7.6", features = ["default", "std"] } +blake3 = { version = "1.3.1", features = ["digest", "traits-preview"] } +block-buffer = { version = "0.9.0", features = ["block-padding"] } +byteorder = { version = "1.4.3", features = ["default", "i128", "std"] } +cc = { version = "1.0.67", features = ["jobserver", "parallel"] } +either = { version = "1.6.1", features = ["use_std"] } generic-array = { version = "0.14.5", default-features = false, features = [ "serde", "more_lengths" ] } +getrandom = { version = "0.1", features = ["dummy"] } +hashbrown = { version = "0.11", features = ["raw"] } im = { version = "15.1.0", features = ["rayon", "serde"] } memmap2 = "0.5.3" +once_cell = { version = "1.8", features = ["alloc", "default", "race", "std"] } +rand_core = { version = "0.6.3", features = ["alloc", "getrandom", "std"] } +subtle = { version = "2.4.1", features = ["default", "i128", "std"] } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] solana-logger = { path = "../logger", version = "=1.11.3" } diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index b2ea0396cda47e..8ec2c3841ac0bc 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -15,7 +15,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0" quote = "1.0" -syn = { version = "1.0", features = ["full", "extra-traits"] } +syn = { version = "1.0", features = ["full", "extra-traits", "visit-mut"] } [build-dependencies] rustc_version = "0.4" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 5119796b89960e..313f8217f22a48 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -1467,8 +1467,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", "wasi 0.10.1+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -4693,20 +4695,31 @@ dependencies = [ name = "solana-frozen-abi" version = "1.11.3" dependencies = [ + "ahash", + "blake3", + "block-buffer 0.9.0", "bs58", "bv", + "byteorder 1.4.3", + "cc", + "either", "generic-array 0.14.5", + "getrandom 0.1.14", + "hashbrown 0.11.2", "im", "lazy_static", "log", "memmap2", "once_cell", + "rand_core 0.6.3", "rustc_version", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", "solana-frozen-abi-macro 1.11.3", + "subtle", "thiserror", ] @@ -5034,13 +5047,15 @@ dependencies = [ "bs58", "bv", "bytemuck", + "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.1.14", + "getrandom 0.2.4", "itertools", "js-sys", "lazy_static", + "libc", "libsecp256k1", "log", "memoffset", @@ -5048,18 +5063,22 @@ dependencies = [ "num-traits", "parking_lot 0.12.1", "rand 0.7.3", + "rand_chacha 0.2.2", "rustc_version", "rustversion", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", "sha3 0.10.1", "solana-frozen-abi 1.11.3", "solana-frozen-abi-macro 1.11.3", "solana-sdk-macro 1.11.3", "thiserror", + "tiny-bip39", "wasm-bindgen", + "zeroize", ] [[package]] diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 
db7f6eb35b35fd..433281c5de53c3 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] bincode = "1.3.1" -blake3 = { version = "1.2.0", features = ["traits-preview"] } +blake3 = { version = "1.3.1", features = ["digest", "traits-preview"] } borsh = "0.9.1" borsh-derive = "0.9.1" bs58 = "0.4.0" @@ -20,14 +20,15 @@ bytemuck = { version = "1.8.0", features = ["derive"] } bv = { version = "0.11.1", features = ["serde"] } itertools = "0.10.1" lazy_static = "1.4.0" -log = "0.4.14" +log = "0.4.17" memoffset = "0.6" num-derive = "0.3" num-traits = "0.2" rustversion = "1.0.7" -serde = "1.0.112" +serde = { version = "1.0", features = ["derive"] } serde_bytes = "0.11" -serde_derive = "1.0.103" +serde_derive = "1.0" +serde_json = "1.0" sha2 = "0.10.0" sha3 = "0.10.0" solana-frozen-abi = { path = "../../frozen-abi", version = "=1.11.3" } @@ -37,12 +38,16 @@ thiserror = "1.0" [target.'cfg(not(target_os = "solana"))'.dependencies] bitflags = "1.3.1" -base64 = "0.13" -curve25519-dalek = "3.2.1" -libsecp256k1 = "0.6.0" -rand = "0.7.0" +base64 = { version = "0.13", features = ["alloc", "std"] } +curve25519-dalek = { version = "3.2.1", features = ["serde"] } itertools = "0.10.1" +libc = { version = "0.2.126", features = ["extra_traits"] } +libsecp256k1 = "0.6.0" +rand = "0.7" +rand_chacha = { version = "0.2.2", default-features = true, features = ["simd", "std"] } +tiny-bip39 = "0.8.2" wasm-bindgen = "0.2" +zeroize = { version = "1.3", default-features = true, features = ["zeroize_derive"] } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] solana-logger = { path = "../../logger", version = "=1.11.3" } @@ -51,7 +56,7 @@ solana-logger = { path = "../../logger", version = "=1.11.3" } console_error_panic_hook = "0.1.7" console_log = "0.2.0" js-sys = "0.3.55" -getrandom = { version = "0.1", features = ["wasm-bindgen"] } +getrandom = { version = "0.2", features = ["js", "wasm-bindgen"] } [target.'cfg(not(target_pointer_width = "64"))'.dependencies] parking_lot = "0.12" @@ -66,6 +71,7 @@ serde_json = "1.0.56" static_assertions = "1.1.0" [build-dependencies] +cc = { version = "1.0.67", features = ["jobserver", "parallel"] } rustc_version = "0.4" [package.metadata.docs.rs] diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index 1511f3092aa057..67b744e49e2399 100644 --- a/zk-token-sdk/Cargo.toml +++ b/zk-token-sdk/Cargo.toml @@ -30,7 +30,7 @@ serde_json = "1.0" sha3 = "0.9" solana-sdk = { path = "../sdk", version = "=1.11.3" } subtle = "2" -thiserror = "1" +thiserror = "1.0" zeroize = { version = "1.3", default-features = false, features = ["zeroize_derive"] } [lib] From c99d9f00a9756018d7091f870cb9e3d7a37b3ac5 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 8 Jul 2022 20:04:08 +0000 Subject: [PATCH 089/100] preserves rent_epoch for rent exempt accounts (#26479) https://github.com/solana-labs/solana/pull/22292 prevents rent paying account creation going forward. As a result rent_epoch field for rent exempt accounts is redundant, and advancing this field will incur expensive account rewrites and cause discrepancy between accounts-db and cached vote/stake accounts. This commit adds a feature which upon activation preserves rent_epoch field for rent exempt accounts so that the field is frozen and is no longer advanced. 
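A minimal sketch of the gating the hunks below add to `RentCollector` (types trimmed to the essentials; the real `RentResult` is the one defined in rent_collector.rs): when the feature is active, a rent-exempt account is left alone entirely instead of having its rent_epoch rewritten to the current epoch.

```rust
enum RentResult {
    /// Leave the account untouched: no rent collected, no account rewrite.
    LeaveAloneNoRent,
    /// Collect rent_due (possibly 0) and store new_rent_epoch back.
    CollectRent { new_rent_epoch: u64, rent_due: u64 },
}

fn exempt_account_result(
    current_epoch: u64,
    preserve_rent_epoch_for_rent_exempt_accounts: bool,
) -> RentResult {
    if preserve_rent_epoch_for_rent_exempt_accounts {
        // Feature active: rent_epoch is frozen, avoiding an expensive rewrite.
        RentResult::LeaveAloneNoRent
    } else {
        // Legacy behavior: no rent is due, but rent_epoch still advances.
        RentResult::CollectRent {
            new_rent_epoch: current_epoch,
            rent_due: 0,
        }
    }
}

fn main() {
    assert!(matches!(
        exempt_account_result(3, true),
        RentResult::LeaveAloneNoRent
    ));
    assert!(matches!(
        exempt_account_result(3, false),
        RentResult::CollectRent { new_rent_epoch: 3, rent_due: 0 }
    ));
}
```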
--- runtime/src/accounts.rs | 23 +++++-- runtime/src/bank.rs | 29 ++++++--- runtime/src/expected_rent_collection.rs | 17 ++++- runtime/src/rent_collector.rs | 82 +++++++++++++++++++------ sdk/src/feature_set.rs | 5 ++ 5 files changed, 120 insertions(+), 36 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 881e96f2b78a47..747c57728708a5 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -265,6 +265,8 @@ impl Accounts { let mut accounts = Vec::with_capacity(account_keys.len()); let mut account_deps = Vec::with_capacity(account_keys.len()); let mut rent_debits = RentDebits::default(); + let preserve_rent_epoch_for_rent_exempt_accounts = feature_set + .is_active(&feature_set::preserve_rent_epoch_for_rent_exempt_accounts::id()); for (i, key) in account_keys.iter().enumerate() { let account = if !message.is_non_loader_key(i) { // Fill in an empty account for the program slots. @@ -292,6 +294,7 @@ impl Accounts { key, &mut account, self.accounts_db.filler_account_suffix.as_ref(), + preserve_rent_epoch_for_rent_exempt_accounts, ) .rent_amount; (account, rent_due) @@ -1182,15 +1185,16 @@ impl Accounts { /// Store the accounts into the DB // allow(clippy) needed for various gating flags #[allow(clippy::too_many_arguments)] - pub fn store_cached<'a>( + pub(crate) fn store_cached( &self, slot: Slot, - txs: &'a [SanitizedTransaction], - res: &'a [TransactionExecutionResult], - loaded: &'a mut [TransactionLoadResult], + txs: &[SanitizedTransaction], + res: &[TransactionExecutionResult], + loaded: &mut [TransactionLoadResult], rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, + preserve_rent_epoch_for_rent_exempt_accounts: bool, ) { let (accounts_to_store, txn_signatures) = self.collect_accounts_to_store( txs, @@ -1199,6 +1203,7 @@ impl Accounts { rent_collector, durable_nonce, lamports_per_signature, + preserve_rent_epoch_for_rent_exempt_accounts, ); self.accounts_db .store_cached((slot, &accounts_to_store[..]), Some(&txn_signatures)); @@ -1225,6 +1230,7 @@ impl Accounts { rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, + preserve_rent_epoch_for_rent_exempt_accounts: bool, ) -> ( Vec<(&'a Pubkey, &'a AccountSharedData)>, Vec>, @@ -1280,7 +1286,11 @@ impl Accounts { if execution_status.is_ok() || is_nonce_account || is_fee_payer { if account.rent_epoch() == INITIAL_RENT_EPOCH { let rent = rent_collector - .collect_from_created_account(address, account) + .collect_from_created_account( + address, + account, + preserve_rent_epoch_for_rent_exempt_accounts, + ) .rent_amount; loaded_transaction.rent += rent; loaded_transaction.rent_debits.insert( @@ -2995,6 +3005,7 @@ mod tests { &rent_collector, &DurableNonce::default(), 0, + true, // preserve_rent_epoch_for_rent_exempt_accounts ); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts @@ -3478,6 +3489,7 @@ mod tests { &rent_collector, &durable_nonce, 0, + true, // preserve_rent_epoch_for_rent_exempt_accounts ); assert_eq!(collected_accounts.len(), 2); assert_eq!( @@ -3592,6 +3604,7 @@ mod tests { &rent_collector, &durable_nonce, 0, + true, // preserve_rent_epoch_for_rent_exempt_accounts ); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d0d0f2479e1b53..738c590375e917 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4929,6 +4929,7 @@ impl Bank { &self.rent_collector, &durable_nonce, 
lamports_per_signature, + self.preserve_rent_epoch_for_rent_exempt_accounts(), ); let rent_debits = self.collect_rent(&execution_results, loaded_txs); @@ -5336,6 +5337,7 @@ impl Bank { pubkey, account, self.rc.accounts.accounts_db.filler_account_suffix.as_ref(), + self.preserve_rent_epoch_for_rent_exempt_accounts(), )); time_collecting_rent_us += measure.as_us(); @@ -7274,6 +7276,11 @@ impl Bank { .is_active(&feature_set::send_to_tpu_vote_port::id()) } + fn preserve_rent_epoch_for_rent_exempt_accounts(&self) -> bool { + self.feature_set + .is_active(&feature_set::preserve_rent_epoch_for_rent_exempt_accounts::id()) + } + pub fn read_cost_tracker(&self) -> LockResult> { self.cost_tracker.read() } @@ -8202,6 +8209,7 @@ pub(crate) mod tests { &keypairs[4].pubkey(), &mut account_copy, None, + true, // preserve_rent_epoch_for_rent_exempt_accounts ); assert_eq!(expected_rent.rent_amount, too_few_lamports); assert_eq!(account_copy.lamports(), 0); @@ -9720,21 +9728,19 @@ pub(crate) mod tests { bank.collect_rent_in_partition((0, 0, 1), true, &RentMetrics::default()); { let rewrites_skipped = bank.rewrites_skipped_this_slot.read().unwrap(); - // `rewrites_skipped.len()` is the number of non-rent paying accounts in the slot. This - // is always at least the number of features in the Bank, due to - // `activate_all_features`. These accounts will stop being written to the append vec - // when we start skipping rewrites. + // `rewrites_skipped.len()` is the number of non-rent paying accounts in the slot. // 'collect_rent_in_partition' fills 'rewrites_skipped_this_slot' with rewrites that // were skipped during rent collection but should still be considered in the slot's // bank hash. If the slot is also written in the append vec, then the bank hash calc // code ignores the contents of this list. This assert is confirming that the expected # // of accounts were included in 'rewrites_skipped' by the call to // 'collect_rent_in_partition(..., true)' above. - let num_features = bank.feature_set.inactive.len() + bank.feature_set.active.len(); - assert!(rewrites_skipped.len() >= num_features); - // should have skipped 'rent_exempt_pubkey' - assert!(rewrites_skipped.contains_key(&rent_exempt_pubkey)); - // should NOT have skipped 'rent_exempt_pubkey' + assert_eq!(rewrites_skipped.len(), 1); + // should not have skipped 'rent_exempt_pubkey' + // Once preserve_rent_epoch_for_rent_exempt_accounts is activated, + // rewrite-skip is irrelevant to rent-exempt accounts. + assert!(!rewrites_skipped.contains_key(&rent_exempt_pubkey)); + // should NOT have skipped 'rent_due_pubkey' assert!(!rewrites_skipped.contains_key(&rent_due_pubkey)); } @@ -9754,9 +9760,11 @@ pub(crate) mod tests { bank.get_account(&rent_exempt_pubkey).unwrap().lamports(), large_lamports ); + // Once preserve_rent_epoch_for_rent_exempt_accounts is activated, + // rent_epoch of rent-exempt accounts will no longer advance. 
assert_eq!( bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch(), - current_epoch + 0 ); assert_eq!( bank.slots_by_pubkey(&rent_due_pubkey, &ancestors), @@ -19213,6 +19221,7 @@ pub(crate) mod tests { &keypair.pubkey(), &mut account, None, + true, // preserve_rent_epoch_for_rent_exempt_accounts ); assert_eq!(info.account_data_len_reclaimed, data_size as u64); } diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index 92ad9745118295..d049430933db33 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -302,7 +302,12 @@ impl ExpectedRentCollection { pubkey: &Pubkey, rewrites_skipped_this_slot: &Rewrites, ) -> Option { - let next_epoch = match rent_collector.calculate_rent_result(pubkey, account, None) { + let next_epoch = match rent_collector.calculate_rent_result( + pubkey, account, None, // filler_account_suffix + // Skipping rewrites is not compatible with the below feature. + // We will not skip rewrites until the feature is activated. + false, // preserve_rent_epoch_for_rent_exempt_accounts + ) { RentResult::LeaveAloneNoRent => return None, RentResult::CollectRent { new_rent_epoch, @@ -532,8 +537,14 @@ impl ExpectedRentCollection { // ask the rent collector what rent should be collected. // Rent collector knows the current epoch. - let rent_result = - rent_collector.calculate_rent_result(pubkey, loaded_account, filler_account_suffix); + let rent_result = rent_collector.calculate_rent_result( + pubkey, + loaded_account, + filler_account_suffix, + // Skipping rewrites is not compatible with the below feature. + // We will not skip rewrites until the feature is activated. + false, // preserve_rent_epoch_for_rent_exempt_accounts + ); let current_rent_epoch = loaded_account.rent_epoch(); let new_rent_epoch = match rent_result { RentResult::CollectRent { diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs index 6ae978a9a5c7a6..bbcf8379cd0b81 100644 --- a/runtime/src/rent_collector.rs +++ b/runtime/src/rent_collector.rs @@ -67,7 +67,11 @@ impl RentCollector { } /// true if it is easy to determine this account should consider having rent collected from it - pub fn should_collect_rent(&self, address: &Pubkey, account: &impl ReadableAccount) -> bool { + pub(crate) fn should_collect_rent( + &self, + address: &Pubkey, + account: &impl ReadableAccount, + ) -> bool { !(account.executable() // executable accounts must be rent-exempt balance || *address == incinerator::id()) } @@ -121,8 +125,14 @@ impl RentCollector { address: &Pubkey, account: &mut AccountSharedData, filler_account_suffix: Option<&Pubkey>, + preserve_rent_epoch_for_rent_exempt_accounts: bool, ) -> CollectedInfo { - match self.calculate_rent_result(address, account, filler_account_suffix) { + match self.calculate_rent_result( + address, + account, + filler_account_suffix, + preserve_rent_epoch_for_rent_exempt_accounts, + ) { RentResult::LeaveAloneNoRent => CollectedInfo::default(), RentResult::CollectRent { new_rent_epoch, @@ -154,6 +164,7 @@ impl RentCollector { address: &Pubkey, account: &impl ReadableAccount, filler_account_suffix: Option<&Pubkey>, + preserve_rent_epoch_for_rent_exempt_accounts: bool, ) -> RentResult { if self.can_skip_rent_collection(address, account, filler_account_suffix) { return RentResult::LeaveAloneNoRent; @@ -161,10 +172,16 @@ impl RentCollector { match self.get_rent_due(account) { // Rent isn't collected for the next epoch. // Make sure to check exempt status again later in current epoch. 
- RentDue::Exempt => RentResult::CollectRent { - new_rent_epoch: self.epoch, - rent_due: 0, - }, + RentDue::Exempt => { + if preserve_rent_epoch_for_rent_exempt_accounts { + RentResult::LeaveAloneNoRent + } else { + RentResult::CollectRent { + new_rent_epoch: self.epoch, + rent_due: 0, + } + } + } // Maybe collect rent later, leave account alone. RentDue::Paying(0) => RentResult::LeaveAloneNoRent, // Rent is collected for next epoch. @@ -180,10 +197,16 @@ impl RentCollector { &self, address: &Pubkey, account: &mut AccountSharedData, + preserve_rent_epoch_for_rent_exempt_accounts: bool, ) -> CollectedInfo { // initialize rent_epoch as created at this epoch account.set_rent_epoch(self.epoch); - self.collect_from_existing_account(address, account, None) + self.collect_from_existing_account( + address, + account, + None, // filler_account_suffix + preserve_rent_epoch_for_rent_exempt_accounts, + ) } /// Performs easy checks to see if rent collection can be skipped @@ -204,11 +227,11 @@ impl RentCollector { /// Information computed during rent collection #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct CollectedInfo { +pub(crate) struct CollectedInfo { /// Amount of rent collected from account - pub rent_amount: u64, + pub(crate) rent_amount: u64, /// Size of data reclaimed from account (happens when account's lamports go to zero) - pub account_data_len_reclaimed: u64, + pub(crate) account_data_len_reclaimed: u64, } impl std::ops::Add for CollectedInfo { @@ -258,8 +281,11 @@ mod tests { let rent_collector = default_rent_collector_clone_with_epoch(new_epoch); // collect rent on a newly-created account - let collected = rent_collector - .collect_from_created_account(&solana_sdk::pubkey::new_rand(), &mut created_account); + let collected = rent_collector.collect_from_created_account( + &solana_sdk::pubkey::new_rand(), + &mut created_account, + true, // preserve_rent_epoch_for_rent_exempt_accounts + ); assert!(created_account.lamports() < old_lamports); assert_eq!( created_account.lamports() + collected.rent_amount, @@ -272,7 +298,8 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &solana_sdk::pubkey::new_rand(), &mut existing_account, - None, + None, // filler_account_suffix + true, // preserve_rent_epoch_for_rent_exempt_accounts ); assert!(existing_account.lamports() < old_lamports); assert_eq!( @@ -302,7 +329,12 @@ mod tests { let rent_collector = default_rent_collector_clone_with_epoch(epoch); // first mark account as being collected while being rent-exempt - let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, None); + let collected = rent_collector.collect_from_existing_account( + &pubkey, + &mut account, + None, // filler_account_suffix + true, // preserve_rent_epoch_for_rent_exempt_accounts + ); assert_eq!(account.lamports(), huge_lamports); assert_eq!(collected, CollectedInfo::default()); @@ -310,7 +342,12 @@ mod tests { account.set_lamports(tiny_lamports); // ... 
and trigger another rent collection on the same epoch and check that rent is working - let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, None); + let collected = rent_collector.collect_from_existing_account( + &pubkey, + &mut account, + None, // filler_account_suffix + true, // preserve_rent_epoch_for_rent_exempt_accounts + ); assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); assert_ne!(collected, CollectedInfo::default()); } @@ -329,7 +366,12 @@ mod tests { let epoch = 3; let rent_collector = default_rent_collector_clone_with_epoch(epoch); - let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, None); + let collected = rent_collector.collect_from_existing_account( + &pubkey, + &mut account, + None, // filler_account_suffix + true, // preserve_rent_epoch_for_rent_exempt_accounts + ); assert_eq!(account.lamports(), 0); assert_eq!(collected.rent_amount, 1); } @@ -349,8 +391,12 @@ mod tests { }); let rent_collector = default_rent_collector_clone_with_epoch(account_rent_epoch + 1); - let collected = - rent_collector.collect_from_existing_account(&Pubkey::new_unique(), &mut account, None); + let collected = rent_collector.collect_from_existing_account( + &Pubkey::new_unique(), + &mut account, + None, // filler_account_suffix + true, // preserve_rent_epoch_for_rent_exempt_accounts + ); assert_eq!(collected.rent_amount, account_lamports); assert_eq!( diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index a40180a6967a44..df52abd4073b37 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -448,6 +448,10 @@ pub mod cap_accounts_data_size_per_block { solana_sdk::declare_id!("qywiJyZmqTKspFg2LeuUHqcA5nNvBgobqb9UprywS9N"); } +pub mod preserve_rent_epoch_for_rent_exempt_accounts { + solana_sdk::declare_id!("HH3MUYReL2BvqqA3oEcAa7txju5GY6G4nxJ51zvsEjEZ"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -554,6 +558,7 @@ lazy_static! 
{ (nonce_must_be_advanceable::id(), "durable nonces must be advanceable"), (vote_authorize_with_seed::id(), "An instruction you can use to change a vote accounts authority when the current authority is a derived key #25860"), (cap_accounts_data_size_per_block::id(), "cap the accounts data size per block #25517"), + (preserve_rent_epoch_for_rent_exempt_accounts::id(), "preserve rent epoch for rent exempt accounts #26479"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 6d7a569087ee9cc3fa47232fb5b012937c6d1cba Mon Sep 17 00:00:00 2001 From: Mike MacCana Date: Fri, 8 Jul 2022 22:00:33 +0100 Subject: [PATCH 090/100] chore: make documentation and examples more prominent in README (#26498) * Make documentation and examples more prominent in README There's a lot more documentation to this library than many users would see looking at the current README - Make a dedicated 'Documentation and examples' heading - Bring the Solana Cookbook (which has more examples than all the other documentation listed in the README) to the top - Mention the examples higher up in the README * Update web3.js/README.md * Update web3.js/README.md --- web3.js/README.md | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/web3.js/README.md b/web3.js/README.md index f0cfd61edb3b61..1b07c4b6cc32ba 100644 --- a/web3.js/README.md +++ b/web3.js/README.md @@ -23,7 +23,10 @@ This is the Solana Javascript API built on the Solana [JSON RPC API](https://docs.solana.com/apps/jsonrpc-api) -[Latest API Documentation](https://solana-labs.github.io/solana-web3.js/) +## Documentation and examples + + - [The Solana Cookbook](https://solanacookbook.com/) has extensive task-based documentation using this library. + - For more detail on individual functions, see the [latest API Documentation](https://solana-labs.github.io/solana-web3.js/) ## Installation @@ -84,16 +87,6 @@ console.log(solanaWeb3); console.log(solanaWeb3); ``` -## Examples - -Example scripts for the web3.js repo and native programs: - -- [Web3 Examples](https://github.com/solana-labs/solana/tree/master/web3.js/examples) - -Example scripts for the Solana Program Library: - -- [Token Program Examples](https://github.com/solana-labs/solana-program-library/tree/master/token/js/examples) - ## Flow Support (Discontinued) Flow types are no longer supported in new releases. 
The last release with Flow support is v1.37.2 and its From 0de0d4625ec01b20d44cc3dbf107e4d9884d9640 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 8 Jul 2022 23:14:38 +0100 Subject: [PATCH 091/100] chore: remove web3 examples (#26515) --- ...nfirm-cargo-version-numbers-before-bump.sh | 1 - scripts/increment-cargo-version.sh | 1 - web3.js/examples/get_account_info.js | 25 ------------- web3.js/examples/send_sol.js | 37 ------------------- 4 files changed, 64 deletions(-) delete mode 100644 web3.js/examples/get_account_info.js delete mode 100644 web3.js/examples/send_sol.js diff --git a/scripts/confirm-cargo-version-numbers-before-bump.sh b/scripts/confirm-cargo-version-numbers-before-bump.sh index 401953a58e2772..c6b1c68e821ce2 100755 --- a/scripts/confirm-cargo-version-numbers-before-bump.sh +++ b/scripts/confirm-cargo-version-numbers-before-bump.sh @@ -32,7 +32,6 @@ ignores=( .cache .cargo target - web3.js/examples web3.js/test node_modules ) diff --git a/scripts/increment-cargo-version.sh b/scripts/increment-cargo-version.sh index 3c69debd45881c..386b2b21239a05 100755 --- a/scripts/increment-cargo-version.sh +++ b/scripts/increment-cargo-version.sh @@ -22,7 +22,6 @@ ignores=( .cache .cargo target - web3.js/examples web3.js/test node_modules ) diff --git a/web3.js/examples/get_account_info.js b/web3.js/examples/get_account_info.js deleted file mode 100644 index 86fe6eb450284f..00000000000000 --- a/web3.js/examples/get_account_info.js +++ /dev/null @@ -1,25 +0,0 @@ -import * as web3 from '@solana/web3.js'; - -(async () => { - // Connect to cluster - var connection = new web3.Connection( - web3.clusterApiUrl('devnet'), - 'confirmed', - ); - - // Generate a new wallet keypair and airdrop SOL - var wallet = web3.Keypair.generate(); - var airdropSignature = await connection.requestAirdrop( - wallet.publicKey, - web3.LAMPORTS_PER_SOL, - ); - - //wait for airdrop confirmation - await connection.confirmTransaction(airdropSignature); - - // get account info - // account data is bytecode that needs to be deserialized - // serialization and deserialization is program specific - let account = await connection.getAccountInfo(wallet.publicKey); - console.log(account); -})(); diff --git a/web3.js/examples/send_sol.js b/web3.js/examples/send_sol.js deleted file mode 100644 index 68e772dd2453fc..00000000000000 --- a/web3.js/examples/send_sol.js +++ /dev/null @@ -1,37 +0,0 @@ -import * as web3 from '@solana/web3.js'; - -(async () => { - // Connect to cluster - var connection = new web3.Connection( - web3.clusterApiUrl('devnet'), - 'confirmed', - ); - - // Generate a new random public key - var from = web3.Keypair.generate(); - var airdropSignature = await connection.requestAirdrop( - from.publicKey, - web3.LAMPORTS_PER_SOL, - ); - await connection.confirmTransaction(airdropSignature); - - // Generate a new random public key - var to = web3.Keypair.generate(); - - // Add transfer instruction to transaction - var transaction = new web3.Transaction().add( - web3.SystemProgram.transfer({ - fromPubkey: from.publicKey, - toPubkey: to.publicKey, - lamports: web3.LAMPORTS_PER_SOL / 100, - }), - ); - - // Sign transaction, broadcast, and confirm - var signature = await web3.sendAndConfirmTransaction( - connection, - transaction, - [from], - ); - console.log('SIGNATURE', signature); -})(); From ff3289de935aeae8450c579af9666ea1a2921510 Mon Sep 17 00:00:00 2001 From: Jack May Date: Fri, 8 Jul 2022 16:41:32 -0700 Subject: [PATCH 092/100] cleanup feature: quick_bail_on_panic (#26501) --- 
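In the hunk below, the previously feature-gated charge becomes unconditional: the panic syscall consumes compute units equal to the message length before the message is translated. A hedged sketch of that ordering, with a plain counter standing in for the real compute meter:

```rust
// Stub of the panic syscall's metering order: charge first, so an oversized
// panic message is paid for before any translation work happens.
fn panic_syscall(msg_len: u64, compute_remaining: &mut u64) -> Result<(), &'static str> {
    *compute_remaining = compute_remaining
        .checked_sub(msg_len)
        .ok_or("compute budget exceeded")?;
    // ...translate the message and abort the program here...
    Ok(())
}

fn main() {
    let mut budget: u64 = 100;
    assert!(panic_syscall(64, &mut budget).is_ok());  // 36 units left
    assert!(panic_syscall(64, &mut budget).is_err()); // budget exceeded
}
```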
programs/bpf_loader/src/syscalls.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 9485904f547b96..23ab57b7f498c7 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -25,7 +25,7 @@ use { blake3_syscall_enabled, check_physical_overlapping, check_slice_translation_size, curve25519_syscall_enabled, disable_fees_sysvar, libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, prevent_calling_precompiles_as_programs, - quick_bail_on_panic, syscall_saturated_math, + syscall_saturated_math, }, hash::{Hasher, HASH_BYTES}, instruction::{ @@ -550,12 +550,7 @@ declare_syscall!( .map_err(|_| SyscallError::InvokeContextBorrowFailed), result ); - if invoke_context - .feature_set - .is_active(&quick_bail_on_panic::id()) - { - question_mark!(invoke_context.get_compute_meter().consume(len), result); - } + question_mark!(invoke_context.get_compute_meter().consume(len), result); *result = translate_string_and_do( memory_mapping, From 785a7a5936afd6c5a8de6ed84ebdddcb75def4e3 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 8 Jul 2022 19:17:15 -0500 Subject: [PATCH 093/100] Track more accounts data size changes (#26467) --- runtime/src/bank.rs | 173 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 134 insertions(+), 39 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 738c590375e917..293fb5a6bd8118 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3501,6 +3501,7 @@ impl Bank { ); self.store_account(pubkey, account); self.capitalization.fetch_add(account.lamports(), Relaxed); + self.accounts_data_size_initial += account.data().len() as u64; } // updating sysvars (the fees sysvar in this case) now depends on feature activations in // genesis_config.accounts above @@ -3513,6 +3514,7 @@ impl Bank { pubkey ); self.store_account(pubkey, account); + self.accounts_data_size_initial += account.data().len() as u64; } // highest staked node is the first collector @@ -3552,12 +3554,14 @@ impl Bank { } fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) { + let old_data_size = account.data().len(); self.capitalization.fetch_sub(account.lamports(), Relaxed); // Both resetting account balance to 0 and zeroing the account data // is needed to really purge from AccountsDb and flush the Stakes cache account.set_lamports(0); account.data_as_mut_slice().fill(0); self.store_account(program_id, &account); + self.calculate_and_update_accounts_data_size_delta_off_chain(old_data_size, 0); } // NOTE: must hold idempotent for the same set of arguments @@ -4730,6 +4734,16 @@ impl Bank { .unwrap(); } + /// Calculate the data size delta and update the off-chain accounts data size delta + fn calculate_and_update_accounts_data_size_delta_off_chain( + &self, + old_data_size: usize, + new_data_size: usize, + ) { + let data_size_delta = calculate_data_size_delta(old_data_size, new_data_size); + self.update_accounts_data_size_delta_off_chain(data_size_delta); + } + /// Set the initial accounts data size /// NOTE: This fn is *ONLY FOR TESTS* pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) { @@ -6271,39 +6285,46 @@ impl Bank { pubkey: &Pubkey, new_account: &AccountSharedData, ) { - if let Some(old_account) = self.get_account_with_fixed_root(pubkey) { - match new_account.lamports().cmp(&old_account.lamports()) { - std::cmp::Ordering::Greater => { - let increased = 
new_account.lamports() - old_account.lamports(); - trace!( - "store_account_and_update_capitalization: increased: {} {}", - pubkey, - increased - ); - self.capitalization.fetch_add(increased, Relaxed); - } - std::cmp::Ordering::Less => { - let decreased = old_account.lamports() - new_account.lamports(); - trace!( - "store_account_and_update_capitalization: decreased: {} {}", - pubkey, - decreased - ); - self.capitalization.fetch_sub(decreased, Relaxed); + let old_account_data_size = + if let Some(old_account) = self.get_account_with_fixed_root(pubkey) { + match new_account.lamports().cmp(&old_account.lamports()) { + std::cmp::Ordering::Greater => { + let increased = new_account.lamports() - old_account.lamports(); + trace!( + "store_account_and_update_capitalization: increased: {} {}", + pubkey, + increased + ); + self.capitalization.fetch_add(increased, Relaxed); + } + std::cmp::Ordering::Less => { + let decreased = old_account.lamports() - new_account.lamports(); + trace!( + "store_account_and_update_capitalization: decreased: {} {}", + pubkey, + decreased + ); + self.capitalization.fetch_sub(decreased, Relaxed); + } + std::cmp::Ordering::Equal => {} } - std::cmp::Ordering::Equal => {} - } - } else { - trace!( - "store_account_and_update_capitalization: created: {} {}", - pubkey, - new_account.lamports() - ); - self.capitalization - .fetch_add(new_account.lamports(), Relaxed); - } + old_account.data().len() + } else { + trace!( + "store_account_and_update_capitalization: created: {} {}", + pubkey, + new_account.lamports() + ); + self.capitalization + .fetch_add(new_account.lamports(), Relaxed); + 0 + }; self.store_account(pubkey, new_account); + self.calculate_and_update_accounts_data_size_delta_off_chain( + old_account_data_size, + new_account.data().len(), + ); } fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> { @@ -7496,6 +7517,11 @@ impl Bank { self.store_account(new_address, &AccountSharedData::default()); self.remove_executor(old_address); + + self.calculate_and_update_accounts_data_size_delta_off_chain( + old_account.data().len(), + new_account.data().len(), + ); } } } @@ -7520,9 +7546,11 @@ impl Bank { // As a workaround for // https://github.com/solana-labs/solana-program-library/issues/374, ensure that the // spl-token 2 native mint account is owned by the spl-token 2 program. + let old_account_data_size; let store = if let Some(existing_native_mint_account) = self.get_account_with_fixed_root(&inline_spl_token::native_mint::id()) { + old_account_data_size = existing_native_mint_account.data().len(); if existing_native_mint_account.owner() == &solana_sdk::system_program::id() { native_mint_account.set_lamports(existing_native_mint_account.lamports()); true @@ -7530,6 +7558,7 @@ impl Bank { false } } else { + old_account_data_size = 0; self.capitalization .fetch_add(native_mint_account.lamports(), Relaxed); true @@ -7537,6 +7566,10 @@ impl Bank { if store { self.store_account(&inline_spl_token::native_mint::id(), &native_mint_account); + self.calculate_and_update_accounts_data_size_delta_off_chain( + old_account_data_size, + native_mint_account.data().len(), + ); } } } @@ -7615,6 +7648,17 @@ impl Bank { } } +/// Compute how much an account has changed size. This function is useful when the data size delta +/// needs to be computed and passed to an `update_accounts_data_size_delta` function. 
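A couple of worked cases for the helper defined immediately below, restated here (without the debug asserts) so the example compiles on its own:

```rust
fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 {
    // Same arithmetic as the helper in the patch: signed new-minus-old.
    (new_data_size as i64).saturating_sub(old_data_size as i64)
}

fn main() {
    assert_eq!(calculate_data_size_delta(100, 150), 50); // account grew by 50 bytes
    assert_eq!(calculate_data_size_delta(150, 0), -150); // account data reclaimed
    assert_eq!(calculate_data_size_delta(42, 42), 0);    // size unchanged
}
```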
+fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 { + assert!(old_data_size <= i64::MAX as usize); + assert!(new_data_size <= i64::MAX as usize); + let old_data_size = old_data_size as i64; + let new_data_size = new_data_size as i64; + + new_data_size.saturating_sub(old_data_size) +} + /// Since `apply_feature_activations()` has different behavior depending on its caller, enumerate /// those callers explicitly. #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -8768,19 +8812,14 @@ pub(crate) mod tests { genesis_config.rent = rent_with_exemption_threshold(1000.0); let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let mut bank = create_child_bank_for_rent_test(&root_bank, &genesis_config); + let bank = create_child_bank_for_rent_test(&root_bank, &genesis_config); let account_pubkey = solana_sdk::pubkey::new_rand(); let account_balance = 1; - let data_size = 12345_u64; // use non-zero data size to also test accounts_data_size - let mut account = AccountSharedData::new( - account_balance, - data_size as usize, - &solana_sdk::pubkey::new_rand(), - ); + let mut account = + AccountSharedData::new(account_balance, 0, &solana_sdk::pubkey::new_rand()); account.set_executable(true); bank.store_account(&account_pubkey, &account); - bank.accounts_data_size_initial = data_size; let transfer_lamports = 1; let tx = system_transaction::transfer( @@ -8795,7 +8834,6 @@ pub(crate) mod tests { Err(TransactionError::InvalidWritableAccount) ); assert_eq!(bank.get_balance(&account_pubkey), account_balance); - assert_eq!(bank.load_accounts_data_size(), data_size); } #[test] @@ -19241,4 +19279,61 @@ pub(crate) mod tests { // also be reclaimed by rent collection. assert!(reclaimed_data_size >= data_size); } + + #[test] + fn test_accounts_data_size_with_default_bank() { + let bank = Bank::default_for_tests(); + assert_eq!( + bank.load_accounts_data_size() as usize, + bank.get_total_accounts_stats().unwrap().data_len + ); + } + + #[test] + fn test_accounts_data_size_from_genesis() { + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + .. 
+ } = genesis_utils::create_genesis_config_with_leader( + 1_000_000 * LAMPORTS_PER_SOL, + &Pubkey::new_unique(), + 100 * LAMPORTS_PER_SOL, + ); + genesis_config.rent = Rent::default(); + genesis_config.ticks_per_slot = 3; + + let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + assert_eq!( + bank.load_accounts_data_size() as usize, + bank.get_total_accounts_stats().unwrap().data_len + ); + + // Create accounts over a number of banks and ensure the accounts data size remains correct + for _ in 0..10 { + bank = Arc::new(Bank::new_from_parent( + &bank, + &Pubkey::default(), + bank.slot() + 1, + )); + + // Store an account into the bank that is rent-exempt and has data + let data_size = rand::thread_rng().gen_range(3333, 4444); + let transaction = system_transaction::create_account( + &mint_keypair, + &Keypair::new(), + bank.last_blockhash(), + genesis_config.rent.minimum_balance(data_size), + data_size as u64, + &solana_sdk::system_program::id(), + ); + bank.process_transaction(&transaction).unwrap(); + bank.fill_bank_with_ticks_for_tests(); + + assert_eq!( + bank.load_accounts_data_size() as usize, + bank.get_total_accounts_stats().unwrap().data_len, + ); + } + } } From dcab37ecca21b9a5ce9032355b3543794d9dc545 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Fri, 8 Jul 2022 17:25:52 -0600 Subject: [PATCH 094/100] update `scripts/cargo-fmt.sh` to reflect changes in 9425478 --- scripts/cargo-fmt.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/cargo-fmt.sh b/scripts/cargo-fmt.sh index ee9d9cb726b0ce..0a378f6b460767 100755 --- a/scripts/cargo-fmt.sh +++ b/scripts/cargo-fmt.sh @@ -14,7 +14,7 @@ set -ex "$cargo" nightly fmt --all (cd programs/bpf && "$cargo" nightly fmt --all) -(cd sdk/cargo-build-bpf/tests/crates/fail && "$cargo" nightly fmt --all) -(cd sdk/cargo-build-bpf/tests/crates/noop && "$cargo" nightly fmt --all) +(cd sdk/cargo-build-sbf/tests/crates/fail && "$cargo" nightly fmt --all) +(cd sdk/cargo-build-sbf/tests/crates/noop && "$cargo" nightly fmt --all) (cd storage-bigtable/build-proto && "$cargo" nightly fmt --all) (cd web3.js/test/fixtures/noop-program && "$cargo" nightly fmt --all) From 6be835d887c7a8b36449556814e65bafb1377baa Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Fri, 8 Jul 2022 17:20:02 -0600 Subject: [PATCH 095/100] remote-wallet: add usb pids for ledger nano s plus --- remote-wallet/src/ledger.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/remote-wallet/src/ledger.rs b/remote-wallet/src/ledger.rs index c5a689d50a9522..ec929257acc0bf 100644 --- a/remote-wallet/src/ledger.rs +++ b/remote-wallet/src/ledger.rs @@ -35,7 +35,7 @@ const APDU_SUCCESS_CODE: usize = 0x9000; /// Ledger vendor ID const LEDGER_VID: u16 = 0x2c97; -/// Ledger product IDs: Nano S and Nano X +/// Ledger product IDs const LEDGER_NANO_S_PIDS: [u16; 33] = [ 0x0001, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1008, 0x1009, 0x100a, 0x100b, 0x100c, 0x100d, 0x100e, 0x100f, 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, @@ -46,6 +46,11 @@ const LEDGER_NANO_X_PIDS: [u16; 33] = [ 0x400b, 0x400c, 0x400d, 0x400e, 0x400f, 0x4010, 0x4011, 0x4012, 0x4013, 0x4014, 0x4015, 0x4016, 0x4017, 0x4018, 0x4019, 0x401a, 0x401b, 0x401c, 0x401d, 0x401e, 0x401f, ]; +const LEDGER_NANO_S_PLUS_PIDS: [u16; 33] = [ + 0x0005, 0x5000, 0x5001, 0x5002, 0x5003, 0x5004, 0x5005, 0x5006, 0x5007, 0x5008, 0x5009, 0x500a, + 0x500b, 0x500c, 0x500d, 0x500e, 0x500f, 0x5010, 0x5011, 0x5012, 0x5013, 0x5014, 0x5015, 0x5016, + 0x5017, 
0x5018, 0x5019, 0x501a, 0x501b, 0x501c, 0x501d, 0x501e, 0x501f, +]; const LEDGER_TRANSPORT_HEADER_LEN: usize = 5; const HID_PACKET_SIZE: usize = 64 + HID_PREFIX_ZERO; @@ -508,8 +513,12 @@ impl RemoteWallet for LedgerWallet { /// Check if the detected device is a valid `Ledger device` by checking both the product ID and the vendor ID pub fn is_valid_ledger(vendor_id: u16, product_id: u16) -> bool { - vendor_id == LEDGER_VID - && (LEDGER_NANO_S_PIDS.contains(&product_id) || LEDGER_NANO_X_PIDS.contains(&product_id)) + let product_ids = [ + LEDGER_NANO_S_PIDS, + LEDGER_NANO_X_PIDS, + LEDGER_NANO_S_PLUS_PIDS, + ]; + vendor_id == LEDGER_VID && product_ids.iter().any(|pids| pids.contains(&product_id)) } /// Build the derivation path byte array from a DerivationPath selection From e3d1fe52a26bc00ad376d6884f9d743c00814ccd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 Jul 2022 00:35:29 -0600 Subject: [PATCH 096/100] chore: bump js-sys from 0.3.57 to 0.3.58 (#26508) * chore: bump js-sys from 0.3.57 to 0.3.58 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.57 to 0.3.58. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- programs/bpf/Cargo.lock | 4 ++-- sdk/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b69758efad5528..026238d21ae424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2164,9 +2164,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 313f8217f22a48..80e426fdc2c436 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -1905,9 +1905,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 5e7b813d2b54f6..95947737d3faa7 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -81,7 +81,7 @@ uriparse = "0.6.4" wasm-bindgen = "0.2" [target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.57" +js-sys = "0.3.58" [dev-dependencies] anyhow = "1.0.57" From 9547b00f4c20d61e88171042b13b366692c0f81a Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Sat, 9 Jul 2022 12:28:44 -0700 Subject: [PATCH 097/100] epoch-info: Add epochCompletedPercent field to json output --- cli-output/src/cli_output.rs | 6 ++---- cli/src/cluster_query.rs | 3 +++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cli-output/src/cli_output.rs 
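The Ledger change above folds the three product-ID tables into one `any` pass over a single vendor id. A trimmed, runnable sketch of that shape (PID lists abbreviated; the real tables each hold 33 entries):

```rust
const LEDGER_VID: u16 = 0x2c97;

// One table per device family: Nano S, Nano X, Nano S Plus.
fn is_valid_ledger(vendor_id: u16, product_id: u16, pid_tables: &[&[u16]]) -> bool {
    vendor_id == LEDGER_VID && pid_tables.iter().any(|pids| pids.contains(&product_id))
}

fn main() {
    let nano_s_plus: &[u16] = &[0x0005, 0x5000, 0x5001]; // abbreviated
    assert!(is_valid_ledger(0x2c97, 0x5000, &[nano_s_plus]));
    assert!(!is_valid_ledger(0x2c97, 0x4000, &[nano_s_plus]));
}
```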
b/cli-output/src/cli_output.rs index 0fb4899d3d0e54..d02b43dc71ab5d 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -251,6 +251,7 @@ pub struct CliSlotStatus { pub struct CliEpochInfo { #[serde(flatten)] pub epoch_info: EpochInfo, + pub epoch_completed_percent: f64, #[serde(skip)] pub average_slot_time_ms: u64, #[serde(skip)] @@ -285,10 +286,7 @@ impl fmt::Display for CliEpochInfo { writeln_name_value( f, "Epoch Completed Percent:", - &format!( - "{:>3.3}%", - self.epoch_info.slot_index as f64 / self.epoch_info.slots_in_epoch as f64 * 100_f64 - ), + &format!("{:>3.3}%", self.epoch_completed_percent), )?; let remaining_slots_in_epoch = self.epoch_info.slots_in_epoch - self.epoch_info.slot_index; writeln_name_value( diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index b64c5fd822fe97..5bf175bbe361eb 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1101,8 +1101,11 @@ pub fn process_get_epoch(rpc_client: &RpcClient, _config: &CliConfig) -> Process pub fn process_get_epoch_info(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult { let epoch_info = rpc_client.get_epoch_info()?; + let epoch_completed_percent = + epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64; let mut cli_epoch_info = CliEpochInfo { epoch_info, + epoch_completed_percent, average_slot_time_ms: 0, start_block_time: None, current_block_time: None, From df616a0dda687104733203ee1fd38b1d933751fd Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sun, 10 Jul 2022 13:13:07 +0000 Subject: [PATCH 098/100] removes redundant clone in gossip PruneData::signable_data (#26510) PruneData::signable_data redundantly clones inner fields, while only references suffice: https://github.com/solana-labs/solana/blob/d1370f2c7/gossip/src/cluster_info.rs#L219-L233 --- gossip/src/cluster_info.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 9bae4595013fd5..4bbde3f4b354ad 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -218,16 +218,16 @@ impl Signable for PruneData { fn signable_data(&self) -> Cow<[u8]> { #[derive(Serialize)] - struct SignData { - pubkey: Pubkey, - prunes: Vec, - destination: Pubkey, + struct SignData<'a> { + pubkey: &'a Pubkey, + prunes: &'a [Pubkey], + destination: &'a Pubkey, wallclock: u64, } let data = SignData { - pubkey: self.pubkey, - prunes: self.prunes.clone(), - destination: self.destination, + pubkey: &self.pubkey, + prunes: &self.prunes, + destination: &self.destination, wallclock: self.wallclock, }; Cow::Owned(serialize(&data).expect("serialize PruneData")) From 8576832f2a57a9d0361c3e1d3fde46e6143fa6c0 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sun, 10 Jul 2022 09:33:13 -0500 Subject: [PATCH 099/100] rename Bank::new_with_config_for_tests (#26533) --- runtime/src/bank.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 293fb5a6bd8118..692d1e11653956 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1474,7 +1474,7 @@ impl Bank { } #[cfg(test)] - pub(crate) fn new_with_config( + pub(crate) fn new_with_config_for_tests( genesis_config: &GenesisConfig, account_indexes: AccountSecondaryIndexes, accounts_db_caching_enabled: bool, @@ -12330,7 +12330,7 @@ pub(crate) mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(500); let mut account_indexes = 
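The epoch-info change above precomputes the percentage once and carries it as a field instead of deriving it in `Display`; the computation itself is just slot progress scaled to 100:

```rust
fn epoch_completed_percent(slot_index: u64, slots_in_epoch: u64) -> f64 {
    slot_index as f64 / slots_in_epoch as f64 * 100_f64
}

fn main() {
    // Halfway through a 432-slot epoch (toy numbers, not mainnet's 432_000).
    assert!((epoch_completed_percent(216, 432) - 50.0).abs() < f64::EPSILON);
}
```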
AccountSecondaryIndexes::default(); account_indexes.indexes.insert(AccountIndex::ProgramId); - let bank = Arc::new(Bank::new_with_config( + let bank = Arc::new(Bank::new_with_config_for_tests( &genesis_config, account_indexes, false, @@ -12358,7 +12358,7 @@ pub(crate) mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(500); let mut account_indexes = AccountSecondaryIndexes::default(); account_indexes.indexes.insert(AccountIndex::ProgramId); - let bank = Arc::new(Bank::new_with_config( + let bank = Arc::new(Bank::new_with_config_for_tests( &genesis_config, account_indexes, false, @@ -14417,7 +14417,7 @@ pub(crate) mod tests { // Set root for bank 0, with caching disabled so we can get the size // of the storage for this slot - let mut bank0 = Arc::new(Bank::new_with_config( + let mut bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), false, @@ -14455,7 +14455,7 @@ pub(crate) mod tests { info!("pubkey1: {}", pubkey1); // Set root for bank 0, with caching enabled - let mut bank0 = Arc::new(Bank::new_with_config( + let mut bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), true, @@ -14531,7 +14531,7 @@ pub(crate) mod tests { let pubkey2 = solana_sdk::pubkey::new_rand(); // Set root for bank 0, with caching enabled - let mut bank0 = Arc::new(Bank::new_with_config( + let mut bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), true, @@ -15840,7 +15840,7 @@ pub(crate) mod tests { ) .genesis_config; genesis_config.rent = Rent::free(); - let bank0 = Arc::new(Bank::new_with_config( + let bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), accounts_db_caching_enabled, From 105c7e19e1168bf8218f97c7fdb9ee025a0be281 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sun, 10 Jul 2022 17:39:19 +0000 Subject: [PATCH 100/100] moves check for feature activation out of accounts loop (#26535) --- runtime/src/bank.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 692d1e11653956..62e3252a230afa 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5344,6 +5344,8 @@ impl Bank { let mut time_hashing_skipped_rewrites_us = 0; let mut time_storing_accounts_us = 0; let can_skip_rewrites = self.rc.accounts.accounts_db.skip_rewrites || just_rewrites; + let preserve_rent_epoch_for_rent_exempt_accounts = + self.preserve_rent_epoch_for_rent_exempt_accounts(); for (pubkey, account, loaded_slot) in accounts.iter_mut() { let old_rent_epoch = account.rent_epoch(); let (rent_collected_info, measure) = @@ -5351,7 +5353,7 @@ impl Bank { pubkey, account, self.rc.accounts.accounts_db.filler_account_suffix.as_ref(), - self.preserve_rent_epoch_for_rent_exempt_accounts(), + preserve_rent_epoch_for_rent_exempt_accounts, )); time_collecting_rent_us += measure.as_us();
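The final change above is plain loop-invariant hoisting: the feature check involves a set probe on every call, so evaluating it once per batch instead of once per account removes redundant work without changing behavior. A generic sketch of the pattern, with stubbed types (the real code probes a `FeatureSet` keyed by `Pubkey` inside `Bank`):

```rust
use std::collections::HashSet;

// Stand-in feature id for illustration only.
const PRESERVE_RENT_EPOCH_FEATURE: u32 = 1;

fn collect_rent_batch(active_features: &HashSet<u32>, accounts: &mut [u64]) {
    // Hoisted out of the loop: the answer cannot change mid-batch.
    let preserve_rent_epoch = active_features.contains(&PRESERVE_RENT_EPOCH_FEATURE);
    for lamports in accounts.iter_mut() {
        // Per-account work reuses the precomputed flag on every iteration.
        if !preserve_rent_epoch {
            *lamports = lamports.saturating_sub(1); // toy "rent" charge
        }
    }
}

fn main() {
    let active: HashSet<u32> = [PRESERVE_RENT_EPOCH_FEATURE].into_iter().collect();
    let mut accounts = vec![10, 20];
    collect_rent_batch(&active, &mut accounts);
    assert_eq!(accounts, vec![10, 20]); // feature active: nothing charged
}
```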