From 42572ebb4449fe73d5dbca3394d36f5deb513593 Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Tue, 16 Aug 2022 09:34:10 -0700 Subject: [PATCH 01/51] remove abort() from test-validator (#27124) --- test-validator/src/lib.rs | 64 +++++++++------------- validator/src/bin/solana-test-validator.rs | 22 ++++++-- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 12f4ff6775b0c6..f2fb1f37a3ab12 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -284,7 +284,7 @@ impl TestValidatorGenesis { addresses: T, rpc_client: &RpcClient, skip_missing: bool, - ) -> &mut Self + ) -> Result<&mut Self, String> where T: IntoIterator, { @@ -296,20 +296,21 @@ impl TestValidatorGenesis { } else if skip_missing { warn!("Could not find {}, skipping.", address); } else { - error!("Failed to fetch {}: {}", address, res.unwrap_err()); - Self::abort(); + return Err(format!("Failed to fetch {}: {}", address, res.unwrap_err())); } } - self + Ok(self) } - pub fn add_accounts_from_json_files(&mut self, accounts: &[AccountInfo]) -> &mut Self { + pub fn add_accounts_from_json_files( + &mut self, + accounts: &[AccountInfo], + ) -> Result<&mut Self, String> { for account in accounts { - let account_path = - solana_program_test::find_file(account.filename).unwrap_or_else(|| { - error!("Unable to locate {}", account.filename); - Self::abort(); - }); + let account_path = match solana_program_test::find_file(account.filename) { + Some(path) => path, + None => return Err(format!("Unable to locate {}", account.filename)), + }; let mut file = File::open(&account_path).unwrap(); let mut account_info_raw = String::new(); file.read_to_string(&mut account_info_raw).unwrap(); @@ -317,12 +318,11 @@ impl TestValidatorGenesis { let result: serde_json::Result = serde_json::from_str(&account_info_raw); let account_info = match result { Err(err) => { - error!( + return Err(format!( "Unable to deserialize {}: {}", account_path.to_str().unwrap(), err - ); - Self::abort(); + )); } Ok(deserialized) => deserialized, }; @@ -338,25 +338,24 @@ impl TestValidatorGenesis { self.add_account(address, account); } - self + Ok(self) } - pub fn add_accounts_from_directories(&mut self, dirs: T) -> &mut Self + pub fn add_accounts_from_directories(&mut self, dirs: T) -> Result<&mut Self, String> where T: IntoIterator, P: AsRef + Display, { let mut json_files: HashSet = HashSet::new(); for dir in dirs { - let matched_files = fs::read_dir(&dir) - .unwrap_or_else(|err| { - error!("Cannot read directory {}: {}", dir, err); - Self::abort(); - }) - .flatten() - .map(|entry| entry.path()) - .filter(|path| path.is_file() && path.extension() == Some(OsStr::new("json"))) - .map(|path| String::from(path.to_string_lossy())); + let matched_files = match fs::read_dir(&dir) { + Ok(dir) => dir, + Err(e) => return Err(format!("Cannot read directory {}: {}", &dir, e)), + } + .flatten() + .map(|entry| entry.path()) + .filter(|path| path.is_file() && path.extension() == Some(OsStr::new("json"))) + .map(|path| String::from(path.to_string_lossy())); json_files.extend(matched_files); } @@ -371,9 +370,9 @@ impl TestValidatorGenesis { }) .collect(); - self.add_accounts_from_json_files(&accounts); + self.add_accounts_from_json_files(&accounts)?; - self + Ok(self) } /// Add an account to the test environment with the account data in the provided `filename` @@ -512,19 +511,6 @@ impl TestValidatorGenesis { Err(err) => panic!("Test validator failed to start: {}", err), } } - - fn abort() -> ! { - #[cfg(not(test))] - { - // standard error is usually redirected to a log file, cry for help on standard output as - // well - println!("Validator process aborted. The validator log may contain further details"); - std::process::exit(1); - } - - #[cfg(test)] - panic!("process::exit(1) is intercepted for friendly test failure..."); - } } pub struct TestValidator { diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 8884ad36b2e245..46092b651caa18 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -767,27 +767,41 @@ fn main() { .rpc_port(rpc_port) .add_programs_with_path(&programs_to_load) .add_accounts_from_json_files(&accounts_to_load) + .unwrap_or_else(|e| { + println!("Error: add_accounts_from_json_files failed: {}", e); + exit(1); + }) .add_accounts_from_directories(&accounts_from_dirs) + .unwrap_or_else(|e| { + println!("Error: add_accounts_from_directories failed: {}", e); + exit(1); + }) .deactivate_features(&features_to_deactivate); if !accounts_to_clone.is_empty() { - genesis.clone_accounts( + if let Err(e) = genesis.clone_accounts( accounts_to_clone, cluster_rpc_client .as_ref() .expect("bug: --url argument missing?"), false, - ); + ) { + println!("Error: clone_accounts failed: {}", e); + exit(1); + } } if !accounts_to_maybe_clone.is_empty() { - genesis.clone_accounts( + if let Err(e) = genesis.clone_accounts( accounts_to_maybe_clone, cluster_rpc_client .as_ref() .expect("bug: --url argument missing?"), true, - ); + ) { + println!("Error: clone_accounts failed: {}", e); + exit(1); + } } if let Some(warp_slot) = warp_slot { From 47ca68e51f49be32be490c8d9e95081ba3020469 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 11:04:10 -0600 Subject: [PATCH 02/51] chore: bump bytes from 1.1.0 to 1.2.1 (#27172) * chore: bump bytes from 1.1.0 to 1.2.1 Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.1.0 to 1.2.1. - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.1.0...v1.2.1) --- updated-dependencies: - dependency-name: bytes dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- client/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 4 ++-- storage-bigtable/Cargo.toml | 2 +- storage-bigtable/build-proto/Cargo.lock | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5efcd56579db48..ff736feafb8a1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -625,9 +625,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "bytesize" diff --git a/client/Cargo.toml b/client/Cargo.toml index 85a0b0fd218599..a17593a085ebee 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -15,7 +15,7 @@ async-trait = "0.1.57" base64 = "0.13.0" bincode = "1.3.3" bs58 = "0.4.0" -bytes = "1.1.0" +bytes = "1.2.1" clap = "2.33.0" crossbeam-channel = "0.5" enum_dispatch = "0.3.8" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 0226a144638870..273a42ed6daa07 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -584,9 +584,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "bzip2" diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index fc579eb884dd00..6bcedb5f200236 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] backoff = { version = "0.4.0", features = ["tokio"] } bincode = "1.3.3" -bytes = "1.0" +bytes = "1.2" bzip2 = "0.4.3" enum-iterator = "0.8.1" flate2 = "1.0.24" diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index e595a4568b32f6..0b599060ce6997 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -31,9 +31,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bytes" -version = "1.0.1" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" From dbd2423e9f418512069c2a7b5a924f01dbad71f1 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 16 Aug 2022 12:06:52 -0500 Subject: [PATCH 03/51] Share Ancestors API get with contains_key (#27161) consolidate similar fns --- runtime/src/ancestors.rs | 8 ++------ runtime/src/status_cache.rs | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/runtime/src/ancestors.rs b/runtime/src/ancestors.rs index 42730efd98615c..9712f1fdbbda0a 100644 --- a/runtime/src/ancestors.rs +++ b/runtime/src/ancestors.rs @@ -65,10 +65,6 @@ impl Ancestors { self.ancestors.get_all() } - pub fn get(&self, slot: &Slot) -> bool { - self.ancestors.contains(slot) - } - pub fn remove(&mut self, slot: &Slot) { self.ancestors.remove(slot); } @@ -182,10 +178,10 @@ pub mod tests { let key = item.0; min = std::cmp::min(min, *key); max = std::cmp::max(max, *key); - assert!(ancestors.get(key)); + assert!(ancestors.contains_key(key)); } for slot in min - 1..max + 2 { - assert_eq!(ancestors.get(&slot), hashset.contains(&slot)); + assert_eq!(ancestors.contains_key(&slot), hashset.contains(&slot)); } } diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index 130810a5f87d78..c5d8379ce06443 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -137,7 +137,7 @@ impl StatusCache { if let Some(stored_forks) = keymap.get(key_slice) { let res = stored_forks .iter() - .find(|(f, _)| ancestors.get(f) || self.roots.get(f).is_some()) + .find(|(f, _)| ancestors.contains_key(f) || self.roots.get(f).is_some()) .cloned(); if res.is_some() { return res; From 3fc4ba7c9a33bd033523caa6bc87b3067fd28f33 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Tue, 16 Aug 2022 13:57:24 -0400 Subject: [PATCH 04/51] Rename to `MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA` (#27175) --- runtime/src/block_cost_limits.rs | 5 +++-- runtime/src/cost_tracker.rs | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index a1f1db85dc69cb..31964f88cdaee3 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -63,5 +63,6 @@ pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_U /// sets at ~75% of MAX_BLOCK_UNITS to leave room for non-vote transactions pub const MAX_VOTE_UNITS: u64 = (MAX_BLOCK_UNITS as f64 * 0.75_f64) as u64; -/// max length of account data in a block (bytes) -pub const MAX_ACCOUNT_DATA_BLOCK_LEN: u64 = 100_000_000; +/// The maximum allowed size, in bytes, that accounts data can grow, per block. +/// This can also be thought of as the maximum size of new allocations per block. +pub const MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA: u64 = 100_000_000; diff --git a/runtime/src/cost_tracker.rs b/runtime/src/cost_tracker.rs index a1d779a8a581b3..6e848b63d24c57 100644 --- a/runtime/src/cost_tracker.rs +++ b/runtime/src/cost_tracker.rs @@ -218,7 +218,7 @@ impl CostTracker { } } - if account_data_size > MAX_ACCOUNT_DATA_BLOCK_LEN { + if account_data_size > MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA { return Err(CostTrackerError::WouldExceedAccountDataBlockLimit); } @@ -618,8 +618,8 @@ mod tests { let second_account = Keypair::new(); let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); - tx_cost1.account_data_size = MAX_ACCOUNT_DATA_BLOCK_LEN; - tx_cost2.account_data_size = MAX_ACCOUNT_DATA_BLOCK_LEN + 1; + tx_cost1.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; + tx_cost2.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; let cost1 = tx_cost1.sum(); let cost2 = tx_cost2.sum(); From 67d1628602d1bbd268fbd70c646969d0edaacf79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 13:01:37 -0600 Subject: [PATCH 05/51] chore: bump libc from 0.2.129 to 0.2.131 (#27162) * chore: bump libc from 0.2.129 to 0.2.131 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.129 to 0.2.131. - [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.129...0.2.131) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- ledger/Cargo.toml | 2 +- perf/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 4 ++-- rpc/Cargo.toml | 2 +- storage-bigtable/build-proto/Cargo.lock | 4 ++-- streamer/Cargo.toml | 2 +- sys-tuner/Cargo.toml | 2 +- validator/Cargo.toml | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ff736feafb8a1f..b5cbea2552af2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2423,9 +2423,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.129" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64de3cc433455c14174d42e554d4027ee631c4d046d43e3ecc6efc4636cdc7a7" +checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "libloading" diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index dae528e9999cef..f5fc208efc815e 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -21,7 +21,7 @@ fs_extra = "1.2.0" futures = "0.3.21" itertools = "0.10.3" lazy_static = "1.4.0" -libc = "0.2.129" +libc = "0.2.131" log = { version = "0.4.17" } lru = "0.7.7" num_cpus = "1.13.1" diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 8fd427b6365243..202bf36c687a43 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -29,7 +29,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } [target."cfg(target_os = \"linux\")".dependencies] caps = "0.5.3" -libc = "0.2.129" +libc = "0.2.131" nix = "0.24.2" [lib] diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 273a42ed6daa07..e3605f8eca2b35 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2169,9 +2169,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.129" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64de3cc433455c14174d42e554d4027ee631c4d046d43e3ecc6efc4636cdc7a7" +checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "libloading" diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 00a059d2dccce1..531218daed646f 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -21,7 +21,7 @@ jsonrpc-core-client = { version = "18.0.0" } jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" jsonrpc-pubsub = "18.0.0" -libc = "0.2.129" +libc = "0.2.131" log = "0.4.17" rayon = "1.5.3" regex = "1.5.6" diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index 0b599060ce6997..110356f435c2a5 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -116,9 +116,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.129" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64de3cc433455c14174d42e554d4027ee631c4d046d43e3ecc6efc4636cdc7a7" +checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "log" diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 8bc2909620316a..e72816d03099b8 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -15,7 +15,7 @@ futures-util = "0.3.21" histogram = "0.6.9" indexmap = "1.9.1" itertools = "0.10.3" -libc = "0.2.129" +libc = "0.2.131" log = "0.4.17" nix = "0.24.2" pem = "1.0.2" diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index 4b60a190d295ad..a98a719300d4b1 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -12,7 +12,7 @@ publish = true [dependencies] clap = "2.33.1" -libc = "0.2.129" +libc = "0.2.131" log = "0.4.17" solana-logger = { path = "../logger", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 106bc27d6f26df..3d1b79478d7394 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -60,7 +60,7 @@ symlink = "0.1.0" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } [target."cfg(unix)".dependencies] -libc = "0.2.129" +libc = "0.2.131" signal-hook = "0.3.14" [package.metadata.docs.rs] From 3b87aa922720b151632bb38ee08b2622713822ff Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 16 Aug 2022 19:40:06 +0000 Subject: [PATCH 06/51] reverts wide fanout in broadcast when the root node is down (#26359) A change included in https://github.com/solana-labs/solana/pull/20480 was that when the root node in turbine broadcast tree is down, the leader will broadcast the shred to all nodes in the first layer. The intention was to mitigate the impact of dead nodes on shreds propagation, because if the root node is down, then the entire cluster will miss out the shred. On the other hand, if x% of stake is down, this will cause 200*x% + 1 packets/shreds ratio at the broadcast stage which might contribute to line-rate saturation and packet drop. To avoid this bandwidth saturation issue, this commit reverts that logic and always broadcasts shreds from the leader only to the root node. As before we rely on erasure codes to recover shreds lost due to staked nodes being offline. --- core/src/broadcast_stage.rs | 20 +++---- .../broadcast_duplicates_run.rs | 21 +------ core/src/cluster_nodes.rs | 59 ++----------------- 3 files changed, 16 insertions(+), 84 deletions(-) diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 18ab25a0b914c9..ba4c33fa38cc46 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -14,7 +14,10 @@ use { }, crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender}, itertools::Itertools, - solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT}, + solana_gossip::{ + cluster_info::{ClusterInfo, ClusterInfoError}, + contact_info::ContactInfo, + }, solana_ledger::{blockstore::Blockstore, shred::Shred}, solana_measure::measure::Measure, solana_metrics::{inc_new_counter_error, inc_new_counter_info}, @@ -32,7 +35,6 @@ use { }, std::{ collections::{HashMap, HashSet}, - iter::repeat, net::UdpSocket, sync::{ atomic::{AtomicBool, Ordering}, @@ -390,8 +392,8 @@ fn update_peer_stats( } } -/// broadcast messages from the leader to layer 1 nodes -/// # Remarks +/// Broadcasts shreds from the leader (i.e. this node) to the root of the +/// turbine retransmit tree for each shred. pub fn broadcast_shreds( s: &UdpSocket, shreds: &[Shred], @@ -416,14 +418,10 @@ pub fn broadcast_shreds( let cluster_nodes = cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info); update_peer_stats(&cluster_nodes, last_datapoint_submit); - let root_bank = root_bank.clone(); shreds.flat_map(move |shred| { - repeat(shred.payload()).zip(cluster_nodes.get_broadcast_addrs( - &shred.id(), - &root_bank, - DATA_PLANE_FANOUT, - socket_addr_space, - )) + let node = cluster_nodes.get_broadcast_peer(&shred.id())?; + ContactInfo::is_valid_address(&node.tvu, socket_addr_space) + .then(|| (shred.payload(), node.tvu)) }) }) .collect(); diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs index 741be826c44982..9e60d6c8196cfe 100644 --- a/core/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -3,7 +3,7 @@ use { crate::cluster_nodes::ClusterNodesCache, itertools::Itertools, solana_entry::entry::Entry, - solana_gossip::cluster_info::DATA_PLANE_FANOUT, + solana_gossip::contact_info::ContactInfo, solana_ledger::shred::{ProcessShredsStats, Shredder}, solana_sdk::{ hash::Hash, @@ -270,12 +270,6 @@ impl BroadcastRun for BroadcastDuplicatesRun { (bank_forks.root_bank(), bank_forks.working_bank()) }; let self_pubkey = cluster_info.id(); - let nodes: Vec<_> = cluster_info - .all_peers() - .into_iter() - .map(|(node, _)| node) - .collect(); - // Create cluster partition. let cluster_partition: HashSet = { let mut cumilative_stake = 0; @@ -302,17 +296,8 @@ impl BroadcastRun for BroadcastDuplicatesRun { let packets: Vec<_> = shreds .iter() .filter_map(|shred| { - let addr = cluster_nodes - .get_broadcast_addrs( - &shred.id(), - &root_bank, - DATA_PLANE_FANOUT, - socket_addr_space, - ) - .first() - .copied()?; - let node = nodes.iter().find(|node| node.tvu == addr)?; - if !socket_addr_space.check(&node.tvu) { + let node = cluster_nodes.get_broadcast_peer(&shred.id())?; + if ContactInfo::is_valid_address(&node.tvu, socket_addr_space) { return None; } if self diff --git a/core/src/cluster_nodes.rs b/core/src/cluster_nodes.rs index f83175a9946f8d..22fcc882c07186 100644 --- a/core/src/cluster_nodes.rs +++ b/core/src/cluster_nodes.rs @@ -26,7 +26,7 @@ use { any::TypeId, cmp::Reverse, collections::HashMap, - iter::{once, repeat_with}, + iter::repeat_with, marker::PhantomData, net::SocketAddr, ops::Deref, @@ -114,62 +114,11 @@ impl ClusterNodes { new_cluster_nodes(cluster_info, stakes) } - pub(crate) fn get_broadcast_addrs( - &self, - shred: &ShredId, - root_bank: &Bank, - fanout: usize, - socket_addr_space: &SocketAddrSpace, - ) -> Vec { - const MAX_CONTACT_INFO_AGE: Duration = Duration::from_secs(2 * 60); + pub(crate) fn get_broadcast_peer(&self, shred: &ShredId) -> Option<&ContactInfo> { let shred_seed = shred.seed(&self.pubkey); let mut rng = ChaChaRng::from_seed(shred_seed); - let index = match self.weighted_shuffle.first(&mut rng) { - None => return Vec::default(), - Some(index) => index, - }; - if let Some(node) = self.nodes[index].contact_info() { - let now = timestamp(); - let age = Duration::from_millis(now.saturating_sub(node.wallclock)); - if age < MAX_CONTACT_INFO_AGE - && ContactInfo::is_valid_address(&node.tvu, socket_addr_space) - { - return vec![node.tvu]; - } - } - let mut rng = ChaChaRng::from_seed(shred_seed); - let nodes: Vec<&Node> = self - .weighted_shuffle - .clone() - .shuffle(&mut rng) - .map(|index| &self.nodes[index]) - .collect(); - if nodes.is_empty() { - return Vec::default(); - } - if drop_redundant_turbine_path(shred.slot(), root_bank) { - let peers = once(nodes[0]).chain(get_retransmit_peers(fanout, 0, &nodes)); - let addrs = peers.filter_map(Node::contact_info).map(|peer| peer.tvu); - return addrs - .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) - .collect(); - } - let (neighbors, children) = compute_retransmit_peers(fanout, 0, &nodes); - neighbors[..1] - .iter() - .filter_map(|node| Some(node.contact_info()?.tvu)) - .chain( - neighbors[1..] - .iter() - .filter_map(|node| Some(node.contact_info()?.tvu_forwards)), - ) - .chain( - children - .iter() - .filter_map(|node| Some(node.contact_info()?.tvu)), - ) - .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) - .collect() + let index = self.weighted_shuffle.first(&mut rng)?; + self.nodes[index].contact_info() } } From 9fb7ec77c8aa444544c4dd20497d92c9f378ea4f Mon Sep 17 00:00:00 2001 From: AJ Taylor Date: Tue, 16 Aug 2022 14:32:38 -0600 Subject: [PATCH 07/51] add getTokenLargestAccounts rpc method to rust client (#26840) * add get token largest accounts rpc call to client * split to include with commitment --- client/src/nonblocking/rpc_client.rs | 25 +++++++++++++++++++++++++ client/src/rpc_client.rs | 18 ++++++++++++++++++ client/src/rpc_request.rs | 6 ++++++ 3 files changed, 49 insertions(+) diff --git a/client/src/nonblocking/rpc_client.rs b/client/src/nonblocking/rpc_client.rs index c6f0098d71eb64..ead129fa26ac60 100644 --- a/client/src/nonblocking/rpc_client.rs +++ b/client/src/nonblocking/rpc_client.rs @@ -5016,6 +5016,31 @@ impl RpcClient { .await } + pub async fn get_token_largest_accounts( + &self, + mint: &Pubkey, + ) -> ClientResult> { + Ok(self + .get_token_largest_accounts_with_commitment(mint, self.commitment()) + .await? + .value) + } + + pub async fn get_token_largest_accounts_with_commitment( + &self, + mint: &Pubkey, + commitment_config: CommitmentConfig, + ) -> RpcResult> { + self.send( + RpcRequest::GetTokenLargestAccounts, + json!([ + mint.to_string(), + self.maybe_map_commitment(commitment_config).await? + ]), + ) + .await + } + pub async fn get_token_supply(&self, mint: &Pubkey) -> ClientResult { Ok(self .get_token_supply_with_commitment(mint, self.commitment()) diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index 9946dfa5cfdf66..b89b906e57ade4 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -3901,6 +3901,24 @@ impl RpcClient { ) } + pub fn get_token_largest_accounts( + &self, + mint: &Pubkey, + ) -> ClientResult> { + self.invoke((self.rpc_client.as_ref()).get_token_largest_accounts(mint)) + } + + pub fn get_token_largest_accounts_with_commitment( + &self, + mint: &Pubkey, + commitment_config: CommitmentConfig, + ) -> RpcResult> { + self.invoke( + (self.rpc_client.as_ref()) + .get_token_largest_accounts_with_commitment(mint, commitment_config), + ) + } + pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult { self.invoke((self.rpc_client.as_ref()).get_token_supply(mint)) } diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs index d3f0ceb1c0ad54..32f8c45183762d 100644 --- a/client/src/rpc_request.rs +++ b/client/src/rpc_request.rs @@ -100,6 +100,7 @@ pub enum RpcRequest { GetTokenAccountBalance, GetTokenAccountsByDelegate, GetTokenAccountsByOwner, + GetTokenLargestAccounts, GetTokenSupply, GetTransaction, GetTransactionCount, @@ -175,6 +176,7 @@ impl fmt::Display for RpcRequest { RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate", RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner", RpcRequest::GetTokenSupply => "getTokenSupply", + RpcRequest::GetTokenLargestAccounts => "getTokenLargestAccounts", RpcRequest::GetTransaction => "getTransaction", RpcRequest::GetTransactionCount => "getTransactionCount", RpcRequest::GetVersion => "getVersion", @@ -322,6 +324,10 @@ mod tests { let test_request = RpcRequest::SendTransaction; let request = test_request.build_request_json(1, Value::Null); assert_eq!(request["method"], "sendTransaction"); + + let test_request = RpcRequest::GetTokenLargestAccounts; + let request = test_request.build_request_json(1, Value::Null); + assert_eq!(request["method"], "getTokenLargestAccounts"); } #[test] From 74f487c8281bb9e6322875359c95af97fec8d78c Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Tue, 16 Aug 2022 14:47:12 -0700 Subject: [PATCH 08/51] Bump spl-token-2022 (#27181) * Bump token-2022 to 0.4.3 * Allow cargo to bump stuff to v1.11.5 --- Cargo.lock | 96 +++++++++++++++++++---------------- account-decoder/Cargo.toml | 2 +- client/Cargo.toml | 2 +- ledger/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 96 +++++++++++++++++++---------------- rpc/Cargo.toml | 2 +- transaction-status/Cargo.toml | 2 +- 7 files changed, 111 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5cbea2552af2c..6be2310543e124 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3122,15 +3122,6 @@ dependencies = [ "crypto-mac", ] -[[package]] -name = "pbkdf2" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" -dependencies = [ - "digest 0.10.3", -] - [[package]] name = "pbkdf2" version = "0.11.0" @@ -5203,23 +5194,35 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a5d3280421bb53fc12bdba1eaa505153fb4f99a06b5609dae22192652ead3b" +checksum = "28e4e35bc58c465f161bde764ebce41fdfcb503583cf3a77e0211274cc12b22d" dependencies = [ + "ahash", + "blake3", + "block-buffer 0.9.0", "bs58", "bv", + "byteorder", + "cc", + "either", "generic-array 0.14.5", + "getrandom 0.1.16", + "hashbrown 0.12.3", "im", "lazy_static", "log", "memmap2", + "once_cell", + "rand_core 0.6.3", "rustc_version 0.4.0", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.10.33", + "solana-frozen-abi-macro 1.11.5", + "subtle", "thiserror", ] @@ -5258,9 +5261,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "635c60ac96b1347af272c625465068b908aff919d19f29b5795a44310310494d" +checksum = "708f837d748e574b1e53b250ab1f4a69ba330bbc10d041d02381165f0f36291a" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -5576,9 +5579,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12cb6e6f1f9c9876d356c928b8c2ac532f6715e7cd2a1b4343d747bee3eca73" +checksum = "e7ea6fc68d63d33d862d919d4c8ad7f613ec243ccf6762d595c660020b289b57" dependencies = [ "env_logger", "lazy_static", @@ -5747,9 +5750,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeecf504cee2821b006871f70e7a1f54db15f914cedf259eaf5976fe606470f0" +checksum = "bdd314d85b171bb20ccdcaf07346a9d52a012b10d84f4706f0628813d002fef8" dependencies = [ "base64 0.13.0", "bincode", @@ -5760,31 +5763,38 @@ dependencies = [ "bs58", "bv", "bytemuck", + "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.1.16", + "getrandom 0.2.3", "itertools", "js-sys", "lazy_static", + "libc", "libsecp256k1", "log", + "memoffset", "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.7.3", + "rand_chacha 0.2.2", "rustc_version 0.4.0", "rustversion", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", + "tiny-bip39", "wasm-bindgen", + "zeroize", ] [[package]] @@ -6055,9 +6065,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636f6c615aca6f75e22b6baceaf0ffed9d74367f9320b07ed57cd9b5ce2e4ff9" +checksum = "ad7d954df63b267857e26670e3aacfd8e2943ca703653b0418e5afc85046c2f3" dependencies = [ "assert_matches", "base64 0.13.0", @@ -6082,7 +6092,7 @@ dependencies = [ "memmap2", "num-derive", "num-traits", - "pbkdf2 0.10.1", + "pbkdf2 0.11.0", "qstring", "rand 0.7.3", "rand_chacha 0.2.2", @@ -6094,11 +6104,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-logger 1.10.33", - "solana-program 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-logger 1.11.5", + "solana-program 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", "uriparse", "wasm-bindgen", @@ -6160,9 +6170,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b8bcac4394644f21dc013e932a7df9f536fcecef3e5df43fe362b4ec532ce30" +checksum = "d0d9e81bc46edcc517b2df504856d57a5101c7586ec63f3143ae11fbe2eba613" dependencies = [ "bs58", "proc-macro2 1.0.41", @@ -6575,9 +6585,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410ee53a26ac91098c289c983863535d4fbb6604b229ae1159503f48fa4fc90f" +checksum = "62415c05a9ebfffaf8befaa61b24492ebf88269cf84cbeba714bac4125ec4ea3" dependencies = [ "aes-gcm-siv", "arrayref", @@ -6596,8 +6606,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.10.33", - "solana-sdk 1.10.33", + "solana-program 1.11.5", + "solana-sdk 1.11.5", "subtle", "thiserror", "zeroize", @@ -6681,7 +6691,7 @@ dependencies = [ "borsh", "num-derive", "num-traits", - "solana-program 1.10.33", + "solana-program 1.11.5", "spl-token", "spl-token-2022", "thiserror", @@ -6693,7 +6703,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.10.33", + "solana-program 1.11.5", ] [[package]] @@ -6707,23 +6717,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", + "solana-program 1.11.5", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" +checksum = "e4c0ebca4740cc4c892aa31e07d0b4dc1a24cac4748376d4b34f8eb0fee9ff46" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", - "solana-zk-token-sdk 1.10.33", + "solana-program 1.11.5", + "solana-zk-token-sdk 1.11.5", "spl-memo", "spl-token", "thiserror", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index d246ba28f186a0..96b97a5652fca5 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -24,7 +24,7 @@ solana-config-program = { path = "../programs/config", version = "=1.12.0" } solana-sdk = { path = "../sdk", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" zstd = "0.11.2" diff --git a/client/Cargo.toml b/client/Cargo.toml index a17593a085ebee..bc2810459e0b20 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -49,7 +49,7 @@ solana-streamer = { path = "../streamer", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "1", features = ["full"] } tokio-stream = "0.1.9" diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index f5fc208efc815e..071c79d1349a30 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -52,7 +52,7 @@ solana-storage-proto = { path = "../storage-proto", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } static_assertions = "1.1.0" tempfile = "3.3.0" thiserror = "1.0" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index e3605f8eca2b35..284e03a6b30e49 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2906,15 +2906,6 @@ dependencies = [ "crypto-mac", ] -[[package]] -name = "pbkdf2" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" -dependencies = [ - "digest 0.10.3", -] - [[package]] name = "pbkdf2" version = "0.11.0" @@ -4840,23 +4831,35 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a5d3280421bb53fc12bdba1eaa505153fb4f99a06b5609dae22192652ead3b" +checksum = "28e4e35bc58c465f161bde764ebce41fdfcb503583cf3a77e0211274cc12b22d" dependencies = [ + "ahash", + "blake3", + "block-buffer 0.9.0", "bs58", "bv", + "byteorder 1.4.3", + "cc", + "either", "generic-array 0.14.5", + "getrandom 0.1.14", + "hashbrown 0.12.3", "im", "lazy_static", "log", "memmap2", + "once_cell", + "rand_core 0.6.3", "rustc_version", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.10.33", + "solana-frozen-abi-macro 1.11.5", + "subtle", "thiserror", ] @@ -4894,9 +4897,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "635c60ac96b1347af272c625465068b908aff919d19f29b5795a44310310494d" +checksum = "708f837d748e574b1e53b250ab1f4a69ba330bbc10d041d02381165f0f36291a" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -5055,9 +5058,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12cb6e6f1f9c9876d356c928b8c2ac532f6715e7cd2a1b4343d747bee3eca73" +checksum = "e7ea6fc68d63d33d862d919d4c8ad7f613ec243ccf6762d595c660020b289b57" dependencies = [ "env_logger", "lazy_static", @@ -5166,9 +5169,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeecf504cee2821b006871f70e7a1f54db15f914cedf259eaf5976fe606470f0" +checksum = "bdd314d85b171bb20ccdcaf07346a9d52a012b10d84f4706f0628813d002fef8" dependencies = [ "base64 0.13.0", "bincode", @@ -5179,31 +5182,38 @@ dependencies = [ "bs58", "bv", "bytemuck", + "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.1.14", + "getrandom 0.2.4", "itertools", "js-sys", "lazy_static", + "libc", "libsecp256k1 0.6.0", "log", + "memoffset", "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.7.3", + "rand_chacha 0.2.2", "rustc_version", "rustversion", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", + "tiny-bip39", "wasm-bindgen", + "zeroize", ] [[package]] @@ -5436,9 +5446,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636f6c615aca6f75e22b6baceaf0ffed9d74367f9320b07ed57cd9b5ce2e4ff9" +checksum = "ad7d954df63b267857e26670e3aacfd8e2943ca703653b0418e5afc85046c2f3" dependencies = [ "assert_matches", "base64 0.13.0", @@ -5463,7 +5473,7 @@ dependencies = [ "memmap2", "num-derive", "num-traits", - "pbkdf2 0.10.1", + "pbkdf2 0.11.0", "qstring", "rand 0.7.3", "rand_chacha 0.2.2", @@ -5475,11 +5485,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-logger 1.10.33", - "solana-program 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-logger 1.11.5", + "solana-program 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", "uriparse", "wasm-bindgen", @@ -5536,9 +5546,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b8bcac4394644f21dc013e932a7df9f536fcecef3e5df43fe362b4ec532ce30" +checksum = "d0d9e81bc46edcc517b2df504856d57a5101c7586ec63f3143ae11fbe2eba613" dependencies = [ "bs58", "proc-macro2 1.0.41", @@ -5834,9 +5844,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410ee53a26ac91098c289c983863535d4fbb6604b229ae1159503f48fa4fc90f" +checksum = "62415c05a9ebfffaf8befaa61b24492ebf88269cf84cbeba714bac4125ec4ea3" dependencies = [ "aes-gcm-siv", "arrayref", @@ -5855,8 +5865,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.10.33", - "solana-sdk 1.10.33", + "solana-program 1.11.5", + "solana-sdk 1.11.5", "subtle", "thiserror", "zeroize", @@ -5940,7 +5950,7 @@ dependencies = [ "borsh", "num-derive", "num-traits", - "solana-program 1.10.33", + "solana-program 1.11.5", "spl-token", "spl-token-2022", "thiserror", @@ -5952,7 +5962,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.10.33", + "solana-program 1.11.5", ] [[package]] @@ -5966,23 +5976,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", + "solana-program 1.11.5", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" +checksum = "e4c0ebca4740cc4c892aa31e07d0b4dc1a24cac4748376d4b34f8eb0fee9ff46" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", - "solana-zk-token-sdk 1.10.33", + "solana-program 1.11.5", + "solana-zk-token-sdk 1.11.5", "spl-memo", "spl-token", "thiserror", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 531218daed646f..3c55e8b25a7e6a 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -50,7 +50,7 @@ solana-transaction-status = { path = "../transaction-status", version = "=1.12.0 solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } stream-cancel = "0.8.1" thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 3e73b85153ffe0..9d59696e00a89b 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -28,7 +28,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-associated-token-account = { version = "=1.1.1", features = ["no-entrypoint"] } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" [package.metadata.docs.rs] From 4b8ab4e65de153237521175430f1d42f2a2f9301 Mon Sep 17 00:00:00 2001 From: Andrew Schonfeld Date: Tue, 16 Aug 2022 18:22:38 -0400 Subject: [PATCH 09/51] VoteProgram.safeWithdraw function to safeguard against accidental vote account closures (#26586) feat: safe withdraw function Co-authored-by: aschonfeld --- web3.js/src/programs/vote.ts | 21 +++++++++++++++++++++ web3.js/test/program-tests/vote.test.ts | 15 +++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/web3.js/src/programs/vote.ts b/web3.js/src/programs/vote.ts index 6cd16671a977cd..db1a111da919a4 100644 --- a/web3.js/src/programs/vote.ts +++ b/web3.js/src/programs/vote.ts @@ -410,4 +410,25 @@ export class VoteProgram { data, }); } + + /** + * Generate a transaction to withdraw safely from a Vote account. + * + * This function was created as a safeguard for vote accounts running validators, `safeWithdraw` + * checks that the withdraw amount will not exceed the specified balance while leaving enough left + * to cover rent. If you wish to close the vote account by withdrawing the full amount, call the + * `withdraw` method directly. + */ + static safeWithdraw( + params: WithdrawFromVoteAccountParams, + currentVoteAccountBalance: number, + rentExemptMinimum: number, + ): Transaction { + if (params.lamports > currentVoteAccountBalance - rentExemptMinimum) { + throw new Error( + 'Withdraw will leave vote account with insuffcient funds.', + ); + } + return VoteProgram.withdraw(params); + } } diff --git a/web3.js/test/program-tests/vote.test.ts b/web3.js/test/program-tests/vote.test.ts index 596e6e401b4fb7..6cd349a0c3a772 100644 --- a/web3.js/test/program-tests/vote.test.ts +++ b/web3.js/test/program-tests/vote.test.ts @@ -167,6 +167,21 @@ describe('VoteProgram', () => { // Withdraw from Vote account let recipient = Keypair.generate(); + const voteBalance = await connection.getBalance(newVoteAccount.publicKey); + + expect(() => + VoteProgram.safeWithdraw( + { + votePubkey: newVoteAccount.publicKey, + authorizedWithdrawerPubkey: authorized.publicKey, + lamports: voteBalance - minimumAmount + 1, + toPubkey: recipient.publicKey, + }, + voteBalance, + minimumAmount, + ), + ).to.throw('Withdraw will leave vote account with insuffcient funds.'); + let withdraw = VoteProgram.withdraw({ votePubkey: newVoteAccount.publicKey, authorizedWithdrawerPubkey: authorized.publicKey, From 2a5e808438a644c49a87012a1df2288536a3dac6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:29:45 -0600 Subject: [PATCH 10/51] chore: bump futures from 0.3.21 to 0.3.23 (#27182) * chore: bump futures from 0.3.21 to 0.3.23 Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.21 to 0.3.23. - [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.21...0.3.23) --- updated-dependencies: - dependency-name: futures dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 72 ++++++++++++++++++------------------ ledger/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 74 ++++++++++++++++++------------------- storage-bigtable/Cargo.toml | 2 +- 4 files changed, 75 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6be2310543e124..fb3eac355b2667 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1606,9 +1606,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1621,9 +1621,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1631,15 +1631,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1649,15 +1649,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -1666,21 +1666,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1811,7 +1811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.21", + "futures 0.3.23", "log", "reqwest", "serde", @@ -2059,7 +2059,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "headers", "http", "hyper", @@ -2280,7 +2280,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2298,7 +2298,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "futures-executor", "futures-util", "log", @@ -2313,7 +2313,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-client-transports", ] @@ -2335,7 +2335,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2351,7 +2351,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2366,7 +2366,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "lazy_static", "log", @@ -2382,7 +2382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "globset", "jsonrpc-core", "lazy_static", @@ -3057,7 +3057,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "libc", "log", "rand 0.7.3", @@ -4284,7 +4284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92761393ee4dc3ff8f4af487bd58f4307c9329bbedea02cac0089ad9c411e153" dependencies = [ "dashmap 5.2.0", - "futures 0.3.21", + "futures 0.3.23", "lazy_static", "log", "parking_lot 0.12.1", @@ -4487,7 +4487,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.0", "bytes", - "futures 0.3.21", + "futures 0.3.23", "httparse", "log", "rand 0.8.5", @@ -4616,7 +4616,7 @@ name = "solana-banks-client" version = "1.12.0" dependencies = [ "borsh", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-banks-server", "solana-program 1.12.0", @@ -4643,7 +4643,7 @@ version = "1.12.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-client", "solana-runtime", @@ -4940,7 +4940,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "enum_dispatch", - "futures 0.3.21", + "futures 0.3.23", "futures-util", "indexmap", "indicatif", @@ -5451,7 +5451,7 @@ dependencies = [ "crossbeam-channel", "dashmap 4.0.2", "fs_extra", - "futures 0.3.21", + "futures 0.3.23", "itertools", "lazy_static", "libc", @@ -6255,7 +6255,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.21", + "futures 0.3.23", "goauth", "http", "hyper", @@ -6904,7 +6904,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.21", + "futures 0.3.23", "humantime", "opentelemetry", "pin-project", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 071c79d1349a30..c8f16585eef955 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -18,7 +18,7 @@ chrono-humanize = "0.2.1" crossbeam-channel = "0.5" dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } fs_extra = "1.2.0" -futures = "0.3.21" +futures = "0.3.23" itertools = "0.10.3" lazy_static = "1.4.0" libc = "0.2.131" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 284e03a6b30e49..ef2dd18fb852a8 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -1401,9 +1401,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1416,9 +1416,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1426,15 +1426,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1444,15 +1444,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -1461,21 +1461,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1580,7 +1580,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.21", + "futures 0.3.23", "log", "reqwest", "serde", @@ -1811,7 +1811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "headers", "http", "hyper", @@ -2026,7 +2026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2044,7 +2044,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "futures-executor", "futures-util", "log", @@ -2059,7 +2059,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-client-transports", ] @@ -2081,7 +2081,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2097,7 +2097,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2112,7 +2112,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "lazy_static", "log", @@ -2128,7 +2128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "globset", "jsonrpc-core", "lazy_static", @@ -2169,9 +2169,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.131" +version = "0.2.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" +checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" [[package]] name = "libloading" @@ -2841,7 +2841,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "libc", "log", "rand 0.7.3", @@ -4065,7 +4065,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.0", "bytes", - "futures 0.3.21", + "futures 0.3.23", "httparse", "log", "rand 0.8.5", @@ -4119,7 +4119,7 @@ name = "solana-banks-client" version = "1.12.0" dependencies = [ "borsh", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-program 1.12.0", "solana-sdk 1.12.0", @@ -4144,7 +4144,7 @@ version = "1.12.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-client", "solana-runtime", @@ -4657,7 +4657,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "enum_dispatch", - "futures 0.3.21", + "futures 0.3.23", "futures-util", "indexmap", "indicatif", @@ -5011,7 +5011,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.21", + "futures 0.3.23", "itertools", "lazy_static", "libc", @@ -5612,7 +5612,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.21", + "futures 0.3.23", "goauth", "http", "hyper", @@ -6149,7 +6149,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.21", + "futures 0.3.23", "humantime", "opentelemetry", "pin-project", diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index 6bcedb5f200236..0958288e3d4c3c 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -16,7 +16,7 @@ bytes = "1.2" bzip2 = "0.4.3" enum-iterator = "0.8.1" flate2 = "1.0.24" -futures = "0.3.21" +futures = "0.3.23" goauth = "0.13.1" http = "0.2.8" hyper = "0.14.20" From 0797810af435f6b2ae434a614f5ac1b10fa57e7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:59:18 -0600 Subject: [PATCH 11/51] chore: bump nix from 0.24.2 to 0.25.0 (#27179) * chore: bump nix from 0.24.2 to 0.25.0 Bumps [nix](https://github.com/nix-rust/nix) from 0.24.2 to 0.25.0. - [Release notes](https://github.com/nix-rust/nix/releases) - [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md) - [Commits](https://github.com/nix-rust/nix/compare/v0.24.2...v0.25.0) --- updated-dependencies: - dependency-name: nix dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 29 +++++++++++++++++++++-------- install/Cargo.toml | 2 +- net-utils/Cargo.toml | 2 +- perf/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 10 ++++++---- streamer/Cargo.toml | 2 +- sys-tuner/Cargo.toml | 2 +- 7 files changed, 32 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb3eac355b2667..8616c2632fdfae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -270,9 +270,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "autotools" @@ -1088,7 +1088,7 @@ version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ - "nix", + "nix 0.24.2", "winapi 0.3.9", ] @@ -2743,7 +2743,20 @@ dependencies = [ "bitflags", "cfg-if 1.0.0", "libc", +] + +[[package]] +name = "nix" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" +dependencies = [ + "autocfg", + "bitflags", + "cfg-if 1.0.0", + "libc", "memoffset", + "pin-utils", ] [[package]] @@ -5403,7 +5416,7 @@ dependencies = [ "dirs-next", "indicatif", "lazy_static", - "nix", + "nix 0.25.0", "reqwest", "semver 1.0.13", "serde", @@ -5662,7 +5675,7 @@ dependencies = [ "clap 3.1.8", "crossbeam-channel", "log", - "nix", + "nix 0.25.0", "rand 0.7.3", "serde", "serde_derive", @@ -5699,7 +5712,7 @@ dependencies = [ "libc", "log", "matches", - "nix", + "nix 0.25.0", "rand 0.7.3", "rayon", "serde", @@ -6316,7 +6329,7 @@ dependencies = [ "itertools", "libc", "log", - "nix", + "nix 0.25.0", "pem", "percentage", "pkcs8", @@ -6341,7 +6354,7 @@ dependencies = [ "clap 2.33.3", "libc", "log", - "nix", + "nix 0.25.0", "solana-logger 1.12.0", "solana-version", "sysctl", diff --git a/install/Cargo.toml b/install/Cargo.toml index dae1b8016bbde3..af4cffd7de52b1 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -21,7 +21,7 @@ ctrlc = { version = "3.2.2", features = ["termination"] } dirs-next = "2.0.0" indicatif = "0.17.0" lazy_static = "1.4.0" -nix = "0.24.2" +nix = "0.25.0" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } semver = "1.0.13" serde = { version = "1.0.143", features = ["derive"] } diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index a0530e61f767dd..a26760134ef7e0 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" clap = { version = "3.1.5", features = ["cargo"] } crossbeam-channel = "0.5" log = "0.4.17" -nix = "0.24.2" +nix = "0.25.0" rand = "0.7.0" serde = "1.0.143" serde_derive = "1.0.103" diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 202bf36c687a43..3380f2ac89f7c0 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -30,7 +30,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } [target."cfg(target_os = \"linux\")".dependencies] caps = "0.5.3" libc = "0.2.131" -nix = "0.24.2" +nix = "0.25.0" [lib] name = "solana_perf" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index ef2dd18fb852a8..3c5e530501665c 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -256,9 +256,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "autotools" @@ -2527,14 +2527,16 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg", "bitflags", "cfg-if 1.0.0", "libc", "memoffset", + "pin-utils", ] [[package]] diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index e72816d03099b8..97b42aa58c9050 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -17,7 +17,7 @@ indexmap = "1.9.1" itertools = "0.10.3" libc = "0.2.131" log = "0.4.17" -nix = "0.24.2" +nix = "0.25.0" pem = "1.0.2" percentage = "0.1.0" pkcs8 = { version = "0.8.0", features = ["alloc"] } diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index a98a719300d4b1..f1cc3c97e26b08 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -20,7 +20,7 @@ solana-version = { path = "../version", version = "=1.12.0" } [target."cfg(unix)".dependencies] unix_socket2 = "0.5.4" users = "0.10.0" -nix = "0.24.2" +nix = "0.25.0" sysctl = "0.4.4" [lib] From 1eba91af2928848dc9a861d1af351434d0752d0c Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Tue, 16 Aug 2022 19:52:11 -0700 Subject: [PATCH 12/51] Parse ConfidentialTransaction instructions (#26825) Parse ConfidentialTransfer instructions --- transaction-status/src/parse_token.rs | 10 +- .../extension/confidential_transfer.rs | 399 ++++++++++++++++++ .../src/parse_token/extension/mod.rs | 1 + 3 files changed, 406 insertions(+), 4 deletions(-) create mode 100644 transaction-status/src/parse_token/extension/confidential_transfer.rs diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index 3c43cfbf426a28..f820883c74e1c8 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -3,8 +3,8 @@ use { check_num_accounts, ParsableProgram, ParseInstructionError, ParsedInstructionEnum, }, extension::{ - default_account_state::*, interest_bearing_mint::*, memo_transfer::*, - mint_close_authority::*, reallocate::*, transfer_fee::*, + confidential_transfer::*, default_account_state::*, interest_bearing_mint::*, + memo_transfer::*, mint_close_authority::*, reallocate::*, transfer_fee::*, }, serde_json::{json, Map, Value}, solana_account_decoder::parse_token::{ @@ -510,8 +510,10 @@ pub fn parse_token( account_keys, ) } - TokenInstruction::ConfidentialTransferExtension => Err( - ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken), + TokenInstruction::ConfidentialTransferExtension => parse_confidential_transfer_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, ), TokenInstruction::DefaultAccountStateExtension => { if instruction.data.len() <= 2 { diff --git a/transaction-status/src/parse_token/extension/confidential_transfer.rs b/transaction-status/src/parse_token/extension/confidential_transfer.rs new file mode 100644 index 00000000000000..867f90e97be133 --- /dev/null +++ b/transaction-status/src/parse_token/extension/confidential_transfer.rs @@ -0,0 +1,399 @@ +use { + super::*, + solana_account_decoder::parse_token_extension::UiConfidentialTransferMint, + spl_token_2022::{ + extension::confidential_transfer::{instruction::*, ConfidentialTransferMint}, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_confidential_transfer_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? + { + ConfidentialTransferInstruction::InitializeMint => { + check_num_token_accounts(account_indexes, 1)?; + let confidential_transfer_mint: ConfidentialTransferMint = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferMint = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "initializeConfidentialTransferMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::UpdateMint => { + check_num_token_accounts(account_indexes, 3)?; + let confidential_transfer_mint: ConfidentialTransferMint = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferMint = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "confidentialTransferMintAuthority": account_keys[account_indexes[1] as usize].to_string(), + "newConfidentialTransferMintAuthority": account_keys[account_indexes[2] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "updateConfidentialTransferMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ConfigureAccount => { + check_num_token_accounts(account_indexes, 3)?; + let configure_account_data: ConfigureAccountInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let maximum_pending_balance_credit_counter: u64 = configure_account_data + .maximum_pending_balance_credit_counter + .into(); + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "encryptionPubkey": format!("{}", configure_account_data.encryption_pubkey), + "decryptableZeroBalance": format!("{}", configure_account_data.decryptable_zero_balance), + "maximumPendingBalanceCreditCounter": maximum_pending_balance_credit_counter, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "configureConfidentialTransferAccount".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ApproveAccount => { + check_num_token_accounts(account_indexes, 3)?; + Ok(ParsedInstructionEnum { + instruction_type: "approveConfidentialTransferAccount".to_string(), + info: json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "confidentialTransferAuditorAuthority": account_keys[account_indexes[2] as usize].to_string(), + }), + }) + } + ConfidentialTransferInstruction::EmptyAccount => { + check_num_token_accounts(account_indexes, 3)?; + let empty_account_data: EmptyAccountInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = empty_account_data.proof_instruction_offset; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[1] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "emptyConfidentialTransferAccount".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Deposit => { + check_num_token_accounts(account_indexes, 4)?; + let deposit_data: DepositInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let amount: u64 = deposit_data.amount.into(); + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "amount": amount, + "decimals": deposit_data.decimals, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 3, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "depositConfidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Withdraw => { + check_num_token_accounts(account_indexes, 5)?; + let withdrawal_data: WithdrawInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let amount: u64 = withdrawal_data.amount.into(); + let proof_instruction_offset: i8 = withdrawal_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "amount": amount, + "decimals": withdrawal_data.decimals, + "newDecryptableAvailableBalance": format!("{}", withdrawal_data.new_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawConfidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Transfer => { + check_num_token_accounts(account_indexes, 5)?; + let transfer_data: TransferInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "confidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::TransferWithFee => { + check_num_token_accounts(account_indexes, 5)?; + let transfer_data: TransferInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "confidentialTransferWithFee".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ApplyPendingBalance => { + check_num_token_accounts(account_indexes, 2)?; + let apply_pending_balance_data: ApplyPendingBalanceData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let expected_pending_balance_credit_counter: u64 = apply_pending_balance_data + .expected_pending_balance_credit_counter + .into(); + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "newDecryptableAvailableBalance": format!("{}", apply_pending_balance_data.new_decryptable_available_balance), + "expectedPendingBalanceCreditCounter": expected_pending_balance_credit_counter, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "applyPendingConfidentialTransferBalance".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::EnableBalanceCredits => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "enableConfidentialTransferBalanceCredits".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::DisableBalanceCredits => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "disableConfidentialTransferBalanceCredits".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::WithdrawWithheldTokensFromMint => { + check_num_token_accounts(account_indexes, 4)?; + let withdraw_withheld_data: WithdrawWithheldTokensFromMintData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 3, + account_keys, + account_indexes, + "withdrawWithheldAuthority", + "multisigWithdrawWithheldAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawWithheldConfidentialTransferTokensFromMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::WithdrawWithheldTokensFromAccounts => { + let withdraw_withheld_data: WithdrawWithheldTokensFromAccountsData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let num_token_accounts = withdraw_withheld_data.num_token_accounts; + check_num_token_accounts(account_indexes, 4 + num_token_accounts as usize)?; + let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + }); + let map = value.as_object_mut().unwrap(); + let mut source_accounts: Vec = vec![]; + let first_source_account_index = account_indexes + .len() + .saturating_sub(num_token_accounts as usize); + for i in account_indexes[first_source_account_index..].iter() { + source_accounts.push(account_keys[*i as usize].to_string()); + } + map.insert("sourceAccounts".to_string(), json!(source_accounts)); + parse_signers( + map, + 3, + account_keys, + &account_indexes[..first_source_account_index], + "withdrawWithheldAuthority", + "multisigWithdrawWithheldAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawWithheldConfidentialTransferTokensFromAccounts" + .to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::HarvestWithheldTokensToMint => { + check_num_token_accounts(account_indexes, 1)?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + let mut source_accounts: Vec = vec![]; + for i in account_indexes.iter().skip(1) { + source_accounts.push(account_keys[*i as usize].to_string()); + } + map.insert("sourceAccounts".to_string(), json!(source_accounts)); + Ok(ParsedInstructionEnum { + instruction_type: "harvestWithheldConfidentialTransferTokensToMint".to_string(), + info: value, + }) + } + } +} diff --git a/transaction-status/src/parse_token/extension/mod.rs b/transaction-status/src/parse_token/extension/mod.rs index 3c84942651ab79..f5d8e41f4a94d5 100644 --- a/transaction-status/src/parse_token/extension/mod.rs +++ b/transaction-status/src/parse_token/extension/mod.rs @@ -1,5 +1,6 @@ use super::*; +pub(super) mod confidential_transfer; pub(super) mod default_account_state; pub(super) mod interest_bearing_mint; pub(super) mod memo_transfer; From c1111fa069dfed5f8ba0801b2abae3ceded923dd Mon Sep 17 00:00:00 2001 From: apfitzge Date: Wed, 17 Aug 2022 12:57:52 -0500 Subject: [PATCH 13/51] snapshots: serialize version file first (#27192) serialize version file first --- runtime/src/snapshot_utils.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 1eab70b8cb5ab7..e8dbe024619622 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -345,10 +345,12 @@ pub fn archive_snapshot_package( let do_archive_files = |encoder: &mut dyn Write| -> Result<()> { let mut archive = tar::Builder::new(encoder); + // Serialize the version and snapshots files before accounts so we can quickly determine the version + // and other bank fields. This is necessary if we want to interleave unpacking with reconstruction + archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?; for dir in ["snapshots", "accounts"] { archive.append_dir_all(dir, staging_dir.as_ref().join(dir))?; } - archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?; archive.into_inner()?; Ok(()) }; From 225cddcffbf453cc05f5c6ca2da74509a3fcde07 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 17 Aug 2022 15:14:31 -0500 Subject: [PATCH 14/51] serialize incremental_snapshot_hash (#26839) * serialize incremental_snapshot_hash * pr feedback --- core/src/accounts_hash_verifier.rs | 1 + core/tests/snapshots.rs | 3 ++ runtime/src/bank.rs | 26 ++++++++++++++ runtime/src/serde_snapshot.rs | 14 ++++++-- runtime/src/serde_snapshot/newer.rs | 17 +++++++-- runtime/src/serde_snapshot/tests.rs | 54 ++++++++++++++++++++++------- runtime/src/snapshot_utils.rs | 2 ++ 7 files changed, 101 insertions(+), 16 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index ae8f0dbe780aae..118529dd983640 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -190,6 +190,7 @@ impl AccountsHashVerifier { accounts_package.snapshot_links.path(), accounts_package.slot, &accounts_hash, + None, ); datapoint_info!( "accounts_hash_verifier", diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 3dcc004a53df6a..6e3b7869fac0d6 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -256,6 +256,7 @@ fn run_bank_forks_snapshot_n( accounts_package.snapshot_links.path(), accounts_package.slot, &last_bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, last_bank.get_accounts_hash()); snapshot_utils::archive_snapshot_package( @@ -491,6 +492,7 @@ fn test_concurrent_snapshot_packaging( accounts_package.snapshot_links.path(), accounts_package.slot, &Hash::default(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, Hash::default()); pending_snapshot_package @@ -534,6 +536,7 @@ fn test_concurrent_snapshot_packaging( saved_snapshots_dir.path(), saved_slot, &Hash::default(), + None, ); snapshot_utils::verify_snapshot_archive( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 32ece32f8892d1..8428400f7c7091 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -236,6 +236,25 @@ impl RentDebit { } } +/// Incremental snapshots only calculate their accounts hash based on the account changes WITHIN the incremental slot range. +/// So, we need to keep track of the full snapshot expected accounts hash results. +/// We also need to keep track of the hash and capitalization specific to the incremental snapshot slot range. +/// The capitalization we calculate for the incremental slot will NOT be consistent with the bank's capitalization. +/// It is not feasible to calculate a capitalization delta that is correct given just incremental slots account data and the full snapshot's capitalization. +#[derive(Serialize, Deserialize, AbiExample, Clone, Debug, Default, PartialEq, Eq)] +pub struct BankIncrementalSnapshotPersistence { + /// slot of full snapshot + pub full_slot: Slot, + /// accounts hash from the full snapshot + pub full_hash: Hash, + /// capitalization from the full snapshot + pub full_capitalization: u64, + /// hash of the accounts in the incremental snapshot slot range, including zero-lamport accounts + pub incremental_hash: Hash, + /// capitalization of the accounts in the incremental snapshot slot range + pub incremental_capitalization: u64, +} + #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct RentDebits(HashMap); impl RentDebits { @@ -976,6 +995,7 @@ pub struct BankFieldsToDeserialize { pub(crate) epoch_stakes: HashMap, pub(crate) is_delta: bool, pub(crate) accounts_data_len: u64, + pub(crate) incremental_snapshot_persistence: Option, } // Bank's common fields shared by all supported snapshot versions for serialization. @@ -1083,6 +1103,7 @@ impl PartialEq for Bank { accounts_data_size_delta_on_chain: _, accounts_data_size_delta_off_chain: _, fee_structure: _, + incremental_snapshot_persistence: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this ParitalEq is accordingly updated. @@ -1336,6 +1357,8 @@ pub struct Bank { /// Transaction fee structure pub fee_structure: FeeStructure, + + pub incremental_snapshot_persistence: Option, } struct VoteWithStakeDelegations { @@ -1466,6 +1489,7 @@ impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { + incremental_snapshot_persistence: None, rewrites_skipped_this_slot: Rewrites::default(), rc: BankRc::new(accounts, Slot::default()), status_cache: Arc::>::default(), @@ -1765,6 +1789,7 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Bank { + incremental_snapshot_persistence: None, rewrites_skipped_this_slot: Rewrites::default(), rc, status_cache, @@ -2126,6 +2151,7 @@ impl Bank { } let feature_set = new(); let mut bank = Self { + incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rewrites_skipped_this_slot: Rewrites::default(), rc: bank_rc, status_cache: new(), diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index e32aecbe4b6705..5b42208d042e7c 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -8,7 +8,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, append_vec::{AppendVec, StoredMetaWriteVersion}, - bank::{Bank, BankFieldsToDeserialize, BankRc}, + bank::{Bank, BankFieldsToDeserialize, BankIncrementalSnapshotPersistence, BankRc}, blockhash_queue::BlockhashQueue, builtins::Builtins, epoch_stakes::EpochStakes, @@ -77,6 +77,7 @@ pub struct AccountsDbFields( /// slots that were roots within the last epoch for which we care about the hash value #[serde(deserialize_with = "default_on_eof")] Vec<(Slot, Hash)>, + // here? ); /// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a @@ -193,6 +194,7 @@ trait TypeContext<'a>: PartialEq { stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> std::result::Result<(), Box> where R: Read, @@ -370,12 +372,18 @@ fn reserialize_bank_fields_with_new_hash( stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> Result<(), Error> where W: Write, R: Read, { - newer::Context::reserialize_bank_fields_with_hash(stream_reader, stream_writer, accounts_hash) + newer::Context::reserialize_bank_fields_with_hash( + stream_reader, + stream_writer, + accounts_hash, + incremental_snapshot_persistence, + ) } /// effectively updates the accounts hash in the serialized bank file on disk @@ -387,6 +395,7 @@ pub fn reserialize_bank_with_new_accounts_hash( bank_snapshots_dir: impl AsRef, slot: Slot, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> bool { let bank_post = snapshot_utils::get_bank_snapshots_dir(bank_snapshots_dir, slot); let bank_post = bank_post.join(snapshot_utils::get_snapshot_file_name(slot)); @@ -404,6 +413,7 @@ pub fn reserialize_bank_with_new_accounts_hash( &mut BufReader::new(file), &mut BufWriter::new(file_out), accounts_hash, + incremental_snapshot_persistence, ) .unwrap(); } diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 3dd73803cf3010..512737106aebc9 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -96,6 +96,7 @@ impl From for BankFieldsToDeserialize { stakes: dvb.stakes, epoch_stakes: dvb.epoch_stakes, is_delta: dvb.is_delta, + incremental_snapshot_persistence: None, } } } @@ -209,6 +210,7 @@ impl<'a> TypeContext<'a> for Context { // we can grab it on restart. // TODO: if we do a snapshot version bump, consider moving this out. lamports_per_signature, + None::, ) .serialize(serializer) } @@ -314,6 +316,10 @@ impl<'a> TypeContext<'a> for Context { bank_fields.fee_rate_governor = bank_fields .fee_rate_governor .clone_with_lamports_per_signature(lamports_per_signature); + + let incremental_snapshot_persistence = ignore_eof_error(deserialize_from(stream))?; + bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence; + Ok((bank_fields, accounts_db_fields)) } @@ -327,12 +333,13 @@ impl<'a> TypeContext<'a> for Context { } /// deserialize the bank from 'stream_reader' - /// modify the accounts_hash + /// modify the accounts_hash and incremental_snapshot_persistence /// reserialize the bank to 'stream_writer' fn reserialize_bank_fields_with_hash( stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> std::result::Result<(), Box> where R: Read, @@ -345,6 +352,7 @@ impl<'a> TypeContext<'a> for Context { let blockhash_queue = RwLock::new(rhs.blockhash_queue.clone()); let hard_forks = RwLock::new(rhs.hard_forks.clone()); let lamports_per_signature = rhs.fee_rate_governor.lamports_per_signature; + let bank = SerializableVersionedBank { blockhash_queue: &blockhash_queue, ancestors: &rhs.ancestors, @@ -382,7 +390,12 @@ impl<'a> TypeContext<'a> for Context { bincode::serialize_into( stream_writer, - &(bank, accounts_db_fields, lamports_per_signature), + &( + bank, + accounts_db_fields, + lamports_per_signature, + incremental_snapshot_persistence, + ), ) } } diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index faf3006d9aaccc..1de6ee2a5d54c6 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -190,6 +190,7 @@ fn test_bank_serialize_style( serde_style: SerdeStyle, reserialize_accounts_hash: bool, update_accounts_hash: bool, + incremental_snapshot_persistence: bool, ) { solana_logger::setup(); let (genesis_config, _) = create_genesis_config(500); @@ -236,8 +237,18 @@ fn test_bank_serialize_style( } else { bank2.get_accounts_hash() }; - if reserialize_accounts_hash { - let slot = bank2.slot(); + + let slot = bank2.slot(); + let incremental = + incremental_snapshot_persistence.then(|| BankIncrementalSnapshotPersistence { + full_slot: slot + 1, + full_hash: Hash::new(&[1; 32]), + full_capitalization: 31, + incremental_hash: Hash::new(&[2; 32]), + incremental_capitalization: 32, + }); + + if reserialize_accounts_hash || incremental_snapshot_persistence { let temp_dir = TempDir::new().unwrap(); let slot_dir = temp_dir.path().join(slot.to_string()); let post_path = slot_dir.join(slot.to_string()); @@ -248,21 +259,32 @@ fn test_bank_serialize_style( let mut f = std::fs::File::create(&pre_path).unwrap(); f.write_all(&buf).unwrap(); } + assert!(reserialize_bank_with_new_accounts_hash( temp_dir.path(), slot, - &accounts_hash + &accounts_hash, + incremental.as_ref(), )); let previous_len = buf.len(); // larger buffer than expected to make sure the file isn't larger than expected - let mut buf_reserialized = vec![0; previous_len + 1]; + let sizeof_none = std::mem::size_of::(); + let sizeof_incremental_snapshot_persistence = + std::mem::size_of::>(); + let mut buf_reserialized = + vec![0; previous_len + sizeof_incremental_snapshot_persistence + 1]; { let mut f = std::fs::File::open(post_path).unwrap(); let size = f.read(&mut buf_reserialized).unwrap(); - assert_eq!(size, previous_len); + let expected = if !incremental_snapshot_persistence { + previous_len + } else { + previous_len + sizeof_incremental_snapshot_persistence - sizeof_none + }; + assert_eq!(size, expected); buf_reserialized.truncate(size); } - if update_accounts_hash { + if update_accounts_hash || incremental_snapshot_persistence { // We cannot guarantee buffer contents are exactly the same if hash is the same. // Things like hashsets/maps have randomness in their in-mem representations. // This make serialized bytes not deterministic. @@ -311,6 +333,7 @@ fn test_bank_serialize_style( assert_eq!(dbank.get_balance(&key3.pubkey()), 0); assert_eq!(dbank.get_accounts_hash(), accounts_hash); assert!(bank2 == dbank); + assert_eq!(dbank.incremental_snapshot_persistence, incremental); } pub(crate) fn reconstruct_accounts_db_via_serialization( @@ -359,11 +382,18 @@ fn test_bank_serialize_newer() { for (reserialize_accounts_hash, update_accounts_hash) in [(false, false), (true, false), (true, true)] { - test_bank_serialize_style( - SerdeStyle::Newer, - reserialize_accounts_hash, - update_accounts_hash, - ) + for incremental_snapshot_persistence in if reserialize_accounts_hash { + [false, true].to_vec() + } else { + [false].to_vec() + } { + test_bank_serialize_style( + SerdeStyle::Newer, + reserialize_accounts_hash, + update_accounts_hash, + incremental_snapshot_persistence, + ) + } } } @@ -555,7 +585,7 @@ mod test_bank_serialize { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "9vGBt7YfymKUTPWLHVVpQbDtPD7dFDwXRMFkCzwujNqJ")] + #[frozen_abi(digest = "5py4Wkuj5fV2sLyA1MrPg4pGNwMEaygQLnpLyY8MMLGC")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index e8dbe024619622..19e9d02f684273 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2045,6 +2045,7 @@ pub fn package_and_archive_full_snapshot( accounts_package.snapshot_links.path(), accounts_package.slot, &bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash()); @@ -2097,6 +2098,7 @@ pub fn package_and_archive_incremental_snapshot( accounts_package.snapshot_links.path(), accounts_package.slot, &bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash()); From fea66c8b6365c924b71daa264d0919127b330008 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 17 Aug 2022 22:01:51 +0000 Subject: [PATCH 15/51] derives Error trait for ClusterInfoError and core::result::Error (#27208) --- core/src/result.rs | 80 +++++++++++--------------------------- gossip/src/cluster_info.rs | 8 +++- 2 files changed, 29 insertions(+), 59 deletions(-) diff --git a/core/src/result.rs b/core/src/result.rs index 6c9b66b6d459c0..2aa8f8718f5141 100644 --- a/core/src/result.rs +++ b/core/src/result.rs @@ -3,53 +3,42 @@ use { solana_gossip::{cluster_info, gossip_error::GossipError}, solana_ledger::blockstore, + thiserror::Error, }; -#[derive(Debug)] +#[derive(Debug, Error)] pub enum Error { - Io(std::io::Error), - Recv(crossbeam_channel::RecvError), + #[error(transparent)] + Blockstore(#[from] blockstore::BlockstoreError), + #[error(transparent)] + ClusterInfo(#[from] cluster_info::ClusterInfoError), + #[error(transparent)] + Gossip(#[from] GossipError), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error("ReadyTimeout")] ReadyTimeout, - RecvTimeout(crossbeam_channel::RecvTimeoutError), - TrySend, - Serialize(std::boxed::Box), - ClusterInfo(cluster_info::ClusterInfoError), + #[error(transparent)] + Recv(#[from] crossbeam_channel::RecvError), + #[error(transparent)] + RecvTimeout(#[from] crossbeam_channel::RecvTimeoutError), + #[error("Send")] Send, - Blockstore(blockstore::BlockstoreError), - WeightedIndex(rand::distributions::weighted::WeightedError), - Gossip(GossipError), + #[error("TrySend")] + TrySend, + #[error(transparent)] + Serialize(#[from] std::boxed::Box), + #[error(transparent)] + WeightedIndex(#[from] rand::distributions::weighted::WeightedError), } pub type Result = std::result::Result; -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "solana error") - } -} - -impl std::error::Error for Error {} - -impl std::convert::From for Error { - fn from(e: crossbeam_channel::RecvError) -> Error { - Error::Recv(e) - } -} impl std::convert::From for Error { fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error { Error::ReadyTimeout } } -impl std::convert::From for Error { - fn from(e: crossbeam_channel::RecvTimeoutError) -> Error { - Error::RecvTimeout(e) - } -} -impl std::convert::From for Error { - fn from(e: cluster_info::ClusterInfoError) -> Error { - Error::ClusterInfo(e) - } -} impl std::convert::From> for Error { fn from(_e: crossbeam_channel::TrySendError) -> Error { Error::TrySend @@ -60,31 +49,6 @@ impl std::convert::From> for Error { Error::Send } } -impl std::convert::From for Error { - fn from(e: std::io::Error) -> Error { - Error::Io(e) - } -} -impl std::convert::From> for Error { - fn from(e: std::boxed::Box) -> Error { - Error::Serialize(e) - } -} -impl std::convert::From for Error { - fn from(e: blockstore::BlockstoreError) -> Error { - Error::Blockstore(e) - } -} -impl std::convert::From for Error { - fn from(e: rand::distributions::weighted::WeightedError) -> Error { - Error::WeightedIndex(e) - } -} -impl std::convert::From for Error { - fn from(e: GossipError) -> Error { - Error::Gossip(e) - } -} #[cfg(test)] mod tests { diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 55d3c177515d6e..40142f70e3d336 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -92,6 +92,7 @@ use { thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, + thiserror::Error, }; /// The Data plane fanout size, also used as the neighborhood size @@ -138,12 +139,17 @@ const MIN_STAKE_FOR_GOSSIP: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL; /// Minimum number of staked nodes for enforcing stakes in gossip. const MIN_NUM_STAKED_NODES: usize = 500; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Error)] pub enum ClusterInfoError { + #[error("NoPeers")] NoPeers, + #[error("NoLeader")] NoLeader, + #[error("BadContactInfo")] BadContactInfo, + #[error("BadGossipAddress")] BadGossipAddress, + #[error("TooManyIncrementalSnapshotHashes")] TooManyIncrementalSnapshotHashes, } From 2fd9a4f373ac0a15b347bebeb802aa50ab404d51 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Wed, 17 Aug 2022 18:45:59 -0400 Subject: [PATCH 16/51] Add clean_accounts_for_tests() (#27200) --- accounts-bench/src/main.rs | 2 +- runtime/benches/accounts.rs | 2 +- runtime/src/accounts.rs | 2 +- runtime/src/accounts_db.rs | 101 +++++++++++++++++++----------------- runtime/tests/accounts.rs | 2 +- 5 files changed, 57 insertions(+), 52 deletions(-) diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 987915d8c9fe15..3d1c18633f2747 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -110,7 +110,7 @@ fn main() { for x in 0..iterations { if clean { let mut time = Measure::start("clean"); - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); time.stop(); println!("{}", time); for slot in 0..num_slots { diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index ec4eea2fefde9b..7160c2efae8e2b 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -178,7 +178,7 @@ fn bench_delete_dependencies(bencher: &mut Bencher) { accounts.add_root(i); } bencher.iter(|| { - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); }); } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 9c7838938a5fa8..86d14aaf7b681c 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -3081,7 +3081,7 @@ mod tests { } } info!("done..cleaning.."); - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); } fn load_accounts_no_store( diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 4f2fa0a5ba9c10..caa4cc77f31376 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2507,6 +2507,11 @@ impl AccountsDb { pubkeys } + /// Call clean_accounts() with the common parameters that tests/benches use. + pub fn clean_accounts_for_tests(&self) { + self.clean_accounts(None, false, None) + } + // Purge zero lamport accounts and older rooted account states as garbage // collection // Only remove those accounts where the entire rooted history of the account @@ -10000,7 +10005,7 @@ pub mod tests { // overwrite old rooted account version; only the r_slot_0_stores.count() should be // decremented db.store_uncached(2, &[(&pubkeys[0], &account)]); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); { let slot_0_stores = &db.storage.get_slot_stores(0).unwrap(); let slot_1_stores = &db.storage.get_slot_stores(1).unwrap(); @@ -10439,7 +10444,7 @@ pub mod tests { //slot is gone accounts.print_accounts_stats("pre-clean"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.map.get(&0).is_none()); //new value is there @@ -10522,7 +10527,7 @@ pub mod tests { // Slot 1 should be removed, slot 0 cannot be removed because it still has // the latest update for pubkey 2 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.get_slot_stores(0).is_some()); assert!(accounts.storage.get_slot_stores(1).is_none()); @@ -10557,7 +10562,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Slots 0 and 1 should each have been cleaned because all of their // accounts are zero lamports assert!(accounts.storage.get_slot_stores(0).is_none()); @@ -10571,7 +10576,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0 assert!(accounts.storage.get_slot_stores(2).is_none()); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); @@ -10598,7 +10603,7 @@ pub mod tests { // Slot 0 should be removed, and // zero-lamport account should be cleaned - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.get_slot_stores(0).is_none()); assert!(accounts.storage.get_slot_stores(1).is_none()); @@ -10641,7 +10646,7 @@ pub mod tests { assert_eq!(accounts.alive_account_count_in_slot(0), 1); assert_eq!(accounts.alive_account_count_in_slot(1), 1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //now old state is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10675,7 +10680,7 @@ pub mod tests { accounts.print_accounts_stats(""); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //Old state behind zero-lamport account is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10792,7 +10797,7 @@ pub mod tests { accounts.account_indexes.keys = None; } - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //both zero lamport and normal accounts are cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10883,7 +10888,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1); //now uncleaned roots are cleaned up - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); } @@ -10900,7 +10905,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1); //now uncleaned roots are cleaned up - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); } @@ -10912,7 +10917,7 @@ pub mod tests { // Create 100 accounts in slot 0 create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); check_accounts(&accounts, &pubkeys, 0, 100, 1); // do some updates to those accounts and re-check @@ -10948,7 +10953,7 @@ pub mod tests { // Modify first 20 of the accounts from slot 0 in slot 2 modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Overwrite account 31 from slot 0 with lamports=0 into slot 2. // Slot 2 should now have 20 + 1 = 21 accounts let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -10962,7 +10967,7 @@ pub mod tests { accounts.add_root(latest_slot); assert!(check_storage(&accounts, 2, 31)); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // The first 20 accounts of slot 0 have been updated in slot 2, as well as // accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and // slot 2 respectively), so only 78 accounts are left in slot 0's storage entries. @@ -11102,7 +11107,7 @@ pub mod tests { accounts.print_accounts_stats("pre_purge"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post_purge"); @@ -11167,7 +11172,7 @@ pub mod tests { info!("ancestors: {:?}", ancestors); let hash = accounts.update_accounts_hash_test(current_slot, &ancestors); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( accounts.update_accounts_hash_test(current_slot, &ancestors), @@ -11234,7 +11239,7 @@ pub mod tests { accounts.print_accounts_stats("accounts"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("accounts_post_purge"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); @@ -11320,7 +11325,7 @@ pub mod tests { fn test_accounts_purge_chained_purge_before_snapshot_restore() { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } @@ -11331,7 +11336,7 @@ pub mod tests { with_chained_zero_lamport_accounts(|accounts, current_slot| { let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_accounts_stats("after_reconstruct"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } @@ -12095,7 +12100,7 @@ pub mod tests { accounts.print_count_and_status("before reconstruct"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_count_and_status("before purge zero"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_count_and_status("after purge zero"); assert_load_account(&accounts, current_slot, pubkey, old_lamport); @@ -12156,7 +12161,7 @@ pub mod tests { accounts.print_accounts_stats("Post-B pre-clean"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); info!("post B"); accounts.print_accounts_stats("Post-B"); @@ -12196,7 +12201,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("Post-D clean"); @@ -12286,7 +12291,7 @@ pub mod tests { current_slot += 1; assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( // Removed one reference from the dead slot (reference only counted once @@ -12311,9 +12316,9 @@ pub mod tests { // If step C and step D should be purged, snapshot restore would cause // pubkey1 to be revived as the state of step A. // So, prevent that from happening by introducing refcount - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); info!("pubkey: {}", pubkey1); accounts.print_accounts_stats("pre_clean"); @@ -12328,10 +12333,10 @@ pub mod tests { accounts.add_root(current_slot); // Do clean - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // 2nd clean needed to clean-up pubkey1 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Ensure pubkey2 is cleaned from the index finally assert_not_load_account(&accounts, current_slot, pubkey1); @@ -12472,7 +12477,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12561,7 +12566,7 @@ pub mod tests { } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12846,7 +12851,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -13056,7 +13061,7 @@ pub mod tests { accounts.flush_accounts_cache(true, None); // clear out the dirty keys - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // flush 1 accounts.get_accounts_delta_hash(1); @@ -13068,11 +13073,11 @@ pub mod tests { // clean to remove pubkey1 from 0, // shrink to shrink pubkey1 from 0 // then another clean to remove pubkey1 from slot 1 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.shrink_candidate_slots(); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-clean"); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); @@ -13100,12 +13105,12 @@ pub mod tests { accounts.store_uncached(1, &[(key, &account)]); } accounts.add_root(1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.shrink_all_slots(false, None); // Clean again to flush the dirty stores // and allow them to be recycled in the next step - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-shrink"); let num_stores = accounts.recycle_stores.read().unwrap().entry_count(); assert!(num_stores > 0); @@ -13425,9 +13430,9 @@ pub mod tests { db.add_root(0); db.add_root(1); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.add_root(2); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); @@ -13475,7 +13480,7 @@ pub mod tests { db.add_root(1); // Clean should not remove anything yet as nothing has been flushed - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &Ancestors::default(), @@ -13491,7 +13496,7 @@ pub mod tests { // Flush, then clean again. Should not need another root to initiate the cleaning // because `accounts_index.uncleaned_roots` should be correct db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); assert!(db .do_load( &Ancestors::default(), @@ -13556,7 +13561,7 @@ pub mod tests { // Flush, then clean. Should not need another root to initiate the cleaning // because `accounts_index.uncleaned_roots` should be correct db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); // The `zero_lamport_account_key` is still alive in slot 1, so refcount for the // pubkey should be 2 @@ -13716,7 +13721,7 @@ pub mod tests { // Run clean, unrooted slot 1 should not be purged, and still readable from the cache, // because we're still doing a scan on it. - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &scan_ancestors, @@ -13730,7 +13735,7 @@ pub mod tests { // When the scan is over, clean should not panic and should not purge something // still in the cache. scan_tracker.exit().unwrap(); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &scan_ancestors, @@ -14332,7 +14337,7 @@ pub mod tests { // Checking that the uncleaned_pubkeys are not pre-maturely removed // such that when the slots are rooted, and can actually be cleaned, then the // delta keys are still there. - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean1"); // Check stores > 0 @@ -14347,12 +14352,12 @@ pub mod tests { db.store_uncached(2, &[(&account_key1, &account3)]); db.get_accounts_delta_hash(2); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean2"); // root slots 1 db.add_root(1); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean3"); @@ -14361,7 +14366,7 @@ pub mod tests { db.add_root(3); // Check that we can clean where max_root=3 and slot=2 is not rooted - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); assert!(db.uncleaned_pubkeys.is_empty()); @@ -15176,7 +15181,7 @@ pub mod tests { // The later rooted zero-lamport update to `shared_key` cannot be cleaned // because it is kept alive by the unrooted slot. - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts .accounts_index .get_account_read_entry(&shared_key) @@ -15186,7 +15191,7 @@ pub mod tests { accounts.purge_slot(slot0, 0, true); // Now clean should clean up the remaining key - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts .accounts_index .get_account_read_entry(&shared_key) diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index a055d62da14da5..d272e738a1695f 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -65,7 +65,7 @@ fn test_shrink_and_clean() { // let's dance. for _ in 0..10 { - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); std::thread::sleep(std::time::Duration::from_millis(100)); } From a2e7bdf50ac5e1d4c633f64f6362028b4164c003 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Wed, 17 Aug 2022 15:48:33 -0700 Subject: [PATCH 17/51] Rust v1.63.0 (#27148) * Upgrade to Rust v1.63.0 * Add nightly_clippy_allows * Resolve some new clippy nightly lints * Increase QUIC packets completion timeout Co-authored-by: Michael Vines --- account-decoder/src/parse_address_lookup_table.rs | 2 +- banks-server/src/banks_server.rs | 10 +++------- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 4 ++-- ci/test-checks.sh | 14 ++++++++++++++ client/tests/quic_client.rs | 2 +- core/src/banking_stage.rs | 2 +- core/src/sigverify_shreds.rs | 2 +- frozen-abi/src/abi_example.rs | 2 +- gossip/src/crds_gossip_pull.rs | 2 +- ledger/src/bigtable_upload.rs | 2 +- ledger/src/blockstore.rs | 2 +- ledger/src/blockstore_meta.rs | 2 +- ledger/src/shred.rs | 2 +- ledger/src/shred/shred_code.rs | 2 +- local-cluster/src/local_cluster.rs | 2 +- perf/src/sigverify.rs | 7 +------ poh/src/poh_recorder.rs | 2 +- rpc/src/rpc.rs | 6 ++---- rpc/src/rpc_subscriptions.rs | 5 +---- runtime/src/account_rent_state.rs | 2 +- runtime/src/accounts.rs | 2 +- runtime/src/accounts_db.rs | 8 ++++---- runtime/src/bank.rs | 12 ++++++------ runtime/src/expected_rent_collection.rs | 8 ++++---- runtime/src/hardened_unpack.rs | 2 +- runtime/src/in_mem_accounts_index.rs | 2 ++ runtime/src/serde_snapshot.rs | 4 ++-- runtime/src/serde_snapshot/newer.rs | 4 ++-- runtime/src/serde_snapshot/tests.rs | 2 +- runtime/src/snapshot_minimizer.rs | 2 +- runtime/src/snapshot_utils.rs | 2 +- runtime/src/storable_accounts.rs | 2 +- runtime/src/system_instruction_processor.rs | 2 +- sdk/program/src/message/compiled_keys.rs | 10 +++++----- sdk/program/src/nonce/state/mod.rs | 2 +- sdk/program/src/stake/tools.rs | 2 +- streamer/src/streamer.rs | 2 +- validator/src/bootstrap.rs | 6 ++---- zk-token-sdk/src/instruction/close_account.rs | 2 +- zk-token-sdk/src/instruction/withdraw.rs | 2 +- 42 files changed, 78 insertions(+), 78 deletions(-) diff --git a/account-decoder/src/parse_address_lookup_table.rs b/account-decoder/src/parse_address_lookup_table.rs index 26955d74a74242..ca461f2636e92a 100644 --- a/account-decoder/src/parse_address_lookup_table.rs +++ b/account-decoder/src/parse_address_lookup_table.rs @@ -19,7 +19,7 @@ pub fn parse_address_lookup_table( }) } -#[derive(Debug, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase", tag = "type", content = "info")] pub enum LookupTableAccountType { Uninitialized, diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index c73844d2571560..a4b65601c389b3 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -153,13 +153,9 @@ fn verify_transaction( transaction: &Transaction, feature_set: &Arc, ) -> transaction::Result<()> { - if let Err(err) = transaction.verify() { - Err(err) - } else if let Err(err) = transaction.verify_precompiles(feature_set) { - Err(err) - } else { - Ok(()) - } + transaction.verify()?; + transaction.verify_precompiles(feature_set)?; + Ok(()) } fn simulate_transaction( diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index fff0f366d32f29..12aeff7e5e0b81 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.60.0 +FROM solanalabs/rust:1.63.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 6805f85fcd85df..a256d308d9b27a 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.60.0 +FROM rust:1.63.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index dc3570fa939e79..792863c3280fa1 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.60.0 + stable_version=1.63.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2022-04-01 + nightly_version=2022-08-12 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 72c174395bd1d9..65e5e6271aa4bf 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -65,11 +65,25 @@ fi _ ci/order-crates-for-publishing.py +nightly_clippy_allows=( + # This lint occurs all over the code base + "--allow=clippy::significant_drop_in_scrutinee" + + # The prost crate, used by solana-storage-proto, generates Rust source that + # triggers this lint. Need to resolve upstream in prost + "--allow=clippy::derive_partial_eq_without_eq" + + # This link seems to incorrectly trigger in + # `programs/bpf_loader/src/syscalls/{lib,cpi}.rs` + "--allow=clippy::explicit_auto_deref" +) + # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there _ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- \ --deny=warnings \ --deny=clippy::integer_arithmetic \ + "${nightly_clippy_allows[@]}" _ scripts/cargo-for-all-lock-files.sh -- nightly sort --workspace --check _ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check diff --git a/client/tests/quic_client.rs b/client/tests/quic_client.rs index 980476aee7b2c6..1c5348177dd644 100644 --- a/client/tests/quic_client.rs +++ b/client/tests/quic_client.rs @@ -27,7 +27,7 @@ mod tests { let mut all_packets = vec![]; let now = Instant::now(); let mut total_packets: usize = 0; - while now.elapsed().as_secs() < 5 { + while now.elapsed().as_secs() < 10 { if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) { total_packets = total_packets.saturating_add(packets.len()); all_packets.push(packets) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 2547c00f94e5ca..1c3e95e2bd6c5d 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1335,7 +1335,7 @@ impl BankingStage { ); retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map( - |(index, execution_result)| execution_result.was_executed().then(|| index), + |(index, execution_result)| execution_result.was_executed().then_some(index), )); return ExecuteAndCommitTransactionsOutput { diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index f9a50ab8b2a954..f1f08ec671d2f3 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -151,7 +151,7 @@ fn get_slot_leaders( let leader = leaders.entry(slot).or_insert_with(|| { let leader = leader_schedule_cache.slot_leader_at(slot, Some(bank))?; // Discard the shred if the slot leader is the node itself. - (&leader != self_pubkey).then(|| leader) + (&leader != self_pubkey).then_some(leader) }); if leader.is_none() { packet.meta.set_discard(true); diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index e0dfa50b8acea6..2e1bdbcac16d0d 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -411,7 +411,7 @@ lazy_static! { impl AbiExample for &Vec { fn example() -> Self { info!("AbiExample for (&Vec): {}", type_name::()); - &*VEC_U8 + &VEC_U8 } } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 2780bf7dabf56b..04df91227b971c 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -256,7 +256,7 @@ impl CrdsGossipPull { if let Some(ping) = ping { pings.push((peer.gossip, ping)); } - check.then(|| (weight, peer)) + check.then_some((weight, peer)) }) .unzip() }; diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index f43b07db12592a..c8cdef587b1fc7 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -60,7 +60,7 @@ pub async fn upload_confirmed_blocks( starting_slot, err ) })? - .map_while(|slot| (slot <= ending_slot).then(|| slot)) + .map_while(|slot| (slot <= ending_slot).then_some(slot)) .collect(); if blockstore_slots.is_empty() { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index acacf9d842a7e5..66340b5cb00034 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3145,7 +3145,7 @@ impl Blockstore { } .expect("fetch from DuplicateSlots column family failed")?; let new_shred = Shred::new_from_serialized_shred(payload).unwrap(); - (existing_shred != *new_shred.payload()).then(|| existing_shred) + (existing_shred != *new_shred.payload()).then_some(existing_shred) } pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool { diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 65101fe98348ba..5cacf78198dafb 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -61,7 +61,7 @@ mod serde_compat { D: Deserializer<'de>, { let val = u64::deserialize(deserializer)?; - Ok((val != u64::MAX).then(|| val)) + Ok((val != u64::MAX).then_some(val)) } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index e17055b1e7d9a9..bef3df72515640 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -613,7 +613,7 @@ pub mod layout { merkle::ShredData::get_signed_message_range(proof_size)? } }; - (shred.len() <= range.end).then(|| range) + (shred.len() <= range.end).then_some(range) } pub(crate) fn get_reference_tick(shred: &[u8]) -> Result { diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 538bb25427f38f..1fe3fef026ff18 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -119,7 +119,7 @@ pub(super) fn erasure_shard_index(shred: &T) -> Option let position = usize::from(coding_header.position); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = position.checked_add(num_data_shreds)?; - (index < fec_set_size).then(|| index) + (index < fec_set_size).then_some(index) } pub(super) fn sanitize(shred: &T) -> Result<(), Error> { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index f7b68647053eaf..0f1ca19f876aff 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -319,7 +319,7 @@ impl LocalCluster { }) .collect(); for (stake, validator_config, (key, _)) in izip!( - (&config.node_stakes[1..]).iter(), + config.node_stakes[1..].iter(), config.validator_configs[1..].iter(), validator_keys[1..].iter(), ) { diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index aee1b310dd59d9..1e40d29adcf13d 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -830,12 +830,7 @@ mod tests { pub fn memfind(a: &[A], b: &[A]) -> Option { assert!(a.len() >= b.len()); let end = a.len() - b.len() + 1; - for i in 0..end { - if a[i..i + b.len()] == b[..] { - return Some(i); - } - } - None + (0..end).find(|&i| a[i..i + b.len()] == b[..]) } #[test] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index aef2d7393e9f51..d6c85c3fdf7f3f 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -505,7 +505,7 @@ impl PohRecorder { start: Arc::new(Instant::now()), min_tick_height: bank.tick_height(), max_tick_height: bank.max_tick_height(), - transaction_index: track_transaction_indexes.then(|| 0), + transaction_index: track_transaction_indexes.then_some(0), }; trace!("new working bank"); assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot()); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index fdf72d8f5d7299..9cad136b581927 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -129,7 +129,7 @@ fn new_response(bank: &Bank, value: T) -> RpcResponse { /// Wrapper for rpc return types of methods that provide responses both with and without context. /// Main purpose of this is to fix methods that lack context information in their return type, /// without breaking backwards compatibility. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(untagged)] pub enum OptionalContext { Context(RpcResponse), @@ -3646,9 +3646,7 @@ pub mod rpc_full { } if !skip_preflight { - if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) { - return Err(e); - } + verify_transaction(&transaction, &preflight_bank.feature_set)?; match meta.health.check() { RpcHealthStatus::Ok => (), diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index bd9fe337460279..896b6a9ad5f453 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1001,10 +1001,7 @@ impl RpcSubscriptions { let mut slots_to_notify: Vec<_> = (*w_last_unnotified_slot..slot).collect(); let ancestors = bank.proper_ancestors_set(); - slots_to_notify = slots_to_notify - .into_iter() - .filter(|slot| ancestors.contains(slot)) - .collect(); + slots_to_notify.retain(|slot| ancestors.contains(slot)); slots_to_notify.push(slot); for s in slots_to_notify { // To avoid skipping a slot that fails this condition, diff --git a/runtime/src/account_rent_state.rs b/runtime/src/account_rent_state.rs index 629502caf475fe..74cbc5b81af5f1 100644 --- a/runtime/src/account_rent_state.rs +++ b/runtime/src/account_rent_state.rs @@ -104,7 +104,7 @@ pub(crate) fn check_rent_state( .get_account_at_index(index) .expect(expect_msg) .borrow(), - include_account_index_in_err.then(|| index), + include_account_index_in_err.then_some(index), prevent_crediting_accounts_that_end_rent_paying, )?; } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 86d14aaf7b681c..ade9d327ba1046 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -440,7 +440,7 @@ impl Accounts { payer_account, feature_set .is_active(&feature_set::include_account_index_in_rent_error::ID) - .then(|| payer_index), + .then_some(payer_index), feature_set .is_active(&feature_set::prevent_crediting_accounts_that_end_rent_paying::id()), ) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index caa4cc77f31376..4c789751a2405d 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2174,7 +2174,7 @@ impl AccountsDb { // figure out how many ancient accounts have been reclaimed let old_reclaims = reclaims .iter() - .filter_map(|(slot, _)| (slot < &one_epoch_old).then(|| 1)) + .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) .sum(); ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed); reclaims @@ -2392,7 +2392,7 @@ impl AccountsDb { .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then(|| slot) + (slot <= max_slot).then_some(slot) }) .collect() } @@ -3676,7 +3676,7 @@ impl AccountsDb { ) -> Option { self.get_storages_for_slot(slot).and_then(|all_storages| { self.should_move_to_ancient_append_vec(&all_storages, current_ancient, slot) - .then(|| all_storages) + .then_some(all_storages) }) } @@ -5309,7 +5309,7 @@ impl AccountsDb { // with the same slot. let is_being_flushed = !currently_contended_slots.insert(*remove_slot); // If the cache is currently flushing this slot, add it to the list - is_being_flushed.then(|| remove_slot) + is_being_flushed.then_some(remove_slot) }) .cloned() .collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8428400f7c7091..4aea09da2d3ab4 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2285,7 +2285,7 @@ impl Bank { hash: *self.hash.read().unwrap(), parent_hash: self.parent_hash, parent_slot: self.parent_slot, - hard_forks: &*self.hard_forks, + hard_forks: &self.hard_forks, transaction_count: self.transaction_count.load(Relaxed), tick_height: self.tick_height.load(Relaxed), signature_count: self.signature_count.load(Relaxed), @@ -3293,7 +3293,7 @@ impl Bank { let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().ok()?; let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?; - (slot_delta <= slots_per_epoch).then(|| { + (slot_delta <= slots_per_epoch).then_some({ ( *pubkey, ( @@ -3963,10 +3963,10 @@ impl Bank { } /// Prepare a transaction batch without locking accounts for transaction simulation. - pub(crate) fn prepare_simulation_batch<'a>( - &'a self, + pub(crate) fn prepare_simulation_batch( + &self, transaction: SanitizedTransaction, - ) -> TransactionBatch<'a, '_> { + ) -> TransactionBatch<'_, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_result = transaction .get_account_locks(tx_account_lock_limit) @@ -4367,7 +4367,7 @@ impl Bank { self.feature_set.clone(), compute_budget, timings, - &*self.sysvar_cache.read().unwrap(), + &self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, prev_accounts_data_len, diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index d049430933db33..bd6a6bb4842a85 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -684,7 +684,7 @@ pub mod tests { ); assert_eq!( result, - (!leave_alone).then(|| ExpectedRentCollection { + (!leave_alone).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -712,7 +712,7 @@ pub mod tests { ); assert_eq!( result, - (!greater).then(|| ExpectedRentCollection { + (!greater).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -909,7 +909,7 @@ pub mod tests { ); assert_eq!( result, - (account_rent_epoch != 0).then(|| ExpectedRentCollection { + (account_rent_epoch != 0).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch + 1, partition_index_from_max_slot: partition_index_max_inclusive, @@ -1084,7 +1084,7 @@ pub mod tests { }; assert_eq!( result, - some_expected.then(|| ExpectedRentCollection { + some_expected.then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index e3af855216e409..ac1c23167343fb 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -384,7 +384,7 @@ where .map(|path_buf| path_buf.as_path()) { Some(path) => { - accounts_path_processor(*file, path); + accounts_path_processor(file, path); UnpackPath::Valid(path) } None => UnpackPath::Invalid, diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index b252499267ba4b..c04e0eed1080bf 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -1418,6 +1418,8 @@ impl<'a> FlushGuard<'a> { #[must_use = "if unused, the `flushing` flag will immediately clear"] fn lock(flushing: &'a AtomicBool) -> Option { let already_flushing = flushing.swap(true, Ordering::AcqRel); + // Eager evaluation here would result in dropping Self and clearing flushing flag + #[allow(clippy::unnecessary_lazy_evaluations)] (!already_flushing).then(|| Self { flushing }) } } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 5b42208d042e7c..90d0c6db2e3220 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -65,7 +65,7 @@ pub(crate) enum SerdeStyle { const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; -#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)] +#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq, Eq)] pub struct AccountsDbFields( HashMap>, StoredMetaWriteVersion, @@ -120,7 +120,7 @@ impl SnapshotAccountsDbFields { // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot incremental_snapshot_storages .iter() - .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| { + .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!") })?; diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 512737106aebc9..ab27961bf2a49c 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -201,7 +201,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), @@ -228,7 +228,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 1de6ee2a5d54c6..5834a23f969116 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -155,7 +155,7 @@ fn test_accounts_serialize_style(serde_style: SerdeStyle) { accountsdb_to_stream( serde_style, &mut writer, - &*accounts.accounts_db, + &accounts.accounts_db, 0, &accounts.accounts_db.get_snapshot_storages(0, None, None).0, ) diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 69e7a99e8e7601..94a82e1d482458 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -543,7 +543,7 @@ mod tests { .accounts .iter() .filter_map(|(pubkey, account)| { - stake::program::check_id(account.owner()).then(|| *pubkey) + stake::program::check_id(account.owner()).then_some(*pubkey) }) .collect(); expected_stake_accounts.push(bootstrap_validator_pubkey); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 19e9d02f684273..6018db95d3477b 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1181,7 +1181,7 @@ fn check_are_snapshots_compatible( let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap(); (full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot()) - .then(|| ()) + .then_some(()) .ok_or_else(|| { SnapshotError::MismatchedBaseSlot( full_snapshot_archive_info.slot(), diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs index 8d79c0f78c5fe4..bfa35cf71c3e6b 100644 --- a/runtime/src/storable_accounts.rs +++ b/runtime/src/storable_accounts.rs @@ -143,7 +143,7 @@ pub mod tests { slot, &vec![(&pk, &account, slot), (&pk, &account, slot)][..], ); - assert!(!(&test3).contains_multiple_slots()); + assert!(!test3.contains_multiple_slots()); let test3 = ( slot, &vec![(&pk, &account, slot), (&pk, &account, slot + 1)][..], diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 67f1f931147cef..3b738df1d8a0e4 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1626,7 +1626,7 @@ mod tests { .unwrap(); // super fun time; callback chooses to .clean_accounts(None) or not - callback(&*bank); + callback(&bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs index d56c7aca2c4159..c689d08f39ae81 100644 --- a/sdk/program/src/message/compiled_keys.rs +++ b/sdk/program/src/message/compiled_keys.rs @@ -80,20 +80,20 @@ impl CompiledKeys { .chain( key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)), + .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then_some(*key)), ) .collect(); let readonly_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key)) + .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then_some(*key)) .collect(); let writable_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key)) + .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then_some(*key)) .collect(); let readonly_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key)) + .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then_some(*key)) .collect(); let signers_len = writable_signer_keys @@ -160,7 +160,7 @@ impl CompiledKeys { for search_key in self .key_meta_map .iter() - .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key)) + .filter_map(|(key, meta)| key_meta_filter(meta).then_some(key)) { for (key_index, key) in lookup_table_addresses.iter().enumerate() { if key == search_key { diff --git a/sdk/program/src/nonce/state/mod.rs b/sdk/program/src/nonce/state/mod.rs index a4a850b93c1cdc..d55bc9063afcff 100644 --- a/sdk/program/src/nonce/state/mod.rs +++ b/sdk/program/src/nonce/state/mod.rs @@ -46,7 +46,7 @@ impl Versions { Self::Current(state) => match **state { State::Uninitialized => None, State::Initialized(ref data) => { - (recent_blockhash == &data.blockhash()).then(|| data) + (recent_blockhash == &data.blockhash()).then_some(data) } }, } diff --git a/sdk/program/src/stake/tools.rs b/sdk/program/src/stake/tools.rs index 842a822b0ea329..e0447f49fc69c9 100644 --- a/sdk/program/src/stake/tools.rs +++ b/sdk/program/src/stake/tools.rs @@ -28,7 +28,7 @@ fn get_minimum_delegation_return_data() -> Result { .ok_or(ProgramError::InvalidInstructionData) .and_then(|(program_id, return_data)| { (program_id == super::program::id()) - .then(|| return_data) + .then_some(return_data) .ok_or(ProgramError::IncorrectProgramId) }) .and_then(|return_data| { diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 3492f60c8933a8..1ef9b989304ebb 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -307,7 +307,7 @@ fn recv_send( let packets = packet_batch.iter().filter_map(|pkt| { let addr = pkt.meta.socket_addr(); let data = pkt.data(..)?; - socket_addr_space.check(&addr).then(|| (data, addr)) + socket_addr_space.check(&addr).then_some((data, addr)) }); batch_send(sock, &packets.collect::>())?; Ok(()) diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index fec9f6d409709c..c5a4b65d4b1229 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -409,7 +409,7 @@ pub fn attempt_download_genesis_and_snapshot( .map_err(|err| format!("Failed to get RPC node slot: {}", err))?; info!("RPC node root slot: {}", rpc_client_slot); - if let Err(err) = download_snapshots( + download_snapshots( full_snapshot_archives_dir, incremental_snapshot_archives_dir, validator_config, @@ -422,9 +422,7 @@ pub fn attempt_download_genesis_and_snapshot( download_abort_count, snapshot_hash, rpc_contact_info, - ) { - return Err(err); - }; + )?; if let Some(url) = bootstrap_config.check_vote_account.as_ref() { let rpc_client = RpcClient::new(url); diff --git a/zk-token-sdk/src/instruction/close_account.rs b/zk-token-sdk/src/instruction/close_account.rs index 4525f87901cd71..b6702e3051f168 100644 --- a/zk-token-sdk/src/instruction/close_account.rs +++ b/zk-token-sdk/src/instruction/close_account.rs @@ -41,7 +41,7 @@ impl CloseAccountData { keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, ) -> Result { - let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); let mut transcript = CloseAccountProof::transcript_new(&pod_pubkey, &pod_ciphertext); diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 9aa606e8ca4203..64f540a591804e 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -62,7 +62,7 @@ impl WithdrawData { // current source balance let final_ciphertext = current_ciphertext - &ElGamal::encode(amount); - let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into(); let mut transcript = WithdrawProof::transcript_new(&pod_pubkey, &pod_final_ciphertext); let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript); From 0d6a223e635402ebcf303f1754917e24caa67b62 Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Wed, 17 Aug 2022 22:07:40 -0400 Subject: [PATCH 18/51] docs: updated "transaction fees" page (#26861) * docs: transaction fees, compute units, compute budget * docs: added messages definition * Revert "docs: added messages definition" This reverts commit 3c56156dfaaf17158c5eafbc5877080a83607a06. * docs: added messages definition * Update docs/src/transaction_fees.md Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> * fix: updates from feedback Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> --- .../developing/programming-model/runtime.md | 37 +++++----- docs/src/terminology.md | 32 ++++++--- docs/src/transaction_fees.md | 67 ++++++++++++++++--- 3 files changed, 102 insertions(+), 34 deletions(-) diff --git a/docs/src/developing/programming-model/runtime.md b/docs/src/developing/programming-model/runtime.md index f0d402508808da..ac8284b723d92e 100644 --- a/docs/src/developing/programming-model/runtime.md +++ b/docs/src/developing/programming-model/runtime.md @@ -49,7 +49,9 @@ To prevent abuse of computational resources, each transaction is allocated a compute budget. The budget specifies a maximum number of compute units that a transaction can consume, the costs associated with different types of operations the transaction may perform, and operational bounds the transaction must adhere -to. As the transaction is processed compute units are consumed by its +to. + +As the transaction is processed compute units are consumed by its instruction's programs performing operations such as executing BPF instructions, calling syscalls, etc... When the transaction consumes its entire budget, or exceeds a bound such as attempting a call stack that is too deep, the runtime @@ -71,11 +73,11 @@ budget, or exceeds a bound, the entire invocation chain and the top level transaction processing are halted. The current [compute -budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) +budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) can be found in the Solana Program Runtime. -can be found in the Solana Program Runtime. +#### Example Compute Budget -For example, if the current budget is: +For example, if the compute budget set in the Solana runtime is: ```rust max_units: 1,400,000, @@ -89,21 +91,23 @@ log_pubkey_units: 100, ... ``` -Then the transaction +Then any transaction: - Could execute 1,400,000 BPF instructions, if it did nothing else. - Cannot exceed 4k of stack usage. - Cannot exceed a BPF call depth of 64. - Cannot exceed 4 levels of cross-program invocations. -Since the compute budget is consumed incrementally as the transaction executes, -the total budget consumption will be a combination of the various costs of the -operations it performs. +> **NOTE:** Since the compute budget is consumed incrementally as the transaction executes, +> the total budget consumption will be a combination of the various costs of the +> operations it performs. At runtime a program may log how much of the compute budget remains. See [debugging](developing/on-chain-programs/debugging.md#monitoring-compute-budget-consumption) for more information. +### Prioritization fees + A transaction may set the maximum number of compute units it is allowed to consume and the compute unit price by including a `SetComputeUnitLimit` and a `SetComputeUnitPrice` @@ -112,20 +116,19 @@ respectively. If no `SetComputeUnitLimit` is provided the limit will be calculated as the product of the number of instructions in the transaction (excluding the [Compute -budget -instructions](https://github.com/solana-labs/solana/blob/db32549c00a1b5370fcaf128981ad3323bbd9570/sdk/src/compute_budget.rs#L22)) -and the default per-instruction units, which is currently 200k. - -Note that a transaction's prioritization fee is calculated by multiplying the -number of compute units by the compute unit price (measured in micro-lamports) -set by the transaction via compute budget instructions. So transactions should -request the minimum amount of compute units required for execution to minimize +budget instructions](https://github.com/solana-labs/solana/blob/db32549c00a1b5370fcaf128981ad3323bbd9570/sdk/src/compute_budget.rs#L22)) and the default per-instruction units, which is currently 200k. + +> **NOTE:** A transaction's [prioritization fee](./../../terminology.md#prioritization-fee) is calculated by multiplying the +> number of _compute units_ by the _compute unit price_ (measured in micro-lamports) +> set by the transaction via compute budget instructions. + +Transactions should request the minimum amount of compute units required for execution to minimize fees. Also note that fees are not adjusted when the number of requested compute units exceeds the number of compute units actually consumed by an executed transaction. Compute Budget instructions don't require any accounts and don't consume any -compute units to process. Transactions can only contain one of each type of +compute units to process. Transactions can only contain one of each type of compute budget instruction, duplicate types will result in an error. The `ComputeBudgetInstruction::set_compute_unit_limit` and diff --git a/docs/src/terminology.md b/docs/src/terminology.md index 2c22efb2bbdfa3..038aa2d302c9c3 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -1,8 +1,10 @@ --- title: Terminology +description: "Learn the essential terminology used thoughtout the Solana blockchain and development models." +keywords: "terms, dictionary, definitions, define, programming models" --- -The following terms are used throughout the documentation. +The following terms are used throughout the Solana documentation and development ecosystem. ## account @@ -12,9 +14,9 @@ Like an account at a traditional bank, a Solana account may hold funds called [l The key may be one of: -* an ed25519 public key -* a program-derived account address (32byte value forced off the ed25519 curve) -* a hash of an ed25519 public key with a 32 character string +- an ed25519 public key +- a program-derived account address (32byte value forced off the ed25519 curve) +- a hash of an ed25519 public key with a 32 character string ## account owner @@ -34,7 +36,7 @@ A contiguous set of [entries](#entry) on the ledger covered by a [vote](#ledger- ## blockhash -A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. +A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. ## block height @@ -56,6 +58,14 @@ A computer program that accesses the Solana server network [cluster](#cluster). A set of [validators](#validator) maintaining a single [ledger](#ledger). +## compute budget + +The maximum number of [compute units](#compute-units) consumed per transaction. + +## compute units + +The smallest unit of measure for consumption of computational resources of the blockchain. + ## confirmation time The wallclock duration between a [leader](#leader) creating a [tick entry](#tick) and creating a [confirmed block](#confirmed-block). @@ -179,6 +189,12 @@ A [program](#program) with the ability to interpret the binary encoding of other The duration of time for which a [validator](#validator) is unable to [vote](#ledger-vote) on another [fork](#fork). +## message + +The structured contents of a [transaction](#transaction). Generally containing a header, array of account addresses, recent [blockhash](#blockhash), and an array of [instructions](#instruction). + +Learn more about the [message formatting inside of transactions](./developing/programming-model/transactions.md#message-format) here. + ## native token The [token](#token) used to track work done by [nodes](#node) in a cluster. @@ -221,7 +237,7 @@ A stack of proofs, each of which proves that some data existed before the proof ## prioritization fee -An additional fee user can specify in compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). +An additional fee user can specify in the compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). The prioritization fee is calculated by multiplying the requested maximum compute units by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. @@ -287,7 +303,7 @@ Tokens forfeit to the [cluster](#cluster) if malicious [validator](#validator) b ## sysvar -A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. +A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. ## thin client @@ -327,7 +343,7 @@ A set of [transactions](#transaction) that may be executed in parallel. ## validator -A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger) +A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger) ## VDF diff --git a/docs/src/transaction_fees.md b/docs/src/transaction_fees.md index c28cb32543457e..ee9fdfa43e9b93 100644 --- a/docs/src/transaction_fees.md +++ b/docs/src/transaction_fees.md @@ -1,21 +1,70 @@ --- title: Transaction Fees +description: "Transaction fees are the small fees paid to process instructions on the network. These fees are based on computation and an optional prioritization fee." +keywords: "instruction fee, processing fee, storage fee, low fee blockchain, gas, gwei, cheap network, affordable blockchain" --- -**Subject to change.** +The small fees paid to process [instructions](./terminology.md#instruction) on the Solana blockchain are known as "_transaction fees_". -Each transaction sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, contains a transaction fee. Transaction fees offer many benefits in the Solana economic design, for example they: +As each transaction (which contains one or more instructions) is sent through the network, it gets processed by the current leader validation-client. Once confirmed as a global state transaction, this _transaction fee_ is paid to the network to help support the [economic design](#economic-design) of the Solana blockchain. -- provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction, +> **NOTE:** Transaction fees are different from [account rent](./terminology.md#rent)! +> While transaction fees are paid to process instructions on the Solana network, rent is paid to store data on the blockchain. + + + +## Why pay transaction fees? + +Transaction fees offer many benefits in the Solana [economic design](#basic-economic-design) described below. Mainly: + +- they provide compensation to the validator network for the CPU/GPU resources necessary to process transactions, - reduce network spam by introducing real cost to transactions, -- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below. +- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction + +> **NOTE:** Network consensus votes are sent as normal system transfers, which means that validators pay transaction fees to participate in consensus. + +## Basic economic design + +Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on _protocol-based rewards_ to support the economy in the short term. And when the protocol derived rewards expire, predict that the revenue generated through _transaction fees_ will support the economy in the long term. + +In an attempt to create a sustainable economy on Solana through _protocol-based rewards_ and _transaction fees_: + +- a fixed portion (initially 50%) of each transaction fee is _burned_ (aka destroyed), +- with the remaining fee going to the current [leader](./terminology.md#leader) processing the transaction. + +A scheduled global inflation rate provides a source for [rewards](./implemented-proposals/staking-rewards.md) distributed to [Solana Validators](../src/running-validator.md). + +### Why burn some fees? + +As mentioned above, a fixed proportion of each transaction fee is _burned_ (aka destroyed). The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time. While still providing an inflation limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\). + +Burnt fees can also help prevent malicious validators from censoring transactions by being considered in [fork](./terminology.md#fork) selection. + +#### Example of an attack: + +In the case of a [Proof of History (PoH)](./terminology.md#proof-of-history-poh) fork with a malicious, censoring leader: + +- due to the fees lost from censoring, we would expect the total fees destroyed to be **_less than_** a comparable honest fork +- if the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves +- thus potentially reducing the incentive to censor in the first place + +## Calculating transaction fees + +Transactions fees are calculated based on two main parts: + +- a statically set base fee per signature, and +- the computational resources used during the transaction, measured in "[_compute units_](./terminology.md#compute-units)" + +Since each transaction may require a different amount of computational resources, they are alloted a maximum number of _compute units_ per transaction known as the "[_compute budget_](./terminology.md#compute-budget)". + +The execution of each instruction within a transactions consumes a different number of _compute units_. After the maximum number of _computer units_ has been consumed (aka compute budget exhaustion), the runtime will halt the transaction and return an error. Resulting in a failed transaction. -Network consensus votes are sent as normal system transfers, which means that validators pay transaction fees to participate in consensus. +> **Learn more:** compute units and the [Compute Budget](./developing/programming-model/runtime#compute-budget) in the Runtime and [requesting a fee estimate](./developing/clients/jsonrpc-api.md#getfeeformessage) from the RPC. -Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion (initially 50%) of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above. +## Prioritization fee -Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](implemented-proposals/transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical _signatures-per-slot_. In this way, the protocol can use the minimum fee to target a desired hardware utilization. By monitoring a protocol specified _signatures-per-slot_ with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual _signature-per-slot_ per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level. +Recently, Solana has introduced an optional fee called the "_[prioritization fee](./terminology.md#prioritization-fee)_". This additional fee can be paid to help boost how a transaction is prioritized against others, resulting in faster transaction execution times. -As mentioned, a fixed-proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\). +The prioritization fee is calculated by multiplying the requested maximum _compute units_ by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. -Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place. +You can read more about the [compute budget instruction](./developing/programming-model/runtime.md#compute-budget) here. From 68a5e05f81b27e2e651a79fdd722889de5164d6d Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Thu, 18 Aug 2022 04:52:54 +0200 Subject: [PATCH 19/51] sdk: Fix args after "--" in build-bpf and test-bpf (#27221) --- sdk/cargo-build-bpf/src/main.rs | 5 +++-- sdk/cargo-test-bpf/src/main.rs | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index 5de2742c2add32..0ea34f68c6cb74 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -26,8 +26,9 @@ fn main() { args.remove(0); } } - args.push("--arch".to_string()); - args.push("bpf".to_string()); + let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); + args.insert(index, "bpf".to_string()); + args.insert(index, "--arch".to_string()); print!("cargo-build-bpf child: {}", program.display()); for a in &args { print!(" {}", a); diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index fee4dc73811fe5..af5a382fdd4ec9 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -32,8 +32,9 @@ fn main() { args.remove(0); } } - args.push("--arch".to_string()); - args.push("bpf".to_string()); + let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); + args.insert(index, "bpf".to_string()); + args.insert(index, "--arch".to_string()); print!("cargo-test-bpf child: {}", program.display()); for a in &args { print!(" {}", a); From 5c9d612180f8ca92d93abe662553b6ee799f60b0 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Wed, 17 Aug 2022 19:56:57 -0700 Subject: [PATCH 20/51] Flaky Unit Test test_rpc_subscriptions (#27214) Increase unit test timeout from 5 seconds to 10 seconds --- rpc-test/tests/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 464560a309b214..5265e93f14f6fb 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -402,7 +402,7 @@ fn test_rpc_subscriptions() { } } - let deadline = Instant::now() + Duration::from_secs(5); + let deadline = Instant::now() + Duration::from_secs(10); let mut account_notifications = transactions.len(); while account_notifications > 0 { let timeout = deadline.saturating_duration_since(Instant::now()); From d2d4d4a240df1da4503cdf704f176c33a3a1474e Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 18 Aug 2022 12:33:30 +0800 Subject: [PATCH 21/51] chore: only buildkite pipelines use sccache in docker-run.sh (#27204) chore: only buildkite ci use sccache --- ci/docker-run.sh | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/ci/docker-run.sh b/ci/docker-run.sh index e154de2eefd96d..a7b94f902e8303 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -45,14 +45,16 @@ if [[ -n $CI ]]; then # Share the real ~/.cargo between docker containers in CI for speed ARGS+=(--volume "$HOME:/home") - # sccache - ARGS+=( - --env "RUSTC_WRAPPER=/home/.cargo/bin/sccache" - --env AWS_ACCESS_KEY_ID - --env AWS_SECRET_ACCESS_KEY - --env SCCACHE_BUCKET - --env SCCACHE_REGION - ) + if [[ -n $BUILDKITE ]]; then + # sccache + ARGS+=( + --env "RUSTC_WRAPPER=/home/.cargo/bin/sccache" + --env AWS_ACCESS_KEY_ID + --env AWS_SECRET_ACCESS_KEY + --env SCCACHE_BUCKET + --env SCCACHE_REGION + ) + fi else # Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux # ~/.cargo From 7d765e3d675d4b49c698b793ff9345d0b648c989 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 18 Aug 2022 06:21:16 +0100 Subject: [PATCH 22/51] clean feature: `prevent_calling_precompiles_as_programs` (#27100) * clean feature: prevent_calling_precompiles_as_programs * fix tests * fix test * remove comment * fix test * feedback --- programs/bpf_loader/src/syscalls/cpi.rs | 10 ++---- programs/bpf_loader/src/syscalls/mod.rs | 3 +- .../tests/process_transaction.rs | 25 --------------- runtime/src/bank.rs | 12 +++---- runtime/src/builtins.rs | 32 ++----------------- runtime/src/genesis_utils.rs | 2 ++ runtime/src/message_processor.rs | 8 ++--- sdk/src/feature_set.rs | 2 -- sdk/src/precompiles.rs | 8 ++--- 9 files changed, 20 insertions(+), 82 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index d465c506288349..215f7267fb6f7d 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -834,7 +834,6 @@ fn check_authorized_program( instruction_data: &[u8], invoke_context: &InvokeContext, ) -> Result<(), EbpfError> { - #[allow(clippy::blocks_in_if_conditions)] if native_loader::check_id(program_id) || bpf_loader::check_id(program_id) || bpf_loader_deprecated::check_id(program_id) @@ -842,12 +841,9 @@ fn check_authorized_program( && !(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data) || bpf_loader_upgradeable::is_set_authority_instruction(instruction_data) || bpf_loader_upgradeable::is_close_instruction(instruction_data))) - || (invoke_context - .feature_set - .is_active(&prevent_calling_precompiles_as_programs::id()) - && is_precompile(program_id, |feature_id: &Pubkey| { - invoke_context.feature_set.is_active(feature_id) - })) + || is_precompile(program_id, |feature_id: &Pubkey| { + invoke_context.feature_set.is_active(feature_id) + }) { return Err(SyscallError::ProgramNotSupported(*program_id).into()); } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 467cdd4f74dd7a..2175a1f2a12c27 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -36,8 +36,7 @@ use { self, blake3_syscall_enabled, check_physical_overlapping, check_slice_translation_size, curve25519_syscall_enabled, disable_cpi_setting_executable_and_rent_epoch, disable_fees_sysvar, enable_early_verification_of_account_modifications, - libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, - prevent_calling_precompiles_as_programs, syscall_saturated_math, + libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, syscall_saturated_math, }, hash::{Hasher, HASH_BYTES}, instruction::{ diff --git a/programs/ed25519-tests/tests/process_transaction.rs b/programs/ed25519-tests/tests/process_transaction.rs index 0ef08e42fd9796..ac786a5e7094d1 100644 --- a/programs/ed25519-tests/tests/process_transaction.rs +++ b/programs/ed25519-tests/tests/process_transaction.rs @@ -4,7 +4,6 @@ use { solana_program_test::*, solana_sdk::{ ed25519_instruction::new_ed25519_instruction, - feature_set, signature::Signer, transaction::{Transaction, TransactionError}, }, @@ -60,27 +59,3 @@ async fn test_failure() { )) ); } - -#[tokio::test] -async fn test_success_call_builtin_program() { - let mut program_test = ProgramTest::default(); - program_test.deactivate_feature(feature_set::prevent_calling_precompiles_as_programs::id()); - let mut context = program_test.start_with_context().await; - - let client = &mut context.banks_client; - let payer = &context.payer; - let recent_blockhash = context.last_blockhash; - - let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng()); - let message_arr = b"hello"; - let instruction = new_ed25519_instruction(&privkey, message_arr); - - let transaction = Transaction::new_signed_with_payer( - &[instruction], - Some(&payer.pubkey()), - &[payer], - recent_blockhash, - ); - - assert_matches!(client.process_transaction(transaction).await, Ok(())); -} diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4aea09da2d3ab4..f8a3187ad8b163 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -14571,25 +14571,25 @@ pub(crate) mod tests { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "9tLrxkBoNE7zEUZ2g72ZwE4fTfhUQnhC8A4Xt4EmYhP1" + "5gY6TCgB9NymbbxgFgAjvYLpXjyXiVyyruS1aEwbWKLK" ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "AxphC8xDj9gmFosor5gyiovNvPVMydJCFRUTxn2wFiQf" + "6uJ5C4QDXWCN39EjJ5Frcz73nnS2jMJ55KgkQff12Fqp" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "4vZCSbBuL8xjE43rCy9Cm3dCh1BMj45heMiMb6n6qgzA" + "4u8bxZRLYdQBkWRBwmpcwcQVMCJoEpzY7hCuAzxr3kCe" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "46LUpeBdJuisnfwgYisvh4x7jnxzBaLfHF614GtcTs59" + "4c5F8UbcDD8FM7qXcfv6BPPo6nHNYJQmN5gHiCMTdEzX" ); break; } @@ -14817,7 +14817,7 @@ pub(crate) mod tests { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![9, 1, 7]); + assert_eq!(alive_counts, vec![11, 1, 7]); } #[test] @@ -14863,7 +14863,7 @@ pub(crate) mod tests { .map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account)) .sum(); // consumed_budgets represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(consumed_budgets, 10); + assert_eq!(consumed_budgets, 12); } #[test] diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 0e3f98843a18a7..d7ec37aed3337e 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -2,10 +2,8 @@ use solana_frozen_abi::abi_example::AbiExample; use { crate::system_instruction_processor, - solana_program_runtime::invoke_context::{InvokeContext, ProcessInstructionWithContext}, - solana_sdk::{ - feature_set, instruction::InstructionError, pubkey::Pubkey, stake, system_program, - }, + solana_program_runtime::invoke_context::ProcessInstructionWithContext, + solana_sdk::{feature_set, pubkey::Pubkey, stake, system_program}, std::fmt, }; @@ -141,14 +139,6 @@ fn genesis_builtins() -> Vec { ] } -/// place holder for precompile programs, remove when the precompile program is deactivated via feature activation -fn dummy_process_instruction( - _first_instruction_account: usize, - _invoke_context: &mut InvokeContext, -) -> Result<(), InstructionError> { - Ok(()) -} - /// Dynamic feature transitions for builtin programs fn builtin_feature_transitions() -> Vec { vec![ @@ -160,24 +150,6 @@ fn builtin_feature_transitions() -> Vec { ), feature_id: feature_set::add_compute_budget_program::id(), }, - BuiltinFeatureTransition::RemoveOrRetain { - previously_added_builtin: Builtin::new( - "secp256k1_program", - solana_sdk::secp256k1_program::id(), - dummy_process_instruction, - ), - addition_feature_id: feature_set::secp256k1_program_enabled::id(), - removal_feature_id: feature_set::prevent_calling_precompiles_as_programs::id(), - }, - BuiltinFeatureTransition::RemoveOrRetain { - previously_added_builtin: Builtin::new( - "ed25519_program", - solana_sdk::ed25519_program::id(), - dummy_process_instruction, - ), - addition_feature_id: feature_set::ed25519_program_enabled::id(), - removal_feature_id: feature_set::prevent_calling_precompiles_as_programs::id(), - }, BuiltinFeatureTransition::Add { builtin: Builtin::new( "address_lookup_table_program", diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 73ab5c105b0d1a..d5330df0031487 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -27,6 +27,7 @@ pub fn bootstrap_validator_stake_lamports() -> u64 { // Number of lamports automatically used for genesis accounts pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { const NUM_BUILTIN_PROGRAMS: u64 = 4; + const NUM_PRECOMPILES: u64 = 2; const FEES_SYSVAR_MIN_BALANCE: u64 = 946_560; const STAKE_HISTORY_MIN_BALANCE: u64 = 114_979_200; const CLOCK_SYSVAR_MIN_BALANCE: u64 = 1_169_280; @@ -41,6 +42,7 @@ pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { + EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE + RECENT_BLOCKHASHES_SYSVAR_MIN_BALANCE + NUM_BUILTIN_PROGRAMS + + NUM_PRECOMPILES } pub struct ValidatorVoteKeypairs { diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index c1b06c141dbc98..23eb1e800e9818 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -10,7 +10,7 @@ use { }, solana_sdk::{ account::WritableAccount, - feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, + feature_set::FeatureSet, hash::Hash, message::SanitizedMessage, precompiles::is_precompile, @@ -86,10 +86,8 @@ impl MessageProcessor { .zip(program_indices.iter()) .enumerate() { - let is_precompile = invoke_context - .feature_set - .is_active(&prevent_calling_precompiles_as_programs::id()) - && is_precompile(program_id, |id| invoke_context.feature_set.is_active(id)); + let is_precompile = + is_precompile(program_id, |id| invoke_context.feature_set.is_active(id)); // Fixup the special instructions key if present // before the account pre-values are taken care of diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 0c14aa18edb235..f3c86948079ac6 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -201,8 +201,6 @@ pub mod do_support_realloc { solana_sdk::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); } -// Note: when this feature is cleaned up, also remove the secp256k1 program from -// the list of builtins and remove its files from /programs pub mod prevent_calling_precompiles_as_programs { solana_sdk::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); } diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs index 1f6149772c88e8..e97474b25cc38d 100644 --- a/sdk/src/precompiles.rs +++ b/sdk/src/precompiles.rs @@ -4,9 +4,7 @@ use { crate::{ - decode_error::DecodeError, - feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, - instruction::CompiledInstruction, + decode_error::DecodeError, feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, }, lazy_static::lazy_static, @@ -81,12 +79,12 @@ lazy_static! { static ref PRECOMPILES: Vec = vec![ Precompile::new( crate::secp256k1_program::id(), - Some(prevent_calling_precompiles_as_programs::id()), + None, // always enabled crate::secp256k1_instruction::verify, ), Precompile::new( crate::ed25519_program::id(), - Some(prevent_calling_precompiles_as_programs::id()), + None, // always enabled crate::ed25519_instruction::verify, ), ]; From fda395af83863d823d9d1cb701688cddba981c2f Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Thu, 18 Aug 2022 10:17:32 +0200 Subject: [PATCH 23/51] Add get_account_with_commitment to BenchTpsClient (#27176) --- bench-tps/src/bench_tps_client.rs | 7 +++++++ bench-tps/src/bench_tps_client/bank_client.rs | 14 ++++++++++++++ bench-tps/src/bench_tps_client/rpc_client.rs | 17 ++++++++++++++++- bench-tps/src/bench_tps_client/thin_client.rs | 16 +++++++++++++++- bench-tps/src/bench_tps_client/tpu_client.rs | 18 +++++++++++++++++- 5 files changed, 69 insertions(+), 3 deletions(-) diff --git a/bench-tps/src/bench_tps_client.rs b/bench-tps/src/bench_tps_client.rs index 3d34a3a041a361..0ecca308ef647f 100644 --- a/bench-tps/src/bench_tps_client.rs +++ b/bench-tps/src/bench_tps_client.rs @@ -83,6 +83,13 @@ pub trait BenchTpsClient { /// Returns all information associated with the account of the provided pubkey fn get_account(&self, pubkey: &Pubkey) -> Result; + + /// Returns all information associated with the account of the provided pubkey, using explicit commitment + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result; } mod bank_client; diff --git a/bench-tps/src/bench_tps_client/bank_client.rs b/bench-tps/src/bench_tps_client/bank_client.rs index 9fae1f7a93c7f6..20323656a3b3b0 100644 --- a/bench-tps/src/bench_tps_client/bank_client.rs +++ b/bench-tps/src/bench_tps_client/bank_client.rs @@ -93,4 +93,18 @@ impl BenchTpsClient for BankClient { }) }) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + SyncClient::get_account_with_commitment(self, pubkey, commitment_config) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/rpc_client.rs b/bench-tps/src/bench_tps_client/rpc_client.rs index dd34a11f5820d1..158fddd0a4a6fb 100644 --- a/bench-tps/src/bench_tps_client/rpc_client.rs +++ b/bench-tps/src/bench_tps_client/rpc_client.rs @@ -1,5 +1,5 @@ use { - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::rpc_client::RpcClient, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, @@ -84,4 +84,19 @@ impl BenchTpsClient for RpcClient { fn get_account(&self, pubkey: &Pubkey) -> Result { RpcClient::get_account(self, pubkey).map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + RpcClient::get_account_with_commitment(self, pubkey, commitment_config) + .map(|res| res.value) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/thin_client.rs b/bench-tps/src/bench_tps_client/thin_client.rs index 13d77078453c8a..16686b8186ecfb 100644 --- a/bench-tps/src/bench_tps_client/thin_client.rs +++ b/bench-tps/src/bench_tps_client/thin_client.rs @@ -1,5 +1,5 @@ use { - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::thin_client::ThinClient, solana_sdk::{ account::Account, @@ -90,4 +90,18 @@ impl BenchTpsClient for ThinClient { .get_account(pubkey) .map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + SyncClient::get_account_with_commitment(self, pubkey, commitment_config) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/tpu_client.rs b/bench-tps/src/bench_tps_client/tpu_client.rs index 53b0102a00f11b..aa86e793a2a498 100644 --- a/bench-tps/src/bench_tps_client/tpu_client.rs +++ b/bench-tps/src/bench_tps_client/tpu_client.rs @@ -1,5 +1,5 @@ use { - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::tpu_client::TpuClient, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, @@ -102,4 +102,20 @@ impl BenchTpsClient for TpuClient { .get_account(pubkey) .map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + self.rpc_client() + .get_account_with_commitment(pubkey, commitment_config) + .map(|res| res.value) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } From 6d12bb6ec36dee3d46e0b530d7de6ca6d802a443 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Thu, 18 Aug 2022 02:37:19 -0700 Subject: [PATCH 24/51] Fix a corner-case panic in get_entries_in_data_block() (#27195) #### Problem get_entries_in_data_block() panics when there's inconsistency between slot_meta and data_shred. However, as we don't lock on reads, reading across multiple column families is not atomic (especially for older slots) and thus does not guarantee consistency as the background cleanup service could purge the slot in the middle. Such panic was reported in #26980 when the validator serves a high load of RPC calls. #### Summary of Changes This PR makes get_entries_in_data_block() panic only when the inconsistency between slot-meta and data-shred happens on a slot older than lowest_cleanup_slot. --- ledger/src/blockstore.rs | 43 ++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 66340b5cb00034..336dcd86bf930e 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2872,28 +2872,29 @@ impl Blockstore { .and_then(|serialized_shred| { if serialized_shred.is_none() { if let Some(slot_meta) = slot_meta { - panic!( - "Shred with - slot: {}, - index: {}, - consumed: {}, - completed_indexes: {:?} - must exist if shred index was included in a range: {} {}", - slot, - i, - slot_meta.consumed, - slot_meta.completed_data_indexes, - start_index, - end_index - ); - } else { - return Err(BlockstoreError::InvalidShredData(Box::new( - bincode::ErrorKind::Custom(format!( - "Missing shred for slot {}, index {}", - slot, i - )), - ))); + if slot > self.lowest_cleanup_slot() { + panic!( + "Shred with + slot: {}, + index: {}, + consumed: {}, + completed_indexes: {:?} + must exist if shred index was included in a range: {} {}", + slot, + i, + slot_meta.consumed, + slot_meta.completed_data_indexes, + start_index, + end_index + ); + } } + return Err(BlockstoreError::InvalidShredData(Box::new( + bincode::ErrorKind::Custom(format!( + "Missing shred for slot {}, index {}", + slot, i + )), + ))); } Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| { From d2868f439d497f472c8a7e51e90ba9fdc759066b Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 18 Aug 2022 09:48:58 -0400 Subject: [PATCH 25/51] Verify snapshot slot deltas (#26666) --- runtime/src/accounts_background_service.rs | 1 + runtime/src/snapshot_utils.rs | 293 ++++++++++++++++++++- 2 files changed, 292 insertions(+), 2 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index a0695e3373774e..c38203ab821e96 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -370,6 +370,7 @@ impl SnapshotRequestHandler { SnapshotError::MismatchedBaseSlot(..) => true, SnapshotError::NoSnapshotArchives => true, SnapshotError::MismatchedSlotHash(..) => true, + SnapshotError::VerifySlotDeltas(..) => true, } } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 6018db95d3477b..81cdbcc37a6225 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -19,6 +19,7 @@ use { snapshot_package::{ AccountsPackage, PendingAccountsPackage, SnapshotPackage, SnapshotType, }, + status_cache, }, bincode::{config::Options, serialize_into}, bzip2::bufread::BzDecoder, @@ -28,7 +29,13 @@ use { rayon::prelude::*, regex::Regex, solana_measure::measure::Measure, - solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey}, + solana_sdk::{ + clock::Slot, + genesis_config::GenesisConfig, + hash::Hash, + pubkey::Pubkey, + slot_history::{Check, SlotHistory}, + }, std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -223,9 +230,37 @@ pub enum SnapshotError { #[error("snapshot has mismatch: deserialized bank: {:?}, snapshot archive info: {:?}", .0, .1)] MismatchedSlotHash((Slot, Hash), (Slot, Hash)), + + #[error("snapshot slot deltas are invalid: {0}")] + VerifySlotDeltas(#[from] VerifySlotDeltasError), } pub type Result = std::result::Result; +/// Errors that can happen in `verify_slot_deltas()` +#[derive(Error, Debug, PartialEq, Eq)] +pub enum VerifySlotDeltasError { + #[error("too many entries: {0} (max: {1})")] + TooManyEntries(usize, usize), + + #[error("slot {0} is not a root")] + SlotIsNotRoot(Slot), + + #[error("slot {0} is greater than bank slot {1}")] + SlotGreaterThanMaxRoot(Slot, Slot), + + #[error("slot {0} has multiple entries")] + SlotHasMultipleEntries(Slot), + + #[error("slot {0} was not found in slot history")] + SlotNotFoundInHistory(Slot), + + #[error("slot {0} was in history but missing from slot deltas")] + SlotNotFoundInDeltas(Slot), + + #[error("slot history is bad and cannot be used to verify slot deltas")] + BadSlotHistory, +} + /// If the validator halts in the middle of `archive_snapshot_package()`, the temporary staging /// directory won't be cleaned up. Call this function to clean them up. pub fn remove_tmp_snapshot_archives(snapshot_archives_dir: impl AsRef) { @@ -1738,6 +1773,8 @@ fn rebuild_bank_from_snapshots( Ok(slot_deltas) })?; + verify_slot_deltas(slot_deltas.as_slice(), &bank)?; + bank.status_cache.write().unwrap().append(&slot_deltas); bank.prepare_rewrites_for_hash(); @@ -1746,6 +1783,106 @@ fn rebuild_bank_from_snapshots( Ok(bank) } +/// Verify that the snapshot's slot deltas are not corrupt/invalid +fn verify_slot_deltas( + slot_deltas: &[BankSlotDelta], + bank: &Bank, +) -> std::result::Result<(), VerifySlotDeltasError> { + let info = verify_slot_deltas_structural(slot_deltas, bank.slot())?; + verify_slot_deltas_with_history(&info.slots, &bank.get_slot_history(), bank.slot()) +} + +/// Verify that the snapshot's slot deltas are not corrupt/invalid +/// These checks are simple/structural +fn verify_slot_deltas_structural( + slot_deltas: &[BankSlotDelta], + bank_slot: Slot, +) -> std::result::Result { + // there should not be more entries than that status cache's max + let num_entries = slot_deltas.len(); + if num_entries > status_cache::MAX_CACHE_ENTRIES { + return Err(VerifySlotDeltasError::TooManyEntries( + num_entries, + status_cache::MAX_CACHE_ENTRIES, + )); + } + + let mut slots_seen_so_far = HashSet::new(); + for &(slot, is_root, ..) in slot_deltas { + // all entries should be roots + if !is_root { + return Err(VerifySlotDeltasError::SlotIsNotRoot(slot)); + } + + // all entries should be for slots less than or equal to the bank's slot + if slot > bank_slot { + return Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot( + slot, bank_slot, + )); + } + + // there should only be one entry per slot + let is_duplicate = !slots_seen_so_far.insert(slot); + if is_duplicate { + return Err(VerifySlotDeltasError::SlotHasMultipleEntries(slot)); + } + } + + // detect serious logic error for future careless changes. :) + assert_eq!(slots_seen_so_far.len(), slot_deltas.len()); + + Ok(VerifySlotDeltasStructuralInfo { + slots: slots_seen_so_far, + }) +} + +/// Computed information from `verify_slot_deltas_structural()`, that may be reused/useful later. +#[derive(Debug, PartialEq, Eq)] +struct VerifySlotDeltasStructuralInfo { + /// All the slots in the slot deltas + slots: HashSet, +} + +/// Verify that the snapshot's slot deltas are not corrupt/invalid +/// These checks use the slot history for verification +fn verify_slot_deltas_with_history( + slots_from_slot_deltas: &HashSet, + slot_history: &SlotHistory, + bank_slot: Slot, +) -> std::result::Result<(), VerifySlotDeltasError> { + // ensure the slot history is valid (as much as possible), since we're using it to verify the + // slot deltas + if slot_history.newest() != bank_slot { + return Err(VerifySlotDeltasError::BadSlotHistory); + } + + // all slots in the slot deltas should be in the bank's slot history + let slot_missing_from_history = slots_from_slot_deltas + .iter() + .find(|slot| slot_history.check(**slot) != Check::Found); + if let Some(slot) = slot_missing_from_history { + return Err(VerifySlotDeltasError::SlotNotFoundInHistory(*slot)); + } + + // all slots in the history should be in the slot deltas (up to MAX_CACHE_ENTRIES) + // this ensures nothing was removed from the status cache + // + // go through the slot history and make sure there's an entry for each slot + // note: it's important to go highest-to-lowest since the status cache removes + // older entries first + // note: we already checked above that `bank_slot == slot_history.newest()` + let slot_missing_from_deltas = (slot_history.oldest()..=slot_history.newest()) + .rev() + .filter(|slot| slot_history.check(*slot) == Check::Found) + .take(status_cache::MAX_CACHE_ENTRIES) + .find(|slot| !slots_from_slot_deltas.contains(slot)); + if let Some(slot) = slot_missing_from_deltas { + return Err(VerifySlotDeltasError::SlotNotFoundInDeltas(slot)); + } + + Ok(()) +} + pub(crate) fn get_snapshot_file_name(slot: Slot) -> String { slot.to_string() } @@ -2167,13 +2304,14 @@ fn can_submit_accounts_package( mod tests { use { super::*, - crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, + crate::{accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, status_cache::Status}, assert_matches::assert_matches, bincode::{deserialize_from, serialize_into}, solana_sdk::{ genesis_config::create_genesis_config, native_token::sol_to_lamports, signature::{Keypair, Signer}, + slot_history::SlotHistory, system_transaction, transaction::SanitizedTransaction, }, @@ -3831,4 +3969,155 @@ mod tests { assert_eq!(expected_result, actual_result); } } + + #[test] + fn test_verify_slot_deltas_structural_good() { + // NOTE: slot deltas do not need to be sorted + let slot_deltas = vec![ + (222, true, Status::default()), + (333, true, Status::default()), + (111, true, Status::default()), + ]; + + let bank_slot = 333; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Ok(VerifySlotDeltasStructuralInfo { + slots: HashSet::from([111, 222, 333]) + }) + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_too_many_entries() { + let bank_slot = status_cache::MAX_CACHE_ENTRIES as Slot + 1; + let slot_deltas: Vec<_> = (0..bank_slot) + .map(|slot| (slot, true, Status::default())) + .collect(); + + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::TooManyEntries( + status_cache::MAX_CACHE_ENTRIES + 1, + status_cache::MAX_CACHE_ENTRIES + )), + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_not_root() { + let slot_deltas = vec![ + (111, true, Status::default()), + (222, false, Status::default()), // <-- slot is not a root + (333, true, Status::default()), + ]; + + let bank_slot = 333; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!(result, Err(VerifySlotDeltasError::SlotIsNotRoot(222))); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_greater_than_bank() { + let slot_deltas = vec![ + (222, true, Status::default()), + (111, true, Status::default()), + (555, true, Status::default()), // <-- slot is greater than the bank slot + ]; + + let bank_slot = 444; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot( + 555, bank_slot + )), + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_has_multiple_entries() { + let slot_deltas = vec![ + (111, true, Status::default()), + (222, true, Status::default()), + (111, true, Status::default()), // <-- slot is a duplicate + ]; + + let bank_slot = 222; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotHasMultipleEntries(111)), + ); + } + + #[test] + fn test_verify_slot_deltas_with_history_good() { + let mut slots_from_slot_deltas = HashSet::default(); + let mut slot_history = SlotHistory::default(); + // note: slot history expects slots to be added in numeric order + for slot in [0, 111, 222, 333, 444] { + slots_from_slot_deltas.insert(slot); + slot_history.add(slot); + } + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + assert_eq!(result, Ok(())); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_history() { + let bank_slot = 444; + let result = verify_slot_deltas_with_history( + &HashSet::default(), + &SlotHistory::default(), // <-- will only have an entry for slot 0 + bank_slot, + ); + assert_eq!(result, Err(VerifySlotDeltasError::BadSlotHistory)); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_not_in_history() { + let slots_from_slot_deltas = HashSet::from([ + 0, // slot history has slot 0 added by default + 444, 222, + ]); + let mut slot_history = SlotHistory::default(); + slot_history.add(444); // <-- slot history is missing slot 222 + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotNotFoundInHistory(222)), + ); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_not_in_deltas() { + let slots_from_slot_deltas = HashSet::from([ + 0, // slot history has slot 0 added by default + 444, 222, + // <-- slot deltas is missing slot 333 + ]); + let mut slot_history = SlotHistory::default(); + slot_history.add(222); + slot_history.add(333); + slot_history.add(444); + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotNotFoundInDeltas(333)), + ); + } } From 7569f8eded036e46a22483ef9719578a56d299b4 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Thu, 18 Aug 2022 10:00:04 -0500 Subject: [PATCH 26/51] store-tool: log lamports for each account (#27168) log lamports for each account --- runtime/store-tool/src/main.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/store-tool/src/main.rs b/runtime/store-tool/src/main.rs index edfc00d8ee1323..8f7d2f2a402a05 100644 --- a/runtime/store-tool/src/main.rs +++ b/runtime/store-tool/src/main.rs @@ -40,8 +40,12 @@ fn main() { break; } info!( - " account: {:?} version: {} data: {} hash: {:?}", - account.meta.pubkey, account.meta.write_version, account.meta.data_len, account.hash + " account: {:?} version: {} lamports: {} data: {} hash: {:?}", + account.meta.pubkey, + account.meta.write_version, + account.account_meta.lamports, + account.meta.data_len, + account.hash ); num_accounts = num_accounts.saturating_add(1); stored_accounts_len = stored_accounts_len.saturating_add(account.stored_size); From 7720b48aa67450485d6dc1e7a3db216b34a3bfe9 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 10:35:22 -0500 Subject: [PATCH 27/51] add an assert for a debug feature to avoid wasted time (#27210) --- runtime/src/accounts_db.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 4c789751a2405d..03d343f0ac18a2 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6895,6 +6895,11 @@ impl AccountsDb { stats.oldest_root = storages.range().start; + assert!( + !(config.store_detailed_debug_info_on_failure && config.use_write_cache), + "cannot accurately capture all data for debugging if accounts cache is being used" + ); + self.mark_old_slots_as_dirty(storages, Some(config.epoch_schedule.slots_per_epoch)); let (num_hash_scan_passes, bins_per_pass) = Self::bins_per_pass(self.num_hash_scan_passes); From 77563bc1c97130d3f056df36542b4f8efd44ec13 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 11:01:05 -0500 Subject: [PATCH 28/51] remove redundant call that bumps age to future (#27215) --- runtime/src/in_mem_accounts_index.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index c04e0eed1080bf..9ab9da4e8099d2 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -315,7 +315,6 @@ impl InMemAccountsIndex { ) -> RT { self.get_only_in_mem(pubkey, |entry| { if let Some(entry) = entry { - entry.set_age(self.storage.future_age_to_flush()); callback(Some(entry)).1 } else { // not in cache, look on disk From 4634fb944cee287c9920275c6f1d3cb03c88c9dd Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 18 Aug 2022 11:06:52 -0500 Subject: [PATCH 29/51] Use from_secs api to create duration (#27222) use from_secs api to create duration --- gossip/tests/gossip.rs | 6 +++--- ledger/src/blockstore.rs | 2 +- runtime/src/bank.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs index f3e136cdba7f72..066bdb24f78b5b 100644 --- a/gossip/tests/gossip.rs +++ b/gossip/tests/gossip.rs @@ -109,7 +109,7 @@ where } else { trace!("not converged {} {} {}", i, total + num, num * num); } - sleep(Duration::new(1, 0)); + sleep(Duration::from_secs(1)); } exit.store(true, Ordering::Relaxed); for (_, dr, _) in listen { @@ -251,7 +251,7 @@ pub fn cluster_info_retransmit() { if done { break; } - sleep(Duration::new(1, 0)); + sleep(Duration::from_secs(1)); } assert!(done); let mut p = Packet::default(); @@ -269,7 +269,7 @@ pub fn cluster_info_retransmit() { .into_par_iter() .map(|s| { let mut p = Packet::default(); - s.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); + s.set_read_timeout(Some(Duration::from_secs(1))).unwrap(); let res = s.recv_from(p.buffer_mut()); res.is_err() //true if failed to receive the retransmit packet }) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 336dcd86bf930e..a4158a1778e2f1 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -5005,7 +5005,7 @@ pub mod tests { blockstore .insert_shreds(vec![shreds.remove(1)], None, false) .unwrap(); - let timer = Duration::new(1, 0); + let timer = Duration::from_secs(1); assert!(recvr.recv_timeout(timer).is_err()); // Insert first shred, now we've made a consecutive block blockstore diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f8a3187ad8b163..f4b4e6cdc65bbf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -12977,7 +12977,7 @@ pub(crate) mod tests { #[cfg(not(target_os = "linux"))] { error!("{} banks, sleeping for 5 sec", num_banks); - std::thread::sleep(Duration::new(5, 0)); + std::thread::sleep(Duration::from_secs(5)); } } } From f115e4d0ed5f2ea9b49558345610937a38e1684a Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 11:30:11 -0500 Subject: [PATCH 30/51] reorder slot # in debug hash data path (#27217) --- runtime/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 03d343f0ac18a2..702f70456b68be 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6876,8 +6876,8 @@ impl AccountsDb { } else { // this path executes when we are failing with a hash mismatch let mut new = self.accounts_hash_cache_path.clone(); - new.push(slot.to_string()); new.push("failed_calculate_accounts_hash_cache"); + new.push(slot.to_string()); let _ = std::fs::remove_dir_all(&new); CacheHashData::new(&new) } From e0472050c0216bdb712f4934f7f1c1a232a83c3e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 11:30:35 -0500 Subject: [PATCH 31/51] create helper fn for clarity (#27216) --- runtime/src/in_mem_accounts_index.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index 9ab9da4e8099d2..864915399c04cf 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -279,7 +279,7 @@ impl InMemAccountsIndex { m.stop(); callback(if let Some(entry) = result { - entry.set_age(self.storage.future_age_to_flush()); + self.set_age_to_future(entry); Some(entry) } else { drop(map); @@ -305,6 +305,10 @@ impl InMemAccountsIndex { self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone))) } + fn set_age_to_future(&self, entry: &AccountMapEntry) { + entry.set_age(self.storage.future_age_to_flush()); + } + /// lookup 'pubkey' in index (in_mem or disk). /// call 'callback' whether found or not pub(crate) fn get_internal( @@ -473,7 +477,7 @@ impl InMemAccountsIndex { reclaims, reclaim, ); - current.set_age(self.storage.future_age_to_flush()); + self.set_age_to_future(current); } Entry::Vacant(vacant) => { // not in cache, look on disk From fa4122f9a8d01658d1088f8c15ccb5f6a49fe37a Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 18 Aug 2022 13:24:23 -0400 Subject: [PATCH 32/51] Verifying snapshot bank must always specify the snapshot slot (#27234) --- runtime/src/bank.rs | 10 +++++----- runtime/src/snapshot_utils.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f4b4e6cdc65bbf..f45365bafe543e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7150,19 +7150,19 @@ impl Bank { &self, test_hash_calculation: bool, accounts_db_skip_shrink: bool, - last_full_snapshot_slot: Option, + last_full_snapshot_slot: Slot, ) -> bool { let mut clean_time = Measure::start("clean"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("cleaning.."); - self.clean_accounts(true, true, last_full_snapshot_slot); + self.clean_accounts(true, true, Some(last_full_snapshot_slot)); } clean_time.stop(); let mut shrink_all_slots_time = Measure::start("shrink_all_slots"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("shrinking.."); - self.shrink_all_slots(true, last_full_snapshot_slot); + self.shrink_all_slots(true, Some(last_full_snapshot_slot)); } shrink_all_slots_time.stop(); @@ -11657,11 +11657,11 @@ pub(crate) mod tests { .unwrap(); bank.freeze(); bank.update_accounts_hash(); - assert!(bank.verify_snapshot_bank(true, false, None)); + assert!(bank.verify_snapshot_bank(true, false, bank.slot())); // tamper the bank after freeze! bank.increment_signature_count(1); - assert!(!bank.verify_snapshot_bank(true, false, None)); + assert!(!bank.verify_snapshot_bank(true, false, bank.slot())); } // Test that two bank forks with the same accounts should not hash to the same value. diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 81cdbcc37a6225..2273832c19e28a 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -986,7 +986,7 @@ pub fn bank_from_snapshot_archives( if !bank.verify_snapshot_bank( test_hash_calculation, accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(), - Some(full_snapshot_archive_info.slot()), + full_snapshot_archive_info.slot(), ) && limit_load_slot_count_from_snapshot.is_none() { panic!("Snapshot bank for slot {} failed to verify", bank.slot()); From 0b54b22f5837d528b38aa6a270ef3dd155b21263 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 18 Aug 2022 16:24:36 -0400 Subject: [PATCH 33/51] Remove `Bank::ensure_no_storage_rewards_pool()` (#26468) --- runtime/src/bank.rs | 82 --------------------------------------------- 1 file changed, 82 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f45365bafe543e..17f4b27ece6986 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7621,7 +7621,6 @@ impl Bank { ); self.reconfigure_token2_native_mint(); } - self.ensure_no_storage_rewards_pool(); if new_feature_activations.contains(&feature_set::cap_accounts_data_len::id()) { const ACCOUNTS_DATA_LEN: u64 = 50_000_000_000; @@ -7798,36 +7797,6 @@ impl Bank { } } - fn ensure_no_storage_rewards_pool(&mut self) { - let purge_window_epoch = match self.cluster_type() { - ClusterType::Development => false, - // never do this for devnet; we're pristine here. :) - ClusterType::Devnet => false, - // schedule to remove at testnet/tds - ClusterType::Testnet => self.epoch() == 93, - // never do this for stable; we're pristine here. :) - ClusterType::MainnetBeta => false, - }; - - if purge_window_epoch { - for reward_pubkey in self.rewards_pool_pubkeys.iter() { - if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) { - if reward_account.lamports() == u64::MAX { - reward_account.set_lamports(0); - self.store_account(reward_pubkey, &reward_account); - // Adjust capitalization.... it has been wrapping, reducing the real capitalization by 1-lamport - self.capitalization.fetch_add(1, Relaxed); - info!( - "purged rewards pool account: {}, new capitalization: {}", - reward_pubkey, - self.capitalization() - ); - } - }; - } - } - } - /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult { let accounts = self.get_all_accounts_with_modified_slots()?; @@ -15221,57 +15190,6 @@ pub(crate) mod tests { assert_eq!(native_mint_account.owner(), &inline_spl_token::id()); } - #[test] - fn test_ensure_no_storage_rewards_pool() { - solana_logger::setup(); - - let mut genesis_config = - create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config; - - // Testnet - Storage rewards pool is purged at epoch 93 - // Also this is with bad capitalization - genesis_config.cluster_type = ClusterType::Testnet; - genesis_config.inflation = Inflation::default(); - let reward_pubkey = solana_sdk::pubkey::new_rand(); - genesis_config.rewards_pools.insert( - reward_pubkey, - Account::new(u64::MAX, 0, &solana_sdk::pubkey::new_rand()), - ); - let bank0 = Bank::new_for_tests(&genesis_config); - // because capitalization has been reset with bogus capitalization calculation allowing overflows, - // deliberately substract 1 lamport to simulate it - bank0.capitalization.fetch_sub(1, Relaxed); - let bank0 = Arc::new(bank0); - assert_eq!(bank0.get_balance(&reward_pubkey), u64::MAX,); - - let bank1 = Bank::new_from_parent( - &bank0, - &Pubkey::default(), - genesis_config.epoch_schedule.get_first_slot_in_epoch(93), - ); - - // assert that everything gets in order.... - assert!(bank1.get_account(&reward_pubkey).is_none()); - let sysvar_and_builtin_program_delta = 1; - assert_eq!( - bank0.capitalization() + 1 + 1_000_000_000 + sysvar_and_builtin_program_delta, - bank1.capitalization() - ); - assert_eq!(bank1.capitalization(), bank1.calculate_capitalization(true)); - - // Depending on RUSTFLAGS, this test exposes rust's checked math behavior or not... - // So do some convolted setup; anyway this test itself will just be temporary - let bank0 = std::panic::AssertUnwindSafe(bank0); - let overflowing_capitalization = - std::panic::catch_unwind(|| bank0.calculate_capitalization(true)); - if let Ok(overflowing_capitalization) = overflowing_capitalization { - info!("asserting overflowing capitalization for bank0"); - assert_eq!(overflowing_capitalization, bank0.capitalization()); - } else { - info!("NOT-asserting overflowing capitalization for bank0"); - } - } - #[derive(Debug)] struct TestExecutor {} impl Executor for TestExecutor { From d8380e4d4abd6c639cf183f7b20989e1dadc0807 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 18 Aug 2022 22:12:53 +0100 Subject: [PATCH 34/51] cli: Add subcommands for address lookup tables (#27123) * cli: Add subcommand for creating address lookup tables * cli: Add additional subcommands for address lookup tables * short commands --- Cargo.lock | 1 + cli-output/src/cli_output.rs | 69 +++ cli/Cargo.toml | 1 + cli/src/address_lookup_table.rs | 832 ++++++++++++++++++++++++++++++ cli/src/clap_app.rs | 5 +- cli/src/cli.rs | 14 +- cli/src/lib.rs | 1 + cli/tests/address_lookup_table.rs | 216 ++++++++ 8 files changed, 1135 insertions(+), 4 deletions(-) create mode 100644 cli/src/address_lookup_table.rs create mode 100644 cli/tests/address_lookup_table.rs diff --git a/Cargo.lock b/Cargo.lock index 8616c2632fdfae..9348db394a09bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4874,6 +4874,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", + "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-clap-utils", "solana-cli-config", diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index f45c5713e4af29..645b7b66fbbb40 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -2111,6 +2111,75 @@ impl fmt::Display for CliUpgradeableBuffers { } } +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct CliAddressLookupTable { + pub lookup_table_address: String, + pub authority: Option, + pub deactivation_slot: u64, + pub last_extended_slot: u64, + pub addresses: Vec, +} +impl QuietDisplay for CliAddressLookupTable {} +impl VerboseDisplay for CliAddressLookupTable {} +impl fmt::Display for CliAddressLookupTable { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Lookup Table Address:", &self.lookup_table_address)?; + if let Some(authority) = &self.authority { + writeln_name_value(f, "Authority:", authority)?; + } else { + writeln_name_value(f, "Authority:", "None (frozen)")?; + } + if self.deactivation_slot == u64::MAX { + writeln_name_value(f, "Deactivation Slot:", "None (still active)")?; + } else { + writeln_name_value(f, "Deactivation Slot:", &self.deactivation_slot.to_string())?; + } + if self.last_extended_slot == 0 { + writeln_name_value(f, "Last Extended Slot:", "None (empty)")?; + } else { + writeln_name_value( + f, + "Last Extended Slot:", + &self.last_extended_slot.to_string(), + )?; + } + if self.addresses.is_empty() { + writeln_name_value(f, "Address Table Entries:", "None (empty)")?; + } else { + writeln!(f, "{}", style("Address Table Entries:".to_string()).bold())?; + writeln!(f)?; + writeln!( + f, + "{}", + style(format!(" {:<5} {}", "Index", "Address")).bold() + )?; + for (index, address) in self.addresses.iter().enumerate() { + writeln!(f, " {:<5} {}", index, address)?; + } + } + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliAddressLookupTableCreated { + pub lookup_table_address: String, + pub signature: String, +} +impl QuietDisplay for CliAddressLookupTableCreated {} +impl VerboseDisplay for CliAddressLookupTableCreated {} +impl fmt::Display for CliAddressLookupTableCreated { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Signature:", &self.signature)?; + writeln_name_value(f, "Lookup Table Address:", &self.lookup_table_address)?; + Ok(()) + } +} + #[derive(Debug, Default)] pub struct ReturnSignersConfig { pub dump_transaction_message: bool, diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 1ed5ddaef613a6..6fdfa258c45b60 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -28,6 +28,7 @@ serde = "1.0.143" serde_derive = "1.0.103" serde_json = "1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } +solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.12.0" } solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs new file mode 100644 index 00000000000000..7f0fa9d3137897 --- /dev/null +++ b/cli/src/address_lookup_table.rs @@ -0,0 +1,832 @@ +use { + crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, + clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, + solana_address_lookup_table_program::{ + instruction::{ + close_lookup_table, create_lookup_table, deactivate_lookup_table, extend_lookup_table, + freeze_lookup_table, + }, + state::AddressLookupTable, + }, + solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}, + solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, CliSignature}, + solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig}, + solana_remote_wallet::remote_wallet::RemoteWalletManager, + solana_sdk::{ + account::from_account, clock::Clock, commitment_config::CommitmentConfig, message::Message, + pubkey::Pubkey, sysvar, transaction::Transaction, + }, + std::sync::Arc, +}; + +#[derive(Debug, PartialEq, Eq)] +pub enum AddressLookupTableCliCommand { + CreateLookupTable { + authority_signer_index: SignerIndex, + payer_signer_index: SignerIndex, + }, + FreezeLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + bypass_warning: bool, + }, + ExtendLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + payer_signer_index: SignerIndex, + new_addresses: Vec, + }, + DeactivateLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + bypass_warning: bool, + }, + CloseLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + recipient_pubkey: Pubkey, + }, + ShowLookupTable { + lookup_table_pubkey: Pubkey, + }, +} + +pub trait AddressLookupTableSubCommands { + fn address_lookup_table_subcommands(self) -> Self; +} + +impl AddressLookupTableSubCommands for App<'_, '_> { + fn address_lookup_table_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("address-lookup-table") + .about("Address lookup table management") + .setting(AppSettings::SubcommandRequiredElseHelp) + .subcommand( + SubCommand::with_name("create") + .about("Create a lookup table") + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("payer") + .long("payer") + .value_name("PAYER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Account that will pay rent fees for the created lookup table [default: the default configured keypair]") + ) + ) + .subcommand( + SubCommand::with_name("freeze") + .about("Permanently freezes a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .validator(is_pubkey) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent lookup table freeze warning"), + ), + ) + .subcommand( + SubCommand::with_name("extend") + .about("Append more addresses to a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .validator(is_pubkey) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("payer") + .long("payer") + .value_name("PAYER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Account that will pay rent fees for the extended lookup table [default: the default configured keypair]") + ) + .arg( + Arg::with_name("addresses") + .long("addresses") + .value_name("ADDRESS_1,ADDRESS_2") + .takes_value(true) + .use_delimiter(true) + .required(true) + .validator(is_pubkey) + .help("Comma separated list of addresses to append") + ) + ) + .subcommand( + SubCommand::with_name("deactivate") + .about("Permanently deactivates a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent lookup table deactivation warning"), + ), + ) + .subcommand( + SubCommand::with_name("close") + .about("Permanently closes a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("recipient") + .long("recipient") + .value_name("RECIPIENT_ADDRESS") + .takes_value(true) + .validator(is_pubkey) + .help("Address of the recipient account to deposit the closed account's lamports [default: the default configured keypair]") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + ) + .subcommand( + SubCommand::with_name("get") + .about("Display information about a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .help("Address of the lookup table to show") + ) + ) + ) + } +} + +pub fn parse_address_lookup_table_subcommand( + matches: &ArgMatches<'_>, + default_signer: &DefaultSigner, + wallet_manager: &mut Option>, +) -> Result { + let (subcommand, sub_matches) = matches.subcommand(); + + let response = match (subcommand, sub_matches) { + ("create", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let payer_pubkey = if let Ok((payer_signer, Some(payer_pubkey))) = + signer_of(matches, "payer", wallet_manager) + { + bulk_signers.push(payer_signer); + Some(payer_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + payer_signer_index: signer_info.index_of(payer_pubkey).unwrap(), + }, + ), + signers: signer_info.signers, + } + } + ("freeze", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + bypass_warning: matches.is_present("bypass_warning"), + }, + ), + signers: signer_info.signers, + } + } + ("extend", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let payer_pubkey = if let Ok((payer_signer, Some(payer_pubkey))) = + signer_of(matches, "payer", wallet_manager) + { + bulk_signers.push(payer_signer); + Some(payer_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let new_addresses: Vec = values_of(matches, "addresses").unwrap(); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + payer_signer_index: signer_info.index_of(payer_pubkey).unwrap(), + new_addresses, + }, + ), + signers: signer_info.signers, + } + } + ("deactivate", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + bypass_warning: matches.is_present("bypass_warning"), + }, + ), + signers: signer_info.signers, + } + } + ("close", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let recipient_pubkey = if let Some(recipient_pubkey) = pubkey_of(matches, "recipient") { + recipient_pubkey + } else { + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey() + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::CloseLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + recipient_pubkey, + }, + ), + signers: signer_info.signers, + } + } + ("get", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }, + ), + signers: vec![], + } + } + _ => unreachable!(), + }; + Ok(response) +} + +pub fn process_address_lookup_table_subcommand( + rpc_client: Arc, + config: &CliConfig, + subcommand: &AddressLookupTableCliCommand, +) -> ProcessResult { + match subcommand { + AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index, + payer_signer_index, + } => process_create_lookup_table( + &rpc_client, + config, + *authority_signer_index, + *payer_signer_index, + ), + AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index, + bypass_warning, + } => process_freeze_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *bypass_warning, + ), + AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index, + payer_signer_index, + new_addresses, + } => process_extend_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *payer_signer_index, + new_addresses.to_vec(), + ), + AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index, + bypass_warning, + } => process_deactivate_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *bypass_warning, + ), + AddressLookupTableCliCommand::CloseLookupTable { + lookup_table_pubkey, + authority_signer_index, + recipient_pubkey, + } => process_close_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *recipient_pubkey, + ), + AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + } => process_show_lookup_table(&rpc_client, config, *lookup_table_pubkey), + } +} + +fn process_create_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + authority_signer_index: usize, + payer_signer_index: usize, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + let payer_signer = config.signers[payer_signer_index]; + + let get_clock_result = rpc_client + .get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::finalized())?; + let clock_account = get_clock_result.value.expect("Clock account doesn't exist"); + let clock: Clock = from_account(&clock_account).ok_or_else(|| { + CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string()) + })?; + + let authority_address = authority_signer.pubkey(); + let payer_address = payer_signer.pubkey(); + let (create_lookup_table_ix, lookup_table_address) = + create_lookup_table(authority_address, payer_address, clock.slot); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[create_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign( + &[config.signers[0], authority_signer, payer_signer], + blockhash, + )?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Create failed: {}", err).into()), + Ok(signature) => Ok(config + .output_format + .formatted_string(&CliAddressLookupTableCreated { + lookup_table_address: lookup_table_address.to_string(), + signature: signature.to_string(), + })), + } +} + +pub const FREEZE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ +Once a lookup table is frozen, it can never be modified or unfrozen again. \ +To proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; + +fn process_freeze_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + bypass_warning: bool, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + if !bypass_warning { + return Err(String::from(FREEZE_LOOKUP_TABLE_WARNING).into()); + } + + let authority_address = authority_signer.pubkey(); + let freeze_lookup_table_ix = freeze_lookup_table(lookup_table_pubkey, authority_address); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[freeze_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Freeze failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_extend_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + payer_signer_index: usize, + new_addresses: Vec, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + let payer_signer = config.signers[payer_signer_index]; + + if new_addresses.is_empty() { + return Err("Lookup tables must be extended by at least one address".into()); + } + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let authority_address = authority_signer.pubkey(); + let payer_address = payer_signer.pubkey(); + let extend_lookup_table_ix = extend_lookup_table( + lookup_table_pubkey, + authority_address, + Some(payer_address), + new_addresses, + ); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[extend_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Extend failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ +Once a lookup table is deactivated, it is no longer usable by transactions. +Deactivated lookup tables may only be closed and cannot be recreated at the same address. \ +To proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; + +fn process_deactivate_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + bypass_warning: bool, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + if !bypass_warning { + return Err(String::from(DEACTIVATE_LOOKUP_TABLE_WARNING).into()); + } + + let authority_address = authority_signer.pubkey(); + let deactivate_lookup_table_ix = + deactivate_lookup_table(lookup_table_pubkey, authority_address); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[deactivate_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Deactivate failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_close_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + recipient_pubkey: Pubkey, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; + if lookup_table_account.meta.deactivation_slot == u64::MAX { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated lookup tables may be closed", + ) + .into()); + } + + let authority_address = authority_signer.pubkey(); + let close_lookup_table_ix = + close_lookup_table(lookup_table_pubkey, authority_address, recipient_pubkey); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[close_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Close failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_show_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, +) -> ProcessResult { + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; + Ok(config + .output_format + .formatted_string(&CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: lookup_table_account + .meta + .authority + .as_ref() + .map(ToString::to_string), + deactivation_slot: lookup_table_account.meta.deactivation_slot, + last_extended_slot: lookup_table_account.meta.last_extended_slot, + addresses: lookup_table_account + .addresses + .iter() + .map(ToString::to_string) + .collect(), + })) +} diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 3d48ed37160f93..1760b5161783f2 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -1,7 +1,7 @@ use { crate::{ - cli::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, stake::*, - validator_info::*, vote::*, wallet::*, + address_lookup_table::AddressLookupTableSubCommands, cli::*, cluster_query::*, feature::*, + inflation::*, nonce::*, program::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{App, AppSettings, Arg, ArgGroup, SubCommand}, solana_clap_utils::{self, input_validators::*, keypair::*}, @@ -130,6 +130,7 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .inflation_subcommands() .nonce_subcommands() .program_subcommands() + .address_lookup_table_subcommands() .stake_subcommands() .validator_info_subcommands() .vote_subcommands() diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 2a2397efd3e170..d202a2a69f66c8 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,7 +1,7 @@ use { crate::{ - clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, - spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, + address_lookup_table::*, clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, + program::*, spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{crate_description, crate_name, value_t_or_exit, ArgMatches, Shell}, log::*, @@ -440,6 +440,8 @@ pub enum CliCommand { StakeMinimumDelegation { use_lamports_unit: bool, }, + // Address lookup table commands + AddressLookupTable(AddressLookupTableCliCommand), } #[derive(Debug, PartialEq)] @@ -687,6 +689,9 @@ pub fn parse_command( ("program", Some(matches)) => { parse_program_subcommand(matches, default_signer, wallet_manager) } + ("address-lookup-table", Some(matches)) => { + parse_address_lookup_table_subcommand(matches, default_signer, wallet_manager) + } ("wait-for-max-stake", Some(matches)) => { let max_stake_percent = value_t_or_exit!(matches, "max_percent", f32); Ok(CliCommandInfo { @@ -1627,6 +1632,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { derived_address_program_id.as_ref(), compute_unit_price.as_ref(), ), + + // Address Lookup Table Commands + CliCommand::AddressLookupTable(subcommand) => { + process_address_lookup_table_subcommand(rpc_client, config, subcommand) + } } } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 85d90869ff41b3..c271990b58b7ce 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -23,6 +23,7 @@ extern crate const_format; extern crate serde_derive; +pub mod address_lookup_table; pub mod checks; pub mod clap_app; pub mod cli; diff --git a/cli/tests/address_lookup_table.rs b/cli/tests/address_lookup_table.rs new file mode 100644 index 00000000000000..5d370d48c4eafd --- /dev/null +++ b/cli/tests/address_lookup_table.rs @@ -0,0 +1,216 @@ +use { + solana_cli::{ + address_lookup_table::{ + AddressLookupTableCliCommand, DEACTIVATE_LOOKUP_TABLE_WARNING, + FREEZE_LOOKUP_TABLE_WARNING, + }, + cli::{process_command, CliCommand, CliConfig}, + }, + solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, OutputFormat}, + solana_faucet::faucet::run_local_faucet, + solana_sdk::{ + native_token::LAMPORTS_PER_SOL, + pubkey::Pubkey, + signature::{Keypair, Signer}, + }, + solana_streamer::socket::SocketAddrSpace, + solana_test_validator::TestValidator, + std::str::FromStr, +}; + +#[test] +fn test_cli_create_extend_and_freeze_address_lookup_table() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.output_format = OutputFormat::JsonCompact; + + // Airdrop SOL for transaction fees + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 10 * LAMPORTS_PER_SOL, + }; + process_command(&config).unwrap(); + + // Create lookup table + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index: 0, + payer_signer_index: 0, + }); + let response: CliAddressLookupTableCreated = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + let lookup_table_pubkey = Pubkey::from_str(&response.lookup_table_address).unwrap(); + + // Validate created lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let response: CliAddressLookupTable = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + response, + CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: Some(keypair.pubkey().to_string()), + deactivation_slot: u64::MAX, + last_extended_slot: 0, + addresses: vec![], + } + ); + } + + // Extend lookup table + let new_addresses: Vec = (0..5).map(|_| Pubkey::new_unique()).collect(); + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + payer_signer_index: 0, + new_addresses: new_addresses.clone(), + }); + process_command(&config).unwrap(); + + // Validate extended lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { + addresses, + last_extended_slot, + .. + } = serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + addresses + .into_iter() + .map(|address| Pubkey::from_str(&address).unwrap()) + .collect::>(), + new_addresses + ); + assert!(last_extended_slot > 0); + } + + // Freeze lookup table w/o bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: false, + }); + let process_err = process_command(&config).unwrap_err(); + assert_eq!(process_err.to_string(), FREEZE_LOOKUP_TABLE_WARNING); + + // Freeze lookup table w/ bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: true, + }); + process_command(&config).unwrap(); + + // Validate frozen lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { authority, .. } = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert!(authority.is_none()); + } +} + +#[test] +fn test_cli_create_and_deactivate_address_lookup_table() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.output_format = OutputFormat::JsonCompact; + + // Airdrop SOL for transaction fees + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 10 * LAMPORTS_PER_SOL, + }; + process_command(&config).unwrap(); + + // Create lookup table + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index: 0, + payer_signer_index: 0, + }); + let response: CliAddressLookupTableCreated = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + let lookup_table_pubkey = Pubkey::from_str(&response.lookup_table_address).unwrap(); + + // Validate created lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let response: CliAddressLookupTable = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + response, + CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: Some(keypair.pubkey().to_string()), + deactivation_slot: u64::MAX, + last_extended_slot: 0, + addresses: vec![], + } + ); + } + + // Deactivate lookup table w/o bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: false, + }); + let process_err = process_command(&config).unwrap_err(); + assert_eq!(process_err.to_string(), DEACTIVATE_LOOKUP_TABLE_WARNING); + + // Deactivate lookup table w/ bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: true, + }); + process_command(&config).unwrap(); + + // Validate deactivated lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { + deactivation_slot, .. + } = serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_ne!(deactivation_slot, u64::MAX); + } +} From 6928b2a5af45bf6985b90ff116cf47f917ac1c9c Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 18 Aug 2022 22:39:31 +0000 Subject: [PATCH 35/51] adds hash domain to ping-pong protocol (#27193) In order to maintain backward compatibility, for now the responding node will hash the token both with and without domain so that the other node will accept the response regardless of its upgrade status. Once the cluster has upgraded to the new code, we will remove the legacy domain = false case. --- core/src/ancestor_hashes_service.rs | 15 +++++++---- core/src/serve_repair.rs | 15 +++++++---- gossip/src/cluster_info.rs | 30 +++++++++++++--------- gossip/src/ping_pong.rs | 40 ++++++++++++++++++++++++----- 4 files changed, 72 insertions(+), 28 deletions(-) diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index 330ebb072abc10..4813ed11685069 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -425,16 +425,21 @@ impl AncestorHashesService { stats.invalid_packets += 1; return None; } - if ping.verify() { - stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { + if !ping.verify() { + stats.ping_err_verify_count += 1; + return None; + } + stats.ping_count += 1; + // Respond both with and without domain so that the other node + // will accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, keypair) { let pong = RepairProtocol::Pong(pong); if let Ok(pong_bytes) = serialize(&pong) { let _ignore = ancestor_socket.send_to(&pong_bytes[..], from_addr); } } - } else { - stats.ping_err_verify_count += 1; } None } diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 2f755ebb17f4bb..47443bcd9acc2d 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -1044,11 +1044,16 @@ impl ServeRepair { } packet.meta.set_discard(true); stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { - let pong = RepairProtocol::Pong(pong); - if let Ok(pong_bytes) = serialize(&pong) { - let from_addr = packet.meta.socket_addr(); - pending_pongs.push((pong_bytes, from_addr)); + // Respond both with and without domain so that the other node + // will accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, keypair) { + let pong = RepairProtocol::Pong(pong); + if let Ok(pong_bytes) = serialize(&pong) { + let from_addr = packet.meta.socket_addr(); + pending_pongs.push((pong_bytes, from_addr)); + } } } } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 40142f70e3d336..9d692b8a08aba6 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2170,14 +2170,18 @@ impl ClusterInfo { I: IntoIterator, { let keypair = self.keypair(); - let pongs_and_dests: Vec<_> = pings - .into_iter() - .filter_map(|(addr, ping)| { - let pong = Pong::new(&ping, &keypair).ok()?; - let pong = Protocol::PongMessage(pong); - Some((addr, pong)) - }) - .collect(); + let mut pongs_and_dests = Vec::new(); + for (addr, ping) in pings { + // Respond both with and without domain so that the other node will + // accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, &keypair) { + let pong = Protocol::PongMessage(pong); + pongs_and_dests.push((addr, pong)); + } + } + } if pongs_and_dests.is_empty() { None } else { @@ -3287,7 +3291,9 @@ RPC Enabled Nodes: 1"#; let pongs: Vec<(SocketAddr, Pong)> = pings .iter() .zip(&remote_nodes) - .map(|(ping, (keypair, socket))| (*socket, Pong::new(ping, keypair).unwrap())) + .map(|(ping, (keypair, socket))| { + (*socket, Pong::new(/*domain:*/ true, ping, keypair).unwrap()) + }) .collect(); let now = now + Duration::from_millis(1); cluster_info.handle_batch_pong_messages(pongs, now); @@ -3330,7 +3336,7 @@ RPC Enabled Nodes: 1"#; .collect(); let pongs: Vec<_> = pings .iter() - .map(|ping| Pong::new(ping, &this_node).unwrap()) + .map(|ping| Pong::new(/*domain:*/ false, ping, &this_node).unwrap()) .collect(); let recycler = PacketBatchRecycler::default(); let packets = cluster_info @@ -3342,9 +3348,9 @@ RPC Enabled Nodes: 1"#; &recycler, ) .unwrap(); - assert_eq!(remote_nodes.len(), packets.len()); + assert_eq!(remote_nodes.len() * 2, packets.len()); for (packet, (_, socket), pong) in izip!( - packets.into_iter(), + packets.into_iter().step_by(2), remote_nodes.into_iter(), pongs.into_iter() ) { diff --git a/gossip/src/ping_pong.rs b/gossip/src/ping_pong.rs index 6c3a219cfdb81b..16961f26f18388 100644 --- a/gossip/src/ping_pong.rs +++ b/gossip/src/ping_pong.rs @@ -16,6 +16,8 @@ use { }, }; +const PING_PONG_HASH_PREFIX: &[u8] = "SOLANA_PING_PONG".as_bytes(); + #[derive(AbiExample, Debug, Deserialize, Serialize)] pub struct Ping { from: Pubkey, @@ -100,8 +102,17 @@ impl Signable for Ping { } impl Pong { - pub fn new(ping: &Ping, keypair: &Keypair) -> Result { - let hash = hash::hash(&serialize(&ping.token)?); + pub fn new( + domain: bool, + ping: &Ping, + keypair: &Keypair, + ) -> Result { + let token = serialize(&ping.token)?; + let hash = if domain { + hash::hashv(&[PING_PONG_HASH_PREFIX, &token]) + } else { + hash::hash(&token) + }; let pong = Pong { from: keypair.pubkey(), hash, @@ -187,9 +198,15 @@ impl PingCache { Some(t) if now.saturating_duration_since(*t) < delay => None, _ => { let ping = pingf()?; - let hash = hash::hash(&serialize(&ping.token).ok()?); - self.pings.put(node, now); + let token = serialize(&ping.token).ok()?; + // For backward compatibility, for now responses both with and + // without domain are accepted. + // TODO: remove no domain case once cluster is upgraded. + let hash = hash::hash(&token); + self.pending_cache.put(hash, node); + let hash = hash::hashv(&[PING_PONG_HASH_PREFIX, &token]); self.pending_cache.put(hash, node); + self.pings.put(node, now); Some(ping) } } @@ -281,10 +298,18 @@ mod tests { assert!(ping.verify()); assert!(ping.sanitize().is_ok()); - let pong = Pong::new(&ping, &keypair).unwrap(); + let pong = Pong::new(/*domain:*/ false, &ping, &keypair).unwrap(); assert!(pong.verify()); assert!(pong.sanitize().is_ok()); assert_eq!(hash::hash(&ping.token), pong.hash); + + let pong = Pong::new(/*domian:*/ true, &ping, &keypair).unwrap(); + assert!(pong.verify()); + assert!(pong.sanitize().is_ok()); + assert_eq!( + hash::hashv(&[PING_PONG_HASH_PREFIX, &ping.token]), + pong.hash + ); } #[test] @@ -339,7 +364,10 @@ mod tests { assert!(ping.is_none()); } Some(ping) => { - let pong = Pong::new(ping, keypair).unwrap(); + let domain = rng.gen_ratio(1, 2); + let pong = Pong::new(domain, ping, keypair).unwrap(); + assert!(cache.add(&pong, *socket, now)); + let pong = Pong::new(!domain, ping, keypair).unwrap(); assert!(cache.add(&pong, *socket, now)); } } From 7573000d87a75c5110f9fbd7a863b8a24d45bf78 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Fri, 19 Aug 2022 01:19:44 -0700 Subject: [PATCH 36/51] Revert "Rust v1.63.0 (#27148)" (#27245) This reverts commit a2e7bdf50ac5e1d4c633f64f6362028b4164c003. --- account-decoder/src/parse_address_lookup_table.rs | 2 +- banks-server/src/banks_server.rs | 10 +++++++--- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 4 ++-- ci/test-checks.sh | 14 -------------- client/tests/quic_client.rs | 2 +- core/src/banking_stage.rs | 2 +- core/src/sigverify_shreds.rs | 2 +- frozen-abi/src/abi_example.rs | 2 +- gossip/src/crds_gossip_pull.rs | 2 +- ledger/src/bigtable_upload.rs | 2 +- ledger/src/blockstore.rs | 2 +- ledger/src/blockstore_meta.rs | 2 +- ledger/src/shred.rs | 2 +- ledger/src/shred/shred_code.rs | 2 +- local-cluster/src/local_cluster.rs | 2 +- perf/src/sigverify.rs | 7 ++++++- poh/src/poh_recorder.rs | 2 +- rpc/src/rpc.rs | 6 ++++-- rpc/src/rpc_subscriptions.rs | 5 ++++- runtime/src/account_rent_state.rs | 2 +- runtime/src/accounts.rs | 2 +- runtime/src/accounts_db.rs | 8 ++++---- runtime/src/bank.rs | 12 ++++++------ runtime/src/expected_rent_collection.rs | 8 ++++---- runtime/src/hardened_unpack.rs | 2 +- runtime/src/in_mem_accounts_index.rs | 2 -- runtime/src/serde_snapshot.rs | 4 ++-- runtime/src/serde_snapshot/newer.rs | 4 ++-- runtime/src/serde_snapshot/tests.rs | 2 +- runtime/src/snapshot_minimizer.rs | 2 +- runtime/src/snapshot_utils.rs | 2 +- runtime/src/storable_accounts.rs | 2 +- runtime/src/system_instruction_processor.rs | 2 +- sdk/program/src/message/compiled_keys.rs | 10 +++++----- sdk/program/src/nonce/state/mod.rs | 2 +- sdk/program/src/stake/tools.rs | 2 +- streamer/src/streamer.rs | 2 +- validator/src/bootstrap.rs | 6 ++++-- zk-token-sdk/src/instruction/close_account.rs | 2 +- zk-token-sdk/src/instruction/withdraw.rs | 2 +- 42 files changed, 78 insertions(+), 78 deletions(-) diff --git a/account-decoder/src/parse_address_lookup_table.rs b/account-decoder/src/parse_address_lookup_table.rs index ca461f2636e92a..26955d74a74242 100644 --- a/account-decoder/src/parse_address_lookup_table.rs +++ b/account-decoder/src/parse_address_lookup_table.rs @@ -19,7 +19,7 @@ pub fn parse_address_lookup_table( }) } -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase", tag = "type", content = "info")] pub enum LookupTableAccountType { Uninitialized, diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index a4b65601c389b3..c73844d2571560 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -153,9 +153,13 @@ fn verify_transaction( transaction: &Transaction, feature_set: &Arc, ) -> transaction::Result<()> { - transaction.verify()?; - transaction.verify_precompiles(feature_set)?; - Ok(()) + if let Err(err) = transaction.verify() { + Err(err) + } else if let Err(err) = transaction.verify_precompiles(feature_set) { + Err(err) + } else { + Ok(()) + } } fn simulate_transaction( diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index 12aeff7e5e0b81..fff0f366d32f29 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.63.0 +FROM solanalabs/rust:1.60.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index a256d308d9b27a..6805f85fcd85df 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.63.0 +FROM rust:1.60.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 792863c3280fa1..dc3570fa939e79 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.63.0 + stable_version=1.60.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2022-08-12 + nightly_version=2022-04-01 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 65e5e6271aa4bf..72c174395bd1d9 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -65,25 +65,11 @@ fi _ ci/order-crates-for-publishing.py -nightly_clippy_allows=( - # This lint occurs all over the code base - "--allow=clippy::significant_drop_in_scrutinee" - - # The prost crate, used by solana-storage-proto, generates Rust source that - # triggers this lint. Need to resolve upstream in prost - "--allow=clippy::derive_partial_eq_without_eq" - - # This link seems to incorrectly trigger in - # `programs/bpf_loader/src/syscalls/{lib,cpi}.rs` - "--allow=clippy::explicit_auto_deref" -) - # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there _ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- \ --deny=warnings \ --deny=clippy::integer_arithmetic \ - "${nightly_clippy_allows[@]}" _ scripts/cargo-for-all-lock-files.sh -- nightly sort --workspace --check _ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check diff --git a/client/tests/quic_client.rs b/client/tests/quic_client.rs index 1c5348177dd644..980476aee7b2c6 100644 --- a/client/tests/quic_client.rs +++ b/client/tests/quic_client.rs @@ -27,7 +27,7 @@ mod tests { let mut all_packets = vec![]; let now = Instant::now(); let mut total_packets: usize = 0; - while now.elapsed().as_secs() < 10 { + while now.elapsed().as_secs() < 5 { if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) { total_packets = total_packets.saturating_add(packets.len()); all_packets.push(packets) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 1c3e95e2bd6c5d..2547c00f94e5ca 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1335,7 +1335,7 @@ impl BankingStage { ); retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map( - |(index, execution_result)| execution_result.was_executed().then_some(index), + |(index, execution_result)| execution_result.was_executed().then(|| index), )); return ExecuteAndCommitTransactionsOutput { diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index f1f08ec671d2f3..f9a50ab8b2a954 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -151,7 +151,7 @@ fn get_slot_leaders( let leader = leaders.entry(slot).or_insert_with(|| { let leader = leader_schedule_cache.slot_leader_at(slot, Some(bank))?; // Discard the shred if the slot leader is the node itself. - (&leader != self_pubkey).then_some(leader) + (&leader != self_pubkey).then(|| leader) }); if leader.is_none() { packet.meta.set_discard(true); diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index 2e1bdbcac16d0d..e0dfa50b8acea6 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -411,7 +411,7 @@ lazy_static! { impl AbiExample for &Vec { fn example() -> Self { info!("AbiExample for (&Vec): {}", type_name::()); - &VEC_U8 + &*VEC_U8 } } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 04df91227b971c..2780bf7dabf56b 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -256,7 +256,7 @@ impl CrdsGossipPull { if let Some(ping) = ping { pings.push((peer.gossip, ping)); } - check.then_some((weight, peer)) + check.then(|| (weight, peer)) }) .unzip() }; diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index c8cdef587b1fc7..f43b07db12592a 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -60,7 +60,7 @@ pub async fn upload_confirmed_blocks( starting_slot, err ) })? - .map_while(|slot| (slot <= ending_slot).then_some(slot)) + .map_while(|slot| (slot <= ending_slot).then(|| slot)) .collect(); if blockstore_slots.is_empty() { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index a4158a1778e2f1..2c0913a5ab8cfb 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3146,7 +3146,7 @@ impl Blockstore { } .expect("fetch from DuplicateSlots column family failed")?; let new_shred = Shred::new_from_serialized_shred(payload).unwrap(); - (existing_shred != *new_shred.payload()).then_some(existing_shred) + (existing_shred != *new_shred.payload()).then(|| existing_shred) } pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool { diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 5cacf78198dafb..65101fe98348ba 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -61,7 +61,7 @@ mod serde_compat { D: Deserializer<'de>, { let val = u64::deserialize(deserializer)?; - Ok((val != u64::MAX).then_some(val)) + Ok((val != u64::MAX).then(|| val)) } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index bef3df72515640..e17055b1e7d9a9 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -613,7 +613,7 @@ pub mod layout { merkle::ShredData::get_signed_message_range(proof_size)? } }; - (shred.len() <= range.end).then_some(range) + (shred.len() <= range.end).then(|| range) } pub(crate) fn get_reference_tick(shred: &[u8]) -> Result { diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 1fe3fef026ff18..538bb25427f38f 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -119,7 +119,7 @@ pub(super) fn erasure_shard_index(shred: &T) -> Option let position = usize::from(coding_header.position); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = position.checked_add(num_data_shreds)?; - (index < fec_set_size).then_some(index) + (index < fec_set_size).then(|| index) } pub(super) fn sanitize(shred: &T) -> Result<(), Error> { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 0f1ca19f876aff..f7b68647053eaf 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -319,7 +319,7 @@ impl LocalCluster { }) .collect(); for (stake, validator_config, (key, _)) in izip!( - config.node_stakes[1..].iter(), + (&config.node_stakes[1..]).iter(), config.validator_configs[1..].iter(), validator_keys[1..].iter(), ) { diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 1e40d29adcf13d..aee1b310dd59d9 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -830,7 +830,12 @@ mod tests { pub fn memfind(a: &[A], b: &[A]) -> Option { assert!(a.len() >= b.len()); let end = a.len() - b.len() + 1; - (0..end).find(|&i| a[i..i + b.len()] == b[..]) + for i in 0..end { + if a[i..i + b.len()] == b[..] { + return Some(i); + } + } + None } #[test] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index d6c85c3fdf7f3f..aef2d7393e9f51 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -505,7 +505,7 @@ impl PohRecorder { start: Arc::new(Instant::now()), min_tick_height: bank.tick_height(), max_tick_height: bank.max_tick_height(), - transaction_index: track_transaction_indexes.then_some(0), + transaction_index: track_transaction_indexes.then(|| 0), }; trace!("new working bank"); assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot()); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 9cad136b581927..fdf72d8f5d7299 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -129,7 +129,7 @@ fn new_response(bank: &Bank, value: T) -> RpcResponse { /// Wrapper for rpc return types of methods that provide responses both with and without context. /// Main purpose of this is to fix methods that lack context information in their return type, /// without breaking backwards compatibility. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub enum OptionalContext { Context(RpcResponse), @@ -3646,7 +3646,9 @@ pub mod rpc_full { } if !skip_preflight { - verify_transaction(&transaction, &preflight_bank.feature_set)?; + if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) { + return Err(e); + } match meta.health.check() { RpcHealthStatus::Ok => (), diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 896b6a9ad5f453..bd9fe337460279 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1001,7 +1001,10 @@ impl RpcSubscriptions { let mut slots_to_notify: Vec<_> = (*w_last_unnotified_slot..slot).collect(); let ancestors = bank.proper_ancestors_set(); - slots_to_notify.retain(|slot| ancestors.contains(slot)); + slots_to_notify = slots_to_notify + .into_iter() + .filter(|slot| ancestors.contains(slot)) + .collect(); slots_to_notify.push(slot); for s in slots_to_notify { // To avoid skipping a slot that fails this condition, diff --git a/runtime/src/account_rent_state.rs b/runtime/src/account_rent_state.rs index 74cbc5b81af5f1..629502caf475fe 100644 --- a/runtime/src/account_rent_state.rs +++ b/runtime/src/account_rent_state.rs @@ -104,7 +104,7 @@ pub(crate) fn check_rent_state( .get_account_at_index(index) .expect(expect_msg) .borrow(), - include_account_index_in_err.then_some(index), + include_account_index_in_err.then(|| index), prevent_crediting_accounts_that_end_rent_paying, )?; } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index ade9d327ba1046..86d14aaf7b681c 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -440,7 +440,7 @@ impl Accounts { payer_account, feature_set .is_active(&feature_set::include_account_index_in_rent_error::ID) - .then_some(payer_index), + .then(|| payer_index), feature_set .is_active(&feature_set::prevent_crediting_accounts_that_end_rent_paying::id()), ) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 702f70456b68be..c84f45501faacf 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2174,7 +2174,7 @@ impl AccountsDb { // figure out how many ancient accounts have been reclaimed let old_reclaims = reclaims .iter() - .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) + .filter_map(|(slot, _)| (slot < &one_epoch_old).then(|| 1)) .sum(); ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed); reclaims @@ -2392,7 +2392,7 @@ impl AccountsDb { .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then_some(slot) + (slot <= max_slot).then(|| slot) }) .collect() } @@ -3676,7 +3676,7 @@ impl AccountsDb { ) -> Option { self.get_storages_for_slot(slot).and_then(|all_storages| { self.should_move_to_ancient_append_vec(&all_storages, current_ancient, slot) - .then_some(all_storages) + .then(|| all_storages) }) } @@ -5309,7 +5309,7 @@ impl AccountsDb { // with the same slot. let is_being_flushed = !currently_contended_slots.insert(*remove_slot); // If the cache is currently flushing this slot, add it to the list - is_being_flushed.then_some(remove_slot) + is_being_flushed.then(|| remove_slot) }) .cloned() .collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 17f4b27ece6986..8557b70bd06d30 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2285,7 +2285,7 @@ impl Bank { hash: *self.hash.read().unwrap(), parent_hash: self.parent_hash, parent_slot: self.parent_slot, - hard_forks: &self.hard_forks, + hard_forks: &*self.hard_forks, transaction_count: self.transaction_count.load(Relaxed), tick_height: self.tick_height.load(Relaxed), signature_count: self.signature_count.load(Relaxed), @@ -3293,7 +3293,7 @@ impl Bank { let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().ok()?; let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?; - (slot_delta <= slots_per_epoch).then_some({ + (slot_delta <= slots_per_epoch).then(|| { ( *pubkey, ( @@ -3963,10 +3963,10 @@ impl Bank { } /// Prepare a transaction batch without locking accounts for transaction simulation. - pub(crate) fn prepare_simulation_batch( - &self, + pub(crate) fn prepare_simulation_batch<'a>( + &'a self, transaction: SanitizedTransaction, - ) -> TransactionBatch<'_, '_> { + ) -> TransactionBatch<'a, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_result = transaction .get_account_locks(tx_account_lock_limit) @@ -4367,7 +4367,7 @@ impl Bank { self.feature_set.clone(), compute_budget, timings, - &self.sysvar_cache.read().unwrap(), + &*self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, prev_accounts_data_len, diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index bd6a6bb4842a85..d049430933db33 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -684,7 +684,7 @@ pub mod tests { ); assert_eq!( result, - (!leave_alone).then_some(ExpectedRentCollection { + (!leave_alone).then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -712,7 +712,7 @@ pub mod tests { ); assert_eq!( result, - (!greater).then_some(ExpectedRentCollection { + (!greater).then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -909,7 +909,7 @@ pub mod tests { ); assert_eq!( result, - (account_rent_epoch != 0).then_some(ExpectedRentCollection { + (account_rent_epoch != 0).then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch + 1, partition_index_from_max_slot: partition_index_max_inclusive, @@ -1084,7 +1084,7 @@ pub mod tests { }; assert_eq!( result, - some_expected.then_some(ExpectedRentCollection { + some_expected.then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index ac1c23167343fb..e3af855216e409 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -384,7 +384,7 @@ where .map(|path_buf| path_buf.as_path()) { Some(path) => { - accounts_path_processor(file, path); + accounts_path_processor(*file, path); UnpackPath::Valid(path) } None => UnpackPath::Invalid, diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index 864915399c04cf..82312327bca621 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -1421,8 +1421,6 @@ impl<'a> FlushGuard<'a> { #[must_use = "if unused, the `flushing` flag will immediately clear"] fn lock(flushing: &'a AtomicBool) -> Option { let already_flushing = flushing.swap(true, Ordering::AcqRel); - // Eager evaluation here would result in dropping Self and clearing flushing flag - #[allow(clippy::unnecessary_lazy_evaluations)] (!already_flushing).then(|| Self { flushing }) } } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 90d0c6db2e3220..5b42208d042e7c 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -65,7 +65,7 @@ pub(crate) enum SerdeStyle { const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; -#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq, Eq)] +#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)] pub struct AccountsDbFields( HashMap>, StoredMetaWriteVersion, @@ -120,7 +120,7 @@ impl SnapshotAccountsDbFields { // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot incremental_snapshot_storages .iter() - .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| { + .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!") })?; diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index ab27961bf2a49c..512737106aebc9 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -201,7 +201,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), @@ -228,7 +228,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 5834a23f969116..1de6ee2a5d54c6 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -155,7 +155,7 @@ fn test_accounts_serialize_style(serde_style: SerdeStyle) { accountsdb_to_stream( serde_style, &mut writer, - &accounts.accounts_db, + &*accounts.accounts_db, 0, &accounts.accounts_db.get_snapshot_storages(0, None, None).0, ) diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 94a82e1d482458..69e7a99e8e7601 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -543,7 +543,7 @@ mod tests { .accounts .iter() .filter_map(|(pubkey, account)| { - stake::program::check_id(account.owner()).then_some(*pubkey) + stake::program::check_id(account.owner()).then(|| *pubkey) }) .collect(); expected_stake_accounts.push(bootstrap_validator_pubkey); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 2273832c19e28a..93cdbc0f33fc0c 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1216,7 +1216,7 @@ fn check_are_snapshots_compatible( let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap(); (full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot()) - .then_some(()) + .then(|| ()) .ok_or_else(|| { SnapshotError::MismatchedBaseSlot( full_snapshot_archive_info.slot(), diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs index bfa35cf71c3e6b..8d79c0f78c5fe4 100644 --- a/runtime/src/storable_accounts.rs +++ b/runtime/src/storable_accounts.rs @@ -143,7 +143,7 @@ pub mod tests { slot, &vec![(&pk, &account, slot), (&pk, &account, slot)][..], ); - assert!(!test3.contains_multiple_slots()); + assert!(!(&test3).contains_multiple_slots()); let test3 = ( slot, &vec![(&pk, &account, slot), (&pk, &account, slot + 1)][..], diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 3b738df1d8a0e4..67f1f931147cef 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1626,7 +1626,7 @@ mod tests { .unwrap(); // super fun time; callback chooses to .clean_accounts(None) or not - callback(&bank); + callback(&*bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs index c689d08f39ae81..d56c7aca2c4159 100644 --- a/sdk/program/src/message/compiled_keys.rs +++ b/sdk/program/src/message/compiled_keys.rs @@ -80,20 +80,20 @@ impl CompiledKeys { .chain( key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then_some(*key)), + .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)), ) .collect(); let readonly_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then_some(*key)) + .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key)) .collect(); let writable_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then_some(*key)) + .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key)) .collect(); let readonly_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then_some(*key)) + .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key)) .collect(); let signers_len = writable_signer_keys @@ -160,7 +160,7 @@ impl CompiledKeys { for search_key in self .key_meta_map .iter() - .filter_map(|(key, meta)| key_meta_filter(meta).then_some(key)) + .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key)) { for (key_index, key) in lookup_table_addresses.iter().enumerate() { if key == search_key { diff --git a/sdk/program/src/nonce/state/mod.rs b/sdk/program/src/nonce/state/mod.rs index d55bc9063afcff..a4a850b93c1cdc 100644 --- a/sdk/program/src/nonce/state/mod.rs +++ b/sdk/program/src/nonce/state/mod.rs @@ -46,7 +46,7 @@ impl Versions { Self::Current(state) => match **state { State::Uninitialized => None, State::Initialized(ref data) => { - (recent_blockhash == &data.blockhash()).then_some(data) + (recent_blockhash == &data.blockhash()).then(|| data) } }, } diff --git a/sdk/program/src/stake/tools.rs b/sdk/program/src/stake/tools.rs index e0447f49fc69c9..842a822b0ea329 100644 --- a/sdk/program/src/stake/tools.rs +++ b/sdk/program/src/stake/tools.rs @@ -28,7 +28,7 @@ fn get_minimum_delegation_return_data() -> Result { .ok_or(ProgramError::InvalidInstructionData) .and_then(|(program_id, return_data)| { (program_id == super::program::id()) - .then_some(return_data) + .then(|| return_data) .ok_or(ProgramError::IncorrectProgramId) }) .and_then(|return_data| { diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 1ef9b989304ebb..3492f60c8933a8 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -307,7 +307,7 @@ fn recv_send( let packets = packet_batch.iter().filter_map(|pkt| { let addr = pkt.meta.socket_addr(); let data = pkt.data(..)?; - socket_addr_space.check(&addr).then_some((data, addr)) + socket_addr_space.check(&addr).then(|| (data, addr)) }); batch_send(sock, &packets.collect::>())?; Ok(()) diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index c5a4b65d4b1229..fec9f6d409709c 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -409,7 +409,7 @@ pub fn attempt_download_genesis_and_snapshot( .map_err(|err| format!("Failed to get RPC node slot: {}", err))?; info!("RPC node root slot: {}", rpc_client_slot); - download_snapshots( + if let Err(err) = download_snapshots( full_snapshot_archives_dir, incremental_snapshot_archives_dir, validator_config, @@ -422,7 +422,9 @@ pub fn attempt_download_genesis_and_snapshot( download_abort_count, snapshot_hash, rpc_contact_info, - )?; + ) { + return Err(err); + }; if let Some(url) = bootstrap_config.check_vote_account.as_ref() { let rpc_client = RpcClient::new(url); diff --git a/zk-token-sdk/src/instruction/close_account.rs b/zk-token-sdk/src/instruction/close_account.rs index b6702e3051f168..4525f87901cd71 100644 --- a/zk-token-sdk/src/instruction/close_account.rs +++ b/zk-token-sdk/src/instruction/close_account.rs @@ -41,7 +41,7 @@ impl CloseAccountData { keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, ) -> Result { - let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); + let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); let mut transcript = CloseAccountProof::transcript_new(&pod_pubkey, &pod_ciphertext); diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 64f540a591804e..9aa606e8ca4203 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -62,7 +62,7 @@ impl WithdrawData { // current source balance let final_ciphertext = current_ciphertext - &ElGamal::encode(amount); - let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); + let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into(); let mut transcript = WithdrawProof::transcript_new(&pod_pubkey, &pod_final_ciphertext); let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript); From 179445576680a46303e0d63b9d9a78d50123474e Mon Sep 17 00:00:00 2001 From: leonardkulms <42893075+leonardkulms@users.noreply.github.com> Date: Fri, 19 Aug 2022 12:18:19 +0200 Subject: [PATCH 37/51] correct double negation (#27240) --- sdk/src/transaction/error.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs index 2fe4e0e3756adf..e061b3ffebf951 100644 --- a/sdk/src/transaction/error.rs +++ b/sdk/src/transaction/error.rs @@ -142,9 +142,9 @@ pub enum TransactionError { #[error("Transaction contains a duplicate instruction ({0}) that is not allowed")] DuplicateInstruction(u8), - /// Transaction results in an account without insufficient funds for rent + /// Transaction results in an account with insufficient funds for rent #[error( - "Transaction results in an account ({account_index}) without insufficient funds for rent" + "Transaction results in an account ({account_index}) with insufficient funds for rent" )] InsufficientFundsForRent { account_index: u8 }, } From dba2fd5a16c1ed928dba84332c1d50e11dbb3fd8 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Fri, 19 Aug 2022 09:15:15 -0500 Subject: [PATCH 38/51] Enable QUIC client by default. Add arg to disable QUIC client. (Forward port #26927) (#27194) Enable QUIC client by default. Add arg to disable QUIC client. * Enable QUIC client by default. Add arg to disable QUIC client. * Deprecate --disable-quic-servers arg * Add #[ignore] annotation to failing tests --- banking-bench/src/main.rs | 6 +- bench-tps/src/cli.rs | 10 +-- bench-tps/tests/bench_tps.rs | 1 + client/src/connection_cache.rs | 30 ++++++--- core/src/banking_stage.rs | 2 + core/src/tpu.rs | 69 +++++++++------------ core/src/validator.rs | 3 - dos/src/main.rs | 2 + local-cluster/src/validator_configs.rs | 1 - local-cluster/tests/local_cluster.rs | 4 ++ local-cluster/tests/local_cluster_slow_1.rs | 2 + local-cluster/tests/local_cluster_slow_2.rs | 1 + multinode-demo/bootstrap-validator.sh | 2 +- validator/src/main.rs | 17 +++-- 14 files changed, 87 insertions(+), 63 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 51b0042abed374..2806a8a9e05a7a 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -214,10 +214,10 @@ fn main() { .help("Number of threads to use in the banking stage"), ) .arg( - Arg::new("tpu_use_quic") - .long("tpu-use-quic") + Arg::new("tpu_disable_quic") + .long("tpu-disable-quic") .takes_value(false) - .help("Forward messages to TPU using QUIC"), + .help("Disable forwarding messages to TPU using QUIC"), ) .get_matches(); diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index a1b5c28329ea76..9c583642d78d44 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -294,10 +294,10 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> { .help("Submit transactions with a TpuClient") ) .arg( - Arg::with_name("tpu_use_quic") - .long("tpu-use-quic") + Arg::with_name("tpu_disable_quic") + .long("tpu-disable-quic") .takes_value(false) - .help("Submit transactions via QUIC; only affects ThinClient (default) \ + .help("Do not submit transactions via QUIC; only affects ThinClient (default) \ or TpuClient sends"), ) .arg( @@ -358,8 +358,8 @@ pub fn extract_args(matches: &ArgMatches) -> Config { args.external_client_type = ExternalClientType::RpcClient; } - if matches.is_present("tpu_use_quic") { - args.use_quic = true; + if matches.is_present("tpu_disable_quic") { + args.use_quic = false; } if let Some(v) = matches.value_of("tpu_connection_pool_size") { diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 220980f9b0bf90..ec12c8b7aaabe1 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -136,6 +136,7 @@ fn test_bench_tps_test_validator(config: Config) { #[test] #[serial] +#[ignore] fn test_bench_tps_local_cluster_solana() { test_bench_tps_local_cluster(Config { tx_count: 100, diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index f0628d3e32b9de..9e5efff3f3e061 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -32,7 +32,7 @@ static MAX_CONNECTIONS: usize = 1024; /// Used to decide whether the TPU and underlying connection cache should use /// QUIC connections. -pub const DEFAULT_TPU_USE_QUIC: bool = false; +pub const DEFAULT_TPU_USE_QUIC: bool = true; /// Default TPU connection pool size per remote address pub const DEFAULT_TPU_CONNECTION_POOL_SIZE: usize = 4; @@ -683,6 +683,11 @@ mod tests { // be lazy and not connect until first use or handle connection errors somehow // (without crashing, as would be required in a real practical validator) let connection_cache = ConnectionCache::default(); + let port_offset = if connection_cache.use_quic() { + QUIC_PORT_OFFSET + } else { + 0 + }; let addrs = (0..MAX_CONNECTIONS) .into_iter() .map(|_| { @@ -695,18 +700,29 @@ mod tests { let map = connection_cache.map.read().unwrap(); assert!(map.len() == MAX_CONNECTIONS); addrs.iter().for_each(|a| { - let conn = &map.get(a).expect("Address not found").connections[0]; - let conn = conn.new_blocking_connection(*a, connection_cache.stats.clone()); - assert!(a.ip() == conn.tpu_addr().ip()); + let port = a + .port() + .checked_add(port_offset) + .unwrap_or_else(|| a.port()); + let addr = &SocketAddr::new(a.ip(), port); + + let conn = &map.get(addr).expect("Address not found").connections[0]; + let conn = conn.new_blocking_connection(*addr, connection_cache.stats.clone()); + assert!(addr.ip() == conn.tpu_addr().ip()); }); } - let addr = get_addr(&mut rng); - connection_cache.get_connection(&addr); + let addr = &get_addr(&mut rng); + connection_cache.get_connection(addr); + let port = addr + .port() + .checked_add(port_offset) + .unwrap_or_else(|| addr.port()); + let addr_with_quic_port = SocketAddr::new(addr.ip(), port); let map = connection_cache.map.read().unwrap(); assert!(map.len() == MAX_CONNECTIONS); - let _conn = map.get(&addr).expect("Address not found"); + let _conn = map.get(&addr_with_quic_port).expect("Address not found"); } #[test] diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 2547c00f94e5ca..83eae7330de1e4 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -4122,6 +4122,7 @@ mod tests { } #[test] + #[ignore] fn test_forwarder_budget() { solana_logger::setup(); // Create `PacketBatch` with 1 unprocessed packet @@ -4209,6 +4210,7 @@ mod tests { } #[test] + #[ignore] fn test_handle_forwarding() { solana_logger::setup(); // packets are deserialized upon receiving, failed packets will not be diff --git a/core/src/tpu.rs b/core/src/tpu.rs index e969ba90eed03a..606fee5cb3cded 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -63,8 +63,8 @@ pub struct Tpu { banking_stage: BankingStage, cluster_info_vote_listener: ClusterInfoVoteListener, broadcast_stage: BroadcastStage, - tpu_quic_t: Option>, - tpu_forwards_quic_t: Option>, + tpu_quic_t: thread::JoinHandle<()>, + tpu_forwards_quic_t: thread::JoinHandle<()>, find_packet_sender_stake_stage: FindPacketSenderStakeStage, vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage, staked_nodes_updater_service: StakedNodesUpdaterService, @@ -97,7 +97,6 @@ impl Tpu { connection_cache: &Arc, keypair: &Keypair, log_messages_bytes_limit: Option, - enable_quic_servers: bool, staked_nodes: &Arc>, shared_staked_nodes_overrides: Arc>>, ) -> Self { @@ -157,37 +156,33 @@ impl Tpu { let (verified_sender, verified_receiver) = unbounded(); let stats = Arc::new(StreamStats::default()); - let tpu_quic_t = enable_quic_servers.then(|| { - spawn_server( - transactions_quic_sockets, - keypair, - cluster_info.my_contact_info().tpu.ip(), - packet_sender, - exit.clone(), - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes.clone(), - MAX_STAKED_CONNECTIONS, - MAX_UNSTAKED_CONNECTIONS, - stats.clone(), - ) - .unwrap() - }); + let tpu_quic_t = spawn_server( + transactions_quic_sockets, + keypair, + cluster_info.my_contact_info().tpu.ip(), + packet_sender, + exit.clone(), + MAX_QUIC_CONNECTIONS_PER_PEER, + staked_nodes.clone(), + MAX_STAKED_CONNECTIONS, + MAX_UNSTAKED_CONNECTIONS, + stats.clone(), + ) + .unwrap(); - let tpu_forwards_quic_t = enable_quic_servers.then(|| { - spawn_server( - transactions_forwards_quic_sockets, - keypair, - cluster_info.my_contact_info().tpu_forwards.ip(), - forwarded_packet_sender, - exit.clone(), - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes.clone(), - MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), - 0, // Prevent unstaked nodes from forwarding transactions - stats, - ) - .unwrap() - }); + let tpu_forwards_quic_t = spawn_server( + transactions_forwards_quic_sockets, + keypair, + cluster_info.my_contact_info().tpu_forwards.ip(), + forwarded_packet_sender, + exit.clone(), + MAX_QUIC_CONNECTIONS_PER_PEER, + staked_nodes.clone(), + MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), + 0, // Prevent unstaked nodes from forwarding transactions + stats, + ) + .unwrap(); let sigverify_stage = { let verifier = TransactionSigVerifier::new(verified_sender); @@ -274,13 +269,9 @@ impl Tpu { self.find_packet_sender_stake_stage.join(), self.vote_find_packet_sender_stake_stage.join(), self.staked_nodes_updater_service.join(), + self.tpu_quic_t.join(), + self.tpu_forwards_quic_t.join(), ]; - if let Some(tpu_quic_t) = self.tpu_quic_t { - tpu_quic_t.join()?; - } - if let Some(tpu_forwards_quic_t) = self.tpu_forwards_quic_t { - tpu_forwards_quic_t.join()?; - } let broadcast_result = self.broadcast_stage.join(); for result in results { result?; diff --git a/core/src/validator.rs b/core/src/validator.rs index 533cabab67e4c6..5477985c27ad66 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -175,7 +175,6 @@ pub struct ValidatorConfig { pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, - pub enable_quic_servers: bool, } impl Default for ValidatorConfig { @@ -239,7 +238,6 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), - enable_quic_servers: true, } } } @@ -1025,7 +1023,6 @@ impl Validator { &connection_cache, &identity_keypair, config.runtime_config.log_messages_bytes_limit, - config.enable_quic_servers, &staked_nodes, config.staked_nodes_overrides.clone(), ); diff --git a/dos/src/main.rs b/dos/src/main.rs index fa75fe90b79435..baadc5c0016388 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -1185,11 +1185,13 @@ pub mod test { } #[test] + #[ignore] fn test_dos_with_blockhash_and_payer() { run_dos_with_blockhash_and_payer(/*tpu_use_quic*/ false) } #[test] + #[ignore] fn test_dos_with_blockhash_and_payer_and_quic() { run_dos_with_blockhash_and_payer(/*tpu_use_quic*/ true) } diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 4c3b281cb991bb..5d678319a3d636 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -65,7 +65,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), - enable_quic_servers: config.enable_quic_servers, } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 072239f5d951ab..6fad4c541c47c8 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -173,6 +173,7 @@ fn test_spend_and_verify_all_nodes_3() { #[test] #[serial] +#[ignore] fn test_local_cluster_signature_subscribe() { solana_logger::setup_with_default(RUST_LOG_FILTER); let num_nodes = 2; @@ -311,6 +312,7 @@ fn test_two_unbalanced_stakes() { #[test] #[serial] +#[ignore] fn test_forwarding() { solana_logger::setup_with_default(RUST_LOG_FILTER); // Set up a cluster where one node is never the leader, so all txs sent to this node @@ -1228,6 +1230,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st #[allow(unused_attributes)] #[test] #[serial] +#[ignore] fn test_snapshot_restart_tower() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes @@ -2520,6 +2523,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { #[test] #[serial] +#[ignore] fn test_votes_land_in_fork_during_long_partition() { let total_stake = 3 * DEFAULT_NODE_STAKE; // Make `lighter_stake` insufficient for switching threshold diff --git a/local-cluster/tests/local_cluster_slow_1.rs b/local-cluster/tests/local_cluster_slow_1.rs index 29a5f314c41dd2..2faf69f1e503cb 100644 --- a/local-cluster/tests/local_cluster_slow_1.rs +++ b/local-cluster/tests/local_cluster_slow_1.rs @@ -50,6 +50,7 @@ mod common; #[test] #[serial] +#[ignore] // Steps in this test: // We want to create a situation like: /* @@ -588,6 +589,7 @@ fn test_duplicate_shreds_broadcast_leader() { #[test] #[serial] +#[ignore] fn test_switch_threshold_uses_gossip_votes() { solana_logger::setup_with_default(RUST_LOG_FILTER); let total_stake = 100 * DEFAULT_NODE_STAKE; diff --git a/local-cluster/tests/local_cluster_slow_2.rs b/local-cluster/tests/local_cluster_slow_2.rs index 6488ddea1e0e57..d6d315ed0d46d8 100644 --- a/local-cluster/tests/local_cluster_slow_2.rs +++ b/local-cluster/tests/local_cluster_slow_2.rs @@ -201,6 +201,7 @@ fn test_leader_failure_4() { #[test] #[serial] +#[ignore] fn test_ledger_cleanup_service() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_ledger_cleanup_service"); diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 9245f507c394e2..deb82f106fae04 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -61,7 +61,7 @@ while [[ -n $1 ]]; do elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then args+=("$1") shift - elif [[ $1 = --tpu-use-quic ]]; then + elif [[ $1 = --tpu-disable-quic ]]; then args+=("$1") shift elif [[ $1 = --rpc-send-batch-ms ]]; then diff --git a/validator/src/main.rs b/validator/src/main.rs index 5381155c6591a6..5d0a824feac4a0 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1218,13 +1218,21 @@ pub fn main() { Arg::with_name("tpu_use_quic") .long("tpu-use-quic") .takes_value(false) + .hidden(true) + .conflicts_with("tpu_disable_quic") .help("Use QUIC to send transactions."), ) + .arg( + Arg::with_name("tpu_disable_quic") + .long("tpu-disable-quic") + .takes_value(false) + .help("Do not use QUIC to send transactions."), + ) .arg( Arg::with_name("disable_quic_servers") .long("disable-quic-servers") .takes_value(false) - .help("Disable QUIC TPU servers"), + .hidden(true) ) .arg( Arg::with_name("enable_quic_servers") @@ -2394,8 +2402,7 @@ pub fn main() { let restricted_repair_only_mode = matches.is_present("restricted_repair_only_mode"); let accounts_shrink_optimize_total_space = value_t_or_exit!(matches, "accounts_shrink_optimize_total_space", bool); - let tpu_use_quic = matches.is_present("tpu_use_quic"); - let enable_quic_servers = !matches.is_present("disable_quic_servers"); + let tpu_use_quic = !matches.is_present("tpu_disable_quic"); let tpu_connection_pool_size = value_t_or_exit!(matches, "tpu_connection_pool_size", usize); let shrink_ratio = value_t_or_exit!(matches, "accounts_shrink_ratio", f64); @@ -2565,6 +2572,9 @@ pub fn main() { if matches.is_present("enable_quic_servers") { warn!("--enable-quic-servers is now the default behavior. This flag is deprecated and can be removed from the launch args"); } + if matches.is_present("disable_quic_servers") { + warn!("--disable-quic-servers is deprecated. The quic server cannot be disabled."); + } let rpc_bigtable_config = if matches.is_present("enable_rpc_bigtable_ledger_storage") || matches.is_present("enable_bigtable_ledger_upload") @@ -2749,7 +2759,6 @@ pub fn main() { log_messages_bytes_limit: value_of(&matches, "log_messages_bytes_limit"), ..RuntimeConfig::default() }, - enable_quic_servers, staked_nodes_overrides: staked_nodes_overrides.clone(), ..ValidatorConfig::default() }; From 40b9f2f2be20a30b47fce2fe00523afaf36bf182 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 19 Aug 2022 09:33:50 -0500 Subject: [PATCH 39/51] slots_connected: check if the range is connected (>= ending_slot) (#27152) --- ledger-tool/src/main.rs | 4 ++-- ledger/src/blockstore.rs | 27 +++++++++++++-------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index e2b6fdc2e7609a..ce3d42a06a0a9a 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -955,9 +955,9 @@ fn load_bank_forks( }; if let Some(halt_slot) = process_options.halt_at_slot { - // Check if we have the slot data necessary to replay from starting_slot to halt_slot. + // Check if we have the slot data necessary to replay from starting_slot to >= halt_slot. // - This will not catch the case when loading from genesis without a full slot 0. - if !blockstore.slots_connected(starting_slot, halt_slot) { + if !blockstore.slot_range_connected(starting_slot, halt_slot) { eprintln!( "Unable to load bank forks at slot {} due to disconnected blocks.", halt_slot, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 2c0913a5ab8cfb..5c246b5ab8045c 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -546,9 +546,9 @@ impl Blockstore { self.prepare_rooted_slot_iterator(slot, IteratorDirection::Reverse) } - /// Determines if `starting_slot` and `ending_slot` are connected by full slots + /// Determines if we can iterate from `starting_slot` to >= `ending_slot` by full slots /// `starting_slot` is excluded from the `is_full()` check - pub fn slots_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool { + pub fn slot_range_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool { if starting_slot == ending_slot { return true; } @@ -562,8 +562,7 @@ impl Blockstore { if slot_meta.is_full() { match slot.cmp(&ending_slot) { cmp::Ordering::Less => next_slots.extend(slot_meta.next_slots), - cmp::Ordering::Equal => return true, - cmp::Ordering::Greater => {} // slot is greater than the ending slot, so all its children would be as well + _ => return true, } } } @@ -5502,7 +5501,7 @@ pub mod tests { } */ #[test] - fn test_slots_connected_chain() { + fn test_slot_range_connected_chain() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5511,12 +5510,12 @@ pub mod tests { make_and_insert_slot(&blockstore, slot, slot.saturating_sub(1)); } - assert!(blockstore.slots_connected(1, 3)); - assert!(!blockstore.slots_connected(1, 4)); // slot 4 does not exist + assert!(blockstore.slot_range_connected(1, 3)); + assert!(!blockstore.slot_range_connected(1, 4)); // slot 4 does not exist } #[test] - fn test_slots_connected_disconnected() { + fn test_slot_range_connected_disconnected() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5524,20 +5523,20 @@ pub mod tests { make_and_insert_slot(&blockstore, 2, 1); make_and_insert_slot(&blockstore, 4, 2); - assert!(!blockstore.slots_connected(1, 3)); // Slot 3 does not exit - assert!(blockstore.slots_connected(1, 4)); + assert!(blockstore.slot_range_connected(1, 3)); // Slot 3 does not exist, but we can still replay this range to slot 4 + assert!(blockstore.slot_range_connected(1, 4)); } #[test] - fn test_slots_connected_same_slot() { + fn test_slot_range_connected_same_slot() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - assert!(blockstore.slots_connected(54, 54)); + assert!(blockstore.slot_range_connected(54, 54)); } #[test] - fn test_slots_connected_starting_slot_not_full() { + fn test_slot_range_connected_starting_slot_not_full() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5545,7 +5544,7 @@ pub mod tests { make_and_insert_slot(&blockstore, 6, 5); assert!(!blockstore.meta(4).unwrap().unwrap().is_full()); - assert!(blockstore.slots_connected(4, 6)); + assert!(blockstore.slot_range_connected(4, 6)); } #[test] From 6da3eb028c90ec48808f0a7bf1d2f151c7175703 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 19 Aug 2022 09:34:35 -0500 Subject: [PATCH 40/51] create-snapshot check if snapshot slot exists (#27153) --- ledger-tool/src/main.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index ce3d42a06a0a9a..9484e1b691fa8a 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -2817,6 +2817,11 @@ fn main() { value_t_or_exit!(arg_matches, "snapshot_slot", Slot) }; + assert!( + blockstore.meta(snapshot_slot).unwrap().is_some(), + "snapshot slot doesn't exist" + ); + let ending_slot = if is_minimized { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { From 2184d0ff26ff7d2768f506a0c0d530ec0cb07335 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 19 Aug 2022 12:09:35 -0400 Subject: [PATCH 41/51] Add Bank::clean_accounts_for_tests() (#27209) --- runtime/src/bank.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8557b70bd06d30..1728a2bbfea247 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8085,6 +8085,12 @@ pub(crate) mod tests { } } + impl Bank { + fn clean_accounts_for_tests(&self) { + self.rc.accounts.accounts_db.clean_accounts_for_tests() + } + } + #[test] fn test_nonce_info() { let lamports_per_signature = 42; @@ -10403,7 +10409,7 @@ pub(crate) mod tests { bank.squash(); bank.force_flush_accounts_cache(); let hash = bank.update_accounts_hash(); - bank.clean_accounts(false, false, None); + bank.clean_accounts_for_tests(); assert_eq!(bank.update_accounts_hash(), hash); let bank0 = Arc::new(new_from_parent(&bank)); @@ -10426,7 +10432,7 @@ pub(crate) mod tests { info!("bank0 purge"); let hash = bank0.update_accounts_hash(); - bank0.clean_accounts(false, false, None); + bank0.clean_accounts_for_tests(); assert_eq!(bank0.update_accounts_hash(), hash); assert_eq!( @@ -10436,7 +10442,7 @@ pub(crate) mod tests { assert_eq!(bank1.get_account(&keypair.pubkey()), None); info!("bank1 purge"); - bank1.clean_accounts(false, false, None); + bank1.clean_accounts_for_tests(); assert_eq!( bank0.get_account(&keypair.pubkey()).unwrap().lamports(), @@ -10460,7 +10466,7 @@ pub(crate) mod tests { assert_eq!(bank0.get_account(&keypair.pubkey()), None); assert_eq!(bank1.get_account(&keypair.pubkey()), None); bank1.force_flush_accounts_cache(); - bank1.clean_accounts(false, false, None); + bank1.clean_accounts_for_tests(); assert!(bank1.verify_bank_hash(VerifyBankHash::default_for_test())); } @@ -14689,7 +14695,7 @@ pub(crate) mod tests { // Clean accounts, which should add earlier slots to the shrink // candidate set - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); let mut bank3 = Arc::new(Bank::new_from_parent(&bank2, &Pubkey::default(), 3)); bank3.deposit(&pubkey1, some_lamports + 1).unwrap(); @@ -14698,7 +14704,7 @@ pub(crate) mod tests { bank3.squash(); bank3.force_flush_accounts_cache(); - bank3.clean_accounts(false, false, None); + bank3.clean_accounts_for_tests(); assert_eq!( bank3.rc.accounts.accounts_db.ref_count_for_pubkey(&pubkey0), 2 @@ -14767,7 +14773,7 @@ pub(crate) mod tests { // Clean accounts, which should add earlier slots to the shrink // candidate set - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); // Slots 0 and 1 should be candidates for shrinking, but slot 2 // shouldn't because none of its accounts are outdated by a later @@ -14821,7 +14827,7 @@ pub(crate) mod tests { goto_end_of_slot(Arc::::get_mut(&mut bank).unwrap()); bank.squash(); - bank.clean_accounts(false, false, None); + bank.clean_accounts_for_tests(); let force_to_return_alive_account = 0; assert_eq!( bank.process_stale_slot_with_budget(22, force_to_return_alive_account), @@ -16206,7 +16212,7 @@ pub(crate) mod tests { current_major_fork_bank.squash(); // Try to get cache flush/clean to overlap with the scan current_major_fork_bank.force_flush_accounts_cache(); - current_major_fork_bank.clean_accounts(false, false, None); + current_major_fork_bank.clean_accounts_for_tests(); // Move purge here so that Bank::drop()->purge_slots() doesn't race // with clean. Simulates the call from AccountsBackgroundService abs_request_handler.handle_pruned_banks(¤t_major_fork_bank, true); @@ -17335,7 +17341,7 @@ pub(crate) mod tests { bank2.squash(); drop(bank1); - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); let expected_ref_count_for_cleaned_up_keys = 0; let expected_ref_count_for_keys_in_both_slot1_and_slot2 = 1; From b3c72bc8a2a306e40770da7eefb7b167378d380f Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 19 Aug 2022 12:09:47 -0400 Subject: [PATCH 42/51] Call `AccountsDb::shrink_all_slots()` directly (#27235) --- runtime/src/bank.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1728a2bbfea247..7cc04184ce02be 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7162,7 +7162,10 @@ impl Bank { let mut shrink_all_slots_time = Measure::start("shrink_all_slots"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("shrinking.."); - self.shrink_all_slots(true, Some(last_full_snapshot_slot)); + self.rc + .accounts + .accounts_db + .shrink_all_slots(true, Some(last_full_snapshot_slot)); } shrink_all_slots_time.stop(); @@ -7458,13 +7461,6 @@ impl Bank { ); } - pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option) { - self.rc - .accounts - .accounts_db - .shrink_all_slots(is_startup, last_full_snapshot_slot); - } - pub fn print_accounts_stats(&self) { self.rc.accounts.accounts_db.print_accounts_stats(""); } From 6f045d608074da7cc7a227eb26ee014c559532f0 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Fri, 19 Aug 2022 12:14:37 -0500 Subject: [PATCH 43/51] add ed25519_program to built-in instruction cost list (#27199) * add ed25519_program to built-in instruction cost list * Remove unnecessary and stale comment --- runtime/src/block_cost_limits.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index 31964f88cdaee3..cc279b4dd7549e 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -3,7 +3,8 @@ use { lazy_static::lazy_static, solana_sdk::{ - feature, incinerator, native_loader, pubkey::Pubkey, secp256k1_program, system_program, + ed25519_program, feature, incinerator, native_loader, pubkey::Pubkey, secp256k1_program, + system_program, }, std::collections::HashMap, }; @@ -38,8 +39,8 @@ lazy_static! { (solana_sdk::stake::program::id(), COMPUTE_UNIT_TO_US_RATIO * 25), (solana_config_program::id(), COMPUTE_UNIT_TO_US_RATIO * 15), (solana_vote_program::id(), COMPUTE_UNIT_TO_US_RATIO * 70), - // secp256k1 is executed in banking stage, it should cost similar to sigverify (secp256k1_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), + (ed25519_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), (system_program::id(), COMPUTE_UNIT_TO_US_RATIO * 5), ] .iter() From 2031e0afaf6d5501da1d6fca51df240e2465961f Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 12:40:42 -0500 Subject: [PATCH 44/51] simple refactorings to disk idx (#27238) --- runtime/src/accounts_index.rs | 3 ++- runtime/src/bucket_map_holder.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index a2bd30d35d4bda..386367e00520c1 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -412,7 +412,8 @@ impl PreAllocatedAccountMapEntry { account_info: T, storage: &Arc>, ) -> AccountMapEntry { - let ref_count = if account_info.is_cached() { 0 } else { 1 }; + let is_cached = account_info.is_cached(); + let ref_count = if is_cached { 0 } else { 1 }; let meta = AccountMapEntryMeta::new_dirty(storage); Arc::new(AccountMapEntryInner::new( vec![(slot, account_info)], diff --git a/runtime/src/bucket_map_holder.rs b/runtime/src/bucket_map_holder.rs index b0237c59101cb9..aea586dca659a1 100644 --- a/runtime/src/bucket_map_holder.rs +++ b/runtime/src/bucket_map_holder.rs @@ -31,6 +31,7 @@ pub struct BucketMapHolder { pub disk: Option>, pub count_buckets_flushed: AtomicUsize, + /// rolling 'current' age pub age: AtomicU8, pub stats: BucketMapHolderStats, From e132583a24da318f4fea25da14f9d1bb26377fdd Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 14:17:32 -0500 Subject: [PATCH 45/51] add _inclusive for clarity (#27239) --- runtime/src/accounts_db.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index c84f45501faacf..838f9bfb649aaf 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2387,12 +2387,12 @@ impl AccountsDb { /// Collect all the uncleaned slots, up to a max slot /// /// Search through the uncleaned Pubkeys and return all the slots, up to a maximum slot. - fn collect_uncleaned_slots_up_to_slot(&self, max_slot: Slot) -> Vec { + fn collect_uncleaned_slots_up_to_slot(&self, max_slot_inclusive: Slot) -> Vec { self.uncleaned_pubkeys .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then(|| slot) + (slot <= max_slot_inclusive).then(|| slot) }) .collect() } @@ -2419,9 +2419,9 @@ impl AccountsDb { /// fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot( &self, - max_slot: Slot, + max_slot_inclusive: Slot, ) -> Vec> { - let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot); + let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot_inclusive); self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots) } @@ -2435,10 +2435,11 @@ impl AccountsDb { timings: &mut CleanKeyTimings, ) -> Vec { let mut dirty_store_processing_time = Measure::start("dirty_store_processing"); - let max_slot = max_clean_root.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); + let max_slot_inclusive = + max_clean_root.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); let mut dirty_stores = Vec::with_capacity(self.dirty_stores.len()); self.dirty_stores.retain(|(slot, _store_id), store| { - if *slot > max_slot { + if *slot > max_slot_inclusive { true } else { dirty_stores.push((*slot, store.clone())); @@ -2447,7 +2448,7 @@ impl AccountsDb { }); let dirty_stores_len = dirty_stores.len(); let pubkeys = DashSet::new(); - timings.oldest_dirty_slot = max_slot.saturating_add(1); + timings.oldest_dirty_slot = max_slot_inclusive.saturating_add(1); for (slot, store) in dirty_stores { timings.oldest_dirty_slot = std::cmp::min(timings.oldest_dirty_slot, slot); store.accounts.account_iter().for_each(|account| { @@ -2464,7 +2465,8 @@ impl AccountsDb { timings.dirty_store_processing_us += dirty_store_processing_time.as_us(); let mut collect_delta_keys = Measure::start("key_create"); - let delta_keys = self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot); + let delta_keys = + self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot_inclusive); collect_delta_keys.stop(); timings.collect_delta_keys_us += collect_delta_keys.as_us(); @@ -2496,7 +2498,7 @@ impl AccountsDb { self.zero_lamport_accounts_to_purge_after_full_snapshot .retain(|(slot, pubkey)| { let is_candidate_for_clean = - max_slot >= *slot && last_full_snapshot_slot >= *slot; + max_slot_inclusive >= *slot && last_full_snapshot_slot >= *slot; if is_candidate_for_clean { pubkeys.push(*pubkey); } From 42e227778ba9ea0f97938ec016ca2a2d7d4f4dc7 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 14:18:00 -0500 Subject: [PATCH 46/51] eliminate unnecessary ZERO_RAW_LAMPORTS_SENTINEL (#27218) --- runtime/src/accounts_db.rs | 9 +-------- runtime/src/accounts_hash.rs | 16 +++++++++------- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 838f9bfb649aaf..2d83547095b790 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1820,14 +1820,7 @@ impl<'a, T: Fn(Slot) -> Option + Sync + Send + Clone> AppendVecScan for Sc // when we are scanning with bin ranges, we don't need to use exact bin numbers. Subtract to make first bin we care about at index 0. self.pubkey_to_bin_index -= self.bin_range.start; - let raw_lamports = loaded_account.lamports(); - let zero_raw_lamports = raw_lamports == 0; - let balance = if zero_raw_lamports { - crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL - } else { - raw_lamports - }; - + let balance = loaded_account.lamports(); let loaded_hash = loaded_account.loaded_hash(); let new_hash = ExpectedRentCollection::maybe_rehash_skipped_rewrite( loaded_account, diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs index c36a95d3e02640..251050816a0a24 100644 --- a/runtime/src/accounts_hash.rs +++ b/runtime/src/accounts_hash.rs @@ -18,7 +18,6 @@ use { }, }, }; -pub const ZERO_RAW_LAMPORTS_SENTINEL: u64 = std::u64::MAX; pub const MERKLE_FANOUT: usize = 16; #[derive(Default, Debug)] @@ -844,7 +843,7 @@ impl AccountsHash { ); // add lamports, get hash as long as the lamports are > 0 - if item.lamports != ZERO_RAW_LAMPORTS_SENTINEL + if item.lamports != 0 && (!filler_accounts_enabled || !self.is_filler_account(&item.pubkey)) { overall_sum = Self::checked_cast_for_capitalization( @@ -1042,7 +1041,7 @@ pub mod tests { // 2nd key - zero lamports, so will be removed let key = Pubkey::new(&[12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); let accounts_hash = AccountsHash::default(); @@ -1116,7 +1115,7 @@ pub mod tests { // 2nd key - zero lamports, so will be removed let key = Pubkey::new(&[12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); let mut previous_pass = PreviousPass::default(); @@ -1395,10 +1394,13 @@ pub mod tests { #[test] fn test_accountsdb_de_dup_accounts_zero_chunks() { - let vec = [vec![vec![CalculateHashIntermediate::default()]]]; + let vec = [vec![vec![CalculateHashIntermediate { + lamports: 1, + ..CalculateHashIntermediate::default() + }]]]; let (hashes, lamports, _) = AccountsHash::default().de_dup_accounts_in_parallel(&vec, 0); assert_eq!(vec![&Hash::default()], hashes); - assert_eq!(lamports, 0); + assert_eq!(lamports, 1); } #[test] @@ -1653,7 +1655,7 @@ pub mod tests { assert_eq!(result, (vec![&val.hash], val.lamports as u64, 1)); // zero original lamports, higher version - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); // has to be after previous entry since account_maps are in slot order let vecs = vec![vec![account_maps.to_vec()]]; From 1b441f7a2d0ce1d85149846483c8187f3ff1b758 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 14:39:20 -0500 Subject: [PATCH 47/51] make test code more clear (#27260) --- runtime/src/accounts_db.rs | 3 ++- runtime/src/bank.rs | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 2d83547095b790..adef621aac3afd 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -5536,7 +5536,8 @@ impl AccountsDb { .fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed); } - pub fn flush_accounts_cache_slot(&self, slot: Slot) { + #[cfg(test)] + pub(crate) fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) { self.flush_slot_cache(slot); } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7cc04184ce02be..ae9e54ad6b3c67 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6421,11 +6421,11 @@ impl Bank { } #[cfg(test)] - pub fn flush_accounts_cache_slot(&self) { + pub fn flush_accounts_cache_slot_for_tests(&self) { self.rc .accounts .accounts_db - .flush_accounts_cache_slot(self.slot()) + .flush_accounts_cache_slot_for_tests(self.slot()) } pub fn expire_old_recycle_stores(&self) { @@ -14672,7 +14672,7 @@ pub(crate) mod tests { bank1.deposit(&pubkey0, some_lamports).unwrap(); goto_end_of_slot(Arc::::get_mut(&mut bank1).unwrap()); bank1.freeze(); - bank1.flush_accounts_cache_slot(); + bank1.flush_accounts_cache_slot_for_tests(); bank1.print_accounts_stats(); From eb06bb61e83570d33b787c868c960377ec5cb1da Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 19 Aug 2022 15:16:56 -0500 Subject: [PATCH 48/51] banking stage: actually aggregate tracer packet stats (#27118) * aggregated_tracer_packet_stats_option was alwasys None * Actually accumulate tracer packet stats --- core/src/banking_stage.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 83eae7330de1e4..2ec79c951675d7 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -2000,26 +2000,26 @@ impl BankingStage { packet_count_upperbound: usize, ) -> Result<(Vec, Option), RecvTimeoutError> { let start = Instant::now(); - let mut aggregated_tracer_packet_stats_option: Option = None; - let (mut packet_batches, new_tracer_packet_stats_option) = + let (mut packet_batches, mut aggregated_tracer_packet_stats_option) = verified_receiver.recv_timeout(recv_timeout)?; - if let Some(new_tracer_packet_stats) = &new_tracer_packet_stats_option { - if let Some(aggregated_tracer_packet_stats) = &mut aggregated_tracer_packet_stats_option - { - aggregated_tracer_packet_stats.aggregate(new_tracer_packet_stats); - } else { - aggregated_tracer_packet_stats_option = new_tracer_packet_stats_option; - } - } - let mut num_packets_received: usize = packet_batches.iter().map(|batch| batch.len()).sum(); - while let Ok((packet_batch, _tracer_packet_stats_option)) = verified_receiver.try_recv() { + while let Ok((packet_batch, tracer_packet_stats_option)) = verified_receiver.try_recv() { trace!("got more packet batches in banking stage"); let (packets_received, packet_count_overflowed) = num_packets_received .overflowing_add(packet_batch.iter().map(|batch| batch.len()).sum()); packet_batches.extend(packet_batch); + if let Some(tracer_packet_stats) = &tracer_packet_stats_option { + if let Some(aggregated_tracer_packet_stats) = + &mut aggregated_tracer_packet_stats_option + { + aggregated_tracer_packet_stats.aggregate(tracer_packet_stats); + } else { + aggregated_tracer_packet_stats_option = tracer_packet_stats_option; + } + } + // Spend any leftover receive time budget to greedily receive more packet batches, // until the upperbound of the packet count is reached. if start.elapsed() >= recv_timeout From a54ea4d62d10df7a014db312369676a21d733515 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 19 Aug 2022 16:04:24 -0500 Subject: [PATCH 49/51] Refactor epoch reward 1 (#27253) * refactor: extract store_stake_accounts fn * clippy: slice Co-authored-by: haoran --- runtime/src/bank.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ae9e54ad6b3c67..7e5aeef16465cd 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3208,14 +3208,7 @@ impl Bank { m.stop(); metrics.redeem_rewards_us += m.as_us(); - // store stake account even if stakers_reward is 0 - // because credits observed has changed - let mut m = Measure::start("store_stake_account"); - self.store_accounts((self.slot(), &stake_rewards[..])); - m.stop(); - metrics - .store_stake_accounts_us - .fetch_add(m.as_us(), Relaxed); + self.store_stake_accounts(&stake_rewards, metrics); let mut m = Measure::start("store_vote_accounts"); let mut vote_rewards = vote_account_rewards @@ -3265,6 +3258,17 @@ impl Bank { point_value.rewards as f64 / point_value.points as f64 } + fn store_stake_accounts(&self, stake_rewards: &[StakeReward], metrics: &mut RewardsMetrics) { + // store stake account even if stakers_reward is 0 + // because credits observed has changed + let mut m = Measure::start("store_stake_account"); + self.store_accounts((self.slot(), stake_rewards)); + m.stop(); + metrics + .store_stake_accounts_us + .fetch_add(m.as_us(), Relaxed); + } + fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { From c0b63351ae4fb64587b25cd89eeb6708eaa39ab4 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 19 Aug 2022 21:07:32 +0000 Subject: [PATCH 50/51] recovers merkle shreds from erasure codes (#27136) The commit * Identifies Merkle shreds when recovering from erasure codes and dispatches specialized code to reconstruct shreds. * Coding shred headers are added to recovered erasure shards. * Merkle tree is reconstructed for the erasure batch and added to recovered shreds. * The common signature (for the root of Merkle tree) is attached to all recovered shreds. --- Cargo.lock | 1 + ledger/Cargo.toml | 1 + ledger/src/blockstore.rs | 31 ++- ledger/src/shred.rs | 53 +++- ledger/src/shred/merkle.rs | 489 ++++++++++++++++++++++++++++++++++++- ledger/src/shredder.rs | 2 +- 6 files changed, 552 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9348db394a09bb..8e0d13a53004f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5506,6 +5506,7 @@ dependencies = [ "spl-token-2022", "static_assertions", "tempfile", + "test-case", "thiserror", "tokio", "tokio-stream", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index c8f16585eef955..915bf2038de3c8 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -73,6 +73,7 @@ bs58 = "0.4.0" matches = "0.1.9" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-logger = { path = "../logger", version = "=1.12.0" } +test-case = "2.1.0" [build-dependencies] rustc_version = "0.4" diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 5c246b5ab8045c..5bddc02bb90a4e 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -625,7 +625,7 @@ impl Blockstore { index: &mut Index, erasure_meta: &ErasureMeta, prev_inserted_shreds: &HashMap, - recovered_data_shreds: &mut Vec, + recovered_shreds: &mut Vec, data_cf: &LedgerColumn, code_cf: &LedgerColumn, ) { @@ -646,9 +646,9 @@ impl Blockstore { code_cf, )) .collect(); - if let Ok(mut result) = Shredder::try_recovery(available_shreds) { + if let Ok(mut result) = shred::recover(available_shreds) { Self::submit_metrics(slot, erasure_meta, true, "complete".into(), result.len()); - recovered_data_shreds.append(&mut result); + recovered_shreds.append(&mut result); } else { Self::submit_metrics(slot, erasure_meta, true, "incomplete".into(), 0); } @@ -709,7 +709,7 @@ impl Blockstore { ) -> Vec { let data_cf = db.column::(); let code_cf = db.column::(); - let mut recovered_data_shreds = vec![]; + let mut recovered_shreds = vec![]; // Recovery rules: // 1. Only try recovery around indexes for which new data or coding shreds are received // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery @@ -725,7 +725,7 @@ impl Blockstore { index, erasure_meta, prev_inserted_shreds, - &mut recovered_data_shreds, + &mut recovered_shreds, &data_cf, &code_cf, ); @@ -744,7 +744,7 @@ impl Blockstore { } }; } - recovered_data_shreds + recovered_shreds } /// The main helper function that performs the shred insertion logic @@ -888,15 +888,18 @@ impl Blockstore { metrics.insert_shreds_elapsed_us += start.as_us(); let mut start = Measure::start("Shred recovery"); if let Some(leader_schedule_cache) = leader_schedule { - let recovered_data_shreds = Self::try_shred_recovery( + let recovered_shreds = Self::try_shred_recovery( db, &erasure_metas, &mut index_working_set, &just_inserted_shreds, ); - metrics.num_recovered += recovered_data_shreds.len(); - let recovered_data_shreds: Vec<_> = recovered_data_shreds + metrics.num_recovered += recovered_shreds + .iter() + .filter(|shred| shred.is_data()) + .count(); + let recovered_shreds: Vec<_> = recovered_shreds .into_iter() .filter_map(|shred| { let leader = @@ -905,6 +908,12 @@ impl Blockstore { metrics.num_recovered_failed_sig += 1; return None; } + // Since the data shreds are fully recovered from the + // erasure batch, no need to store coding shreds in + // blockstore. + if shred.is_code() { + return Some(shred); + } match self.check_insert_data_shred( shred.clone(), &mut erasure_metas, @@ -941,10 +950,10 @@ impl Blockstore { // Always collect recovered-shreds so that above insert code is // executed even if retransmit-sender is None. .collect(); - if !recovered_data_shreds.is_empty() { + if !recovered_shreds.is_empty() { if let Some(retransmit_sender) = retransmit_sender { let _ = retransmit_sender.send( - recovered_data_shreds + recovered_shreds .into_iter() .map(Shred::into_payload) .collect(), diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index e17055b1e7d9a9..cee63cb45df57d 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -61,6 +61,7 @@ use { crate::blockstore::{self, MAX_DATA_SHREDS_PER_SLOT}, bitflags::bitflags, num_enum::{IntoPrimitive, TryFromPrimitive}, + reed_solomon_erasure::Error::TooFewShardsPresent, serde::{Deserialize, Serialize}, solana_entry::entry::{create_ticks, Entry}, solana_perf::packet::Packet, @@ -144,6 +145,10 @@ pub enum Error { InvalidPayloadSize(/*payload size:*/ usize), #[error("Invalid proof size: {0}")] InvalidProofSize(/*proof_size:*/ u8), + #[error("Invalid recovered shred")] + InvalidRecoveredShred, + #[error("Invalid shard size: {0}")] + InvalidShardSize(/*shard_size:*/ usize), #[error("Invalid shred flags: {0}")] InvalidShredFlags(u8), #[error("Invalid {0:?} shred index: {1}")] @@ -211,7 +216,7 @@ struct DataShredHeader { struct CodingShredHeader { num_data_shreds: u16, num_coding_shreds: u16, - position: u16, + position: u16, // [0..num_coding_shreds) } #[derive(Clone, Debug, PartialEq, Eq)] @@ -294,6 +299,8 @@ macro_rules! dispatch { } } +use dispatch; + impl Shred { dispatch!(fn common_header(&self) -> &ShredCommonHeader); dispatch!(fn set_signature(&mut self, signature: Signature)); @@ -494,6 +501,7 @@ impl Shred { } } + #[must_use] pub fn verify(&self, pubkey: &Pubkey) -> bool { let message = self.signed_message(); self.signature().verify(pubkey.as_ref(), message) @@ -642,6 +650,28 @@ impl From for Shred { } } +impl From for Shred { + fn from(shred: merkle::Shred) -> Self { + match shred { + merkle::Shred::ShredCode(shred) => Self::ShredCode(ShredCode::Merkle(shred)), + merkle::Shred::ShredData(shred) => Self::ShredData(ShredData::Merkle(shred)), + } + } +} + +impl TryFrom for merkle::Shred { + type Error = Error; + + fn try_from(shred: Shred) -> Result { + match shred { + Shred::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant), + Shred::ShredCode(ShredCode::Merkle(shred)) => Ok(Self::ShredCode(shred)), + Shred::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant), + Shred::ShredData(ShredData::Merkle(shred)) => Ok(Self::ShredData(shred)), + } + } +} + impl From for ShredType { #[inline] fn from(shred_variant: ShredVariant) -> Self { @@ -682,6 +712,27 @@ impl TryFrom for ShredVariant { } } +pub(crate) fn recover(shreds: Vec) -> Result, Error> { + match shreds + .first() + .ok_or(TooFewShardsPresent)? + .common_header() + .shred_variant + { + ShredVariant::LegacyData | ShredVariant::LegacyCode => Shredder::try_recovery(shreds), + ShredVariant::MerkleCode(_) | ShredVariant::MerkleData(_) => { + let shreds = shreds + .into_iter() + .map(merkle::Shred::try_from) + .collect::>()?; + Ok(merkle::recover(shreds)? + .into_iter() + .map(Shred::from) + .collect()) + } + } +} + // Accepts shreds in the slot range [root + 1, max_slot]. #[must_use] pub fn should_discard_shred( diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 5b224632a2a880..9d0482b95354a5 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -1,12 +1,20 @@ +#[cfg(test)] +use {crate::shred::ShredType, solana_sdk::pubkey::Pubkey}; use { - crate::shred::{ - common::impl_shred_common, - shred_code, shred_data, - traits::{Shred, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait}, - CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, - SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, - SIZE_OF_SIGNATURE, + crate::{ + shred::{ + common::impl_shred_common, + dispatch, shred_code, shred_data, + traits::{ + Shred as ShredTrait, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait, + }, + CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, + SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, + SIZE_OF_SIGNATURE, + }, + shredder::ReedSolomon, }, + reed_solomon_erasure::Error::{InvalidIndex, TooFewParityShards, TooFewShards}, solana_perf::packet::deserialize_from_with_limit, solana_sdk::{ clock::Slot, @@ -58,12 +66,58 @@ pub struct ShredCode { payload: Vec, } +#[derive(Clone, Debug, Eq, PartialEq)] +pub(super) enum Shred { + ShredCode(ShredCode), + ShredData(ShredData), +} + #[derive(Clone, Debug, Eq, PartialEq)] struct MerkleBranch { root: MerkleRoot, proof: Vec, } +impl Shred { + dispatch!(fn common_header(&self) -> &ShredCommonHeader); + dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>); + dispatch!(fn erasure_shard_index(&self) -> Result); + dispatch!(fn merkle_tree_node(&self) -> Result); + dispatch!(fn sanitize(&self) -> Result<(), Error>); + dispatch!(fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error>); + + fn merkle_root(&self) -> &MerkleRoot { + match self { + Self::ShredCode(shred) => &shred.merkle_branch.root, + Self::ShredData(shred) => &shred.merkle_branch.root, + } + } +} + +#[cfg(test)] +impl Shred { + dispatch!(fn set_signature(&mut self, signature: Signature)); + dispatch!(fn signed_message(&self) -> &[u8]); + + fn index(&self) -> u32 { + self.common_header().index + } + + fn shred_type(&self) -> ShredType { + ShredType::from(self.common_header().shred_variant) + } + + fn signature(&self) -> Signature { + self.common_header().signature + } + + #[must_use] + fn verify(&self, pubkey: &Pubkey) -> bool { + let message = self.signed_message(); + self.signature().verify(pubkey.as_ref(), message) + } +} + impl ShredData { // proof_size is the number of proof entries in the merkle tree branch. fn proof_size(&self) -> Result { @@ -104,6 +158,52 @@ impl ShredData { let index = self.erasure_shard_index()?; Ok(verify_merkle_proof(index, node, &self.merkle_branch)) } + + fn from_recovered_shard(signature: &Signature, mut shard: Vec) -> Result { + let shard_size = shard.len(); + if shard_size + SIZE_OF_SIGNATURE > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, SIZE_OF_SIGNATURE); + shard[0..SIZE_OF_SIGNATURE].copy_from_slice(signature.as_ref()); + // Deserialize headers. + let mut cursor = Cursor::new(&shard[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleData(proof_size) => proof_size, + _ => return Err(Error::InvalidShredVariant), + }; + if ShredCode::capacity(proof_size)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + let data_header = deserialize_from_with_limit(&mut cursor)?; + Ok(Self { + common_header, + data_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload: shard, + }) + } + + fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let mut cursor = Cursor::new( + self.payload + .get_mut(offset..) + .ok_or(Error::InvalidProofSize(proof_size))?, + ); + bincode::serialize_into(&mut cursor, &merkle_branch.root)?; + for entry in &merkle_branch.proof { + bincode::serialize_into(&mut cursor, entry)?; + } + self.merkle_branch = merkle_branch; + Ok(()) + } } impl ShredCode { @@ -154,9 +254,66 @@ impl ShredCode { || self.merkle_branch.root != other.merkle_branch.root || self.common_header.signature != other.common_header.signature } + + fn from_recovered_shard( + common_header: ShredCommonHeader, + coding_header: CodingShredHeader, + mut shard: Vec, + ) -> Result { + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size) => proof_size, + _ => return Err(Error::InvalidShredVariant), + }; + let shard_size = shard.len(); + if Self::capacity(proof_size)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, Self::SIZE_OF_HEADERS); + let mut cursor = Cursor::new(&mut shard[..]); + bincode::serialize_into(&mut cursor, &common_header)?; + bincode::serialize_into(&mut cursor, &coding_header)?; + Ok(Self { + common_header, + coding_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload: shard, + }) + } + + fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let mut cursor = Cursor::new( + self.payload + .get_mut(offset..) + .ok_or(Error::InvalidProofSize(proof_size))?, + ); + bincode::serialize_into(&mut cursor, &merkle_branch.root)?; + for entry in &merkle_branch.proof { + bincode::serialize_into(&mut cursor, entry)?; + } + self.merkle_branch = merkle_branch; + Ok(()) + } } -impl Shred for ShredData { +impl MerkleBranch { + fn new_zeroed(proof_size: u8) -> Self { + Self { + root: MerkleRoot::default(), + proof: vec![MerkleProofEntry::default(); usize::from(proof_size)], + } + } +} + +impl ShredTrait for ShredData { impl_shred_common!(); // Also equal to: @@ -249,7 +406,7 @@ impl Shred for ShredData { } } -impl Shred for ShredCode { +impl ShredTrait for ShredCode { impl_shred_common!(); const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; @@ -391,7 +548,6 @@ fn verify_merkle_proof(index: usize, node: Hash, merkle_branch: &MerkleBranch) - (index, root) == (0usize, &merkle_branch.root[..]) } -#[cfg(test)] fn make_merkle_tree(mut nodes: Vec) -> Vec { let mut size = nodes.len(); while size > 1 { @@ -407,7 +563,6 @@ fn make_merkle_tree(mut nodes: Vec) -> Vec { nodes } -#[cfg(test)] fn make_merkle_branch( mut index: usize, // leaf index ~ shred's erasure shard index. mut size: usize, // number of leaves ~ erasure batch size. @@ -434,9 +589,170 @@ fn make_merkle_branch( Some(MerkleBranch { root, proof }) } +pub(super) fn recover(mut shreds: Vec) -> Result, Error> { + // Grab {common, coding} headers from first coding shred. + let headers = shreds.iter().find_map(|shred| { + let shred = match shred { + Shred::ShredCode(shred) => shred, + Shred::ShredData(_) => return None, + }; + let position = u32::from(shred.coding_header.position); + let common_header = ShredCommonHeader { + index: shred.common_header.index.checked_sub(position)?, + ..shred.common_header + }; + let coding_header = CodingShredHeader { + position: 0u16, + ..shred.coding_header + }; + Some((common_header, coding_header)) + }); + let (common_header, coding_header) = headers.ok_or(TooFewParityShards)?; + debug_assert!(matches!( + common_header.shred_variant, + ShredVariant::MerkleCode(_) + )); + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size) => proof_size, + ShredVariant::MerkleData(_) | ShredVariant::LegacyCode | ShredVariant::LegacyData => { + return Err(Error::InvalidShredVariant); + } + }; + // Verify that shreds belong to the same erasure batch + // and have consistent headers. + debug_assert!(shreds.iter().all(|shred| { + let ShredCommonHeader { + signature, + shred_variant, + slot, + index: _, + version, + fec_set_index, + } = shred.common_header(); + signature == &common_header.signature + && slot == &common_header.slot + && version == &common_header.version + && fec_set_index == &common_header.fec_set_index + && match shred { + Shred::ShredData(_) => shred_variant == &ShredVariant::MerkleData(proof_size), + Shred::ShredCode(shred) => { + let CodingShredHeader { + num_data_shreds, + num_coding_shreds, + position: _, + } = shred.coding_header; + shred_variant == &ShredVariant::MerkleCode(proof_size) + && num_data_shreds == coding_header.num_data_shreds + && num_coding_shreds == coding_header.num_coding_shreds + } + } + })); + let num_data_shreds = usize::from(coding_header.num_data_shreds); + let num_coding_shreds = usize::from(coding_header.num_coding_shreds); + let num_shards = num_data_shreds + num_coding_shreds; + // Obtain erasure encoded shards from shreds. + let shreds = { + let mut batch = vec![None; num_shards]; + while let Some(shred) = shreds.pop() { + let index = match shred.erasure_shard_index() { + Ok(index) if index < batch.len() => index, + _ => return Err(Error::from(InvalidIndex)), + }; + batch[index] = Some(shred); + } + batch + }; + let mut shards: Vec>> = shreds + .iter() + .map(|shred| Some(shred.as_ref()?.erasure_shard_as_slice().ok()?.to_vec())) + .collect(); + ReedSolomon::new(num_data_shreds, num_coding_shreds)?.reconstruct(&mut shards)?; + let mask: Vec<_> = shreds.iter().map(Option::is_some).collect(); + // Reconstruct code and data shreds from erasure encoded shards. + let mut shreds: Vec<_> = shreds + .into_iter() + .zip(shards) + .enumerate() + .map(|(index, (shred, shard))| { + if let Some(shred) = shred { + return Ok(shred); + } + let shard = shard.ok_or(TooFewShards)?; + if index < num_data_shreds { + let shred = ShredData::from_recovered_shard(&common_header.signature, shard)?; + let ShredCommonHeader { + signature: _, + shred_variant, + slot, + index: _, + version, + fec_set_index, + } = shred.common_header; + if shred_variant != ShredVariant::MerkleData(proof_size) + || common_header.slot != slot + || common_header.version != version + || common_header.fec_set_index != fec_set_index + { + return Err(Error::InvalidRecoveredShred); + } + Ok(Shred::ShredData(shred)) + } else { + let offset = index - num_data_shreds; + let coding_header = CodingShredHeader { + position: offset as u16, + ..coding_header + }; + let common_header = ShredCommonHeader { + index: common_header.index + offset as u32, + ..common_header + }; + let shred = ShredCode::from_recovered_shard(common_header, coding_header, shard)?; + Ok(Shred::ShredCode(shred)) + } + }) + .collect::>()?; + // Compute merkle tree and set the merkle branch on the recovered shreds. + let nodes: Vec<_> = shreds + .iter() + .map(Shred::merkle_tree_node) + .collect::>()?; + let tree = make_merkle_tree(nodes); + let merkle_root = &tree.last().unwrap().as_ref()[..SIZE_OF_MERKLE_ROOT]; + let merkle_root = MerkleRoot::try_from(merkle_root).unwrap(); + for (index, (shred, mask)) in shreds.iter_mut().zip(&mask).enumerate() { + if *mask { + if shred.merkle_root() != &merkle_root { + return Err(Error::InvalidMerkleProof); + } + } else { + let merkle_branch = + make_merkle_branch(index, num_shards, &tree).ok_or(Error::InvalidMerkleProof)?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + shred.set_merkle_branch(merkle_branch)?; + } + } + // TODO: No need to verify merkle proof in sanitize here. + shreds + .into_iter() + .zip(mask) + .filter(|(_, mask)| !mask) + .map(|(shred, _)| shred.sanitize().map(|_| shred)) + .collect() +} + #[cfg(test)] mod test { - use {super::*, rand::Rng, std::iter::repeat_with}; + use { + super::*, + itertools::Itertools, + matches::assert_matches, + rand::{seq::SliceRandom, CryptoRng, Rng}, + solana_sdk::signature::{Keypair, Signer}, + std::{cmp::Ordering, iter::repeat_with}, + test_case::test_case, + }; // Total size of a data shred including headers and merkle branch. fn shred_data_size_of_payload(proof_size: u8) -> usize { @@ -525,4 +841,153 @@ mod test { run_merkle_tree_round_trip(size); } } + + #[test_case(37)] + #[test_case(64)] + #[test_case(73)] + fn test_recover_merkle_shreds(num_shreds: usize) { + let mut rng = rand::thread_rng(); + for num_data_shreds in 1..num_shreds { + let num_coding_shreds = num_shreds - num_data_shreds; + run_recover_merkle_shreds(&mut rng, num_data_shreds, num_coding_shreds); + } + } + + fn run_recover_merkle_shreds( + rng: &mut R, + num_data_shreds: usize, + num_coding_shreds: usize, + ) { + let keypair = Keypair::generate(rng); + let num_shreds = num_data_shreds + num_coding_shreds; + let proof_size = (num_shreds as f64).log2().ceil() as u8; + let capacity = ShredData::capacity(proof_size).unwrap(); + let common_header = ShredCommonHeader { + signature: Signature::default(), + shred_variant: ShredVariant::MerkleData(proof_size), + slot: 145865705, + index: 1835, + version: 4978, + fec_set_index: 1835, + }; + let data_header = DataShredHeader { + parent_offset: 25, + flags: unsafe { ShredFlags::from_bits_unchecked(0b0010_1010) }, + size: 0, + }; + let coding_header = CodingShredHeader { + num_data_shreds: num_data_shreds as u16, + num_coding_shreds: num_coding_shreds as u16, + position: 0, + }; + let mut shreds = Vec::with_capacity(num_shreds); + for i in 0..num_data_shreds { + let common_header = ShredCommonHeader { + index: common_header.index + i as u32, + ..common_header + }; + let size = ShredData::SIZE_OF_HEADERS + rng.gen_range(0, capacity); + let data_header = DataShredHeader { + size: size as u16, + ..data_header + }; + let mut payload = vec![0u8; ShredData::SIZE_OF_PAYLOAD]; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &data_header).unwrap(); + rng.fill(&mut payload[ShredData::SIZE_OF_HEADERS..size]); + let shred = ShredData { + common_header, + data_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload, + }; + shreds.push(Shred::ShredData(shred)); + } + let data: Vec<_> = shreds + .iter() + .map(Shred::erasure_shard_as_slice) + .collect::>() + .unwrap(); + let mut parity = vec![vec![0u8; data[0].len()]; num_coding_shreds]; + ReedSolomon::new(num_data_shreds, num_coding_shreds) + .unwrap() + .encode_sep(&data, &mut parity[..]) + .unwrap(); + for (i, code) in parity.into_iter().enumerate() { + let common_header = ShredCommonHeader { + shred_variant: ShredVariant::MerkleCode(proof_size), + index: common_header.index + i as u32 + 7, + ..common_header + }; + let coding_header = CodingShredHeader { + position: i as u16, + ..coding_header + }; + let mut payload = vec![0u8; ShredCode::SIZE_OF_PAYLOAD]; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &coding_header).unwrap(); + payload[ShredCode::SIZE_OF_HEADERS..ShredCode::SIZE_OF_HEADERS + code.len()] + .copy_from_slice(&code); + let shred = ShredCode { + common_header, + coding_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload, + }; + shreds.push(Shred::ShredCode(shred)); + } + let nodes: Vec<_> = shreds + .iter() + .map(Shred::merkle_tree_node) + .collect::>() + .unwrap(); + let tree = make_merkle_tree(nodes); + for (index, shred) in shreds.iter_mut().enumerate() { + let merkle_branch = make_merkle_branch(index, num_shreds, &tree).unwrap(); + assert_eq!(merkle_branch.proof.len(), usize::from(proof_size)); + shred.set_merkle_branch(merkle_branch).unwrap(); + let signature = keypair.sign_message(shred.signed_message()); + shred.set_signature(signature); + assert!(shred.verify(&keypair.pubkey())); + assert_matches!(shred.sanitize(), Ok(())); + } + assert_eq!(shreds.iter().map(Shred::signature).dedup().count(), 1); + for size in num_data_shreds..num_shreds { + let mut shreds = shreds.clone(); + let mut removed_shreds = Vec::new(); + while shreds.len() > size { + let index = rng.gen_range(0, shreds.len()); + removed_shreds.push(shreds.swap_remove(index)); + } + shreds.shuffle(rng); + // Should at least contain one coding shred. + if shreds.iter().all(|shred| { + matches!( + shred.common_header().shred_variant, + ShredVariant::MerkleData(_) + ) + }) { + assert_matches!( + recover(shreds), + Err(Error::ErasureError(TooFewParityShards)) + ); + continue; + } + let recovered_shreds = recover(shreds).unwrap(); + assert_eq!(size + recovered_shreds.len(), num_shreds); + assert_eq!(recovered_shreds.len(), removed_shreds.len()); + removed_shreds.sort_by(|a, b| { + if a.shred_type() == b.shred_type() { + a.index().cmp(&b.index()) + } else if a.shred_type() == ShredType::Data { + Ordering::Less + } else { + Ordering::Greater + } + }); + assert_eq!(recovered_shreds, removed_shreds); + } + } } diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index d3a50cb82dc1ca..671cc0b7c44c47 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -33,7 +33,7 @@ const ERASURE_BATCH_SIZE: [usize; 33] = [ 55, 56, 58, 59, 60, 62, 63, 64, // 32 ]; -type ReedSolomon = reed_solomon_erasure::ReedSolomon; +pub(crate) type ReedSolomon = reed_solomon_erasure::ReedSolomon; #[derive(Debug)] pub struct Shredder { From 510d195620edded54e8a8289f5b1c74fcf8a24f0 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 19 Aug 2022 18:15:04 -0400 Subject: [PATCH 51/51] Simplify `Bank::clean_accounts()` by removing params (#27254) --- runtime/src/accounts_background_service.rs | 8 ++----- runtime/src/bank.rs | 24 ++++++++++++--------- runtime/src/snapshot_utils.rs | 6 +++--- runtime/src/system_instruction_processor.rs | 5 +++-- 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index c38203ab821e96..8d21fed9c7c939 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -254,11 +254,7 @@ impl SnapshotRequestHandler { }; let mut clean_time = Measure::start("clean_time"); - // Don't clean the slot we're snapshotting because it may have zero-lamport - // accounts that were included in the bank delta hash when the bank was frozen, - // and if we clean them here, the newly created snapshot's hash may not match - // the frozen hash. - snapshot_root_bank.clean_accounts(true, false, *last_full_snapshot_slot); + snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); clean_time.stop(); if accounts_db_caching_enabled { @@ -564,7 +560,7 @@ impl AccountsBackgroundService { // slots >= bank.slot() bank.force_flush_accounts_cache(); } - bank.clean_accounts(true, false, last_full_snapshot_slot); + bank.clean_accounts(last_full_snapshot_slot); last_cleaned_block_height = bank.block_height(); } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7e5aeef16465cd..a6bbb8044b26ef 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7159,7 +7159,7 @@ impl Bank { let mut clean_time = Measure::start("clean"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("cleaning.."); - self.clean_accounts(true, true, Some(last_full_snapshot_slot)); + self._clean_accounts(true, true, Some(last_full_snapshot_slot)); } clean_time.stop(); @@ -7443,12 +7443,7 @@ impl Bank { debug!("Added precompiled program {:?}", program_id); } - pub fn clean_accounts( - &self, - skip_last: bool, - is_startup: bool, - last_full_snapshot_slot: Option, - ) { + pub(crate) fn clean_accounts(&self, last_full_snapshot_slot: Option) { // Don't clean the slot we're snapshotting because it may have zero-lamport // accounts that were included in the bank delta hash when the bank was frozen, // and if we clean them here, any newly created snapshot's hash for this bank @@ -7456,10 +7451,19 @@ impl Bank { // // So when we're snapshotting, set `skip_last` to true so the highest slot to clean is // lowered by one. - let highest_slot_to_clean = skip_last.then(|| self.slot().saturating_sub(1)); + self._clean_accounts(true, false, last_full_snapshot_slot) + } + + fn _clean_accounts( + &self, + skip_last: bool, + is_startup: bool, + last_full_snapshot_slot: Option, + ) { + let max_clean_root = skip_last.then(|| self.slot().saturating_sub(1)); self.rc.accounts.accounts_db.clean_accounts( - highest_slot_to_clean, + max_clean_root, is_startup, last_full_snapshot_slot, ); @@ -16260,7 +16264,7 @@ pub(crate) mod tests { current_bank.squash(); if current_bank.slot() % 2 == 0 { current_bank.force_flush_accounts_cache(); - current_bank.clean_accounts(true, false, None); + current_bank.clean_accounts(None); } prev_bank = current_bank.clone(); current_bank = Arc::new(Bank::new_from_parent( diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 93cdbc0f33fc0c..4717bb1ab4f356 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2078,7 +2078,7 @@ pub fn bank_to_full_snapshot_archive( assert!(bank.is_complete()); bank.squash(); // Bank may not be a root bank.force_flush_accounts_cache(); - bank.clean_accounts(true, false, Some(bank.slot())); + bank.clean_accounts(Some(bank.slot())); bank.update_accounts_hash(); bank.rehash(); // Bank accounts may have been manually modified by the caller @@ -2125,7 +2125,7 @@ pub fn bank_to_incremental_snapshot_archive( assert!(bank.slot() > full_snapshot_slot); bank.squash(); // Bank may not be a root bank.force_flush_accounts_cache(); - bank.clean_accounts(true, false, Some(full_snapshot_slot)); + bank.clean_accounts(Some(full_snapshot_slot)); bank.update_accounts_hash(); bank.rehash(); // Bank accounts may have been manually modified by the caller @@ -3771,7 +3771,7 @@ mod tests { // Ensure account1 has been cleaned/purged from everywhere bank4.squash(); - bank4.clean_accounts(true, false, Some(full_snapshot_slot)); + bank4.clean_accounts(Some(full_snapshot_slot)); assert!( bank4.get_account_modified_slot(&key1.pubkey()).is_none(), "Ensure Account1 has been cleaned and purged from AccountsDb" diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 67f1f931147cef..c0b588c25c42c4 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1626,6 +1626,7 @@ mod tests { .unwrap(); // super fun time; callback chooses to .clean_accounts(None) or not + let bank = Arc::new(Bank::new_from_parent(&bank, &collector, bank.slot() + 1)); callback(&*bank); // create a normal account at the same pubkey as the zero-lamports account @@ -1651,9 +1652,9 @@ mod tests { bank.squash(); bank.force_flush_accounts_cache(); // do clean and assert that it actually did its job + assert_eq!(4, bank.get_snapshot_storages(None).len()); + bank.clean_accounts(None); assert_eq!(3, bank.get_snapshot_storages(None).len()); - bank.clean_accounts(false, false, None); - assert_eq!(2, bank.get_snapshot_storages(None).len()); }); }