From c071cf5cd0808a61cd25c4f7e892e695a523de9b Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Sat, 20 Jan 2024 11:20:51 +0900 Subject: [PATCH 001/401] [clap-v3-utils] Replace `pubkeys_sigs_of` with `try_pubkeys_sigs_of` (#34801) * replace `pubkeys_sigs_of` with `try_pubkeys_sigs_of` * propagate error from `try_pubkeys_sigs_of` to the caller --- clap-v3-utils/src/keypair.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clap-v3-utils/src/keypair.rs b/clap-v3-utils/src/keypair.rs index 98baec3b3ef0b3..8adbfff3631f8b 100644 --- a/clap-v3-utils/src/keypair.rs +++ b/clap-v3-utils/src/keypair.rs @@ -11,7 +11,7 @@ use { crate::{ - input_parsers::{pubkeys_sigs_of, STDOUT_OUTFILE_TOKEN}, + input_parsers::{signer::try_pubkeys_sigs_of, STDOUT_OUTFILE_TOKEN}, offline::{SIGNER_ARG, SIGN_ONLY_ARG}, ArgConstant, }, @@ -807,7 +807,7 @@ pub fn signer_from_path_with_config( } } SignerSourceKind::Pubkey(pubkey) => { - let presigner = pubkeys_sigs_of(matches, SIGNER_ARG.name) + let presigner = try_pubkeys_sigs_of(matches, SIGNER_ARG.name)? .as_ref() .and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners)); if let Some(presigner) = presigner { From 9a520fd5b42bafefa4815afe3e5390b4ea7482ca Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sat, 20 Jan 2024 16:08:16 +0000 Subject: [PATCH 002/401] adds chained merkle shreds variant (#34787) With the new chained variants, each Merkle shred will also embed the Merkle root of the previous erasure batch. --- ledger/src/shred.rs | 182 +++++++++---- ledger/src/shred/common.rs | 4 +- ledger/src/shred/merkle.rs | 462 +++++++++++++++++++++++---------- ledger/src/shred/shred_data.rs | 12 +- ledger/src/shred/stats.rs | 12 + ledger/src/shredder.rs | 1 + ledger/src/sigverify_shreds.rs | 5 +- 7 files changed, 477 insertions(+), 201 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 1ce6c7ccc164cb..bed8965073429a 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -49,6 +49,7 @@ //! So, given a) - c), we must restrict data shred's payload length such that the entire coding //! payload can fit into one coding shred / packet. +pub(crate) use self::merkle::SIZE_OF_MERKLE_ROOT; #[cfg(test)] pub(crate) use self::shred_code::MAX_CODE_SHREDS_PER_SLOT; use { @@ -192,9 +193,15 @@ pub enum ShredType { enum ShredVariant { LegacyCode, // 0b0101_1010 LegacyData, // 0b1010_0101 - // proof_size is the number of merkle proof entries. - MerkleCode(/*proof_size:*/ u8), // 0b0100_???? - MerkleData(/*proof_size:*/ u8), // 0b1000_???? + // proof_size is the number of Merkle proof entries, and is encoded in the + // lowest 4 bits of the binary representation. The first 4 bits identify + // the shred variant: + // 0b0100_???? MerkleCode + // 0b0110_???? MerkleCode chained + // 0b1000_???? MerkleData + // 0b1001_???? MerkleData chained + MerkleCode(/*proof_size:*/ u8, /*chained:*/ bool), // 0b01?0_???? + MerkleData(/*proof_size:*/ u8, /*chained:*/ bool), // 0b100?_???? } /// A common header that is present in data and code shred headers @@ -383,11 +390,11 @@ impl Shred { let shred = legacy::ShredData::from_payload(shred)?; Self::from(ShredData::from(shred)) } - ShredVariant::MerkleCode(_) => { + ShredVariant::MerkleCode(..) => { let shred = merkle::ShredCode::from_payload(shred)?; Self::from(ShredCode::from(shred)) } - ShredVariant::MerkleData(_) => { + ShredVariant::MerkleData(..) 
=> { let shred = merkle::ShredData::from_payload(shred)?; Self::from(ShredData::from(shred)) } @@ -646,12 +653,14 @@ pub mod layout { let chunk = shred.get(self::legacy::SIGNED_MESSAGE_OFFSETS)?; SignedData::Chunk(chunk) } - ShredVariant::MerkleCode(proof_size) => { - let merkle_root = self::merkle::ShredCode::get_merkle_root(shred, proof_size)?; + ShredVariant::MerkleCode(proof_size, chained) => { + let merkle_root = + self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained)?; SignedData::MerkleRoot(merkle_root) } - ShredVariant::MerkleData(proof_size) => { - let merkle_root = self::merkle::ShredData::get_merkle_root(shred, proof_size)?; + ShredVariant::MerkleData(proof_size, chained) => { + let merkle_root = + self::merkle::ShredData::get_merkle_root(shred, proof_size, chained)?; SignedData::MerkleRoot(merkle_root) } }; @@ -668,8 +677,8 @@ pub mod layout { // Merkle shreds sign merkle tree root which can be recovered from // the merkle proof embedded in the payload but itself is not // stored the payload. - ShredVariant::MerkleCode(_) => None, - ShredVariant::MerkleData(_) => None, + ShredVariant::MerkleCode(..) => None, + ShredVariant::MerkleData(..) => None, } } @@ -686,11 +695,11 @@ pub mod layout { pub(crate) fn get_merkle_root(shred: &[u8]) -> Option { match get_shred_variant(shred).ok()? { ShredVariant::LegacyCode | ShredVariant::LegacyData => None, - ShredVariant::MerkleCode(proof_size) => { - merkle::ShredCode::get_merkle_root(shred, proof_size) + ShredVariant::MerkleCode(proof_size, chained) => { + merkle::ShredCode::get_merkle_root(shred, proof_size, chained) } - ShredVariant::MerkleData(proof_size) => { - merkle::ShredData::get_merkle_root(shred, proof_size) + ShredVariant::MerkleData(proof_size, chained) => { + merkle::ShredData::get_merkle_root(shred, proof_size, chained) } } } @@ -710,7 +719,7 @@ pub mod layout { let shred = get_shred(packet).unwrap(); let merkle_proof_size = match get_shred_variant(shred).unwrap() { ShredVariant::LegacyCode | ShredVariant::LegacyData => None, - ShredVariant::MerkleCode(proof_size) | ShredVariant::MerkleData(proof_size) => { + ShredVariant::MerkleCode(proof_size, _) | ShredVariant::MerkleData(proof_size, _) => { Some(proof_size) } }; @@ -793,8 +802,8 @@ impl From for ShredType { match shred_variant { ShredVariant::LegacyCode => ShredType::Code, ShredVariant::LegacyData => ShredType::Data, - ShredVariant::MerkleCode(_) => ShredType::Code, - ShredVariant::MerkleData(_) => ShredType::Data, + ShredVariant::MerkleCode(..) => ShredType::Code, + ShredVariant::MerkleData(..) 
=> ShredType::Data, } } } @@ -804,8 +813,10 @@ impl From for u8 { match shred_variant { ShredVariant::LegacyCode => u8::from(ShredType::Code), ShredVariant::LegacyData => u8::from(ShredType::Data), - ShredVariant::MerkleCode(proof_size) => proof_size | 0x40, - ShredVariant::MerkleData(proof_size) => proof_size | 0x80, + ShredVariant::MerkleCode(proof_size, false) => proof_size | 0x40, + ShredVariant::MerkleCode(proof_size, true) => proof_size | 0x60, + ShredVariant::MerkleData(proof_size, false) => proof_size | 0x80, + ShredVariant::MerkleData(proof_size, true) => proof_size | 0x90, } } } @@ -818,9 +829,16 @@ impl TryFrom for ShredVariant { } else if shred_variant == u8::from(ShredType::Data) { Ok(ShredVariant::LegacyData) } else { + let proof_size = shred_variant & 0x0F; match shred_variant & 0xF0 { - 0x40 => Ok(ShredVariant::MerkleCode(shred_variant & 0x0F)), - 0x80 => Ok(ShredVariant::MerkleData(shred_variant & 0x0F)), + 0x40 => Ok(ShredVariant::MerkleCode( + proof_size, /*chained:*/ false, + )), + 0x60 => Ok(ShredVariant::MerkleCode(proof_size, /*chained:*/ true)), + 0x80 => Ok(ShredVariant::MerkleData( + proof_size, /*chained:*/ false, + )), + 0x90 => Ok(ShredVariant::MerkleData(proof_size, /*chained:*/ true)), _ => Err(Error::InvalidShredVariant), } } @@ -840,7 +858,7 @@ pub(crate) fn recover( ShredVariant::LegacyData | ShredVariant::LegacyCode => { Shredder::try_recovery(shreds, reed_solomon_cache) } - ShredVariant::MerkleCode(_) | ShredVariant::MerkleData(_) => { + ShredVariant::MerkleCode(..) | ShredVariant::MerkleData(..) => { let shreds = shreds .into_iter() .map(merkle::Shred::try_from) @@ -863,6 +881,7 @@ pub(crate) fn make_merkle_shreds_from_entries( shred_version: u16, reference_tick: u8, is_last_in_slot: bool, + chained_merkle_root: Option, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -874,6 +893,7 @@ pub(crate) fn make_merkle_shreds_from_entries( let shreds = merkle::make_shreds_from_data( thread_pool, keypair, + chained_merkle_root, &entries[..], slot, parent_slot, @@ -975,12 +995,20 @@ pub fn should_discard_shred( return true; } } - ShredVariant::MerkleCode(_) => { + ShredVariant::MerkleCode(_, /*chained:*/ false) => { stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1); } - ShredVariant::MerkleData(_) => { + ShredVariant::MerkleCode(_, /*chained:*/ true) => { + stats.num_shreds_merkle_code_chained = + stats.num_shreds_merkle_code_chained.saturating_add(1); + } + ShredVariant::MerkleData(_, /*chained:*/ false) => { stats.num_shreds_merkle_data = stats.num_shreds_merkle_data.saturating_add(1); } + ShredVariant::MerkleData(_, /*chained:*/ true) => { + stats.num_shreds_merkle_data_chained = + stats.num_shreds_merkle_data_chained.saturating_add(1); + } } false } @@ -996,8 +1024,8 @@ pub fn max_entries_per_n_shred( shred_data_size: Option, ) -> u64 { // Default 32:32 erasure batches yields 64 shreds; log2(64) = 6. 
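    // Illustrative arithmetic for the constant below, assuming the sizes
    // asserted in merkle.rs: a 64-shred erasure batch yields a 64-leaf
    // Merkle tree, so each shred's inclusion proof carries log2(64) = 6
    // entries, costing 6 * SIZE_OF_MERKLE_PROOF_ENTRY = 6 * 20 = 120
    // payload bytes per shred.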
- let merkle_proof_size = Some(6); - let data_buffer_size = ShredData::capacity(merkle_proof_size).unwrap(); + let merkle_variant = Some((/*proof_size:*/ 6, /*chained:*/ false)); + let data_buffer_size = ShredData::capacity(merkle_variant).unwrap(); let shred_data_size = shred_data_size.unwrap_or(data_buffer_size) as u64; let vec_size = bincode::serialized_size(&vec![entry]).unwrap(); let entry_size = bincode::serialized_size(entry).unwrap(); @@ -1040,6 +1068,7 @@ mod tests { super::*, assert_matches::assert_matches, bincode::serialized_size, + itertools::iproduct, rand::Rng, rand_chacha::{rand_core::SeedableRng, ChaChaRng}, solana_sdk::{shred_version, signature::Signer, signer::keypair::keypair_from_seed}, @@ -1097,7 +1126,8 @@ mod tests { ); assert_eq!( SIZE_OF_SHRED_VARIANT, - bincode::serialized_size(&ShredVariant::MerkleCode(15)).unwrap() as usize + bincode::serialized_size(&ShredVariant::MerkleCode(15, /*chained:*/ true)).unwrap() + as usize ); assert_eq!( SIZE_OF_SHRED_SLOT, @@ -1389,71 +1419,115 @@ mod tests { Ok(ShredVariant::LegacyData) ); // Merkle coding shred. - assert_eq!(u8::from(ShredVariant::MerkleCode(5)), 0b0100_0101); assert_eq!( - ShredType::from(ShredVariant::MerkleCode(5)), - ShredType::Code + u8::from(ShredVariant::MerkleCode(5, /*chained:*/ false)), + 0b0100_0101 + ); + assert_eq!( + u8::from(ShredVariant::MerkleCode(5, /*chained:*/ true)), + 0b0110_0101 ); + for chained in [false, true] { + assert_eq!( + ShredType::from(ShredVariant::MerkleCode(5, chained)), + ShredType::Code + ); + } assert_matches!( ShredVariant::try_from(0b0100_0101), - Ok(ShredVariant::MerkleCode(5)) + Ok(ShredVariant::MerkleCode(5, /*chained:*/ false)) + ); + assert_matches!( + ShredVariant::try_from(0b0110_0101), + Ok(ShredVariant::MerkleCode(5, /*chained:*/ true)) ); - let buf = bincode::serialize(&ShredVariant::MerkleCode(5)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleCode(5, /*chained:*/ false)).unwrap(); assert_eq!(buf, vec![0b0100_0101]); assert_matches!( bincode::deserialize::(&[0b0100_0101]), - Ok(ShredVariant::MerkleCode(5)) + Ok(ShredVariant::MerkleCode(5, /*chained:*/ false)) ); - for proof_size in 0..=15u8 { - let byte = proof_size | 0b0100_0000; - assert_eq!(u8::from(ShredVariant::MerkleCode(proof_size)), byte); + let buf = bincode::serialize(&ShredVariant::MerkleCode(5, /*chained:*/ true)).unwrap(); + assert_eq!(buf, vec![0b0110_0101]); + assert_matches!( + bincode::deserialize::(&[0b0110_0101]), + Ok(ShredVariant::MerkleCode(5, /*chained:*/ true)) + ); + for (proof_size, chained) in iproduct!(0..=15u8, [false, true]) { + let byte = proof_size | if chained { 0b0110_0000 } else { 0b0100_0000 }; + assert_eq!( + u8::from(ShredVariant::MerkleCode(proof_size, chained)), + byte + ); assert_eq!( - ShredType::from(ShredVariant::MerkleCode(proof_size)), + ShredType::from(ShredVariant::MerkleCode(proof_size, chained)), ShredType::Code ); assert_eq!( ShredVariant::try_from(byte).unwrap(), - ShredVariant::MerkleCode(proof_size) + ShredVariant::MerkleCode(proof_size, chained) ); - let buf = bincode::serialize(&ShredVariant::MerkleCode(proof_size)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleCode(proof_size, chained)).unwrap(); assert_eq!(buf, vec![byte]); assert_eq!( bincode::deserialize::(&[byte]).unwrap(), - ShredVariant::MerkleCode(proof_size) + ShredVariant::MerkleCode(proof_size, chained) ); } // Merkle data shred. 
- assert_eq!(u8::from(ShredVariant::MerkleData(10)), 0b1000_1010); assert_eq!( - ShredType::from(ShredVariant::MerkleData(10)), - ShredType::Data + u8::from(ShredVariant::MerkleData(10, /*chained:*/ false)), + 0b1000_1010 + ); + assert_eq!( + u8::from(ShredVariant::MerkleData(10, /*chained:*/ true)), + 0b1001_1010 ); + for chained in [false, true] { + assert_eq!( + ShredType::from(ShredVariant::MerkleData(10, chained)), + ShredType::Data + ); + } assert_matches!( ShredVariant::try_from(0b1000_1010), - Ok(ShredVariant::MerkleData(10)) + Ok(ShredVariant::MerkleData(10, /*chained:*/ false)) ); - let buf = bincode::serialize(&ShredVariant::MerkleData(10)).unwrap(); + assert_matches!( + ShredVariant::try_from(0b1001_1010), + Ok(ShredVariant::MerkleData(10, /*chained:*/ true)) + ); + let buf = bincode::serialize(&ShredVariant::MerkleData(10, /*chained:*/ false)).unwrap(); assert_eq!(buf, vec![0b1000_1010]); assert_matches!( bincode::deserialize::(&[0b1000_1010]), - Ok(ShredVariant::MerkleData(10)) + Ok(ShredVariant::MerkleData(10, /*chained:*/ false)) + ); + let buf = bincode::serialize(&ShredVariant::MerkleData(10, /*chained:*/ true)).unwrap(); + assert_eq!(buf, vec![0b1001_1010]); + assert_matches!( + bincode::deserialize::(&[0b1001_1010]), + Ok(ShredVariant::MerkleData(10, /*chained:*/ true)) ); - for proof_size in 0..=15u8 { - let byte = proof_size | 0b1000_0000; - assert_eq!(u8::from(ShredVariant::MerkleData(proof_size)), byte); + for (proof_size, chained) in iproduct!(0..=15u8, [false, true]) { + let byte = proof_size | if chained { 0b1001_0000 } else { 0b1000_0000 }; + assert_eq!( + u8::from(ShredVariant::MerkleData(proof_size, chained)), + byte + ); assert_eq!( - ShredType::from(ShredVariant::MerkleData(proof_size)), + ShredType::from(ShredVariant::MerkleData(proof_size, chained)), ShredType::Data ); assert_eq!( ShredVariant::try_from(byte).unwrap(), - ShredVariant::MerkleData(proof_size) + ShredVariant::MerkleData(proof_size, chained) ); - let buf = bincode::serialize(&ShredVariant::MerkleData(proof_size)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleData(proof_size, chained)).unwrap(); assert_eq!(buf, vec![byte]); assert_eq!( bincode::deserialize::(&[byte]).unwrap(), - ShredVariant::MerkleData(proof_size) + ShredVariant::MerkleData(proof_size, chained) ); } } diff --git a/ledger/src/shred/common.rs b/ledger/src/shred/common.rs index 330e4a8da9ff6d..64b4c775469a24 100644 --- a/ledger/src/shred/common.rs +++ b/ledger/src/shred/common.rs @@ -56,7 +56,7 @@ macro_rules! impl_shred_common { self.common_header.index = index; bincode::serialize_into(&mut self.payload[..], &self.common_header).unwrap(); } - ShredVariant::MerkleCode(_) | ShredVariant::MerkleData(_) => { + ShredVariant::MerkleCode(..) | ShredVariant::MerkleData(..) => { panic!("Not Implemented!"); } } @@ -69,7 +69,7 @@ macro_rules! impl_shred_common { self.common_header.slot = slot; bincode::serialize_into(&mut self.payload[..], &self.common_header).unwrap(); } - ShredVariant::MerkleCode(_) | ShredVariant::MerkleData(_) => { + ShredVariant::MerkleCode(..) | ShredVariant::MerkleData(..) 
=> { panic!("Not Implemented!"); } } diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index f6fe87d7ef84db..ebc4a711b8c774 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -35,6 +35,8 @@ use { }, }; +const_assert_eq!(SIZE_OF_MERKLE_ROOT, 32); +pub(crate) const SIZE_OF_MERKLE_ROOT: usize = std::mem::size_of::(); const_assert_eq!(SIZE_OF_MERKLE_PROOF_ENTRY, 20); const SIZE_OF_MERKLE_PROOF_ENTRY: usize = std::mem::size_of::(); const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1203); @@ -48,10 +50,12 @@ const MERKLE_HASH_PREFIX_NODE: &[u8] = b"\x01SOLANA_MERKLE_SHREDS_NODE"; type MerkleProofEntry = [u8; 20]; -// Layout: {common, data} headers | data buffer | merkle proof -// The slice past signature and before the merkle proof is erasure coded. -// Same slice is hashed to generate merkle tree. -// The root of merkle tree is signed. +// Layout: {common, data} headers | data buffer +// | [Merkle root of the previous erasure batch if chained] +// | Merkle proof +// The slice past signature till the end of the data buffer is erasure coded. +// The slice past signature and before the merkle proof is hashed to generate +// the Merkle tree. The root of the Merkle tree is signed. #[derive(Clone, Debug, Eq, PartialEq)] pub struct ShredData { common_header: ShredCommonHeader, @@ -59,9 +63,11 @@ pub struct ShredData { payload: Vec, } -// Layout: {common, coding} headers | erasure coded shard | merkle proof +// Layout: {common, coding} headers | erasure coded shard +// | [Merkle root of the previous erasure batch if chained] +// | Merkle proof // The slice past signature and before the merkle proof is hashed to generate -// merkle tree. The root of merkle tree is signed. +// the Merkle tree. The root of the Merkle tree is signed. #[derive(Clone, Debug, Eq, PartialEq)] pub struct ShredCode { common_header: ShredCommonHeader, @@ -108,8 +114,8 @@ impl Shred { fn from_payload(shred: Vec) -> Result { match shred::layout::get_shred_variant(&shred)? { ShredVariant::LegacyCode | ShredVariant::LegacyData => Err(Error::InvalidShredVariant), - ShredVariant::MerkleCode(_) => Ok(Self::ShredCode(ShredCode::from_payload(shred)?)), - ShredVariant::MerkleData(_) => Ok(Self::ShredData(ShredData::from_payload(shred)?)), + ShredVariant::MerkleCode(..) => Ok(Self::ShredCode(ShredCode::from_payload(shred)?)), + ShredVariant::MerkleData(..) => Ok(Self::ShredData(ShredData::from_payload(shred)?)), } } } @@ -117,6 +123,7 @@ impl Shred { #[cfg(test)] impl Shred { dispatch!(fn merkle_root(&self) -> Result); + dispatch!(fn proof_size(&self) -> Result); fn index(&self) -> u32 { self.common_header().index @@ -131,7 +138,7 @@ impl ShredData { // proof_size is the number of merkle proof entries. 
fn proof_size(&self) -> Result { match self.common_header.shred_variant { - ShredVariant::MerkleData(proof_size) => Ok(proof_size), + ShredVariant::MerkleData(proof_size, _) => Ok(proof_size), _ => Err(Error::InvalidShredVariant), } } @@ -141,24 +148,46 @@ impl ShredData { // ShredCode::capacity(proof_size).unwrap() // - ShredData::SIZE_OF_HEADERS // + SIZE_OF_SIGNATURE - pub(super) fn capacity(proof_size: u8) -> Result { + pub(super) fn capacity(proof_size: u8, chained: bool) -> Result { Self::SIZE_OF_PAYLOAD .checked_sub( - Self::SIZE_OF_HEADERS + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY, + Self::SIZE_OF_HEADERS + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY, ) .ok_or(Error::InvalidProofSize(proof_size)) } // Where the merkle proof starts in the shred binary. fn proof_offset(&self) -> Result { - let ShredVariant::MerkleData(proof_size) = self.common_header.shred_variant else { + let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + Self::get_proof_offset(proof_size, chained) + } + + fn get_proof_offset(proof_size: u8, chained: bool) -> Result { + Ok(Self::SIZE_OF_HEADERS + + Self::capacity(proof_size, chained)? + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }) + } + + fn chained_merkle_root_offset(&self) -> Result { + let ShredVariant::MerkleData(proof_size, /*chained:*/ true) = + self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; - Self::get_proof_offset(proof_size) + Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?) } - fn get_proof_offset(proof_size: u8) -> Result { - Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?) + fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { + let offset = self.chained_merkle_root_offset()?; + let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { + return Err(Error::InvalidPayloadSize(self.payload.len())); + }; + buffer.copy_from_slice(chained_merkle_root.as_ref()); + Ok(()) } pub(super) fn merkle_root(&self) -> Result { @@ -181,7 +210,11 @@ impl ShredData { get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset) } - fn from_recovered_shard(signature: &Signature, mut shard: Vec) -> Result { + fn from_recovered_shard( + signature: &Signature, + chained_merkle_root: &Option, + mut shard: Vec, + ) -> Result { let shard_size = shard.len(); if shard_size + SIZE_OF_SIGNATURE > Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidShardSize(shard_size)); @@ -192,18 +225,21 @@ impl ShredData { // Deserialize headers. let mut cursor = Cursor::new(&shard[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - let ShredVariant::MerkleData(proof_size) = common_header.shred_variant else { + let ShredVariant::MerkleData(proof_size, chained) = common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - if ShredCode::capacity(proof_size)? != shard_size { + if ShredCode::capacity(proof_size, chained)? 
!= shard_size { return Err(Error::InvalidShardSize(shard_size)); } let data_header = deserialize_from_with_limit(&mut cursor)?; - let shred = Self { + let mut shred = Self { common_header, data_header, payload: shard, }; + if let Some(chained_merkle_root) = chained_merkle_root { + shred.set_chained_merkle_root(chained_merkle_root)?; + } shred.sanitize()?; Ok(shred) } @@ -225,10 +261,10 @@ impl ShredData { Ok(()) } - pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8) -> Option { + pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), - ShredVariant::MerkleData(proof_size) + ShredVariant::MerkleData(proof_size, chained) ); // Shred index in the erasure batch. let index = { @@ -240,7 +276,7 @@ impl ShredData { .map(usize::try_from)? .ok()? }; - let proof_offset = Self::get_proof_offset(proof_size).ok()?; + let proof_offset = Self::get_proof_offset(proof_size, chained).ok()?; let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; get_merkle_root(index, node, proof).ok() @@ -251,32 +287,62 @@ impl ShredCode { // proof_size is the number of merkle proof entries. fn proof_size(&self) -> Result { match self.common_header.shred_variant { - ShredVariant::MerkleCode(proof_size) => Ok(proof_size), + ShredVariant::MerkleCode(proof_size, _) => Ok(proof_size), _ => Err(Error::InvalidShredVariant), } } // Size of buffer embedding erasure codes. - fn capacity(proof_size: u8) -> Result { + fn capacity(proof_size: u8, chained: bool) -> Result { // Merkle proof is generated and signed after coding shreds are // generated. Coding shred headers cannot be erasure coded either. Self::SIZE_OF_PAYLOAD .checked_sub( - Self::SIZE_OF_HEADERS + SIZE_OF_MERKLE_PROOF_ENTRY * usize::from(proof_size), + Self::SIZE_OF_HEADERS + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY, ) .ok_or(Error::InvalidProofSize(proof_size)) } // Where the merkle proof starts in the shred binary. fn proof_offset(&self) -> Result { - let ShredVariant::MerkleCode(proof_size) = self.common_header.shred_variant else { + let ShredVariant::MerkleCode(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + Self::get_proof_offset(proof_size, chained) + } + + fn get_proof_offset(proof_size: u8, chained: bool) -> Result { + Ok(Self::SIZE_OF_HEADERS + + Self::capacity(proof_size, chained)? + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }) + } + + fn chained_merkle_root_offset(&self) -> Result { + let ShredVariant::MerkleCode(proof_size, /*chained:*/ true) = + self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; - Self::get_proof_offset(proof_size) + Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?) + } + + fn chained_merkle_root(&self) -> Result { + let offset = self.chained_merkle_root_offset()?; + self.payload + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .map(Hash::new) + .ok_or(Error::InvalidPayloadSize(self.payload.len())) } - fn get_proof_offset(proof_size: u8) -> Result { - Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?) 
+ fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { + let offset = self.chained_merkle_root_offset()?; + let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { + return Err(Error::InvalidPayloadSize(self.payload.len())); + }; + buffer.copy_from_slice(chained_merkle_root.as_ref()); + Ok(()) } pub(super) fn merkle_root(&self) -> Result { @@ -302,13 +368,14 @@ impl ShredCode { fn from_recovered_shard( common_header: ShredCommonHeader, coding_header: CodingShredHeader, + chained_merkle_root: &Option, mut shard: Vec, ) -> Result { - let ShredVariant::MerkleCode(proof_size) = common_header.shred_variant else { + let ShredVariant::MerkleCode(proof_size, chained) = common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; let shard_size = shard.len(); - if Self::capacity(proof_size)? != shard_size { + if Self::capacity(proof_size, chained)? != shard_size { return Err(Error::InvalidShardSize(shard_size)); } if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD { @@ -319,11 +386,14 @@ impl ShredCode { let mut cursor = Cursor::new(&mut shard[..]); bincode::serialize_into(&mut cursor, &common_header)?; bincode::serialize_into(&mut cursor, &coding_header)?; - let shred = Self { + let mut shred = Self { common_header, coding_header, payload: shard, }; + if let Some(chained_merkle_root) = chained_merkle_root { + shred.set_chained_merkle_root(chained_merkle_root)?; + } shred.sanitize()?; Ok(shred) } @@ -345,10 +415,10 @@ impl ShredCode { Ok(()) } - pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8) -> Option { + pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), - ShredVariant::MerkleCode(proof_size) + ShredVariant::MerkleCode(proof_size, chained) ); // Shred index in the erasure batch. let index = { @@ -362,7 +432,7 @@ impl ShredCode { .ok()?; num_data_shreds.checked_add(position)? 
}; - let proof_offset = Self::get_proof_offset(proof_size).ok()?; + let proof_offset = Self::get_proof_offset(proof_size, chained).ok()?; let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; get_merkle_root(index, node, proof).ok() @@ -376,7 +446,8 @@ impl<'a> ShredTrait<'a> for ShredData { // Also equal to: // ShredData::SIZE_OF_HEADERS - // + ShredData::capacity(proof_size).unwrap() + // + ShredData::capacity(proof_size, chained).unwrap() + // + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } // + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY const SIZE_OF_PAYLOAD: usize = ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS + SIZE_OF_SIGNATURE; @@ -390,7 +461,7 @@ impl<'a> ShredTrait<'a> for ShredData { payload.truncate(Self::SIZE_OF_PAYLOAD); let mut cursor = Cursor::new(&payload[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - if !matches!(common_header.shred_variant, ShredVariant::MerkleData(_)) { + if !matches!(common_header.shred_variant, ShredVariant::MerkleData(..)) { return Err(Error::InvalidShredVariant); } let data_header = deserialize_from_with_limit(&mut cursor)?; @@ -414,10 +485,13 @@ impl<'a> ShredTrait<'a> for ShredData { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let proof_offset = self.proof_offset()?; + let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; let mut shard = self.payload; - shard.truncate(proof_offset); - shard.drain(0..SIZE_OF_SIGNATURE); + shard.truncate(offset); + shard.drain(..SIZE_OF_SIGNATURE); Ok(shard) } @@ -425,15 +499,18 @@ impl<'a> ShredTrait<'a> for ShredData { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let proof_offset = self.proof_offset()?; + let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; self.payload - .get(SIZE_OF_SIGNATURE..proof_offset) + .get(SIZE_OF_SIGNATURE..offset) .ok_or(Error::InvalidPayloadSize(self.payload.len())) } fn sanitize(&self) -> Result<(), Error> { let shred_variant = self.common_header.shred_variant; - if !matches!(shred_variant, ShredVariant::MerkleData(_)) { + if !matches!(shred_variant, ShredVariant::MerkleData(..)) { return Err(Error::InvalidShredVariant); } let _ = self.merkle_proof()?; @@ -455,7 +532,7 @@ impl<'a> ShredTrait<'a> for ShredCode { fn from_payload(mut payload: Vec) -> Result { let mut cursor = Cursor::new(&payload[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - if !matches!(common_header.shred_variant, ShredVariant::MerkleCode(_)) { + if !matches!(common_header.shred_variant, ShredVariant::MerkleCode(..)) { return Err(Error::InvalidShredVariant); } let coding_header = deserialize_from_with_limit(&mut cursor)?; @@ -484,9 +561,12 @@ impl<'a> ShredTrait<'a> for ShredCode { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let proof_offset = self.proof_offset()?; + let ShredVariant::MerkleCode(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + 
let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; let mut shard = self.payload; - shard.truncate(proof_offset); + shard.truncate(offset); shard.drain(..Self::SIZE_OF_HEADERS); Ok(shard) } @@ -495,15 +575,18 @@ impl<'a> ShredTrait<'a> for ShredCode { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let proof_offset = self.proof_offset()?; + let ShredVariant::MerkleCode(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; self.payload - .get(Self::SIZE_OF_HEADERS..proof_offset) + .get(Self::SIZE_OF_HEADERS..offset) .ok_or(Error::InvalidPayloadSize(self.payload.len())) } fn sanitize(&self) -> Result<(), Error> { let shred_variant = self.common_header.shred_variant; - if !matches!(shred_variant, ShredVariant::MerkleCode(_)) { + if !matches!(shred_variant, ShredVariant::MerkleCode(..)) { return Err(Error::InvalidShredVariant); } let _ = self.merkle_proof()?; @@ -522,8 +605,10 @@ impl ShredDataTrait for ShredData { } fn data(&self) -> Result<&[u8], Error> { - let proof_size = self.proof_size()?; - let data_buffer_size = Self::capacity(proof_size)?; + let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + return Err(Error::InvalidShredVariant); + }; + let data_buffer_size = Self::capacity(proof_size, chained)?; let size = usize::from(self.data_header.size); if size > self.payload.len() || size < Self::SIZE_OF_HEADERS @@ -635,26 +720,29 @@ pub(super) fn recover( reed_solomon_cache: &ReedSolomonCache, ) -> Result, Error> { // Grab {common, coding} headers from first coding shred. - let headers = shreds.iter().find_map(|shred| { - let Shred::ShredCode(shred) = shred else { - return None; - }; - let position = u32::from(shred.coding_header.position); - let common_header = ShredCommonHeader { - index: shred.common_header.index.checked_sub(position)?, - ..shred.common_header - }; - let coding_header = CodingShredHeader { - position: 0u16, - ..shred.coding_header - }; - Some((common_header, coding_header)) - }); - let (common_header, coding_header) = headers.ok_or(TooFewParityShards)?; - debug_assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode(_)); - let proof_size = match common_header.shred_variant { - ShredVariant::MerkleCode(proof_size) => proof_size, - ShredVariant::MerkleData(_) | ShredVariant::LegacyCode | ShredVariant::LegacyData => { + let (common_header, coding_header, chained_merkle_root) = shreds + .iter() + .find_map(|shred| { + let Shred::ShredCode(shred) = shred else { + return None; + }; + let chained_merkle_root = shred.chained_merkle_root().ok(); + let position = u32::from(shred.coding_header.position); + let common_header = ShredCommonHeader { + index: shred.common_header.index.checked_sub(position)?, + ..shred.common_header + }; + let coding_header = CodingShredHeader { + position: 0u16, + ..shred.coding_header + }; + Some((common_header, coding_header, chained_merkle_root)) + }) + .ok_or(TooFewParityShards)?; + debug_assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode(..)); + let (proof_size, chained) = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size, chained) => (proof_size, chained), + ShredVariant::MerkleData(..) 
| ShredVariant::LegacyCode | ShredVariant::LegacyData => { return Err(Error::InvalidShredVariant); } }; @@ -674,14 +762,16 @@ pub(super) fn recover( && version == &common_header.version && fec_set_index == &common_header.fec_set_index && match shred { - Shred::ShredData(_) => shred_variant == &ShredVariant::MerkleData(proof_size), + Shred::ShredData(_) => { + shred_variant == &ShredVariant::MerkleData(proof_size, chained) + } Shred::ShredCode(shred) => { let CodingShredHeader { num_data_shreds, num_coding_shreds, position: _, } = shred.coding_header; - shred_variant == &ShredVariant::MerkleCode(proof_size) + shred_variant == &ShredVariant::MerkleCode(proof_size, chained) && num_data_shreds == coding_header.num_data_shreds && num_coding_shreds == coding_header.num_coding_shreds } @@ -721,7 +811,11 @@ pub(super) fn recover( } let shard = shard.ok_or(TooFewShards)?; if index < num_data_shreds { - let shred = ShredData::from_recovered_shard(&common_header.signature, shard)?; + let shred = ShredData::from_recovered_shard( + &common_header.signature, + &chained_merkle_root, + shard, + )?; let ShredCommonHeader { signature: _, shred_variant, @@ -730,7 +824,7 @@ pub(super) fn recover( version, fec_set_index, } = shred.common_header; - if shred_variant != ShredVariant::MerkleData(proof_size) + if shred_variant != ShredVariant::MerkleData(proof_size, chained) || common_header.slot != slot || common_header.version != version || common_header.fec_set_index != fec_set_index @@ -748,7 +842,12 @@ pub(super) fn recover( index: common_header.index + offset as u32, ..common_header }; - let shred = ShredCode::from_recovered_shard(common_header, coding_header, shard)?; + let shred = ShredCode::from_recovered_shard( + common_header, + coding_header, + &chained_merkle_root, + shard, + )?; Ok(Shred::ShredCode(shred)) } }) @@ -802,6 +901,8 @@ fn get_proof_size(num_shreds: usize) -> u8 { pub(super) fn make_shreds_from_data( thread_pool: &ThreadPool, keypair: &Keypair, + // The Merkle root of the previous erasure batch if chained. + chained_merkle_root: Option, mut data: &[u8], // Serialized &[Entry] slot: Slot, parent_slot: Slot, @@ -829,14 +930,15 @@ pub(super) fn make_shreds_from_data( } } let now = Instant::now(); + let chained = chained_merkle_root.is_some(); let erasure_batch_size = shredder::get_erasure_batch_size(DATA_SHREDS_PER_FEC_BLOCK, is_last_in_slot); let proof_size = get_proof_size(erasure_batch_size); - let data_buffer_size = ShredData::capacity(proof_size)?; + let data_buffer_size = ShredData::capacity(proof_size, chained)?; let chunk_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_size; let mut common_header = ShredCommonHeader { signature: Signature::default(), - shred_variant: ShredVariant::MerkleData(proof_size), + shred_variant: ShredVariant::MerkleData(proof_size, chained), slot, index: next_shred_index, version: shred_version, @@ -878,7 +980,7 @@ pub(super) fn make_shreds_from_data( // which can embed the remaining data. 
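    // Sketch of the invariant the search below enforces (inferred from the
    // code, not stated upstream): for each candidate proof_size, size the
    // residual data into data shreds at that capacity, derive the implied
    // erasure batch, and accept the candidate only when
    // get_proof_size(erasure_batch_size) equals it, i.e. proof_size must be
    // a fixed point of the batch sizing.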
let (proof_size, data_buffer_size) = (1u8..32) .find_map(|proof_size| { - let data_buffer_size = ShredData::capacity(proof_size).ok()?; + let data_buffer_size = ShredData::capacity(proof_size, chained).ok()?; let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size; let num_data_shreds = num_data_shreds.max(1); let erasure_batch_size = @@ -887,7 +989,7 @@ pub(super) fn make_shreds_from_data( .then_some((proof_size, data_buffer_size)) }) .ok_or(Error::UnknownProofSize)?; - common_header.shred_variant = ShredVariant::MerkleData(proof_size); + common_header.shred_variant = ShredVariant::MerkleData(proof_size, chained); common_header.fec_set_index = common_header.index; let chunks = if data.is_empty() { // Generate one data shred with empty data. @@ -907,7 +1009,7 @@ pub(super) fn make_shreds_from_data( // Only the very last shred may have residual data buffer. debug_assert!(shreds.iter().rev().skip(1).all(|shred| { let proof_size = shred.proof_size().unwrap(); - let capacity = ShredData::capacity(proof_size).unwrap(); + let capacity = ShredData::capacity(proof_size, chained).unwrap(); shred.data().unwrap().len() == capacity })); // Adjust flags for the very last shred. @@ -951,7 +1053,31 @@ pub(super) fn make_shreds_from_data( .collect(); // Generate coding shreds, populate merkle proof // for all shreds and attach signature. - let shreds: Result, Error> = if shreds.len() <= 1 { + let shreds: Result, Error> = if let Some(chained_merkle_root) = chained_merkle_root { + shreds + .into_iter() + .zip(next_code_index) + .scan( + chained_merkle_root, + |chained_merkle_root, (shreds, next_code_index)| { + Some( + make_erasure_batch( + keypair, + shreds, + Some(*chained_merkle_root), + next_code_index, + is_last_in_slot, + reed_solomon_cache, + ) + .map(|(merkle_root, shreds)| { + *chained_merkle_root = merkle_root; + shreds + }), + ) + }, + ) + .collect() + } else if shreds.len() <= 1 { shreds .into_iter() .zip(next_code_index) @@ -959,10 +1085,12 @@ pub(super) fn make_shreds_from_data( make_erasure_batch( keypair, shreds, + None, // chained_merkle_root next_code_index, is_last_in_slot, reed_solomon_cache, ) + .map(|(_merkle_root, shreds)| shreds) }) .collect() } else { @@ -974,10 +1102,12 @@ pub(super) fn make_shreds_from_data( make_erasure_batch( keypair, shreds, + None, // chained_merkle_root next_code_index, is_last_in_slot, reed_solomon_cache, ) + .map(|(_merkle_root, shreds)| shreds) }) .collect() }) @@ -990,22 +1120,31 @@ pub(super) fn make_shreds_from_data( // shreds and attaches signature. fn make_erasure_batch( keypair: &Keypair, - shreds: Vec, + mut shreds: Vec, + // The Merkle root of the previous erasure batch if chained. 
+ chained_merkle_root: Option, next_code_index: u32, is_last_in_slot: bool, reed_solomon_cache: &ReedSolomonCache, -) -> Result, Error> { +) -> Result<(/*merkle root:*/ Hash, Vec), Error> { let num_data_shreds = shreds.len(); + let chained = chained_merkle_root.is_some(); let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot); let num_coding_shreds = erasure_batch_size - num_data_shreds; let proof_size = get_proof_size(erasure_batch_size); debug_assert!(shreds .iter() - .all(|shred| shred.common_header.shred_variant == ShredVariant::MerkleData(proof_size))); + .all(|shred| shred.common_header.shred_variant + == ShredVariant::MerkleData(proof_size, chained))); let mut common_header = match shreds.first() { - None => return Ok(Vec::default()), + None => return Err(Error::from(TooFewShards)), Some(shred) => shred.common_header, }; + if let Some(hash) = chained_merkle_root { + for shred in &mut shreds { + shred.set_chained_merkle_root(&hash)?; + } + } // Generate erasure codings for encoded shard of data shreds. let data: Vec<_> = shreds .iter() @@ -1020,7 +1159,7 @@ fn make_erasure_batch( let mut shreds: Vec<_> = shreds.into_iter().map(Shred::ShredData).collect(); // Initialize coding shreds from erasure coding shards. common_header.index = next_code_index; - common_header.shred_variant = ShredVariant::MerkleCode(proof_size); + common_header.shred_variant = ShredVariant::MerkleCode(proof_size, chained); let mut coding_header = CodingShredHeader { num_data_shreds: num_data_shreds as u16, num_coding_shreds: num_coding_shreds as u16, @@ -1032,6 +1171,9 @@ fn make_erasure_batch( bincode::serialize_into(&mut cursor, &common_header)?; bincode::serialize_into(&mut cursor, &coding_header)?; cursor.write_all(&code)?; + if let Some(chained_merkle_root) = chained_merkle_root { + cursor.write_all(chained_merkle_root.as_ref())?; + } let shred = ShredCode { common_header, coding_header, @@ -1049,10 +1191,8 @@ fn make_erasure_batch( .collect::>()?, ); // Sign root of Merkle tree. - let signature = { - let root = tree.last().ok_or(Error::InvalidMerkleProof)?; - keypair.sign_message(root.as_ref()) - }; + let root = tree.last().ok_or(Error::InvalidMerkleProof)?; + let signature = keypair.sign_message(root.as_ref()); // Populate merkle proof for all shreds and attach signature. for (index, shred) in shreds.iter_mut().enumerate() { let proof = @@ -1068,7 +1208,7 @@ fn make_erasure_batch( &Shred::from_payload(shred).unwrap() }); } - Ok(shreds) + Ok((*root, shreds)) } #[cfg(test)] @@ -1089,9 +1229,10 @@ mod test { }; // Total size of a data shred including headers and merkle proof. - fn shred_data_size_of_payload(proof_size: u8) -> usize { + fn shred_data_size_of_payload(proof_size: u8, chained: bool) -> usize { ShredData::SIZE_OF_HEADERS - + ShredData::capacity(proof_size).unwrap() + + ShredData::capacity(proof_size, chained).unwrap() + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY } @@ -1099,44 +1240,48 @@ mod test { // All payload excluding merkle proof and the signature are erasure coded. // Therefore the data capacity is equal to erasure encoded shard size minus // size of erasure encoded header. 
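    // Worked example, assuming ShredData::SIZE_OF_HEADERS = 88 (a value not
    // asserted in this excerpt): capacity(6, false) =
    // 1203 - 88 - 6 * 20 = 995 bytes, while capacity(6, true) gives up
    // SIZE_OF_MERKLE_ROOT = 32 further bytes for the previous batch's
    // Merkle root, leaving 963.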
- fn shred_data_capacity(proof_size: u8) -> usize { + fn shred_data_capacity(proof_size: u8, chained: bool) -> usize { const SIZE_OF_ERASURE_ENCODED_HEADER: usize = ShredData::SIZE_OF_HEADERS - SIZE_OF_SIGNATURE; - ShredCode::capacity(proof_size).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER + ShredCode::capacity(proof_size, chained).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER } - fn shred_data_size_of_erasure_encoded_slice(proof_size: u8) -> usize { + fn shred_data_size_of_erasure_encoded_slice(proof_size: u8, chained: bool) -> usize { ShredData::SIZE_OF_PAYLOAD - SIZE_OF_SIGNATURE + - if chained { SIZE_OF_MERKLE_ROOT } else { 0 } - usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY } - #[test] - fn test_shred_data_size_of_payload() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_data_size_of_payload(chained: bool) { for proof_size in 0..0x15 { assert_eq!( ShredData::SIZE_OF_PAYLOAD, - shred_data_size_of_payload(proof_size) + shred_data_size_of_payload(proof_size, chained) ); } } - #[test] - fn test_shred_data_capacity() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_data_capacity(chained: bool) { for proof_size in 0..0x15 { assert_eq!( - ShredData::capacity(proof_size).unwrap(), - shred_data_capacity(proof_size) + ShredData::capacity(proof_size, chained).unwrap(), + shred_data_capacity(proof_size, chained) ); } } - #[test] - fn test_shred_code_capacity() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_code_capacity(chained: bool) { for proof_size in 0..0x15 { assert_eq!( - ShredCode::capacity(proof_size).unwrap(), - shred_data_size_of_erasure_encoded_slice(proof_size), + ShredCode::capacity(proof_size, chained).unwrap(), + shred_data_size_of_erasure_encoded_slice(proof_size, chained), ); } } @@ -1177,16 +1322,20 @@ mod test { } } - #[test_case(37)] - #[test_case(64)] - #[test_case(73)] - fn test_recover_merkle_shreds(num_shreds: usize) { + #[test_case(37, false)] + #[test_case(37, true)] + #[test_case(64, false)] + #[test_case(64, true)] + #[test_case(73, false)] + #[test_case(73, true)] + fn test_recover_merkle_shreds(num_shreds: usize, chained: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); for num_data_shreds in 1..num_shreds { let num_coding_shreds = num_shreds - num_data_shreds; run_recover_merkle_shreds( &mut rng, + chained, num_data_shreds, num_coding_shreds, &reed_solomon_cache, @@ -1196,6 +1345,7 @@ mod test { fn run_recover_merkle_shreds( rng: &mut R, + chained: bool, num_data_shreds: usize, num_coding_shreds: usize, reed_solomon_cache: &ReedSolomonCache, @@ -1203,10 +1353,10 @@ mod test { let keypair = Keypair::new(); let num_shreds = num_data_shreds + num_coding_shreds; let proof_size = get_proof_size(num_shreds); - let capacity = ShredData::capacity(proof_size).unwrap(); + let capacity = ShredData::capacity(proof_size, chained).unwrap(); let common_header = ShredCommonHeader { signature: Signature::default(), - shred_variant: ShredVariant::MerkleData(proof_size), + shred_variant: ShredVariant::MerkleData(proof_size, chained), slot: 145_865_705, index: 1835, version: rng.gen(), @@ -1261,7 +1411,7 @@ mod test { .unwrap(); for (i, code) in parity.into_iter().enumerate() { let common_header = ShredCommonHeader { - shred_variant: ShredVariant::MerkleCode(proof_size), + shred_variant: ShredVariant::MerkleCode(proof_size, chained), index: common_header.index + i as u32 + 7, ..common_header }; @@ -1307,7 +1457,7 @@ mod test { if shreds.iter().all(|shred| { matches!( 
shred.common_header().shred_variant, - ShredVariant::MerkleData(_) + ShredVariant::MerkleData(..) ) }) { assert_matches!( @@ -1354,53 +1504,85 @@ mod test { } } - #[test_case(0, false)] - #[test_case(0, true)] - #[test_case(15600, false)] - #[test_case(15600, true)] - #[test_case(31200, false)] - #[test_case(31200, true)] - #[test_case(46800, false)] - #[test_case(46800, true)] - fn test_make_shreds_from_data(data_size: usize, is_last_in_slot: bool) { + #[test_case(0, false, false)] + #[test_case(0, false, true)] + #[test_case(0, true, false)] + #[test_case(0, true, true)] + #[test_case(15600, false, false)] + #[test_case(15600, false, true)] + #[test_case(15600, true, false)] + #[test_case(15600, true, true)] + #[test_case(31200, false, false)] + #[test_case(31200, false, true)] + #[test_case(31200, true, false)] + #[test_case(31200, true, true)] + #[test_case(46800, false, false)] + #[test_case(46800, false, true)] + #[test_case(46800, true, false)] + #[test_case(46800, true, true)] + fn test_make_shreds_from_data(data_size: usize, chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let data_size = data_size.saturating_sub(16); let reed_solomon_cache = ReedSolomonCache::default(); for data_size in data_size..data_size + 32 { - run_make_shreds_from_data(&mut rng, data_size, is_last_in_slot, &reed_solomon_cache); + run_make_shreds_from_data( + &mut rng, + data_size, + chained, + is_last_in_slot, + &reed_solomon_cache, + ); } } - #[test_case(false)] - #[test_case(true)] - fn test_make_shreds_from_data_rand(is_last_in_slot: bool) { + #[test_case(false, false)] + #[test_case(false, true)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_make_shreds_from_data_rand(chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); for _ in 0..32 { let data_size = rng.gen_range(0..31200 * 7); - run_make_shreds_from_data(&mut rng, data_size, is_last_in_slot, &reed_solomon_cache); + run_make_shreds_from_data( + &mut rng, + data_size, + chained, + is_last_in_slot, + &reed_solomon_cache, + ); } } #[ignore] - #[test_case(false)] - #[test_case(true)] - fn test_make_shreds_from_data_paranoid(is_last_in_slot: bool) { + #[test_case(false, false)] + #[test_case(false, true)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_make_shreds_from_data_paranoid(chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); for data_size in 0..=PACKET_DATA_SIZE * 4 * 64 { - run_make_shreds_from_data(&mut rng, data_size, is_last_in_slot, &reed_solomon_cache); + run_make_shreds_from_data( + &mut rng, + data_size, + chained, + is_last_in_slot, + &reed_solomon_cache, + ); } } fn run_make_shreds_from_data( rng: &mut R, data_size: usize, + chained: bool, is_last_in_slot: bool, reed_solomon_cache: &ReedSolomonCache, ) { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); let keypair = Keypair::new(); + let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); let slot = 149_745_689; let parent_slot = slot - rng.gen_range(1..65536); let shred_version = rng.gen(); @@ -1412,6 +1594,7 @@ mod test { let shreds = make_shreds_from_data( &thread_pool, &keypair, + chained_merkle_root, &data[..], slot, parent_slot, @@ -1483,15 +1666,22 @@ mod test { let common_header = shred.common_header(); assert_eq!(common_header.slot, slot); assert_eq!(common_header.version, shred_version); + let proof_size = 
shred.proof_size().unwrap(); match shred { Shred::ShredCode(_) => { assert_eq!(common_header.index, next_code_index + num_coding_shreds); - assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode(_)); + assert_eq!( + common_header.shred_variant, + ShredVariant::MerkleCode(proof_size, chained) + ); num_coding_shreds += 1; } Shred::ShredData(shred) => { assert_eq!(common_header.index, next_shred_index + num_data_shreds); - assert_matches!(common_header.shred_variant, ShredVariant::MerkleData(_)); + assert_eq!( + common_header.shred_variant, + ShredVariant::MerkleData(proof_size, chained) + ); assert!(common_header.fec_set_index <= common_header.index); assert_eq!( Slot::from(shred.data_header.parent_offset), diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index ecb40367b4ef08..5b9965afd787c8 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -97,8 +97,8 @@ impl ShredData { // Possibly zero pads bytes stored in blockstore. pub(crate) fn resize_stored_shred(shred: Vec) -> Result, Error> { match shred::layout::get_shred_variant(&shred)? { - ShredVariant::LegacyCode | ShredVariant::MerkleCode(_) => Err(Error::InvalidShredType), - ShredVariant::MerkleData(_) => { + ShredVariant::LegacyCode | ShredVariant::MerkleCode(..) => Err(Error::InvalidShredType), + ShredVariant::MerkleData(..) => { if shred.len() != merkle::ShredData::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(shred.len())); } @@ -111,10 +111,12 @@ impl ShredData { // Maximum size of ledger data that can be embedded in a data-shred. // merkle_proof_size is the number of merkle proof entries. // None indicates a legacy data-shred. - pub fn capacity(merkle_proof_size: Option) -> Result { - match merkle_proof_size { + pub fn capacity( + merkle_variant: Option<(/*proof_size:*/ u8, /*chained:*/ bool)>, + ) -> Result { + match merkle_variant { None => Ok(legacy::ShredData::CAPACITY), - Some(proof_size) => merkle::ShredData::capacity(proof_size), + Some((proof_size, chained)) => merkle::ShredData::capacity(proof_size, chained), } } diff --git a/ledger/src/shred/stats.rs b/ledger/src/shred/stats.rs index b1c4769d5f876c..5b4a75a2489bbb 100644 --- a/ledger/src/shred/stats.rs +++ b/ledger/src/shred/stats.rs @@ -33,7 +33,9 @@ pub struct ShredFetchStats { pub index_overrun: usize, pub shred_count: usize, pub(crate) num_shreds_merkle_code: usize, + pub(crate) num_shreds_merkle_code_chained: usize, pub(crate) num_shreds_merkle_data: usize, + pub(crate) num_shreds_merkle_data_chained: usize, pub ping_count: usize, pub ping_err_verify_count: usize, pub(crate) index_bad_deserialize: usize, @@ -117,7 +119,17 @@ impl ShredFetchStats { ("index_overrun", self.index_overrun, i64), ("shred_count", self.shred_count, i64), ("num_shreds_merkle_code", self.num_shreds_merkle_code, i64), + ( + "num_shreds_merkle_code_chained", + self.num_shreds_merkle_code_chained, + i64 + ), ("num_shreds_merkle_data", self.num_shreds_merkle_data, i64), + ( + "num_shreds_merkle_data_chained", + self.num_shreds_merkle_data_chained, + i64 + ), ("ping_count", self.ping_count, i64), ("ping_err_verify_count", self.ping_err_verify_count, i64), ("slot_bad_deserialize", self.slot_bad_deserialize, i64), diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index f3203876de7066..07a0fe0ae5b41b 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -93,6 +93,7 @@ impl Shredder { self.version, self.reference_tick, is_last_in_slot, + None, // chained_merkle_root next_shred_index, 
next_code_index, reed_solomon_cache, diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 238ab42f9c93e1..d52af07bf2cf46 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -1,6 +1,6 @@ #![allow(clippy::implicit_hasher)] use { - crate::shred, + crate::shred::{self, SIZE_OF_MERKLE_ROOT}, itertools::{izip, Itertools}, rayon::{prelude::*, ThreadPool}, sha2::{Digest, Sha512}, @@ -18,13 +18,10 @@ use { pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }, - static_assertions::const_assert_eq, std::{collections::HashMap, iter::repeat, mem::size_of, ops::Range, sync::Arc}, }; const SIGN_SHRED_GPU_MIN: usize = 256; -const_assert_eq!(SIZE_OF_MERKLE_ROOT, 32); -const SIZE_OF_MERKLE_ROOT: usize = std::mem::size_of::(); #[must_use] pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap) -> bool { From 90933fea7534faef1e5d0cfbb1fc6d22cffe800c Mon Sep 17 00:00:00 2001 From: Brooks Date: Sun, 21 Jan 2024 20:54:25 -0500 Subject: [PATCH 003/401] Replaces fs-err in snapshot_bank_utils.rs (#34861) --- runtime/src/snapshot_bank_utils.rs | 32 ++++++++++++++---------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index cd6aeaa11d4e85..1f734fb32b70be 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -2038,15 +2038,15 @@ mod tests { let accounts_hardlinks_dir = get_bank_snapshot_dir(&bank_snapshots_dir, bank.slot()) .join(snapshot_utils::SNAPSHOT_ACCOUNTS_HARDLINKS); - assert!(fs_err::metadata(&accounts_hardlinks_dir).is_ok()); + assert!(fs::metadata(&accounts_hardlinks_dir).is_ok()); let mut hardlink_dirs: Vec = Vec::new(); // This directory contain symlinks to all accounts snapshot directories. - for entry in fs_err::read_dir(accounts_hardlinks_dir).unwrap() { + for entry in fs::read_dir(accounts_hardlinks_dir).unwrap() { let entry = entry.unwrap(); let symlink = entry.path(); - let dst_path = fs_err::read_link(symlink).unwrap(); - assert!(fs_err::metadata(&dst_path).is_ok()); + let dst_path = fs::read_link(symlink).unwrap(); + assert!(fs::metadata(&dst_path).is_ok()); hardlink_dirs.push(dst_path); } @@ -2054,9 +2054,7 @@ mod tests { assert!(purge_bank_snapshot(bank_snapshot_dir).is_ok()); // When the bank snapshot is removed, all the snapshot hardlink directories should be removed. 
- assert!(hardlink_dirs - .iter() - .all(|dir| fs_err::metadata(dir).is_err())); + assert!(hardlink_dirs.iter().all(|dir| fs::metadata(dir).is_err())); } #[test] @@ -2071,7 +2069,7 @@ mod tests { let complete_flag_file = snapshot .snapshot_dir .join(snapshot_utils::SNAPSHOT_STATE_COMPLETE_FILENAME); - fs_err::remove_file(complete_flag_file).unwrap(); + fs::remove_file(complete_flag_file).unwrap(); // The incomplete snapshot dir should still exist let snapshot_dir_4 = snapshot.snapshot_dir; assert!(snapshot_dir_4.exists()); @@ -2081,14 +2079,14 @@ mod tests { let snapshot_version_file = snapshot .snapshot_dir .join(snapshot_utils::SNAPSHOT_VERSION_FILENAME); - fs_err::remove_file(snapshot_version_file).unwrap(); + fs::remove_file(snapshot_version_file).unwrap(); let snapshot = get_highest_bank_snapshot(&bank_snapshots_dir).unwrap(); assert_eq!(snapshot.slot, 2); let status_cache_file = snapshot .snapshot_dir .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); - fs_err::remove_file(status_cache_file).unwrap(); + fs::remove_file(status_cache_file).unwrap(); let snapshot = get_highest_bank_snapshot(&bank_snapshots_dir).unwrap(); assert_eq!(snapshot.slot, 1); } @@ -2133,21 +2131,21 @@ mod tests { // the symlinks point to the account snapshot hardlink directories /snapshot// for slot 2 // get them via read_link - let hardlink_dirs_slot_2: Vec = fs_err::read_dir(accounts_link_dir_slot_2) + let hardlink_dirs_slot_2: Vec = fs::read_dir(accounts_link_dir_slot_2) .unwrap() .map(|entry| { let symlink = entry.unwrap().path(); - fs_err::read_link(symlink).unwrap() + fs::read_link(symlink).unwrap() }) .collect(); // remove the bank snapshot directory for slot 2, so the account snapshot slot 2 directories become orphaned - fs_err::remove_dir_all(snapshot_dir_slot_2).unwrap(); + fs::remove_dir_all(snapshot_dir_slot_2).unwrap(); // verify the orphaned account snapshot hardlink directories are still there assert!(hardlink_dirs_slot_2 .iter() - .all(|dir| fs_err::metadata(dir).is_ok())); + .all(|dir| fs::metadata(dir).is_ok())); let account_snapshot_paths: Vec = hardlink_dirs_slot_2 .iter() @@ -2159,7 +2157,7 @@ mod tests { // verify the hardlink directories are gone assert!(hardlink_dirs_slot_2 .iter() - .all(|dir| fs_err::metadata(dir).is_err())); + .all(|dir| fs::metadata(dir).is_err())); } #[test] @@ -2173,7 +2171,7 @@ mod tests { let bank_snapshot_dir = get_bank_snapshot_dir(&bank_snapshots_dir, slot); let state_complete_file = bank_snapshot_dir.join(snapshot_utils::SNAPSHOT_STATE_COMPLETE_FILENAME); - fs_err::remove_file(state_complete_file).unwrap(); + fs::remove_file(state_complete_file).unwrap(); } purge_incomplete_bank_snapshots(&bank_snapshots_dir); @@ -2393,7 +2391,7 @@ mod tests { // Verify that the next_append_vec_id tracking is correct let mut max_id = 0; for path in account_paths { - fs_err::read_dir(path).unwrap().for_each(|entry| { + fs::read_dir(path).unwrap().for_each(|entry| { let path = entry.unwrap().path(); let filename = path.file_name().unwrap(); let (_slot, append_vec_id) = get_slot_and_append_vec_id(filename.to_str().unwrap()); From f9bfb60c8333eee59c5a3f81968e604cdbafa729 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 22 Jan 2024 10:43:21 +0800 Subject: [PATCH 004/401] ci: publish crates directly (#34794) --- ci/publish-crate.sh | 46 ++++++++++++++++++--------------------------- 1 file changed, 18 insertions(+), 28 deletions(-) diff --git a/ci/publish-crate.sh b/ci/publish-crate.sh index fb13ec1d53dc9d..099d02129e3cb8 100755 --- a/ci/publish-crate.sh +++ 
b/ci/publish-crate.sh @@ -63,37 +63,27 @@ for Cargo_toml in $Cargo_tomls; do ( set -x + crate=$(dirname "$Cargo_toml") - # The rocksdb package does not build with the stock rust docker image so use - # the solana rust docker image cargoCommand="cargo publish --token $CRATES_IO_TOKEN" - ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand" - ) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues - - numRetries=30 - for ((i = 1 ; i <= numRetries ; i++)); do - echo "Attempt ${i} of ${numRetries}" - if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then - echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io REST API" - - really_uploaded=0 - ( - set -x - rm -rf crate-test - cargo init crate-test - cd crate-test/ - echo "${crate_name} = \"=${expectedCrateVersion}\"" >> Cargo.toml - echo "[workspace]" >> Cargo.toml - cargo check - ) && really_uploaded=1 - if ((really_uploaded)); then - break; + + numRetries=10 + for ((i = 1; i <= numRetries; i++)); do + echo "Attempt ${i} of ${numRetries}" + # The rocksdb package does not build with the stock rust docker image so use + # the solana rust docker image + if ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"; then + break + fi + + if [ "$i" -lt "$numRetries" ]; then + sleep 3 + else + echo "couldn't publish '$crate_name'" + exit 1 fi - echo "${crate_name} not yet available for download from crates.io" - fi - echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds." - sleep 2 - done + done + ) done exit 0 From d005b3a5b84394c6a52b77022d86231482ce67dd Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 22 Jan 2024 06:55:13 -0500 Subject: [PATCH 005/401] Removes fs-err dependency from accounts-db crate (#34869) --- Cargo.lock | 1 - accounts-db/Cargo.toml | 1 - programs/sbf/Cargo.lock | 1 - 3 files changed, 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index baaa0cb61fb1d0..e5511988a65ecb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5314,7 +5314,6 @@ dependencies = [ "ed25519-dalek", "flate2", "fnv", - "fs-err", "im", "index_list", "itertools", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 6ce4d2f087e72d..567a901da90a88 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -21,7 +21,6 @@ crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } flate2 = { workspace = true } fnv = { workspace = true } -fs-err = { workspace = true } im = { workspace = true, features = ["rayon", "serde"] } index_list = { workspace = true } itertools = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 57660f7cb9efc1..14855dc1a167ed 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4679,7 +4679,6 @@ dependencies = [ "dashmap", "flate2", "fnv", - "fs-err", "im", "index_list", "itertools", From b78d41792aabe65a78a44d90174439b2f5579866 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 22 Jan 2024 06:55:32 -0500 Subject: [PATCH 006/401] Replaces fs-err in snapshot_utils tests (#34870) --- runtime/src/snapshot_utils.rs | 36 +++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 85a8e42de809e8..756ce648a01ddb 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2626,21 
+2626,21 @@ mod tests { ) { for slot in min_slot..max_slot { let snapshot_dir = get_bank_snapshot_dir(bank_snapshots_dir, slot); - fs_err::create_dir_all(&snapshot_dir).unwrap(); + fs::create_dir_all(&snapshot_dir).unwrap(); let snapshot_filename = get_snapshot_file_name(slot); let snapshot_path = snapshot_dir.join(snapshot_filename); - fs_err::File::create(snapshot_path).unwrap(); + fs::File::create(snapshot_path).unwrap(); let status_cache_file = snapshot_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME); - fs_err::File::create(status_cache_file).unwrap(); + fs::File::create(status_cache_file).unwrap(); let version_path = snapshot_dir.join(SNAPSHOT_VERSION_FILENAME); - fs_err::write(version_path, SnapshotVersion::default().as_str().as_bytes()).unwrap(); + fs::write(version_path, SnapshotVersion::default().as_str().as_bytes()).unwrap(); // Mark this directory complete so it can be used. Check this flag first before selecting for deserialization. let state_complete_path = snapshot_dir.join(SNAPSHOT_STATE_COMPLETE_FILENAME); - fs_err::File::create(state_complete_path).unwrap(); + fs::File::create(state_complete_path).unwrap(); } } @@ -2680,8 +2680,8 @@ mod tests { min_incremental_snapshot_slot: Slot, max_incremental_snapshot_slot: Slot, ) { - fs_err::create_dir_all(full_snapshot_archives_dir).unwrap(); - fs_err::create_dir_all(incremental_snapshot_archives_dir).unwrap(); + fs::create_dir_all(full_snapshot_archives_dir).unwrap(); + fs::create_dir_all(incremental_snapshot_archives_dir).unwrap(); for full_snapshot_slot in min_full_snapshot_slot..max_full_snapshot_slot { for incremental_snapshot_slot in min_incremental_snapshot_slot..max_incremental_snapshot_slot @@ -2693,13 +2693,13 @@ mod tests { Hash::default() ); let snapshot_filepath = incremental_snapshot_archives_dir.join(snapshot_filename); - fs_err::File::create(snapshot_filepath).unwrap(); + fs::File::create(snapshot_filepath).unwrap(); } let snapshot_filename = format!("snapshot-{}-{}.tar", full_snapshot_slot, Hash::default()); let snapshot_filepath = full_snapshot_archives_dir.join(snapshot_filename); - fs_err::File::create(snapshot_filepath).unwrap(); + fs::File::create(snapshot_filepath).unwrap(); // Add in an incremental snapshot with a bad filename and high slot to ensure filename are filtered and sorted correctly let bad_filename = format!( @@ -2708,14 +2708,14 @@ mod tests { max_incremental_snapshot_slot + 1, ); let bad_filepath = incremental_snapshot_archives_dir.join(bad_filename); - fs_err::File::create(bad_filepath).unwrap(); + fs::File::create(bad_filepath).unwrap(); } // Add in a snapshot with a bad filename and high slot to ensure filename are filtered and // sorted correctly let bad_filename = format!("snapshot-{}-bad!hash.tar", max_full_snapshot_slot + 1); let bad_filepath = full_snapshot_archives_dir.join(bad_filename); - fs_err::File::create(bad_filepath).unwrap(); + fs::File::create(bad_filepath).unwrap(); } #[test] @@ -2887,7 +2887,7 @@ mod tests { for snap_name in snapshot_names { let snap_path = temp_snap_dir.path().join(snap_name); - let mut _snap_file = fs_err::File::create(snap_path); + let mut _snap_file = fs::File::create(snap_path); } purge_old_snapshot_archives( temp_snap_dir.path(), @@ -2897,7 +2897,7 @@ mod tests { ); let mut retained_snaps = HashSet::new(); - for entry in fs_err::read_dir(temp_snap_dir.path()).unwrap() { + for entry in fs::read_dir(temp_snap_dir.path()).unwrap() { let entry_path_buf = entry.unwrap().path(); let entry_path = entry_path_buf.as_path(); let snapshot_name = entry_path @@ -2969,7 
+2969,7 @@ mod tests { let full_snapshot_archive_path = full_snapshot_archives_dir .as_ref() .join(full_snapshot_archive_file_name); - fs_err::File::create(full_snapshot_archive_path).unwrap(); + fs::File::create(full_snapshot_archive_path).unwrap(); // don't purge-and-check until enough snapshot archives have been created if slot < starting_slot + maximum_snapshots_to_retain.get() as Slot { @@ -3030,7 +3030,7 @@ mod tests { let snapshot_filename = format!("snapshot-{}-{}.tar", full_snapshot_slot, Hash::default()); let snapshot_path = full_snapshot_archives_dir.path().join(&snapshot_filename); - fs_err::File::create(snapshot_path).unwrap(); + fs::File::create(snapshot_path).unwrap(); snapshot_filenames.push(snapshot_filename); (full_snapshot_slot..) @@ -3047,7 +3047,7 @@ mod tests { let snapshot_path = incremental_snapshot_archives_dir .path() .join(&snapshot_filename); - fs_err::File::create(snapshot_path).unwrap(); + fs::File::create(snapshot_path).unwrap(); snapshot_filenames.push(snapshot_filename); }); }); @@ -3150,7 +3150,7 @@ mod tests { let snapshot_path = incremental_snapshot_archives_dir .path() .join(snapshot_filenames); - fs_err::File::create(snapshot_path).unwrap(); + fs::File::create(snapshot_path).unwrap(); } purge_old_snapshot_archives( @@ -3174,7 +3174,7 @@ mod tests { let bank_snapshots_dir_tmp = tempfile::TempDir::new().unwrap(); let bank_snapshot_dir = bank_snapshots_dir_tmp.path().join(slot.to_string()); let accounts_hardlinks_dir = bank_snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); - fs_err::create_dir_all(&accounts_hardlinks_dir).unwrap(); + fs::create_dir_all(&accounts_hardlinks_dir).unwrap(); let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); let appendvec_filename = format!("{slot}.0"); From 5e4332ee35507515644d62f94f5828678cef7c5c Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 22 Jan 2024 09:22:49 -0800 Subject: [PATCH 007/401] Feature Impl: cost model uses number of requested write locks (#34820) --- cost-model/src/cost_model.rs | 45 ++++++++++++++++++++++++---- sdk/program/src/message/sanitized.rs | 2 ++ sdk/src/feature_set.rs | 5 ++++ 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index ba01ed9fe993a5..1e15735426737f 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -18,7 +18,7 @@ use { solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, @@ -44,7 +44,7 @@ impl CostModel { let mut tx_cost = UsageCostDetails::new_with_default_capacity(); tx_cost.signature_cost = Self::get_signature_cost(transaction); - Self::get_write_lock_cost(&mut tx_cost, transaction); + Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); tx_cost.account_data_size = Self::calculate_account_data_size(transaction); @@ -73,10 +73,19 @@ impl CostModel { .collect() } - fn get_write_lock_cost(tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction) { + fn get_write_lock_cost( + tx_cost: &mut UsageCostDetails, + transaction: &SanitizedTransaction, + feature_set: &FeatureSet, + ) { tx_cost.writable_accounts = Self::get_writable_accounts(transaction); - 
tx_cost.write_lock_cost = - WRITE_LOCK_UNITS.saturating_mul(tx_cost.writable_accounts.len() as u64); + let num_write_locks = + if feature_set.is_active(&feature_set::cost_model_requested_write_lock_cost::id()) { + transaction.message().num_write_locks() + } else { + tx_cost.writable_accounts.len() as u64 + }; + tx_cost.write_lock_cost = WRITE_LOCK_UNITS.saturating_mul(num_write_locks); } fn get_transaction_cost( @@ -329,6 +338,32 @@ mod tests { assert_eq!(0, tx_cost.data_bytes_cost); } + #[test] + fn test_cost_model_demoted_write_lock() { + let (mint_keypair, start_hash) = test_setup(); + + // Cannot write-lock the system program, it will be demoted when taking locks. + // However, the cost should be calculated as if it were taken. + let simple_transaction = SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&mint_keypair, &system_program::id(), 2, start_hash), + ); + + // Feature not enabled - write lock is demoted and does not count towards cost + { + let tx_cost = CostModel::calculate_cost(&simple_transaction, &FeatureSet::default()); + assert_eq!(WRITE_LOCK_UNITS, tx_cost.write_lock_cost()); + assert_eq!(1, tx_cost.writable_accounts().len()); + } + + // Feature enabled - write lock is demoted but still counts towards cost + { + let tx_cost = + CostModel::calculate_cost(&simple_transaction, &FeatureSet::all_enabled()); + assert_eq!(2 * WRITE_LOCK_UNITS, tx_cost.write_lock_cost()); + assert_eq!(1, tx_cost.writable_accounts().len()); + } + } + #[test] fn test_cost_model_compute_budget_transaction() { let (mint_keypair, start_hash) = test_setup(); diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 640159a7ad2dea..098a781ea4dbf7 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -360,6 +360,8 @@ impl SanitizedMessage { num_signatures } + /// Returns the number of requested write-locks in this message. + /// This does not consider if write-locks are demoted. pub fn num_write_locks(&self) -> u64 { self.account_keys() .len() diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index f2e9c63ff1b2c9..2941c94ae81cb3 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -768,6 +768,10 @@ pub mod enable_zk_proof_from_account { solana_sdk::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); } +pub mod cost_model_requested_write_lock_cost { + solana_sdk::declare_id!("wLckV1a64ngtcKPRGU4S4grVTestXjmNjxBjaKZrAcn"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -955,6 +959,7 @@ lazy_static! 
{ (deprecate_executable_meta_update_in_bpf_loader::id(), "deprecate executable meta flag update in bpf loader #34194"), (enable_zk_proof_from_account::id(), "Enable zk token proof program to read proof from accounts instead of instruction data #34750"), (curve25519_restrict_msm_length::id(), "restrict curve25519 multiscalar multiplication vector lengths #34763"), + (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From c264307f1041e41c168e5850f880374840106822 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 22 Jan 2024 14:07:29 -0500 Subject: [PATCH 008/401] Removes get_io_error() (#34863) --- runtime/src/snapshot_utils.rs | 43 +++++++++++-------- .../snapshot_storage_rebuilder.rs | 12 +++--- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 756ce648a01ddb..800b5f80bc8ec1 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -31,7 +31,7 @@ use { cmp::Ordering, collections::{HashMap, HashSet}, fmt, fs, - io::{BufReader, BufWriter, Error as IoError, ErrorKind, Read, Seek, Write}, + io::{BufReader, BufWriter, Error as IoError, Read, Seek, Write}, num::NonZeroUsize, path::{Path, PathBuf}, process::ExitStatus, @@ -1067,9 +1067,10 @@ where let consumed_size = data_file_stream.stream_position()?; if consumed_size > maximum_file_size { let error_message = format!( - "too large snapshot data file to serialize: {data_file_path:?} has {consumed_size} bytes" + "too large snapshot data file to serialize: '{}' has {consumed_size} bytes", + data_file_path.display(), ); - return Err(get_io_error(&error_message)); + return Err(IoError::other(error_message).into()); } Ok(consumed_size) } @@ -1133,12 +1134,12 @@ fn create_snapshot_data_file_stream( if snapshot_file_size > maximum_file_size { let error_message = format!( - "too large snapshot data file to deserialize: {} has {} bytes (max size is {} bytes)", + "too large snapshot data file to deserialize: '{}' has {} bytes (max size is {} bytes)", snapshot_root_file_path.as_ref().display(), snapshot_file_size, maximum_file_size, ); - return Err(get_io_error(&error_message)); + return Err(IoError::other(error_message).into()); } let snapshot_data_file = fs_err::File::open(snapshot_root_file_path.as_ref())?; @@ -1158,12 +1159,12 @@ fn check_deserialize_file_consumed( if consumed_size != file_size { let error_message = format!( - "invalid snapshot data file: {} has {} bytes, however consumed {} bytes to deserialize", + "invalid snapshot data file: '{}' has {} bytes, however consumed {} bytes to deserialize", file_path.as_ref().display(), file_size, consumed_size, ); - return Err(get_io_error(&error_message)); + return Err(IoError::other(error_message).into()); } Ok(()) @@ -1601,12 +1602,12 @@ fn snapshot_version_from_file(path: impl AsRef) -> Result { let file_size = fs_err::metadata(&path)?.len(); if file_size > MAX_SNAPSHOT_VERSION_FILE_SIZE { let error_message = format!( - "snapshot version file too large: {} has {} bytes (max size is {} bytes)", + "snapshot version file too large: '{}' has {} bytes (max size is {} bytes)", path.as_ref().display(), file_size, MAX_SNAPSHOT_VERSION_FILE_SIZE, ); - return Err(get_io_error(&error_message)); + return Err(IoError::other(error_message).into()); } // Read snapshot_version from file. 
@@ -2024,11 +2025,20 @@ pub fn verify_unpacked_snapshots_dir_and_version( let mut bank_snapshots = get_bank_snapshots_post(&unpacked_snapshots_dir_and_version.unpacked_snapshots_dir); if bank_snapshots.len() > 1 { - return Err(get_io_error("invalid snapshot format")); - } - let root_paths = bank_snapshots - .pop() - .ok_or_else(|| get_io_error("No snapshots found in snapshots directory"))?; + return Err(IoError::other(format!( + "invalid snapshot format: only one snapshot allowed, but found {}", + bank_snapshots.len(), + )) + .into()); + } + let root_paths = bank_snapshots.pop().ok_or_else(|| { + IoError::other(format!( + "no snapshots found in snapshots directory '{}'", + unpacked_snapshots_dir_and_version + .unpacked_snapshots_dir + .display(), + )) + })?; Ok((snapshot_version, root_paths)) } @@ -2044,11 +2054,6 @@ pub fn get_bank_snapshot_dir(bank_snapshots_dir: impl AsRef, slot: Slot) - .join(get_snapshot_file_name(slot)) } -fn get_io_error(error: &str) -> SnapshotError { - warn!("Snapshot Error: {:?}", error); - SnapshotError::Io(IoError::new(ErrorKind::Other, error)) -} - #[derive(Debug, Copy, Clone)] /// allow tests to specify what happened to the serialized format pub enum VerifyBank { diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 4971c694ba206e..0c6116274b1cb1 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -1,9 +1,7 @@ //! Provides interfaces for rebuilding snapshot storages use { - super::{ - get_io_error, snapshot_version_from_file, SnapshotError, SnapshotFrom, SnapshotVersion, - }, + super::{snapshot_version_from_file, SnapshotError, SnapshotFrom, SnapshotVersion}, crate::serde_snapshot::{ self, reconstruct_single_storage, remap_and_reconstruct_single_storage, snapshot_storage_lengths_from_fields, SerdeStyle, SerializedAppendVecId, @@ -25,7 +23,7 @@ use { std::{ collections::HashMap, fs::File, - io::BufReader, + io::{BufReader, Error as IoError}, path::{Path, PathBuf}, sync::{ atomic::{AtomicUsize, Ordering}, @@ -84,9 +82,9 @@ impl SnapshotStorageRebuilder { let (snapshot_version_path, snapshot_file_path, append_vec_files) = Self::get_version_and_snapshot_files(&file_receiver); let snapshot_version_str = snapshot_version_from_file(snapshot_version_path)?; - let snapshot_version = snapshot_version_str.parse().map_err(|_| { - get_io_error(&format!( - "unsupported snapshot version: {snapshot_version_str}", + let snapshot_version = snapshot_version_str.parse().map_err(|err| { + IoError::other(format!( + "unsupported snapshot version '{snapshot_version_str}': {err}", )) })?; let snapshot_storage_lengths = From 9db4e84e723f9b9e4c5c5ac627718301af982783 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 23 Jan 2024 03:18:07 +0800 Subject: [PATCH 009/401] bump shlex to 1.3.0 (#34878) --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5511988a65ecb..5b6a5f51ee775b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5114,9 +5114,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shlex" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = 
"signal-hook" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 14855dc1a167ed..931e70ca12d2f5 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4527,9 +4527,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shlex" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook" From a5c470d2fbfa926296dd6282f23132b06e58108b Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Mon, 22 Jan 2024 13:38:56 -0600 Subject: [PATCH 010/401] harden bank tests (#34821) * harden bank tests by specifying exact genesis config wrt the fee rate and rent * rename to clarify test function, add comments --- runtime/src/bank/tests.rs | 102 ++++++++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 33 deletions(-) diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 132368ac196287..0a8309bdd37cb5 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -71,7 +71,7 @@ use { feature_set::{self, FeatureSet}, fee::FeeStructure, fee_calculator::FeeRateGovernor, - genesis_config::{create_genesis_config, ClusterType, GenesisConfig}, + genesis_config::{ClusterType, GenesisConfig}, hash::{hash, Hash}, incinerator, instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError}, @@ -170,6 +170,28 @@ fn new_bank_from_parent_with_bank_forks( .clone_without_scheduler() } +fn create_genesis_config_no_tx_fee_no_rent(lamports: u64) -> (GenesisConfig, Keypair) { + // genesis_util creates config with no tx fee and no rent + let genesis_config_info = solana_runtime::genesis_utils::create_genesis_config(lamports); + ( + genesis_config_info.genesis_config, + genesis_config_info.mint_keypair, + ) +} + +fn create_genesis_config_no_tx_fee(lamports: u64) -> (GenesisConfig, Keypair) { + // genesis_config creates config with default fee rate and default rent + // override to set fee rate to zero. + let (mut genesis_config, mint_keypair) = + solana_sdk::genesis_config::create_genesis_config(lamports); + genesis_config.fee_rate_governor = FeeRateGovernor::new(0, 0); + (genesis_config, mint_keypair) +} + +fn create_genesis_config(lamports: u64) -> (GenesisConfig, Keypair) { + solana_sdk::genesis_config::create_genesis_config(lamports) +} + #[test] fn test_race_register_tick_freeze() { solana_logger::setup(); @@ -428,7 +450,7 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { for set_exempt_rent_epoch_max in [false, true] { solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config(10); + let (mut genesis_config, _mint_keypair) = create_genesis_config_no_tx_fee(10); genesis_config.rent = rent_with_exemption_threshold(21.0); @@ -2128,7 +2150,7 @@ fn test_purge_empty_accounts() { // so we have to stop at various points and restart to actively test. 
for pass in 0..3 { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); let (mut bank, bank_forks) = Bank::new_for_tests(&genesis_config).wrap_with_bank_forks_for_tests(); @@ -2238,7 +2260,7 @@ fn test_two_payments_to_one_party() { #[test] fn test_one_source_two_tx_one_batch() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; @@ -2268,7 +2290,7 @@ fn test_one_source_two_tx_one_batch() { #[test] fn test_one_tx_two_out_atomic_fail() { let amount = sol_to_lamports(1.); - let (genesis_config, mint_keypair) = create_genesis_config(amount); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(amount); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; @@ -2289,7 +2311,7 @@ fn test_one_tx_two_out_atomic_fail() { #[test] fn test_one_tx_two_out_atomic_pass() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; @@ -2360,7 +2382,7 @@ fn test_account_not_found() { #[test] fn test_insufficient_funds() { let mint_amount = sol_to_lamports(1.); - let (genesis_config, mint_keypair) = create_genesis_config(mint_amount); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(mint_amount); let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let amount = genesis_config.rent.minimum_balance(0); @@ -2643,20 +2665,20 @@ fn test_bank_tx_compute_unit_fee() { } = create_genesis_config_with_leader(mint, &leader, 3); genesis_config.fee_rate_governor = FeeRateGovernor::new(4, 0); // something divisible by 2 + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let expected_fee_paid = calculate_test_fee( &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), genesis_config .fee_rate_governor .create_fee_calculator() .lamports_per_signature, - &FeeStructure::default(), + &bank.fee_structure, ); let (expected_fee_collected, expected_fee_burned) = genesis_config.fee_rate_governor.burn(expected_fee_paid); - let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let capitalization = bank.capitalization(); let tx = system_transaction::transfer( @@ -2773,9 +2795,14 @@ fn test_bank_blockhash_fee_structure() { let tx = system_transaction::transfer(&mint_keypair, &key, 1, cheap_blockhash); assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(bank.get_balance(&key), 1); + let cheap_fee = calculate_test_fee( + &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + cheap_lamports_per_signature, + &bank.fee_structure, + ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), - initial_mint_balance - 1 - cheap_lamports_per_signature + initial_mint_balance - 1 - 
cheap_fee ); // Send a transfer using expensive_blockhash @@ -2784,9 +2811,14 @@ fn test_bank_blockhash_fee_structure() { let tx = system_transaction::transfer(&mint_keypair, &key, 1, expensive_blockhash); assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(bank.get_balance(&key), 1); + let expensive_fee = calculate_test_fee( + &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + expensive_lamports_per_signature, + &bank.fee_structure, + ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), - initial_mint_balance - 1 - expensive_lamports_per_signature + initial_mint_balance - 1 - expensive_fee ); } @@ -2828,7 +2860,7 @@ fn test_bank_blockhash_compute_unit_fee_structure() { let cheap_fee = calculate_test_fee( &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), cheap_lamports_per_signature, - &FeeStructure::default(), + &bank.fee_structure, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -2844,7 +2876,7 @@ fn test_bank_blockhash_compute_unit_fee_structure() { let expensive_fee = calculate_test_fee( &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), expensive_lamports_per_signature, - &FeeStructure::default(), + &bank.fee_structure, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -2955,7 +2987,7 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() { .fee_rate_governor .create_fee_calculator() .lamports_per_signature, - &FeeStructure::default(), + &bank.fee_structure, ) * 2 ) .0 @@ -2966,7 +2998,8 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() { #[test] fn test_debits_before_credits() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(2.)); + let (genesis_config, mint_keypair) = + create_genesis_config_no_tx_fee_no_rent(sol_to_lamports(2.)); let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair = Keypair::new(); let tx0 = system_transaction::transfer( @@ -3204,7 +3237,7 @@ fn test_bank_invalid_account_index() { #[test] fn test_bank_pay_to_self() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); let key1 = Keypair::new(); let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); @@ -3297,11 +3330,12 @@ fn test_bank_parent_already_processed() { /// Verifies that last ids and accounts are correctly referenced from parent #[test] fn test_bank_parent_account_spend() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); let key1 = Keypair::new(); let key2 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); + println!("==== amount {}", amount); let tx = system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash()); @@ -3314,7 +3348,8 @@ fn test_bank_parent_account_spend() { #[test] fn test_bank_hash_internal_state() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = + create_genesis_config_no_tx_fee_no_rent(sol_to_lamports(1.)); let (bank0, _) = Bank::new_with_bank_forks_for_tests(&genesis_config); let (bank1, bank_forks_1) = 
Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -3345,7 +3380,8 @@ fn test_bank_hash_internal_state() { fn test_bank_hash_internal_state_verify() { for pass in 0..3 { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = + create_genesis_config_no_tx_fee_no_rent(sol_to_lamports(1.)); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -3538,7 +3574,7 @@ fn test_bank_hash_internal_state_squash() { #[test] fn test_bank_squash() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(2.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(2.)); let key1 = Keypair::new(); let key2 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -4544,7 +4580,7 @@ fn test_status_cache_ancestors() { #[test] fn test_add_builtin() { - let (genesis_config, mint_keypair) = create_genesis_config(500); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let mut bank = Bank::new_for_tests(&genesis_config); fn mock_vote_program_id() -> Pubkey { @@ -5968,7 +6004,7 @@ fn test_pre_post_transaction_balances() { #[test] fn test_transaction_with_duplicate_accounts_in_instruction() { - let (genesis_config, mint_keypair) = create_genesis_config(500); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let mock_program_id = Pubkey::from([2u8; 32]); let bank = @@ -6025,7 +6061,7 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { #[test] fn test_transaction_with_program_ids_passed_to_programs() { - let (genesis_config, mint_keypair) = create_genesis_config(500); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let mock_program_id = Pubkey::from([2u8; 32]); let bank = @@ -6061,7 +6097,7 @@ fn test_transaction_with_program_ids_passed_to_programs() { #[test] fn test_account_ids_after_program_ids() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(500); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let from_pubkey = solana_sdk::pubkey::new_rand(); @@ -6100,7 +6136,7 @@ fn test_account_ids_after_program_ids() { #[test] fn test_incinerator() { - let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000_000); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(1_000_000_000_000); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Move to the first normal slot so normal rent behaviour applies @@ -6241,7 +6277,7 @@ fn test_program_id_as_payer() { #[test] fn test_ref_account_key_after_program_id() { - let (genesis_config, mint_keypair) = create_genesis_config(500); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let from_pubkey = solana_sdk::pubkey::new_rand(); @@ -7145,7 +7181,7 @@ fn test_bank_load_program() { #[test] fn test_bpf_loader_upgradeable_deploy_with_max_len() { - let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000); + let (genesis_config, mint_keypair) = 
create_genesis_config_no_tx_fee(1_000_000_000);
     let mut bank = Bank::new_for_tests(&genesis_config);
     bank.feature_set = Arc::new(FeatureSet::all_enabled());
     let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();
@@ -10190,7 +10226,7 @@ fn test_calculate_fee_secp256k1() {
 
 #[test]
 fn test_an_empty_instruction_without_program() {
-    let (genesis_config, mint_keypair) = create_genesis_config(1);
+    let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(1);
     let destination = solana_sdk::pubkey::new_rand();
     let mut ix = system_instruction::transfer(&mint_keypair.pubkey(), &destination, 0);
     ix.program_id = native_loader::id(); // Empty executable account chain
@@ -13004,7 +13040,7 @@ fn test_store_vote_accounts_partitioned_empty() {
 
 #[test]
 fn test_system_instruction_allocate() {
-    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0));
+    let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0));
     let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let bank_client = BankClient::new_shared(bank);
     let data_len = 2;
@@ -13062,7 +13098,7 @@ where
     let len2 = 456;
 
     // create initial bank and fund the alice account
-    let (genesis_config, mint_keypair) = create_genesis_config(mint_lamports);
+    let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(mint_lamports);
     let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let bank_client = BankClient::new_shared(bank.clone());
     bank_client
@@ -13131,7 +13167,7 @@ fn test_create_zero_lamport_without_clean() {
 
 #[test]
 fn test_system_instruction_assign_with_seed() {
-    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0));
+    let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0));
     let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let bank_client = BankClient::new_shared(bank);
@@ -13166,7 +13202,7 @@ fn test_system_instruction_assign_with_seed() {
 
 #[test]
 fn test_system_instruction_unsigned_transaction() {
-    let (genesis_config, alice_keypair) = create_genesis_config(sol_to_lamports(1.0));
+    let (genesis_config, alice_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0));
     let alice_pubkey = alice_keypair.pubkey();
     let mallory_keypair = Keypair::new();
     let mallory_pubkey = mallory_keypair.pubkey();

From 16698b19d8248d038a5d2bd430996d82fdaaff4a Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 22 Jan 2024 16:36:22 -0500
Subject: [PATCH 011/401] Replaces fs-err in snapshot_utils fns (#34883)

---
 runtime/src/snapshot_utils.rs | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 800b5f80bc8ec1..699460183fdf0a 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -928,9 +928,12 @@ pub fn archive_snapshot_package(
 
 /// Get the bank snapshots in a directory
 pub fn get_bank_snapshots(bank_snapshots_dir: impl AsRef<Path>) -> Vec<BankSnapshotInfo> {
     let mut bank_snapshots = Vec::default();
-    match fs_err::read_dir(bank_snapshots_dir.as_ref()) {
+    match fs::read_dir(&bank_snapshots_dir) {
         Err(err) => {
-            info!("Unable to read bank snapshots directory: {err}");
+            info!(
+                "Unable to read bank snapshots directory '{}': {err}",
+                bank_snapshots_dir.as_ref().display(),
+            );
         }
         Ok(paths) => paths
             .filter_map(|entry| {
@@ -1761,10 +1764,13 @@ where
     F: Fn(PathBuf) -> Result<T>,
 {
     let walk_dir = |dir: &Path| -> Vec<T> {
-        let entry_iter = fs_err::read_dir(dir);
+        let entry_iter = fs::read_dir(dir);
         match entry_iter {
             Err(err) => {
-                info!("Unable to read snapshot archives directory: {err}");
+                info!(
+                    "Unable to read snapshot archives directory '{}': {err}",
+                    dir.display(),
+                );
                 vec![]
             }
             Ok(entries) => entries
@@ -1892,9 +1898,12 @@ pub fn purge_old_snapshot_archives(
     fn remove_archives<T: SnapshotArchiveInfoGetter>(archives: &[T]) {
         for path in archives.iter().map(|a| a.path()) {
             trace!("Removing snapshot archive: {}", path.display());
-            let result = fs_err::remove_file(path);
+            let result = fs::remove_file(path);
             if let Err(err) = result {
-                info!("Failed to remove snapshot archive: {err}",);
+                info!(
+                    "Failed to remove snapshot archive '{}': {err}",
+                    path.display()
+                );
             }
         }
     }

From 8cfad7f165d5f3f039dea296892a99eb12e4ffa3 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 22 Jan 2024 17:20:00 -0500
Subject: [PATCH 012/401] Replaces fs-err in verify_snapshot_archive() (#34887)

---
 runtime/src/snapshot_utils.rs | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 699460183fdf0a..d1a04b5b77fc1c 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -2102,7 +2102,7 @@ pub fn verify_snapshot_archive(
     // collect all the appendvecs in account_paths/<account_path>/snapshot/<slot> into one directory for later comparison.
     let storages_to_verify = unpack_dir.join("storages_to_verify");
     // Create the directory if it doesn't exist
-    fs_err::create_dir_all(&storages_to_verify).unwrap();
+    fs::create_dir_all(&storages_to_verify).unwrap();
 
     let slot = slot.to_string();
     let snapshot_slot_dir = snapshots_to_verify.as_ref().join(&slot);
@@ -2112,8 +2112,8 @@
         let p1 = snapshots_to_verify.as_ref().join(&slot).join(&slot);
         let p2 = unpacked_snapshots.join(&slot).join(&slot);
         assert!(crate::serde_snapshot::compare_two_serialized_banks(&p1, &p2).unwrap());
-        fs_err::remove_file(p1).unwrap();
-        fs_err::remove_file(p2).unwrap();
+        fs::remove_file(p1).unwrap();
+        fs::remove_file(p2).unwrap();
     }
 
     // The new the status_cache file is inside the slot directory together with the snapshot file.
     let existing_unpacked_status_cache_file =
         unpacked_snapshots.join(SNAPSHOT_STATUS_CACHE_FILENAME);
     let new_unpacked_status_cache_file = unpacked_snapshots
         .join(&slot)
         .join(SNAPSHOT_STATUS_CACHE_FILENAME);
-    fs_err::rename(
+    fs::rename(
         existing_unpacked_status_cache_file,
         new_unpacked_status_cache_file,
     )
     .unwrap();
 
     let accounts_hardlinks_dir = snapshot_slot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS);
     if accounts_hardlinks_dir.is_dir() {
         // This directory contain symlinks to all <account_path>/snapshot/<slot> directories.
-        for entry in fs_err::read_dir(&accounts_hardlinks_dir).unwrap() {
-            let link_dst_path = fs_err::read_link(entry.unwrap().path()).unwrap();
+        for entry in fs::read_dir(&accounts_hardlinks_dir).unwrap() {
+            let link_dst_path = fs::read_link(entry.unwrap().path()).unwrap();
             // Copy all the files in dst_path into the storages_to_verify directory.
-            for entry in fs_err::read_dir(&link_dst_path).unwrap() {
+            for entry in fs::read_dir(&link_dst_path).unwrap() {
                 let src_path = entry.unwrap().path();
                 let dst_path = storages_to_verify.join(src_path.file_name().unwrap());
-                fs_err::copy(src_path, dst_path).unwrap();
+                fs::copy(src_path, dst_path).unwrap();
             }
         }
-        fs_err::remove_dir_all(accounts_hardlinks_dir).unwrap();
+        fs::remove_dir_all(accounts_hardlinks_dir).unwrap();
     }
 
     let version_path = snapshot_slot_dir.join(SNAPSHOT_VERSION_FILENAME);
     if version_path.is_file() {
-        fs_err::remove_file(version_path).unwrap();
+        fs::remove_file(version_path).unwrap();
     }
 
     let state_complete_path = snapshot_slot_dir.join(SNAPSHOT_STATE_COMPLETE_FILENAME);
     if state_complete_path.is_file() {
-        fs_err::remove_file(state_complete_path).unwrap();
+        fs::remove_file(state_complete_path).unwrap();
     }
 
     assert!(!dir_diff::is_different(&snapshots_to_verify, unpacked_snapshots).unwrap());
 
     // Remove the empty "accounts" directory for the directory comparison below.
     // In some test cases the directory to compare do not come from unarchiving.
     // Ignore the error when this directory does not exist.
-    _ = fs_err::remove_dir(unpack_account_dir.join("accounts"));
+    _ = fs::remove_dir(unpack_account_dir.join("accounts"));
     // Check the account entries are the same
     assert!(!dir_diff::is_different(&storages_to_verify, unpack_account_dir).unwrap());
 }

From 2f744f1639e8826376f569150f9246f79a9a928a Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 22 Jan 2024 18:18:43 -0500
Subject: [PATCH 013/401] Moves create_all_accounts_run_and_snapshot_dirs()
 into accounts-db utils (#34877)

---
 accounts-db/src/accounts_db.rs | 81 +----------
 accounts-db/src/lib.rs | 1 +
 accounts-db/src/utils.rs | 119 +++++++++++++++++++++
 ledger-tool/src/ledger_utils.rs | 10 ++-
 local-cluster/src/integration_tests.rs | 2 +-
 local-cluster/src/local_cluster.rs | 2 +-
 local-cluster/tests/local_cluster.rs | 2 +-
 runtime/src/snapshot_bank_utils.rs | 44 ++------
 runtime/src/snapshot_utils.rs | 35 ++------
 test-validator/src/lib.rs | 4 +-
 validator/src/main.rs | 4 +-
 11 files changed, 152 insertions(+), 152 deletions(-)
 create mode 100644 accounts-db/src/utils.rs

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index d93b9e29d13be7..493a8b22c9d0ae 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -68,7 +68,7 @@ use {
     rent_collector::RentCollector,
     sorted_storages::SortedStorages,
     storable_accounts::StorableAccounts,
-    u64_align,
+    u64_align, utils,
     verify_accounts_hash_in_background::VerifyAccountsHashInBackground,
 },
 blake3::traits::digest::Digest,
@@ -1197,82 +1197,6 @@ impl AccountStorageEntry {
     }
 }
 
-/// To allow generating a bank snapshot directory with full state information, we need to
-/// hardlink account appendvec files from the runtime operation directory to a snapshot
-/// hardlink directory. This is to create the run/ and snapshot sub directories for an
-/// account_path provided by the user. These two sub directories are on the same file
-/// system partition to allow hard-linking.
-pub fn create_accounts_run_and_snapshot_dirs(
-    account_dir: impl AsRef<Path>,
-) -> std::io::Result<(PathBuf, PathBuf)> {
-    let run_path = account_dir.as_ref().join("run");
-    let snapshot_path = account_dir.as_ref().join("snapshot");
-    if (!run_path.is_dir()) || (!snapshot_path.is_dir()) {
-        // If the "run/" or "snapshot" sub directories do not exist, the directory may be from
-        // an older version for which the appendvec files are at this directory. Clean up
-        // them first.
-        // This will be done only once when transitioning from an old image without run directory
-        // to this new version using run and snapshot directories.
-        // The run/ content cleanup will be done at a later point. The snapshot/ content persists
-        // across the process boot, and will be purged by the account_background_service.
-        if fs::remove_dir_all(&account_dir).is_err() {
-            delete_contents_of_path(&account_dir);
-        }
-        fs::create_dir_all(&run_path)?;
-        fs::create_dir_all(&snapshot_path)?;
-    }
-
-    Ok((run_path, snapshot_path))
-}
-
-/// For all account_paths, create the run/ and snapshot/ sub directories.
-/// If an account_path directory does not exist, create it.
-/// It returns (account_run_paths, account_snapshot_paths) or error
-pub fn create_all_accounts_run_and_snapshot_dirs(
-    account_paths: &[PathBuf],
-) -> std::io::Result<(Vec<PathBuf>, Vec<PathBuf>)> {
-    let mut run_dirs = Vec::with_capacity(account_paths.len());
-    let mut snapshot_dirs = Vec::with_capacity(account_paths.len());
-    for account_path in account_paths {
-        // create the run/ and snapshot/ sub directories for each account_path
-        let (run_dir, snapshot_dir) = create_accounts_run_and_snapshot_dirs(account_path)?;
-        run_dirs.push(run_dir);
-        snapshot_dirs.push(snapshot_dir);
-    }
-    Ok((run_dirs, snapshot_dirs))
-}
-
-/// Delete the files and subdirectories in a directory.
-/// This is useful if the process does not have permission
-/// to delete the top level directory it might be able to
-/// delete the contents of that directory.
-pub fn delete_contents_of_path(path: impl AsRef<Path>) {
-    match fs::read_dir(&path) {
-        Err(err) => {
-            warn!(
-                "Failed to delete contents of '{}': could not read dir: {err}",
-                path.as_ref().display(),
-            )
-        }
-        Ok(dir_entries) => {
-            for entry in dir_entries.flatten() {
-                let sub_path = entry.path();
-                let result = if sub_path.is_dir() {
-                    fs::remove_dir_all(&sub_path)
-                } else {
-                    fs::remove_file(&sub_path)
-                };
-                if let Err(err) = result {
-                    warn!(
-                        "Failed to delete contents of '{}': {err}",
-                        sub_path.display(),
-                    );
-                }
-            }
-        }
-    }
-}
-
 pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec<TempDir>, Vec<PathBuf>)> {
     let temp_dirs: IoResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect();
     let temp_dirs = temp_dirs?;
@@ -1280,7 +1204,8 @@ pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec<TempDir>, Vec<PathBuf>)> {
     let paths: IoResult<Vec<PathBuf>> = temp_dirs
         .iter()
         .map(|temp_dir| {
-            create_accounts_run_and_snapshot_dirs(temp_dir).map(|(run_dir, _snapshot_dir)| run_dir)
+            utils::create_accounts_run_and_snapshot_dirs(temp_dir)
+                .map(|(run_dir, _snapshot_dir)| run_dir)
         })
         .collect();
     let paths = paths?;
diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs
index 61cfcdaccb194a..74fdb8627193ee 100644
--- a/accounts-db/src/lib.rs
+++ b/accounts-db/src/lib.rs
@@ -46,6 +46,7 @@ pub mod storable_accounts;
 pub mod tiered_storage;
 pub mod transaction_error_metrics;
 pub mod transaction_results;
+pub mod utils;
 mod verify_accounts_hash_in_background;
 pub mod waitable_condvar;
diff --git a/accounts-db/src/utils.rs b/accounts-db/src/utils.rs
new file mode 100644
index 00000000000000..1e3a6855570ec0
--- /dev/null
+++ b/accounts-db/src/utils.rs
@@ -0,0 +1,119 @@
+use {
+    log::*,
+    std::{
+        fs,
+        path::{Path, PathBuf},
+    },
+};
+
+pub const ACCOUNTS_RUN_DIR: &str = "run";
+pub const ACCOUNTS_SNAPSHOT_DIR: &str = "snapshot";
+
+/// For all account_paths, create the run/ and snapshot/ sub directories.
+/// If an account_path directory does not exist, create it.
+/// It returns (account_run_paths, account_snapshot_paths) or error
+pub fn create_all_accounts_run_and_snapshot_dirs(
+    account_paths: &[PathBuf],
+) -> std::io::Result<(Vec<PathBuf>, Vec<PathBuf>)> {
+    let mut run_dirs = Vec::with_capacity(account_paths.len());
+    let mut snapshot_dirs = Vec::with_capacity(account_paths.len());
+    for account_path in account_paths {
+        // create the run/ and snapshot/ sub directories for each account_path
+        let (run_dir, snapshot_dir) = create_accounts_run_and_snapshot_dirs(account_path)?;
+        run_dirs.push(run_dir);
+        snapshot_dirs.push(snapshot_dir);
+    }
+    Ok((run_dirs, snapshot_dirs))
+}
+
+/// To allow generating a bank snapshot directory with full state information, we need to
+/// hardlink account appendvec files from the runtime operation directory to a snapshot
+/// hardlink directory. This is to create the run/ and snapshot sub directories for an
+/// account_path provided by the user. These two sub directories are on the same file
+/// system partition to allow hard-linking.
+pub fn create_accounts_run_and_snapshot_dirs(
+    account_dir: impl AsRef<Path>,
+) -> std::io::Result<(PathBuf, PathBuf)> {
+    let run_path = account_dir.as_ref().join(ACCOUNTS_RUN_DIR);
+    let snapshot_path = account_dir.as_ref().join(ACCOUNTS_SNAPSHOT_DIR);
+    if (!run_path.is_dir()) || (!snapshot_path.is_dir()) {
+        // If the "run/" or "snapshot" sub directories do not exist, the directory may be from
+        // an older version for which the appendvec files are at this directory. Clean up
+        // them first.
+        // This will be done only once when transitioning from an old image without run directory
+        // to this new version using run and snapshot directories.
+        // The run/ content cleanup will be done at a later point. The snapshot/ content persists
+        // across the process boot, and will be purged by the account_background_service.
+        if fs::remove_dir_all(&account_dir).is_err() {
+            delete_contents_of_path(&account_dir);
+        }
+        fs::create_dir_all(&run_path)?;
+        fs::create_dir_all(&snapshot_path)?;
+    }
+
+    Ok((run_path, snapshot_path))
+}
+
+/// Delete the files and subdirectories in a directory.
+/// This is useful if the process does not have permission
+/// to delete the top level directory it might be able to
+/// delete the contents of that directory.
+pub fn delete_contents_of_path(path: impl AsRef<Path>) {
+    match fs::read_dir(&path) {
+        Err(err) => {
+            warn!(
+                "Failed to delete contents of '{}': could not read dir: {err}",
+                path.as_ref().display(),
+            )
+        }
+        Ok(dir_entries) => {
+            for entry in dir_entries.flatten() {
+                let sub_path = entry.path();
+                let result = if sub_path.is_dir() {
+                    fs::remove_dir_all(&sub_path)
+                } else {
+                    fs::remove_file(&sub_path)
+                };
+                if let Err(err) = result {
+                    warn!(
+                        "Failed to delete contents of '{}': {err}",
+                        sub_path.display(),
+                    );
+                }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {super::*, tempfile::TempDir};
+
+    #[test]
+    pub fn test_create_all_accounts_run_and_snapshot_dirs() {
+        let (_tmp_dirs, account_paths): (Vec<TempDir>, Vec<PathBuf>) = (0..4)
+            .map(|_| {
+                let tmp_dir = tempfile::TempDir::new().unwrap();
+                let account_path = tmp_dir.path().join("accounts");
+                (tmp_dir, account_path)
+            })
+            .unzip();
+
+        // create the `run/` and `snapshot/` dirs, and ensure they're there
+        let (account_run_paths, account_snapshot_paths) =
+            create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap();
+        account_run_paths.iter().all(|path| path.is_dir());
+        account_snapshot_paths.iter().all(|path| path.is_dir());
+
+        // delete a `run/` and `snapshot/` dir, then re-create it
+        let account_path_first = account_paths.first().unwrap();
+        delete_contents_of_path(account_path_first);
+        assert!(account_path_first.exists());
+        assert!(!account_path_first.join(ACCOUNTS_RUN_DIR).exists());
+        assert!(!account_path_first.join(ACCOUNTS_SNAPSHOT_DIR).exists());
+
+        _ = create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap();
+        account_run_paths.iter().all(|path| path.is_dir());
+        account_snapshot_paths.iter().all(|path| path.is_dir());
+    }
+}
diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs
index 1476c2df06adbc..a5142ea2a3d65d 100644
--- a/ledger-tool/src/ledger_utils.rs
+++ b/ledger-tool/src/ledger_utils.rs
@@ -3,7 +3,9 @@ use {
     clap::{value_t, value_t_or_exit, values_t_or_exit, ArgMatches},
     crossbeam_channel::unbounded,
     log::*,
-    solana_accounts_db::hardened_unpack::open_genesis_config,
+    solana_accounts_db::{
+        hardened_unpack::open_genesis_config, utils::create_all_accounts_run_and_snapshot_dirs,
+    },
     solana_core::{
         accounts_hash_verifier::AccountsHashVerifier, validator::BlockVerificationMethod,
     },
@@ -35,8 +37,8 @@ use {
         snapshot_config::SnapshotConfig,
         snapshot_hash::StartingSnapshotHashes,
         snapshot_utils::{
-            self, clean_orphaned_account_snapshot_dirs, create_all_accounts_run_and_snapshot_dirs,
-            move_and_async_delete_path_contents, SnapshotError,
+            self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents,
+            SnapshotError,
         },
     },
     solana_sdk::{
@@ -68,7 +70,7 @@ pub(crate) enum LoadAndProcessLedgerError {
CleanOrphanedAccountSnapshotDirectories(#[source] SnapshotError), #[error("failed to create all run and snapshot directories: {0}")] - CreateAllAccountsRunAndSnapshotDirectories(#[source] SnapshotError), + CreateAllAccountsRunAndSnapshotDirectories(#[source] std::io::Error), #[error("custom accounts path is not supported with seconday blockstore access")] CustomAccountsPathUnsupported(#[source] BlockstoreError), diff --git a/local-cluster/src/integration_tests.rs b/local-cluster/src/integration_tests.rs index 26d87d0d39ad85..db394cd394adbd 100644 --- a/local-cluster/src/integration_tests.rs +++ b/local-cluster/src/integration_tests.rs @@ -17,7 +17,7 @@ use { validator_configs::*, }, log::*, - solana_accounts_db::accounts_db::create_accounts_run_and_snapshot_dirs, + solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, solana_core::{ consensus::{tower_storage::FileTowerStorage, Tower, SWITCH_FORK_THRESHOLD}, validator::{is_snapshot_config_valid, ValidatorConfig}, diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index d180a4abaf0e3a..9d1b483d85fdd3 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -6,7 +6,7 @@ use { }, itertools::izip, log::*, - solana_accounts_db::accounts_db::create_accounts_run_and_snapshot_dirs, + solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient}, solana_core::{ consensus::tower_storage::FileTowerStorage, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 74039f1a64a330..aa919e75f0366d 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -7,7 +7,7 @@ use { rand::seq::IteratorRandom, serial_test::serial, solana_accounts_db::{ - accounts_db::create_accounts_run_and_snapshot_dirs, hardened_unpack::open_genesis_config, + hardened_unpack::open_genesis_config, utils::create_accounts_run_and_snapshot_dirs, }, solana_client::thin_client::ThinClient, solana_core::{ diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 1f734fb32b70be..5494eb1beb716c 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -13,11 +13,10 @@ use { snapshot_hash::SnapshotHash, snapshot_package::{AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage}, snapshot_utils::{ - self, archive_snapshot_package, delete_contents_of_path, - deserialize_snapshot_data_file, deserialize_snapshot_data_files, get_bank_snapshot_dir, - get_highest_bank_snapshot_post, get_highest_full_snapshot_archive_info, - get_highest_incremental_snapshot_archive_info, get_snapshot_file_name, - get_storages_to_serialize, hard_link_storages_to_snapshot, + self, archive_snapshot_package, deserialize_snapshot_data_file, + deserialize_snapshot_data_files, get_bank_snapshot_dir, get_highest_bank_snapshot_post, + get_highest_full_snapshot_archive_info, get_highest_incremental_snapshot_archive_info, + get_snapshot_file_name, get_storages_to_serialize, hard_link_storages_to_snapshot, rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, verify_and_unarchive_snapshots, verify_unpacked_snapshots_dir_and_version, AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotType, SnapshotError, @@ -36,6 +35,7 @@ use { accounts_hash::AccountsHash, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, + utils::delete_contents_of_path, }, 
solana_measure::{measure, measure::Measure}, solana_sdk::{ @@ -1259,9 +1259,9 @@ mod tests { bank_forks::BankForks, genesis_utils, snapshot_utils::{ - clean_orphaned_account_snapshot_dirs, create_all_accounts_run_and_snapshot_dirs, - create_tmp_accounts_dir_for_tests, get_bank_snapshots, get_bank_snapshots_post, - get_bank_snapshots_pre, get_highest_bank_snapshot, purge_bank_snapshot, + clean_orphaned_account_snapshot_dirs, create_tmp_accounts_dir_for_tests, + get_bank_snapshots, get_bank_snapshots_post, get_bank_snapshots_pre, + get_highest_bank_snapshot, purge_bank_snapshot, purge_bank_snapshots_older_than_slot, purge_incomplete_bank_snapshots, purge_old_bank_snapshots, purge_old_bank_snapshots_at_startup, snapshot_storage_rebuilder::get_slot_and_append_vec_id, ArchiveFormat, @@ -2091,34 +2091,6 @@ mod tests { assert_eq!(snapshot.slot, 1); } - #[test] - pub fn test_create_all_accounts_run_and_snapshot_dirs() { - let (_tmp_dirs, account_paths): (Vec, Vec) = (0..4) - .map(|_| { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let account_path = tmp_dir.path().join("accounts"); - (tmp_dir, account_path) - }) - .unzip(); - - // create the `run/` and `snapshot/` dirs, and ensure they're there - let (account_run_paths, account_snapshot_paths) = - create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap(); - account_run_paths.iter().all(|path| path.is_dir()); - account_snapshot_paths.iter().all(|path| path.is_dir()); - - // delete a `run/` and `snapshot/` dir, then re-create it - let account_path_first = account_paths.first().unwrap(); - delete_contents_of_path(account_path_first); - assert!(account_path_first.exists()); - assert!(!account_path_first.join("run").exists()); - assert!(!account_path_first.join("snapshot").exists()); - - _ = create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap(); - account_run_paths.iter().all(|path| path.is_dir()); - account_snapshot_paths.iter().all(|path| path.is_dir()); - } - #[test] fn test_clean_orphaned_account_snapshot_dirs() { let genesis_config = GenesisConfig::default(); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index d1a04b5b77fc1c..371c12512a67a1 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -19,11 +19,12 @@ use { regex::Regex, solana_accounts_db::{ account_storage::AccountStorageMap, - accounts_db::{self, AccountStorageEntry, AtomicAppendVecId}, + accounts_db::{AccountStorageEntry, AtomicAppendVecId}, accounts_file::AccountsFileError, append_vec::AppendVec, hardened_unpack::{self, ParallelSelector, UnpackError}, shared_buffer_reader::{SharedBuffer, SharedBufferReader}, + utils::delete_contents_of_path, }, solana_measure::{measure, measure::Measure}, solana_sdk::{clock::Slot, hash::Hash}, @@ -44,7 +45,10 @@ use { thiserror::Error, }; #[cfg(feature = "dev-context-only-utils")] -use {hardened_unpack::UnpackedAppendVecMap, rayon::prelude::*}; +use { + hardened_unpack::UnpackedAppendVecMap, rayon::prelude::*, + solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, +}; mod archive_format; pub mod snapshot_storage_rebuilder; @@ -538,14 +542,6 @@ pub fn create_and_canonicalize_directories(directories: &[PathBuf]) -> Result) { - accounts_db::delete_contents_of_path(path) -} - /// Moves and asynchronously deletes the contents of a directory to avoid blocking on it. /// The directory is re-created after the move, and should now be empty. 
pub fn move_and_async_delete_path_contents(path: impl AsRef<Path>) {
@@ -1173,17 +1169,6 @@ fn check_deserialize_file_consumed(
    Ok(())
}

-/// For all account_paths, create the run/ and snapshot/ sub directories.
-/// If an account_path directory does not exist, create it.
-/// It returns (account_run_paths, account_snapshot_paths) or error
-pub fn create_all_accounts_run_and_snapshot_dirs(
-    account_paths: &[PathBuf],
-) -> Result<(Vec<PathBuf>, Vec<PathBuf>)> {
-    accounts_db::create_all_accounts_run_and_snapshot_dirs(account_paths).map_err(|err| {
-        SnapshotError::IoWithSource(err, "Unable to create account run and snapshot directories")
-    })
-}
-
 /// Return account path from the appendvec path after checking its format.
 fn get_account_path_from_appendvec_path(appendvec_path: &Path) -> Option<PathBuf> {
    let run_path = appendvec_path.parent()?;
@@ -2083,9 +2068,7 @@ pub fn verify_snapshot_archive(
) {
    let temp_dir = tempfile::TempDir::new().unwrap();
    let unpack_dir = temp_dir.path();
-    let unpack_account_dir = accounts_db::create_accounts_run_and_snapshot_dirs(unpack_dir)
-        .unwrap()
-        .0;
+    let unpack_account_dir = create_accounts_run_and_snapshot_dirs(unpack_dir).unwrap().0;
    untar_snapshot_in(
        snapshot_archive,
        unpack_dir,
@@ -2263,9 +2246,7 @@ pub fn should_take_incremental_snapshot(
 #[cfg(feature = "dev-context-only-utils")]
 pub fn create_tmp_accounts_dir_for_tests() -> (TempDir, PathBuf) {
    let tmp_dir = tempfile::TempDir::new().unwrap();
-    let account_dir = accounts_db::create_accounts_run_and_snapshot_dirs(&tmp_dir)
-        .unwrap()
-        .0;
+    let account_dir = create_accounts_run_and_snapshot_dirs(&tmp_dir).unwrap().0;
    (tmp_dir, account_dir)
}
diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs
index 9f994eee9a19df..f041d80e6148e3 100644
--- a/test-validator/src/lib.rs
+++ b/test-validator/src/lib.rs
@@ -4,9 +4,9 @@ use {
    crossbeam_channel::Receiver,
    log::*,
    solana_accounts_db::{
-        accounts_db::{create_accounts_run_and_snapshot_dirs, AccountsDbConfig},
-        accounts_index::AccountsIndexConfig,
+        accounts_db::AccountsDbConfig, accounts_index::AccountsIndexConfig,
        hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
+        utils::create_accounts_run_and_snapshot_dirs,
    },
    solana_cli_output::CliAccount,
    solana_client::rpc_request::MAX_MULTIPLE_ACCOUNTS,
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 0877c50869af52..986a38929494fc 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -14,6 +14,7 @@ use {
            AccountsIndexConfig, IndexLimitMb,
        },
        partitioned_rewards::TestPartitionedEpochRewards,
+        utils::create_all_accounts_run_and_snapshot_dirs,
    },
    solana_clap_utils::input_parsers::{keypair_of, keypairs_of, pubkey_of, value_of},
    solana_core::{
@@ -48,8 +49,7 @@ use {
        snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL,
        snapshot_config::{SnapshotConfig, SnapshotUsage},
        snapshot_utils::{
-            self, create_all_accounts_run_and_snapshot_dirs, create_and_canonicalize_directories,
-            ArchiveFormat, SnapshotVersion,
+            self, create_and_canonicalize_directories, ArchiveFormat, SnapshotVersion,
        },
    },
    solana_sdk::{
From 45a2a701de08428f6854d08ab6b16b512e4ff6a4 Mon Sep 17 00:00:00 2001
From: Andrew Fitzgerald
Date: Mon, 22 Jan 2024 16:21:06 -0800
Subject: [PATCH 014/401] TransactionState: add TransactionCost (#34881)

---
 .../prio_graph_scheduler.rs | 14 ++++----
 .../scheduler_controller.rs | 4 +++
 .../transaction_state.rs | 30 +++++++++++++++++
 .../transaction_state_container.rs | 33 +++++++++++++++----
 4 files changed, 69 insertions(+), 12 deletions(-)

diff --git 
a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index d397fd6b603f6b..e17f34d3223411 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -191,9 +191,7 @@ impl PrioGraphScheduler { saturating_add_assign!(num_scheduled, 1); let sanitized_transaction_ttl = transaction_state.transition_to_pending(); - let cu_limit = transaction_state - .transaction_priority_details() - .compute_unit_limit; + let cost = transaction_state.transaction_cost().sum(); let SanitizedTransactionTTL { transaction, @@ -203,7 +201,7 @@ impl PrioGraphScheduler { batches.transactions[thread_id].push(transaction); batches.ids[thread_id].push(id.id); batches.max_age_slots[thread_id].push(max_age_slot); - saturating_add_assign!(batches.total_cus[thread_id], cu_limit); + saturating_add_assign!(batches.total_cus[thread_id], cost); // If target batch size is reached, send only this batch. if batches.ids[thread_id].len() >= TARGET_NUM_TRANSACTIONS_PER_BATCH { @@ -492,10 +490,12 @@ mod tests { crate::banking_stage::consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, + solana_cost_model::cost_model::CostModel, solana_runtime::transaction_priority_details::TransactionPriorityDetails, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, pubkey::Pubkey, - signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, + compute_budget::ComputeBudgetInstruction, feature_set::FeatureSet, hash::Hash, + message::Message, pubkey::Pubkey, signature::Keypair, signer::Signer, + system_instruction, transaction::Transaction, }, std::borrow::Borrow, }; @@ -568,6 +568,7 @@ mod tests { let id = TransactionId::new(index as u64); let transaction = prioritized_tranfers(from_keypair.borrow(), to_pubkeys, lamports, priority); + let transaction_cost = CostModel::calculate_cost(&transaction, &FeatureSet::default()); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age_slot: Slot::MAX, @@ -579,6 +580,7 @@ mod tests { priority, compute_unit_limit: 1, }, + transaction_cost, ); } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index fc43bb8ad51290..225ff6a53e18c5 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -18,6 +18,7 @@ use { }, crossbeam_channel::RecvTimeoutError, solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_cost_model::cost_model::CostModel, solana_measure::measure_us, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ @@ -342,6 +343,8 @@ impl SchedulerController { { saturating_add_assign!(post_transaction_check_count, 1); let transaction_id = self.transaction_id_generator.next(); + + let transaction_cost = CostModel::calculate_cost(&transaction, &bank.feature_set); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age_slot: last_slot_in_epoch, @@ -351,6 +354,7 @@ impl SchedulerController { transaction_id, transaction_ttl, priority_details, + transaction_cost, ) { saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs 
b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index c3ea1df03d036d..650ffa1cd3ce7e 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -1,4 +1,5 @@ use { + solana_cost_model::transaction_cost::TransactionCost, solana_runtime::transaction_priority_details::TransactionPriorityDetails, solana_sdk::{slot_history::Slot, transaction::SanitizedTransaction}, }; @@ -34,11 +35,13 @@ pub(crate) enum TransactionState { Unprocessed { transaction_ttl: SanitizedTransactionTTL, transaction_priority_details: TransactionPriorityDetails, + transaction_cost: TransactionCost, forwarded: bool, }, /// The transaction is currently scheduled or being processed. Pending { transaction_priority_details: TransactionPriorityDetails, + transaction_cost: TransactionCost, forwarded: bool, }, } @@ -48,10 +51,12 @@ impl TransactionState { pub(crate) fn new( transaction_ttl: SanitizedTransactionTTL, transaction_priority_details: TransactionPriorityDetails, + transaction_cost: TransactionCost, ) -> Self { Self::Unprocessed { transaction_ttl, transaction_priority_details, + transaction_cost, forwarded: false, } } @@ -70,6 +75,18 @@ impl TransactionState { } } + /// Returns a reference to the transaction cost of the transaction. + pub(crate) fn transaction_cost(&self) -> &TransactionCost { + match self { + Self::Unprocessed { + transaction_cost, .. + } => transaction_cost, + Self::Pending { + transaction_cost, .. + } => transaction_cost, + } + } + /// Returns the priority of the transaction. pub(crate) fn priority(&self) -> u64 { self.transaction_priority_details().priority @@ -103,10 +120,12 @@ impl TransactionState { TransactionState::Unprocessed { transaction_ttl, transaction_priority_details, + transaction_cost, forwarded, } => { *self = TransactionState::Pending { transaction_priority_details, + transaction_cost, forwarded, }; transaction_ttl @@ -128,11 +147,13 @@ impl TransactionState { TransactionState::Unprocessed { .. 
} => panic!("already unprocessed"), TransactionState::Pending { transaction_priority_details, + transaction_cost, forwarded, } => { *self = Self::Unprocessed { transaction_ttl, transaction_priority_details, + transaction_cost, forwarded, } } @@ -162,6 +183,9 @@ impl TransactionState { priority: 0, compute_unit_limit: 0, }, + transaction_cost: TransactionCost::SimpleVote { + writable_accounts: vec![], + }, forwarded: false, }, ) @@ -172,6 +196,7 @@ impl TransactionState { mod tests { use { super::*, + solana_cost_model::transaction_cost::UsageCostDetails, solana_sdk::{ compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, @@ -190,6 +215,10 @@ mod tests { ]; let message = Message::new(&ixs, Some(&from_keypair.pubkey())); let tx = Transaction::new(&[&from_keypair], message, Hash::default()); + let transaction_cost = TransactionCost::Transaction(UsageCostDetails { + signature_cost: 5000, + ..UsageCostDetails::default() + }); let transaction_ttl = SanitizedTransactionTTL { transaction: SanitizedTransaction::from_transaction_for_tests(tx), @@ -202,6 +231,7 @@ mod tests { priority, compute_unit_limit: 0, }, + transaction_cost, ) } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index 10401a88eff405..7c95f843537934 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -5,6 +5,7 @@ use { }, crate::banking_stage::scheduler_messages::TransactionId, min_max_heap::MinMaxHeap, + solana_cost_model::transaction_cost::TransactionCost, solana_runtime::transaction_priority_details::TransactionPriorityDetails, std::collections::HashMap, }; @@ -125,12 +126,17 @@ impl TransactionStateContainer { transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, transaction_priority_details: TransactionPriorityDetails, + transaction_cost: TransactionCost, ) -> bool { let priority_id = TransactionPriorityId::new(transaction_priority_details.priority, transaction_id); self.id_to_transaction_state.insert( transaction_id, - TransactionState::new(transaction_ttl, transaction_priority_details), + TransactionState::new( + transaction_ttl, + transaction_priority_details, + transaction_cost, + ), ); self.push_id_into_queue(priority_id) } @@ -176,8 +182,10 @@ impl TransactionStateContainer { mod tests { use { super::*, + solana_cost_model::cost_model::CostModel, solana_sdk::{ compute_budget::ComputeBudgetInstruction, + feature_set::FeatureSet, hash::Hash, message::Message, signature::Keypair, @@ -188,7 +196,13 @@ mod tests { }, }; - fn test_transaction(priority: u64) -> (SanitizedTransactionTTL, TransactionPriorityDetails) { + fn test_transaction( + priority: u64, + ) -> ( + SanitizedTransactionTTL, + TransactionPriorityDetails, + TransactionCost, + ) { let from_keypair = Keypair::new(); let ixs = vec![ system_instruction::transfer( @@ -199,10 +213,14 @@ mod tests { ComputeBudgetInstruction::set_compute_unit_price(priority), ]; let message = Message::new(&ixs, Some(&from_keypair.pubkey())); - let tx = Transaction::new(&[&from_keypair], message, Hash::default()); - + let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( + &[&from_keypair], + message, + Hash::default(), + )); + let transaction_cost = CostModel::calculate_cost(&tx, &FeatureSet::default()); let 
transaction_ttl = SanitizedTransactionTTL { - transaction: SanitizedTransaction::from_transaction_for_tests(tx), + transaction: tx, max_age_slot: Slot::MAX, }; ( @@ -211,17 +229,20 @@ mod tests { priority, compute_unit_limit: 0, }, + transaction_cost, ) } fn push_to_container(container: &mut TransactionStateContainer, num: usize) { for id in 0..num as u64 { let priority = id; - let (transaction_ttl, transaction_priority_details) = test_transaction(priority); + let (transaction_ttl, transaction_priority_details, transaction_cost) = + test_transaction(priority); container.insert_new_transaction( TransactionId::new(id), transaction_ttl, transaction_priority_details, + transaction_cost, ); } } From 9caf9e8f17a1767091b73e862a60bc8d3ee593a0 Mon Sep 17 00:00:00 2001 From: Joe C Date: Mon, 22 Jan 2024 19:27:27 -0500 Subject: [PATCH 015/401] rpc: add tests for simulate transaction inner instructions (#34495) * rpc: add tests for simulate transaction inner instructions * update encoding * take out extra test case --- rpc/src/rpc.rs | 209 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 8bdee0df65de42..5cc5b82344e0d1 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -6294,6 +6294,215 @@ pub mod tests { assert_eq!(result, expected); } + #[test] + fn test_rpc_simulate_transaction_with_inner_instructions() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let recent_blockhash = bank.confirmed_last_blockhash(); + let RpcHandler { + ref meta, ref io, .. + } = rpc; + + let recent_slot = 123; + let mut slot_hashes = SlotHashes::default(); + slot_hashes.add(recent_slot, Hash::new_unique()); + bank.set_sysvar_for_tests(&slot_hashes); + + let lookup_table_authority = Keypair::new(); + let lookup_table_space = solana_sdk::address_lookup_table::state::LOOKUP_TABLE_META_SIZE; + let lookup_table_lamports = bank.get_minimum_balance_for_rent_exemption(lookup_table_space); + + let (instruction, lookup_table_address) = + solana_sdk::address_lookup_table::instruction::create_lookup_table( + lookup_table_authority.pubkey(), + rpc.mint_keypair.pubkey(), + recent_slot, + ); + let tx = Transaction::new_signed_with_payer( + &[instruction], + Some(&rpc.mint_keypair.pubkey()), + &[&rpc.mint_keypair], + recent_blockhash, + ); + let tx_serialized_encoded = + base64::prelude::BASE64_STANDARD.encode(serialize(&tx).unwrap()); + + // Simulation bank must be frozen + bank.freeze(); + + // `innerInstructions` not provided, should not be in response + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ "encoding": "base64" }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": null, + "err":null, + "innerInstructions": null, + "logs":[ + "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program AddressLookupTab1e1111111111111111111111111 success" + ], + "returnData":null, + "unitsConsumed":1200, + } + }, + "id": 1, + }); + let 
expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // `innerInstructions` provided as `false`, should not be in response + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ "innerInstructions": false, "encoding": "base64" }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": null, + "err":null, + "innerInstructions": null, + "logs":[ + "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program AddressLookupTab1e1111111111111111111111111 success" + ], + "returnData":null, + "unitsConsumed":1200, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // `innerInstructions` provided as `true`, should have parsed inner instructions + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ "innerInstructions": true, "encoding": "base64" }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": null, + "err":null, + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "parsed": { + "info": { + "destination": lookup_table_address.to_string(), + "lamports": lookup_table_lamports, + "source": rpc.mint_keypair.pubkey().to_string() + }, + "type": "transfer" + }, + "program": "system", + "programId": "11111111111111111111111111111111", + "stackHeight": 2 + }, + { + "parsed": { + "info": { + "account": lookup_table_address.to_string(), + "space": lookup_table_space + }, + "type": "allocate" + }, + "program": "system", + "programId": "11111111111111111111111111111111", + "stackHeight": 2 + }, + { + "parsed": { + "info": { + "account": lookup_table_address.to_string(), + "owner": "AddressLookupTab1e1111111111111111111111111" + }, + "type": "assign" + }, + "program": "system", + "programId": "11111111111111111111111111111111", + "stackHeight": 2 + } + ] + } + ], + "logs":[ + "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program AddressLookupTab1e1111111111111111111111111 success" + ], + "returnData":null, + "unitsConsumed":1200, + } + }, + "id": 1, + }); + 
let expected: Response =
+            serde_json::from_value(expected).expect("expected response deserialization");
+        let result: Response = serde_json::from_str(&res.expect("actual response"))
+            .expect("actual response deserialization");
+        assert_eq!(result, expected);
+    }
+
     #[test]
     #[should_panic(expected = "simulation bank must be frozen")]
     fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() {
From 098076f5cab81a54335330993842fc0831e1ca65 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 22 Jan 2024 19:47:58 -0500
Subject: [PATCH 016/401] Uses accounts run and snapshot dir constants (#34879)

---
 runtime/src/snapshot_utils.rs | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 371c12512a67a1..f20f5297ffce6b 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -24,7 +24,7 @@ use {
        append_vec::AppendVec,
        hardened_unpack::{self, ParallelSelector, UnpackError},
        shared_buffer_reader::{SharedBuffer, SharedBufferReader},
-        utils::delete_contents_of_path,
+        utils::{delete_contents_of_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR},
    },
    solana_measure::{measure, measure::Measure},
    solana_sdk::{clock::Slot, hash::Hash},
@@ -1175,7 +1175,7 @@ fn get_account_path_from_appendvec_path(appendvec_path: &Path) -> Option<PathBuf>
 // All appendvec files are expected to be under <account_path>/run/.
 // When generating the bank snapshot directory, they are hardlinked to <account_path>/snapshot/<slot>/
-    if run_file_name != "run" {
+    if run_file_name != ACCOUNTS_RUN_DIR {
        error!(
            "The account path {} does not have run/ as its immediate parent directory.",
            run_path.display()
@@ -1198,7 +1198,9 @@ fn get_snapshot_accounts_hardlink_dir(
        GetSnapshotAccountsHardLinkDirError::GetAccountPath(appendvec_path.to_path_buf())
    })?;

-    let snapshot_hardlink_dir = account_path.join("snapshot").join(bank_slot.to_string());
+    let snapshot_hardlink_dir = account_path
+        .join(ACCOUNTS_SNAPSHOT_DIR)
+        .join(bank_slot.to_string());

    // Use the hashset to track, to avoid checking the file system. Only set up the hardlink directory
    // and the symlink to it at the first time of seeing the account_path.
@@ -1539,7 +1541,7 @@ pub fn rebuild_storages_from_snapshot_dir(
            .ok_or_else(|| SnapshotError::InvalidAccountPath(account_snapshot_path.clone()))?
            .parent()
            .ok_or_else(|| SnapshotError::InvalidAccountPath(account_snapshot_path.clone()))?
-            .join("run");
+            .join(ACCOUNTS_RUN_DIR);
        if !account_run_paths.contains(&account_run_path) {
            // The appendvec from the bank snapshot storage does not match any of the provided account_paths set.
            // The account paths have changed so the snapshot is no longer usable.
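For context, the directory convention these constants encode can be sketched as follows. This is a minimal illustrative sketch, not part of the patch: it assumes `ACCOUNTS_RUN_DIR = "run"` and `ACCOUNTS_SNAPSHOT_DIR = "snapshot"` (values inferable from the string literals the diff replaces), and the helper names `run_dir` and `snapshot_hardlink_dir` are hypothetical:

    use std::path::{Path, PathBuf};

    // Assumed values, mirroring the bare "run"/"snapshot" literals that the
    // diff above swaps for constants from solana_accounts_db::utils.
    const ACCOUNTS_RUN_DIR: &str = "run";
    const ACCOUNTS_SNAPSHOT_DIR: &str = "snapshot";

    // Live append-vec files for an account path sit under <account_path>/run/.
    fn run_dir(account_path: &Path) -> PathBuf {
        account_path.join(ACCOUNTS_RUN_DIR)
    }

    // A bank snapshot at `bank_slot` hard-links those files under
    // <account_path>/snapshot/<bank_slot>/.
    fn snapshot_hardlink_dir(account_path: &Path, bank_slot: u64) -> PathBuf {
        account_path
            .join(ACCOUNTS_SNAPSHOT_DIR)
            .join(bank_slot.to_string())
    }

Naming the two subdirectories once, in accounts-db's utils module, keeps every consumer of the layout in agreement, which is the point of the patch above.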
From 9122193e17dd7bea32243cc6b9cfe53cb44d9741 Mon Sep 17 00:00:00 2001
From: steviez
Date: Mon, 22 Jan 2024 19:14:51 -0600
Subject: [PATCH 017/401] blockstore: Make is_orphan() a method of SlotMeta
 (#34889)

The old function's only input is a SlotMeta, so it makes sense to make
it a member function of SlotMeta

---
 ledger/src/blockstore.rs | 23 +++++++++--------------
 ledger/src/blockstore_meta.rs | 7 +++++++
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 156e29999acd71..5b1dc475b9cf04 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -1803,7 +1803,7 @@ impl Blockstore {
        };

        // Parent for slot meta should have been set by this point
-        assert!(!is_orphan(slot_meta));
+        assert!(!slot_meta.is_orphan());

        let new_consumed = if slot_meta.consumed == index {
            let mut current_index = index + 1;
@@ -3822,7 +3822,8 @@ impl Blockstore {
    let meta_backup = &slot_meta_entry.old_slot_meta;
    {
        let mut meta_mut = meta.borrow_mut();
-        let was_orphan_slot = meta_backup.is_some() && is_orphan(meta_backup.as_ref().unwrap());
+        let was_orphan_slot =
+            meta_backup.is_some() && meta_backup.as_ref().unwrap().is_orphan();

        // If:
        // 1) This is a new slot
@@ -3848,7 +3849,7 @@ impl Blockstore {

            // If the parent of `slot` is a newly inserted orphan, insert it into the orphans
            // column family
-            if is_orphan(&RefCell::borrow(&*prev_slot_meta)) {
+            if RefCell::borrow(&*prev_slot_meta).is_orphan() {
                write_batch.put::<cf::Orphans>(prev_slot, &true)?;
            }
        }
@@ -3956,7 +3957,7 @@ impl Blockstore {
    // during the chaining process, see the function find_slot_meta_in_cached_state()
    // for details. Slots that are orphans are missing a parent_slot, so we should
    // fill in the parent now that we know it.
-    if is_orphan(&meta) {
+    if meta.is_orphan() {
        meta.parent_slot = Some(parent_slot);
    }

@@ -4216,12 +4217,6 @@ fn find_slot_meta_in_cached_state<'a>(
    }
}

-fn is_orphan(meta: &SlotMeta) -> bool {
-    // If we have no parent, then this is the head of a detached chain of
-    // slots
-    meta.parent_slot.is_none()
-}
-
 // 1) Chain current_slot to the previous slot defined by prev_slot_meta
 fn chain_new_slot_to_prev_slot(
    prev_slot_meta: &mut SlotMeta,
@@ -6324,7 +6319,7 @@ pub mod tests {
            .meta(1)
            .expect("Expect database get to succeed")
            .unwrap();
-        assert!(is_orphan(&meta));
+        assert!(meta.is_orphan());
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![1]
        );
@@ -6340,12 +6335,12 @@
            .meta(1)
            .expect("Expect database get to succeed")
            .unwrap();
-        assert!(!is_orphan(&meta));
+        assert!(!meta.is_orphan());
        let meta = blockstore
            .meta(0)
            .expect("Expect database get to succeed")
            .unwrap();
-        assert!(is_orphan(&meta));
+        assert!(meta.is_orphan());
        assert_eq!(
            blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
            vec![0]
        );
@@ -6369,7 +6364,7 @@
                .meta(i)
                .expect("Expect database get to succeed")
                .unwrap();
-            assert!(!is_orphan(&meta));
+            assert!(!meta.is_orphan());
        }
        // Orphans cf is empty
        assert!(blockstore.orphans_cf.is_empty().unwrap());
diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs
index 60f8a223c8a3b6..c8b5f6cb4fee99 100644
--- a/ledger/src/blockstore_meta.rs
+++ b/ledger/src/blockstore_meta.rs
@@ -263,6 +263,13 @@ impl SlotMeta {
        Some(self.consumed) == self.last_index.map(|ix| ix + 1)
    }

+    /// Returns a boolean indicating whether this meta's parent slot is unknown.
+    /// This value being true indicates that this meta's slot is the head of a
+    /// detached chain of slots.
+ pub(crate) fn is_orphan(&self) -> bool { + self.parent_slot.is_none() + } + /// Returns a boolean indicating whether the meta is connected. pub fn is_connected(&self) -> bool { self.connected_flags.contains(ConnectedFlags::CONNECTED) From 8aa726bfdfbdb630196815204e5538ebcb3e19ff Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 22 Jan 2024 19:14:29 -0700 Subject: [PATCH 018/401] Define epoch-rewards partition data program id (#34862) * Create new program id for epoch-rewards partition data PDAs * Remove misleading repr attribute * Remove storage of HasherKind * Split up seeds --- runtime/src/bank.rs | 3 +-- sdk/program/src/epoch_rewards_partition_data.rs | 15 ++++++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 33d314fff8afd3..0c13e30ff7f6f8 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -140,7 +140,7 @@ use { }, epoch_info::EpochInfo, epoch_rewards_partition_data::{ - get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, HasherKind, + get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, PartitionData, }, epoch_schedule::EpochSchedule, @@ -3603,7 +3603,6 @@ impl Bank { let epoch_rewards_partition_data = EpochRewardsPartitionDataVersion::V0(PartitionData { num_partitions, parent_blockhash, - hasher_kind: HasherKind::Sip13, }); let address = get_epoch_rewards_partition_data_address(self.epoch()); diff --git a/sdk/program/src/epoch_rewards_partition_data.rs b/sdk/program/src/epoch_rewards_partition_data.rs index 62e75ca5112d5a..2ff511af8fb72b 100644 --- a/sdk/program/src/epoch_rewards_partition_data.rs +++ b/sdk/program/src/epoch_rewards_partition_data.rs @@ -8,7 +8,14 @@ pub enum EpochRewardsPartitionDataVersion { V0(PartitionData), } -#[repr(u8)] +impl EpochRewardsPartitionDataVersion { + pub fn get_hasher_kind(&self) -> HasherKind { + match self { + EpochRewardsPartitionDataVersion::V0(_) => HasherKind::Sip13, + } + } +} + #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] pub enum HasherKind { Sip13, @@ -21,14 +28,12 @@ pub struct PartitionData { pub num_partitions: usize, /// Blockhash of the last block of the previous epoch, used to create EpochRewardsHasher pub parent_blockhash: Hash, - /// Kind of hasher used to generate partitions - pub hasher_kind: HasherKind, } pub fn get_epoch_rewards_partition_data_address(epoch: u64) -> Pubkey { let (address, _bump_seed) = Pubkey::find_program_address( - &[b"EpochRewardsPartitionData", &epoch.to_le_bytes()], - &crate::stake::program::id(), + &[b"EpochRewards", b"PartitionData", &epoch.to_le_bytes()], + &crate::sysvar::id(), ); address } From 00b037fc25927fce84a05d779b50647c9ad515a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:25:16 +0800 Subject: [PATCH 019/401] build(deps): bump openssl from 0.10.62 to 0.10.63 (#34874) * build(deps): bump openssl from 0.10.62 to 0.10.63 Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.62 to 0.10.63. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.62...openssl-v0.10.63) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- programs/sbf/Cargo.lock | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b6a5f51ee775b..1e84b4e983229f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3571,9 +3571,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.62" +version = "0.10.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" +checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" dependencies = [ "bitflags 2.4.2", "cfg-if 1.0.0", @@ -3612,9 +3612,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.98" +version = "0.9.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" +checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" dependencies = [ "cc", "libc", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 931e70ca12d2f5..74fc608d24a3e6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3196,9 +3196,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.62" +version = "0.10.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" +checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" dependencies = [ "bitflags 2.4.2", "cfg-if 1.0.0", @@ -3237,9 +3237,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.98" +version = "0.9.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" +checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" dependencies = [ "cc", "libc", From ac1f8ca5d635eba134c151b8a8294630d533c8cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:26:11 +0800 Subject: [PATCH 020/401] build(deps): bump regex from 1.10.2 to 1.10.3 (#34871) * build(deps): bump regex from 1.10.2 to 1.10.3 Bumps [regex](https://github.com/rust-lang/regex) from 1.10.2 to 1.10.3. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.10.2...1.10.3) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e84b4e983229f..acac57a366eb16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4482,13 +4482,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.4", "regex-syntax", ] @@ -4500,9 +4500,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index ab62d337131e5d..87337ac3cc9e64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -283,7 +283,7 @@ raptorq = "1.8.0" rayon = "1.8.1" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.10.2" +regex = "1.10.3" reqwest = { version = "0.11.23", default-features = false } rolling-file = "0.2.0" rpassword = "7.3" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 74fc608d24a3e6..4afb9dc308a0e9 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3989,9 +3989,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4001,9 +4001,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" dependencies = [ "aho-corasick 1.0.1", "memchr", From b3ac7c7ae53ffa53fedf74e77fa96edbcc13c3b2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:26:57 +0800 Subject: [PATCH 021/401] build(deps): bump smallvec from 1.12.0 to 1.13.1 (#34872) * build(deps): bump smallvec from 1.12.0 to 1.13.1 Bumps [smallvec](https://github.com/servo/rust-smallvec) from 1.12.0 to 1.13.1. - [Release notes](https://github.com/servo/rust-smallvec/releases) - [Commits](https://github.com/servo/rust-smallvec/compare/v1.12.0...v1.13.1) --- updated-dependencies: - dependency-name: smallvec dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index acac57a366eb16..2ecf8a6a6756ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5173,9 +5173,9 @@ checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "smpl_jwt" diff --git a/Cargo.toml b/Cargo.toml index 87337ac3cc9e64..6f866c27ad416d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -304,7 +304,7 @@ sha2 = "0.10.8" sha3 = "0.10.4" signal-hook = "0.3.17" siphasher = "0.3.11" -smallvec = "1.12.0" +smallvec = "1.13.1" smpl_jwt = "0.7.1" socket2 = "0.5.5" soketto = "0.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 4afb9dc308a0e9..499469e4ee2c38 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4586,9 +4586,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "smpl_jwt" From 9263cc6c82a7b0eb7b685ae8fd323c7c678629c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:30:28 +0800 Subject: [PATCH 022/401] build(deps): bump proc-macro2 from 1.0.76 to 1.0.78 (#34873) * build(deps): bump proc-macro2 from 1.0.76 to 1.0.78 Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.76 to 1.0.78. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.76...1.0.78) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ecf8a6a6756ee..f890de73f4420f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4079,9 +4079,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.76" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 6f866c27ad416d..3da03d9963ff91 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -266,7 +266,7 @@ pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" prio-graph = "0.2.1" -proc-macro2 = "1.0.76" +proc-macro2 = "1.0.78" proptest = "1.4" prost = "0.11.9" prost-build = "0.11.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 499469e4ee2c38..91f57058127856 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3670,9 +3670,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.76" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] From 8ff511e8fa86e574eb9fb695af0aff3c5c11a397 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 06:46:27 -0500 Subject: [PATCH 023/401] Moves create_and_canonicalize_directories() into accounts-db utils (#34882) --- accounts-db/src/utils.rs | 14 ++++++++++++++ ledger-tool/src/args.rs | 21 ++++++++++++--------- runtime/src/snapshot_utils.rs | 12 ------------ validator/src/main.rs | 29 ++++++++++++++++------------- 4 files changed, 42 insertions(+), 34 deletions(-) diff --git a/accounts-db/src/utils.rs b/accounts-db/src/utils.rs index 1e3a6855570ec0..7a38d23b04f68a 100644 --- a/accounts-db/src/utils.rs +++ b/accounts-db/src/utils.rs @@ -85,6 +85,20 @@ pub fn delete_contents_of_path(path: impl AsRef) { } } +/// Creates directories if they do not exist, and canonicalizes the paths. 
+pub fn create_and_canonicalize_directories(
+    directories: impl IntoIterator<Item = impl AsRef<Path>>,
+) -> std::io::Result<Vec<PathBuf>> {
+    directories
+        .into_iter()
+        .map(|path| {
+            fs::create_dir_all(&path)?;
+            let path = fs::canonicalize(&path)?;
+            Ok(path)
+        })
+        .collect()
+}
+
 #[cfg(test)]
 mod tests {
    use {super::*, tempfile::TempDir};
diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs
index db39f606227273..1c6f9744437555 100644
--- a/ledger-tool/src/args.rs
+++ b/ledger-tool/src/args.rs
@@ -5,13 +5,14 @@ use {
        accounts_db::{AccountsDb, AccountsDbConfig},
        accounts_index::{AccountsIndexConfig, IndexLimitMb},
        partitioned_rewards::TestPartitionedEpochRewards,
+        utils::create_and_canonicalize_directories,
    },
    solana_clap_utils::input_parsers::pubkeys_of,
    solana_ledger::{
        blockstore_processor::ProcessOptions,
        use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup},
    },
-    solana_runtime::{runtime_config::RuntimeConfig, snapshot_utils},
+    solana_runtime::runtime_config::RuntimeConfig,
    solana_sdk::clock::Slot,
    std::{
        collections::HashSet,
@@ -116,14 +117,16 @@ pub fn get_accounts_db_config(
        .unwrap_or_else(|| {
            ledger_tool_ledger_path.join(AccountsDb::DEFAULT_ACCOUNTS_HASH_CACHE_DIR)
        });
-    let accounts_hash_cache_path =
-        snapshot_utils::create_and_canonicalize_directories(&[accounts_hash_cache_path])
-            .unwrap_or_else(|err| {
-                eprintln!("Unable to access accounts hash cache path: {err}");
-                std::process::exit(1);
-            })
-            .pop()
-            .unwrap();
+    let accounts_hash_cache_path = create_and_canonicalize_directories([&accounts_hash_cache_path])
+        .unwrap_or_else(|err| {
+            eprintln!(
+                "Unable to access accounts hash cache path '{}': {err}",
+                accounts_hash_cache_path.display(),
+            );
+            std::process::exit(1);
+        })
+        .pop()
+        .unwrap();

    AccountsDbConfig {
        index: Some(accounts_index_config),
diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index f20f5297ffce6b..76a2d8cf60793a 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -530,18 +530,6 @@ pub enum GetSnapshotAccountsHardLinkDirError {
    },
}

-/// Creates directories if they do not exist, and canonicalizes the paths.
-pub fn create_and_canonicalize_directories(directories: &[PathBuf]) -> Result<Vec<PathBuf>> {
-    directories
-        .iter()
-        .map(|path| {
-            fs_err::create_dir_all(path)?;
-            let path = fs_err::canonicalize(path)?;
-            Ok(path)
-        })
-        .collect()
-}
-
 /// Moves and asynchronously deletes the contents of a directory to avoid blocking on it.
 /// The directory is re-created after the move, and should now be empty.
pub fn move_and_async_delete_path_contents(path: impl AsRef) { diff --git a/validator/src/main.rs b/validator/src/main.rs index 986a38929494fc..c0ea702da973fd 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -14,7 +14,7 @@ use { AccountsIndexConfig, IndexLimitMb, }, partitioned_rewards::TestPartitionedEpochRewards, - utils::create_all_accounts_run_and_snapshot_dirs, + utils::{create_all_accounts_run_and_snapshot_dirs, create_and_canonicalize_directories}, }, solana_clap_utils::input_parsers::{keypair_of, keypairs_of, pubkey_of, value_of}, solana_core::{ @@ -48,9 +48,7 @@ use { runtime_config::RuntimeConfig, snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::{SnapshotConfig, SnapshotUsage}, - snapshot_utils::{ - self, create_and_canonicalize_directories, ArchiveFormat, SnapshotVersion, - }, + snapshot_utils::{self, ArchiveFormat, SnapshotVersion}, }, solana_sdk::{ clock::{Slot, DEFAULT_S_PER_SLOT}, @@ -991,9 +989,12 @@ pub fn main() { .map(BlockstoreRecoveryMode::from); // Canonicalize ledger path to avoid issues with symlink creation - let ledger_path = create_and_canonicalize_directories(&[ledger_path]) + let ledger_path = create_and_canonicalize_directories([&ledger_path]) .unwrap_or_else(|err| { - eprintln!("Unable to access ledger path: {err}"); + eprintln!( + "Unable to access ledger path '{}': {err}", + ledger_path.display(), + ); exit(1); }) .pop() @@ -1003,9 +1004,12 @@ pub fn main() { .value_of("accounts_hash_cache_path") .map(Into::into) .unwrap_or_else(|| ledger_path.join(AccountsDb::DEFAULT_ACCOUNTS_HASH_CACHE_DIR)); - let accounts_hash_cache_path = create_and_canonicalize_directories(&[accounts_hash_cache_path]) + let accounts_hash_cache_path = create_and_canonicalize_directories([&accounts_hash_cache_path]) .unwrap_or_else(|err| { - eprintln!("Unable to access accounts hash cache path: {err}"); + eprintln!( + "Unable to access accounts hash cache path '{}': {err}", + accounts_hash_cache_path.display(), + ); exit(1); }) .pop() @@ -1443,11 +1447,10 @@ pub fn main() { } else { vec![ledger_path.join("accounts")] }; - let account_paths = snapshot_utils::create_and_canonicalize_directories(&account_paths) - .unwrap_or_else(|err| { - eprintln!("Unable to access account path: {err}"); - exit(1); - }); + let account_paths = create_and_canonicalize_directories(account_paths).unwrap_or_else(|err| { + eprintln!("Unable to access account path: {err}"); + exit(1); + }); let account_shrink_paths: Option> = values_t!(matches, "account_shrink_path", String) From bb829c0bcffd24a66dc60ec90acb60d296b3a89e Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 23 Jan 2024 09:32:35 -0800 Subject: [PATCH 024/401] remove unused functions (#34895) --- .../transaction_state_container.rs | 78 ------------------- 1 file changed, 78 deletions(-) diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index 7c95f843537934..d7d79cb21b7c32 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -63,33 +63,6 @@ impl TransactionStateContainer { self.priority_queue.pop_max() } - /// Get an iterator of the top `n` transaction ids in the priority queue. - /// This will remove the ids from the queue, but not drain the remainder - /// of the queue. 
- pub(crate) fn take_top_n( - &mut self, - n: usize, - ) -> impl Iterator + '_ { - (0..n).map_while(|_| self.pop()) - } - - /// Serialize entire priority queue. `hold` indicates whether the priority queue should - /// be drained or not. - /// If `hold` is true, these ids should not be removed from the map while processing. - pub(crate) fn priority_ordered_ids(&mut self, hold: bool) -> Vec { - let priority_queue = if hold { - self.priority_queue.clone() - } else { - let capacity = self.priority_queue.capacity(); - core::mem::replace( - &mut self.priority_queue, - MinMaxHeap::with_capacity(capacity), - ) - }; - - priority_queue.into_vec_desc() - } - /// Get mutable transaction state by id. pub(crate) fn get_mut_transaction_state( &mut self, @@ -274,57 +247,6 @@ mod tests { ); } - #[test] - fn test_take_top_n() { - let mut container = TransactionStateContainer::with_capacity(5); - push_to_container(&mut container, 5); - - let taken = container.take_top_n(3).collect::>(); - assert_eq!( - taken, - vec![ - TransactionPriorityId::new(4, TransactionId::new(4)), - TransactionPriorityId::new(3, TransactionId::new(3)), - TransactionPriorityId::new(2, TransactionId::new(2)), - ] - ); - // The remainder of the queue should not be empty - assert_eq!(container.priority_queue.len(), 2); - } - - #[test] - fn test_priority_ordered_ids() { - let mut container = TransactionStateContainer::with_capacity(5); - push_to_container(&mut container, 5); - - let ordered = container.priority_ordered_ids(false); - assert_eq!( - ordered, - vec![ - TransactionPriorityId::new(4, TransactionId::new(4)), - TransactionPriorityId::new(3, TransactionId::new(3)), - TransactionPriorityId::new(2, TransactionId::new(2)), - TransactionPriorityId::new(1, TransactionId::new(1)), - TransactionPriorityId::new(0, TransactionId::new(0)), - ] - ); - assert!(container.priority_queue.is_empty()); - - push_to_container(&mut container, 5); - let ordered = container.priority_ordered_ids(true); - assert_eq!( - ordered, - vec![ - TransactionPriorityId::new(4, TransactionId::new(4)), - TransactionPriorityId::new(3, TransactionId::new(3)), - TransactionPriorityId::new(2, TransactionId::new(2)), - TransactionPriorityId::new(1, TransactionId::new(1)), - TransactionPriorityId::new(0, TransactionId::new(0)), - ] - ); - assert_eq!(container.priority_queue.len(), 5); - } - #[test] fn test_get_mut_transaction_state() { let mut container = TransactionStateContainer::with_capacity(5); From 4bd8eedc06b3ff5b35fa15b75ab716a5d817293c Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 12:42:44 -0500 Subject: [PATCH 025/401] Replaces fs-err in move_and_async_delete_path() (#34903) --- runtime/src/snapshot_utils.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 76a2d8cf60793a..36e1837fdd6295 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -570,8 +570,12 @@ pub fn move_and_async_delete_path(path: impl AsRef) { path_delete.file_name().unwrap().to_str().unwrap(), "_to_be_deleted" )); - if let Err(err) = fs_err::rename(&path, &path_delete) { - warn!("Path renaming failed, falling back to rm_dir in sync mode: {err}"); + if let Err(err) = fs::rename(&path, &path_delete) { + warn!( + "Cannot async delete, retrying in sync mode: failed to rename '{}' to '{}': {err}", + path.as_ref().display(), + path_delete.display(), + ); // Although the delete here is synchronous, we want to prevent another thread // from moving & deleting 
this directory via `move_and_async_delete_path`. lock.insert(path.as_ref().to_path_buf()); @@ -588,8 +592,10 @@ pub fn move_and_async_delete_path(path: impl AsRef) { .name("solDeletePath".to_string()) .spawn(move || { trace!("background deleting {}...", path_delete.display()); - let (_, measure_delete) = - measure!(fs_err::remove_dir_all(&path_delete).expect("background delete")); + let (result, measure_delete) = measure!(fs::remove_dir_all(&path_delete)); + if let Err(err) = result { + panic!("Failed to async delete '{}': {err}", path_delete.display()); + } trace!( "background deleting {}... Done, and{measure_delete}", path_delete.display() From a21cfbd13f2800de299ff0cc2cd0e70d36ea9105 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 12:46:31 -0500 Subject: [PATCH 026/401] Replaces fs-err in untar_snapshot_create_shared_buffer() (#34905) --- runtime/src/snapshot_utils.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 36e1837fdd6295..d1b625e5efbc04 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1977,7 +1977,16 @@ fn untar_snapshot_create_shared_buffer( snapshot_tar: &Path, archive_format: ArchiveFormat, ) -> SharedBuffer { - let open_file = || fs_err::File::open(snapshot_tar).unwrap(); + let open_file = || { + fs::File::open(snapshot_tar) + .map_err(|err| { + IoError::other(format!( + "failed to open snapshot archive '{}': {err}", + snapshot_tar.display(), + )) + }) + .unwrap() + }; match archive_format { ArchiveFormat::TarBzip2 => SharedBuffer::new(BzDecoder::new(BufReader::new(open_file()))), ArchiveFormat::TarGzip => SharedBuffer::new(GzDecoder::new(BufReader::new(open_file()))), From 94f35da8e22209ee13c329486ed805f65fcb4f0a Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 12:50:39 -0500 Subject: [PATCH 027/401] Replaces fs-err in purge_bank_snapshot() (#34906) --- runtime/src/snapshot_utils.rs | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index d1b625e5efbc04..b645157f863644 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2215,16 +2215,34 @@ fn purge_bank_snapshots<'a>(bank_snapshots: impl IntoIterator) -> Result<()> { + const FN_ERR: &str = "failed to purge bank snapshot"; let accounts_hardlinks_dir = bank_snapshot_dir.as_ref().join(SNAPSHOT_ACCOUNTS_HARDLINKS); if accounts_hardlinks_dir.is_dir() { // This directory contain symlinks to all accounts snapshot directories. // They should all be removed. - for accounts_hardlink_dir in fs_err::read_dir(accounts_hardlinks_dir)? 
{ - let accounts_hardlink_dir = fs_err::read_link(accounts_hardlink_dir?.path())?; + let read_dir = fs::read_dir(&accounts_hardlinks_dir).map_err(|err| { + IoError::other(format!( + "{FN_ERR}: failed to read accounts hardlinks dir '{}': {err}", + accounts_hardlinks_dir.display(), + )) + })?; + for entry in read_dir { + let accounts_hardlink_dir = entry?.path(); + let accounts_hardlink_dir = fs::read_link(&accounts_hardlink_dir).map_err(|err| { + IoError::other(format!( + "{FN_ERR}: failed to read symlink '{}': {err}", + accounts_hardlink_dir.display(), + )) + })?; move_and_async_delete_path(&accounts_hardlink_dir); } } - fs_err::remove_dir_all(bank_snapshot_dir)?; + fs::remove_dir_all(&bank_snapshot_dir).map_err(|err| { + IoError::other(format!( + "{FN_ERR}: failed to remove dir '{}': {err}", + bank_snapshot_dir.as_ref().display(), + )) + })?; Ok(()) } From 22fc49c444e0030a3e920d48a5d9812ceeaf2e0b Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 13:26:23 -0500 Subject: [PATCH 028/401] Replaces fs-err in rebuild_storages_from_snapshot_dir() (#34907) --- runtime/src/snapshot_utils.rs | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index b645157f863644..adc251dd49f041 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1525,11 +1525,22 @@ pub fn rebuild_storages_from_snapshot_dir( let accounts_hardlinks = bank_snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); let account_run_paths: HashSet<_> = HashSet::from_iter(account_paths); - for dir_entry in fs_err::read_dir(accounts_hardlinks)? { + let read_dir = fs::read_dir(&accounts_hardlinks).map_err(|err| { + IoError::other(format!( + "failed to read accounts hardlinks dir '{}': {err}", + accounts_hardlinks.display(), + )) + })?; + for dir_entry in read_dir { let symlink_path = dir_entry?.path(); // The symlink point to /snapshot/ which contain the account files hardlinks // The corresponding run path should be /run/ - let account_snapshot_path = fs_err::read_link(&symlink_path)?; + let account_snapshot_path = fs::read_link(&symlink_path).map_err(|err| { + IoError::other(format!( + "failed to read symlink '{}': {err}", + symlink_path.display(), + )) + })?; let account_run_path = account_snapshot_path .parent() .ok_or_else(|| SnapshotError::InvalidAccountPath(account_snapshot_path.clone()))? @@ -1543,13 +1554,25 @@ pub fn rebuild_storages_from_snapshot_dir( } // Generate hard-links to make the account files available in the main accounts/, and let the new appendvec // paths be in accounts/ - for file in fs_err::read_dir(&account_snapshot_path)? 
{
+        let read_dir = fs::read_dir(&account_snapshot_path).map_err(|err| {
+            IoError::other(format!(
+                "failed to read account snapshot dir '{}': {err}",
+                account_snapshot_path.display(),
+            ))
+        })?;
+        for file in read_dir {
            let file_path = file?.path();
            let file_name = file_path
                .file_name()
                .ok_or_else(|| SnapshotError::InvalidAppendVecPath(file_path.to_path_buf()))?;
            let dest_path = account_run_path.join(file_name);
-            fs::hard_link(&file_path, &dest_path)?;
+            fs::hard_link(&file_path, &dest_path).map_err(|err| {
+                IoError::other(format!(
+                    "failed to hard link from '{}' to '{}': {err}",
+                    file_path.display(),
+                    dest_path.display(),
+                ))
+            })?;
        }
    }

From 73d3973c7c26848c50522396962daf520be4536e Mon Sep 17 00:00:00 2001
From: Tao Zhu <82401714+tao-stones@users.noreply.github.com>
Date: Tue, 23 Jan 2024 12:59:50 -0600
Subject: [PATCH 029/401] Remove congestion multiplier from calculate fee
 (#34865)

* remove println from a test

* sync fee_structure with fee_rate_governor; remove congestion_multiplier
  from calculate_fee(), leave parameters unused for now.

---
 runtime/src/bank.rs | 11 +++++++++++
 runtime/src/bank/tests.rs | 1 -
 sdk/src/fee.rs | 12 ++----------
 3 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 0c13e30ff7f6f8..b1b34289290746 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -6660,6 +6660,17 @@ impl Bank {
            &self.runtime_config.compute_budget.unwrap_or_default(),
            false, /* debugging_features */
        ));
+
+        // genesis_config loaded by accounts_db::open_genesis_config() from ledger
+        // has its lamports_per_signature set to zero; bank sets its value correctly
+        // after the first block with a transaction in it. This is a hack to mimic
+        // the process.
+        let derived_fee_rate_governor =
+            FeeRateGovernor::new_derived(&genesis_config.fee_rate_governor, 0);
+        // new bank's fee_structure.lamports_per_signature should be in line with
+        // what's configured in GenesisConfig
+        self.fee_structure.lamports_per_signature =
+            derived_fee_rate_governor.lamports_per_signature;
    }

    pub fn set_inflation(&self, inflation: Inflation) {
diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs
index 0a8309bdd37cb5..efe8b1970dfeff 100644
--- a/runtime/src/bank/tests.rs
+++ b/runtime/src/bank/tests.rs
@@ -3335,7 +3335,6 @@ fn test_bank_parent_account_spend() {
    let key2 = Keypair::new();
    let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
    let amount = genesis_config.rent.minimum_balance(0);
-    println!("==== amount {}", amount);

    let tx =
        system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash());
diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs
index f3377b5254f0a6..2fb045aba5d73e 100644
--- a/sdk/src/fee.rs
+++ b/sdk/src/fee.rs
@@ -80,17 +80,10 @@ impl FeeStructure {
    pub fn calculate_fee(
        &self,
        message: &SanitizedMessage,
-        lamports_per_signature: u64,
+        _lamports_per_signature: u64,
        budget_limits: &FeeBudgetLimits,
        include_loaded_account_data_size_in_fee: bool,
    ) -> u64 {
-        // Fee based on compute units and signatures
-        let congestion_multiplier = if lamports_per_signature == 0 {
-            0.0 // test only
-        } else {
-            1.0 // multiplier that has no effect
-        };
-
        let signature_fee = message
            .num_signatures()
            .saturating_mul(self.lamports_per_signature);
@@ -122,12 +115,11 @@ impl FeeStructure {
                .unwrap_or_default()
        });

-        ((budget_limits
+        (budget_limits
            .prioritization_fee
            .saturating_add(signature_fee)
            .saturating_add(write_lock_fee)
            .saturating_add(compute_fee) as f64)
-            *
congestion_multiplier) .round() as u64 } } From c30db7ad92a4a01b90de39f9ef11baf184b2458d Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 14:35:10 -0500 Subject: [PATCH 030/401] Replaces fs-err in snapshot_version_from_file() (#34904) --- runtime/src/snapshot_utils.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index adc251dd49f041..c0ff24f9806233 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1606,7 +1606,13 @@ pub fn rebuild_storages_from_snapshot_dir( /// threshold, it is not opened and an error is returned. fn snapshot_version_from_file(path: impl AsRef) -> Result { // Check file size. - let file_size = fs_err::metadata(&path)?.len(); + let file_metadata = fs::metadata(&path).map_err(|err| { + IoError::other(format!( + "failed to query snapshot version file metadata '{}': {err}", + path.as_ref().display(), + )) + })?; + let file_size = file_metadata.len(); if file_size > MAX_SNAPSHOT_VERSION_FILE_SIZE { let error_message = format!( "snapshot version file too large: '{}' has {} bytes (max size is {} bytes)", @@ -1619,7 +1625,19 @@ fn snapshot_version_from_file(path: impl AsRef) -> Result { // Read snapshot_version from file. let mut snapshot_version = String::new(); - fs_err::File::open(path.as_ref()).and_then(|mut f| f.read_to_string(&mut snapshot_version))?; + let mut file = fs::File::open(&path).map_err(|err| { + IoError::other(format!( + "failed to open snapshot version file '{}': {err}", + path.as_ref().display() + )) + })?; + file.read_to_string(&mut snapshot_version).map_err(|err| { + IoError::other(format!( + "failed to read snapshot version from file '{}': {err}", + path.as_ref().display() + )) + })?; + Ok(snapshot_version.trim().to_string()) } From b150de6d1031e40d04b648a85e682c13fe81ae66 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 14:46:02 -0500 Subject: [PATCH 031/401] Replaces fs-err in clean_orphaned_account_snapshot_dirs() (#34902) * Replaces fs-err in clean_orphaned_account_snapshot_dirs() * pr: revert info message format changes --- core/src/validator.rs | 2 +- ledger-tool/src/ledger_utils.rs | 3 +-- runtime/src/snapshot_utils.rs | 29 +++++++++++++++++++++++------ 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index a8751e6d285536..4e96a3c2b5b4ff 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -607,7 +607,7 @@ impl Validator { &config.snapshot_config.bank_snapshots_dir, &config.account_snapshot_paths, ) - .map_err(|err| format!("Failed to clean orphaned account snapshot directories: {err:?}"))?; + .map_err(|err| format!("failed to clean orphaned account snapshot directories: {err}"))?; timer.stop(); info!("Cleaning orphaned account snapshot directories done. 
{timer}"); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index a5142ea2a3d65d..82797146d3a408 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -38,7 +38,6 @@ use { snapshot_hash::StartingSnapshotHashes, snapshot_utils::{ self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents, - SnapshotError, }, }, solana_sdk::{ @@ -67,7 +66,7 @@ const PROCESS_SLOTS_HELP_STRING: &str = #[derive(Error, Debug)] pub(crate) enum LoadAndProcessLedgerError { #[error("failed to clean orphaned account snapshot directories: {0}")] - CleanOrphanedAccountSnapshotDirectories(#[source] SnapshotError), + CleanOrphanedAccountSnapshotDirectories(#[source] std::io::Error), #[error("failed to create all run and snapshot directories: {0}")] CreateAllAccountsRunAndSnapshotDirectories(#[source] std::io::Error), diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index c0ff24f9806233..879dcaa6c587b8 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -32,7 +32,7 @@ use { cmp::Ordering, collections::{HashMap, HashSet}, fmt, fs, - io::{BufReader, BufWriter, Error as IoError, Read, Seek, Write}, + io::{BufReader, BufWriter, Error as IoError, Read, Result as IoResult, Seek, Write}, num::NonZeroUsize, path::{Path, PathBuf}, process::ExitStatus, @@ -616,7 +616,7 @@ pub fn move_and_async_delete_path(path: impl AsRef) { pub fn clean_orphaned_account_snapshot_dirs( bank_snapshots_dir: impl AsRef, account_snapshot_paths: &[PathBuf], -) -> Result<()> { +) -> IoResult<()> { // Create the HashSet of the account snapshot hardlink directories referenced by the snapshot dirs. // This is used to clean up any hardlinks that are no longer referenced by the snapshot dirs. let mut account_snapshot_dirs_referenced = HashSet::new(); @@ -624,20 +624,37 @@ pub fn clean_orphaned_account_snapshot_dirs( for snapshot in snapshots { let account_hardlinks_dir = snapshot.snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); // loop through entries in the snapshot_hardlink_dir, read the symlinks, add the target to the HashSet - for entry in fs_err::read_dir(&account_hardlinks_dir)? { + let read_dir = fs::read_dir(&account_hardlinks_dir).map_err(|err| { + IoError::other(format!( + "failed to read account hardlinks dir '{}': {err}", + account_hardlinks_dir.display(), + )) + })?; + for entry in read_dir { let path = entry?.path(); - let target = fs_err::read_link(&path)?; + let target = fs::read_link(&path).map_err(|err| { + IoError::other(format!( + "failed to read symlink '{}': {err}", + path.display(), + )) + })?; account_snapshot_dirs_referenced.insert(target); } } // loop through the account snapshot hardlink directories, if the directory is not in the account_snapshot_dirs_referenced set, delete it for account_snapshot_path in account_snapshot_paths { - for entry in fs_err::read_dir(account_snapshot_path)? 
{ + let read_dir = fs::read_dir(account_snapshot_path).map_err(|err| { + IoError::other(format!( + "failed to read account snapshot dir '{}': {err}", + account_snapshot_path.display(), + )) + })?; + for entry in read_dir { let path = entry?.path(); if !account_snapshot_dirs_referenced.contains(&path) { info!( - "Removing orphaned account snapshot hardlink directory: {}", + "Removing orphaned account snapshot hardlink directory '{}'...", path.display() ); move_and_async_delete_path(&path); From e74f04ad0df92640581d34f04975f3c6d10574d2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 14:48:30 -0500 Subject: [PATCH 032/401] Replaces fs-err in snapshot_utils.rs (#34908) --- runtime/src/snapshot_utils.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 879dcaa6c587b8..ff0afc1e779b0a 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -13,7 +13,6 @@ use { bzip2::bufread::BzDecoder, crossbeam_channel::Sender, flate2::read::GzDecoder, - fs_err, lazy_static::lazy_static, log::*, regex::Regex, @@ -1069,7 +1068,7 @@ fn serialize_snapshot_data_file_capped( where F: FnOnce(&mut BufWriter) -> Result<()>, { - let data_file = fs_err::File::create(data_file_path)?.into(); + let data_file = fs::File::create(data_file_path)?; let mut data_file_stream = BufWriter::new(data_file); serializer(&mut data_file_stream)?; data_file_stream.flush()?; @@ -1140,7 +1139,7 @@ fn create_snapshot_data_file_stream( snapshot_root_file_path: impl AsRef, maximum_file_size: u64, ) -> Result<(u64, BufReader)> { - let snapshot_file_size = fs_err::metadata(&snapshot_root_file_path)?.len(); + let snapshot_file_size = fs::metadata(&snapshot_root_file_path)?.len(); if snapshot_file_size > maximum_file_size { let error_message = format!( @@ -1152,8 +1151,8 @@ fn create_snapshot_data_file_stream( return Err(IoError::other(error_message).into()); } - let snapshot_data_file = fs_err::File::open(snapshot_root_file_path.as_ref())?; - let snapshot_data_file_stream = BufReader::new(snapshot_data_file.into()); + let snapshot_data_file = fs::File::open(snapshot_root_file_path)?; + let snapshot_data_file_stream = BufReader::new(snapshot_data_file); Ok((snapshot_file_size, snapshot_data_file_stream)) } @@ -1436,16 +1435,16 @@ fn create_snapshot_meta_files_for_unarchived_snapshot(unpack_dir: impl AsRef Date: Tue, 23 Jan 2024 15:28:57 -0600 Subject: [PATCH 033/401] Update changelog in preparation for creating new v1.18 branch (#34912) --- CHANGELOG.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 028c3bbdaa4766..99d52beaa0b83e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,12 +8,14 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm and follows a [Backwards Compatibility Policy](https://docs.solanalabs.com/backwards-compatibility) Release channels have their own copy of this changelog: -* [edge - v1.18](#edge-channel) -* [beta - v1.17](https://github.com/solana-labs/solana/blob/v1.17/CHANGELOG.md) -* [stable - v1.16](https://github.com/solana-labs/solana/blob/v1.16/CHANGELOG.md) +* [edge - v2.0](#edge-channel) +* [beta - v1.18](https://github.com/solana-labs/solana/blob/v1.18/CHANGELOG.md) +* [stable - v1.17](https://github.com/solana-labs/solana/blob/v1.17/CHANGELOG.md) -## [1.18.0] - Unreleased +## [2.0.0] - Unreleased + +## [1.18.0] * Changes * Added a github check to support `changelog` label * The 
default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) From 7ebe0bccd69abf0c5cad253bb916e59d5fbb23a4 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 23 Jan 2024 15:19:21 -0700 Subject: [PATCH 034/401] Fix epoch rewards partition-data program owner (#34913) Fix account program owner --- runtime/src/bank.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b1b34289290746..38de7ff18de594 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3611,7 +3611,7 @@ impl Bank { let new_account = AccountSharedData::new_data( account_balance, &epoch_rewards_partition_data, - &solana_sdk::stake::program::id(), + &solana_sdk::sysvar::id(), ) .unwrap(); self.store_account_and_update_capitalization(&address, &new_account); From 3303c2566c4257410d073fe22d98fa4598248ec0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 17:25:03 -0500 Subject: [PATCH 035/401] Removes fs-err dependency (#34911) --- Cargo.lock | 10 ---------- Cargo.toml | 1 - programs/sbf/Cargo.lock | 10 ---------- runtime/Cargo.toml | 1 - 4 files changed, 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f890de73f4420f..6b09bbe58b23cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2028,15 +2028,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" -[[package]] -name = "fs-err" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" -dependencies = [ - "autocfg", -] - [[package]] name = "fs_extra" version = "1.3.0" @@ -7033,7 +7024,6 @@ dependencies = [ "ed25519-dalek", "flate2", "fnv", - "fs-err", "im", "index_list", "itertools", diff --git a/Cargo.toml b/Cargo.toml index 3da03d9963ff91..b4782eed20070d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,6 @@ fast-math = "0.1" fd-lock = "3.0.13" flate2 = "1.0.28" fnv = "1.0.7" -fs-err = "2.11.0" fs_extra = "1.3.0" futures = "0.3.30" futures-util = "0.3.29" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 91f57058127856..2dd44c1385dde1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1732,15 +1732,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" -[[package]] -name = "fs-err" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" -dependencies = [ - "autocfg", -] - [[package]] name = "fs_extra" version = "1.3.0" @@ -5738,7 +5729,6 @@ dependencies = [ "dir-diff", "flate2", "fnv", - "fs-err", "im", "index_list", "itertools", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index f0509811497037..f781067592d7e2 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -24,7 +24,6 @@ dashmap = { workspace = true, features = ["rayon", "raw-api"] } dir-diff = { workspace = true } flate2 = { workspace = true } fnv = { workspace = true } -fs-err = { workspace = true } im = { workspace = true, features = ["rayon", "serde"] } index_list = { workspace = true } itertools = { workspace = true } From 1810feadc283b44655be33e7665941eaabd1522f Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Tue, 23 Jan 2024 14:57:53 -0800 Subject: [PATCH 036/401] 
[TieredStorage] In-memory struct for writing OwnersBlock (#34853) #### Problem To write the owners-block, it requires an in-memory struct that maintains a set of unique owner addresses while providing a look-up function to obtain the OwnerOffset with the specified owner address. #### Summary of Changes This PR adds OwnersTable, the in-memory struct that maintains a set of unique owner addresses while providing a look-up function to obtain the OwnerOffset with the specified owner address. #### Test Plan A new unit-test is added. --- Cargo.lock | 1 + accounts-db/Cargo.toml | 1 + accounts-db/src/tiered_storage/hot.rs | 20 ++++++-- accounts-db/src/tiered_storage/owners.rs | 61 ++++++++++++++++++++++-- programs/sbf/Cargo.lock | 1 + 5 files changed, 76 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b09bbe58b23cc..b257708f91495c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5307,6 +5307,7 @@ dependencies = [ "fnv", "im", "index_list", + "indexmap 2.1.0", "itertools", "lazy_static", "libsecp256k1", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 567a901da90a88..80559f5fb27821 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -23,6 +23,7 @@ flate2 = { workspace = true } fnv = { workspace = true } im = { workspace = true, features = ["rayon", "serde"] } index_list = { workspace = true } +indexmap = { workspace = true } itertools = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index ace6649ba26f49..3bb5f54e470b3c 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -470,7 +470,7 @@ pub mod tests { hot::{HotAccountMeta, HotStorageReader}, index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - owners::OwnersBlockFormat, + owners::{OwnersBlockFormat, OwnersTable}, }, assert_matches::assert_matches, memoffset::offset_of, @@ -823,9 +823,13 @@ pub mod tests { { let file = TieredStorageFile::new_writable(&path).unwrap(); + let mut owners_table = OwnersTable::default(); + addresses.iter().for_each(|owner_address| { + owners_table.insert(owner_address); + }); footer .owners_block_format - .write_owners_block(&file, &addresses) + .write_owners_block(&file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer @@ -893,9 +897,13 @@ pub mod tests { // the owners_block_offset set to the end of the accounts blocks. 
footer.owners_block_offset = footer.index_block_offset; + let mut owners_table = OwnersTable::default(); + owner_addresses.iter().for_each(|owner_address| { + owners_table.insert(owner_address); + }); footer .owners_block_format - .write_owners_block(&file, &owner_addresses) + .write_owners_block(&file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer @@ -1029,9 +1037,13 @@ pub mod tests { // write owners block footer.owners_block_offset = current_offset as u64; + let mut owners_table = OwnersTable::default(); + owners.iter().for_each(|owner_address| { + owners_table.insert(owner_address); + }); footer .owners_block_format - .write_owners_block(&file, &owners) + .write_owners_block(&file, &owners_table) .unwrap(); footer.write_footer_block(&file).unwrap(); diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index d8a963ce143401..45bfafe1645430 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -3,6 +3,7 @@ use { file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod, TieredStorageResult, }, + indexmap::set::IndexSet, memmap2::Mmap, solana_sdk::pubkey::Pubkey, }; @@ -43,13 +44,13 @@ impl OwnersBlockFormat { pub fn write_owners_block( &self, file: &TieredStorageFile, - addresses: &[Pubkey], + owners_table: &OwnersTable, ) -> TieredStorageResult { match self { Self::AddressesOnly => { let mut bytes_written = 0; - for address in addresses { - bytes_written += file.write_pod(address)?; + for address in &owners_table.owners_set { + bytes_written += file.write_pod(*address)?; } Ok(bytes_written) @@ -77,6 +78,27 @@ impl OwnersBlockFormat { } } +/// The in-memory representation of owners block for write. +/// It manages a set of unique addresses of account owners. +#[derive(Debug, Default)] +pub struct OwnersTable<'a> { + owners_set: IndexSet<&'a Pubkey>, +} + +/// OwnersBlock is persisted as a consecutive bytes of pubkeys without any +/// meta-data. For each account meta, it has a owner_offset field to +/// access its owner's address in the OwnersBlock. +impl<'a> OwnersTable<'a> { + /// Add the specified pubkey as the owner into the OwnersWriterTable + /// if the specified pubkey has not existed in the OwnersWriterTable + /// yet. In any case, the function returns its OwnerOffset. + pub fn insert(&mut self, pubkey: &'a Pubkey) -> OwnerOffset { + let (offset, _existed) = self.owners_set.insert_full(pubkey); + + OwnerOffset(offset as u32) + } +} + #[cfg(test)] mod tests { use { @@ -105,9 +127,13 @@ mod tests { { let file = TieredStorageFile::new_writable(&path).unwrap(); + let mut owners_table = OwnersTable::default(); + addresses.iter().for_each(|owner_address| { + owners_table.insert(owner_address); + }); footer .owners_block_format - .write_owners_block(&file, &addresses) + .write_owners_block(&file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer @@ -128,4 +154,31 @@ mod tests { ); } } + + #[test] + fn test_owners_table() { + let mut owners_table = OwnersTable::default(); + const NUM_OWNERS: usize = 99; + + let addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_OWNERS) + .collect(); + + // as we insert sequentially, we expect each entry has same OwnerOffset + // as its index inside the Vector. 
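+        // (indexmap's IndexSet preserves insertion order and insert_full()
+        // returns that stable index, which is why sequential insertion
+        // yields OwnerOffset values equal to the vector indices.)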
+ for (i, address) in addresses.iter().enumerate() { + assert_eq!(owners_table.insert(address), OwnerOffset(i as u32)); + } + + let cloned_addresses = addresses.clone(); + + // insert again and expect the same OwnerOffset + for (i, address) in cloned_addresses.iter().enumerate() { + assert_eq!(owners_table.insert(address), OwnerOffset(i as u32)); + } + + // make sure the size of the resulting owner table is the same + // as the input + assert_eq!(owners_table.owners_set.len(), addresses.len()); + } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2dd44c1385dde1..32f70ea3c7f9dc 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4672,6 +4672,7 @@ dependencies = [ "fnv", "im", "index_list", + "indexmap 2.1.0", "itertools", "lazy_static", "log", From bfbe03a5364610bea9805f22feef0b5d869aa17a Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 23 Jan 2024 22:29:21 -0500 Subject: [PATCH 037/401] Updates mergify backport actions for new minor version (#34921) --- .mergify.yml | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 68c6426cf37f3b..ef576943d5d635 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -95,9 +95,9 @@ pull_request_rules: - automerge comment: message: automerge label removed due to a CI failure - - name: v1.16 feature-gate backport + - name: v1.17 feature-gate backport conditions: - - label=v1.16 + - label=v1.17 - label=feature-gate actions: backport: @@ -108,10 +108,10 @@ pull_request_rules: labels: - feature-gate branches: - - v1.16 - - name: v1.16 non-feature-gate backport + - v1.17 + - name: v1.17 non-feature-gate backport conditions: - - label=v1.16 + - label=v1.17 - label!=feature-gate actions: backport: @@ -119,10 +119,10 @@ pull_request_rules: title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true branches: - - v1.16 - - name: v1.16 backport warning comment + - v1.17 + - name: v1.17 backport warning comment conditions: - - label=v1.16 + - label=v1.17 actions: comment: message: > @@ -133,9 +133,9 @@ pull_request_rules: refactoring, plumbing, cleanup, etc that are not strictly necessary to achieve the goal. Any of the latter should go only into master and ride the normal stabilization schedule. 
- - name: v1.17 feature-gate backport + - name: v1.18 feature-gate backport conditions: - - label=v1.17 + - label=v1.18 - label=feature-gate actions: backport: @@ -145,10 +145,10 @@ pull_request_rules: labels: - feature-gate branches: - - v1.17 - - name: v1.17 non-feature-gate backport + - v1.18 + - name: v1.18 non-feature-gate backport conditions: - - label=v1.17 + - label=v1.18 - label!=feature-gate actions: backport: @@ -156,10 +156,10 @@ pull_request_rules: title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true branches: - - v1.17 - - name: v1.17 backport warning comment + - v1.18 + - name: v1.18 backport warning comment conditions: - - label=v1.17 + - label=v1.18 actions: comment: message: > From bd103865df740f46fe31e8e9acae30b35f23cc0b Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Wed, 24 Jan 2024 12:46:16 +0900 Subject: [PATCH 038/401] Introduce primitive threading in unified scheduler (#34676) * Introduce primitive threading in unified scheduler * Make the internal struct ExecutedTask not pub * Improve wording a bit * Explain scheduler main loop's overhead sensitivity * Improve wording a bit * Define ChainedChannel{Sender, Receiver} wrappers * Clean up a bit * Use derivative to avoid manual Clone impl * Clarify comment * Remove extra whitespace in comment * Remove unneeded dyn trait for ChainedChannel * Remove the accumulator thread for now * Fix typo * Use unimplemented!() to convey intention better --- Cargo.lock | 6 + Cargo.toml | 1 + programs/sbf/Cargo.lock | 7 + unified-scheduler-logic/Cargo.toml | 3 + unified-scheduler-logic/src/lib.rs | 21 +- unified-scheduler-pool/Cargo.toml | 4 + unified-scheduler-pool/src/lib.rs | 553 ++++++++++++++++++++++++++--- 7 files changed, 535 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b257708f91495c..a7863f06e62754 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7534,12 +7534,18 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" version = "1.18.0" +dependencies = [ + "solana-sdk", +] [[package]] name = "solana-unified-scheduler-pool" version = "1.18.0" dependencies = [ "assert_matches", + "crossbeam-channel", + "derivative", + "log", "solana-ledger", "solana-logger", "solana-program-runtime", diff --git a/Cargo.toml b/Cargo.toml index b4782eed20070d..ba7d88e75dc724 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -185,6 +185,7 @@ ctrlc = "3.4.2" curve25519-dalek = "3.2.1" dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } +derivative = "2.2.0" dialoguer = "0.10.4" digest = "0.10.7" dir-diff = "0.3.3" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 32f70ea3c7f9dc..dcf8c5cc3d597e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6534,11 +6534,18 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" version = "1.18.0" +dependencies = [ + "solana-sdk", +] [[package]] name = "solana-unified-scheduler-pool" version = "1.18.0" dependencies = [ + "assert_matches", + "crossbeam-channel", + "derivative", + "log", "solana-ledger", "solana-program-runtime", "solana-runtime", diff --git a/unified-scheduler-logic/Cargo.toml b/unified-scheduler-logic/Cargo.toml index 764bb0192f5632..b2e80c79c7a08f 100644 --- a/unified-scheduler-logic/Cargo.toml +++ b/unified-scheduler-logic/Cargo.toml @@ -8,3 +8,6 @@ repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } + +[dependencies] +solana-sdk = { workspace = true } 
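The unified-scheduler-logic diff below adds a small `Task` type that pairs a
sanitized transaction with its index within the entry. As a rough sketch of the
intended call pattern (illustrative only; this `into_tasks` helper is
hypothetical and not part of the patch):

    use solana_sdk::transaction::SanitizedTransaction;
    use solana_unified_scheduler_logic::Task;

    // Wrap each transaction into a Task, remembering its original
    // position in the entry via `index`.
    fn into_tasks(transactions: Vec<SanitizedTransaction>) -> Vec<Task> {
        transactions
            .into_iter()
            .enumerate()
            .map(|(index, transaction)| Task::create_task(transaction, index))
            .collect()
    }

The pool's `schedule_execution` builds tasks the same way, via
`Task::create_task(transaction.clone(), index)`.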
diff --git a/unified-scheduler-logic/src/lib.rs b/unified-scheduler-logic/src/lib.rs index 73a5a82f6d3a7b..997c6c1745a7c9 100644 --- a/unified-scheduler-logic/src/lib.rs +++ b/unified-scheduler-logic/src/lib.rs @@ -1 +1,20 @@ -// This file will be populated with actual implementation later. +use solana_sdk::transaction::SanitizedTransaction; + +pub struct Task { + transaction: SanitizedTransaction, + index: usize, +} + +impl Task { + pub fn create_task(transaction: SanitizedTransaction, index: usize) -> Self { + Task { transaction, index } + } + + pub fn task_index(&self) -> usize { + self.index + } + + pub fn transaction(&self) -> &SanitizedTransaction { + &self.transaction + } +} diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml index 213bc5bb86c0ef..7626215b1e1126 100644 --- a/unified-scheduler-pool/Cargo.toml +++ b/unified-scheduler-pool/Cargo.toml @@ -10,6 +10,10 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +assert_matches = { workspace = true } +crossbeam-channel = { workspace = true } +derivative = { workspace = true } +log = { workspace = true } solana-ledger = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 10cb5309e5e01d..deae3697807705 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -9,6 +9,10 @@ //! `solana-ledger`'s helper function called `execute_batch()`. use { + assert_matches::assert_matches, + crossbeam_channel::{select, unbounded, Receiver, SendError, Sender}, + derivative::Derivative, + log::*, solana_ledger::blockstore_processor::{ execute_batch, TransactionBatchWithIndexes, TransactionStatusSender, }, @@ -23,6 +27,7 @@ use { prioritization_fee_cache::PrioritizationFeeCache, }, solana_sdk::transaction::{Result, SanitizedTransaction}, + solana_unified_scheduler_logic::Task, solana_vote::vote_sender_types::ReplayVoteSender, std::{ fmt::Debug, @@ -31,6 +36,7 @@ use { atomic::{AtomicU64, Ordering::Relaxed}, Arc, Mutex, Weak, }, + thread::{self, JoinHandle}, }, }; @@ -194,6 +200,155 @@ impl TaskHandler for DefaultTaskHandler { } } +struct ExecutedTask { + task: Task, + result_with_timings: ResultWithTimings, +} + +impl ExecutedTask { + fn new_boxed(task: Task) -> Box { + Box::new(Self { + task, + result_with_timings: initialized_result_with_timings(), + }) + } +} + +// A very tiny generic message type to signal about opening and closing of subchannels, which are +// logically segmented series of Payloads (P1) over a single continuous time-span, potentially +// carrying some subchannel metadata (P2) upon opening a new subchannel. +// Note that the above properties can be upheld only when this is used inside MPSC or SPSC channels +// (i.e. the consumer side needs to be single threaded). For the multiple consumer cases, +// ChainedChannel can be used instead. +enum SubchanneledPayload { + Payload(P1), + OpenSubchannel(P2), + CloseSubchannel, +} + +type NewTaskPayload = SubchanneledPayload; + +// A tiny generic message type to synchronize multiple threads everytime some contextual data needs +// to be switched (ie. SchedulingContext), just using a single communication channel. +// +// Usually, there's no way to prevent one of those threads from mixing current and next contexts +// while processing messages with a multiple-consumer channel. 
A condvar or other +// out-of-bound mechanism is needed to notify about switching of contextual data. That's because +// there's no way to block those threads reliably on such a switching event just with a channel. +// +// However, if the number of consumer can be determined, this can be accomplished just over a +// single channel, which even carries an in-bound control meta-message with the contexts. The trick +// is that identical meta-messages as many as the number of threads are sent over the channel, +// along with new channel receivers to be used (hence the name of _chained_). Then, the receiving +// thread drops the old channel and is now blocked on receiving from the new channel. In this way, +// this switching can happen exactly once for each thread. +// +// Overall, this greatly simplifies the code, reduces CAS/syscall overhead per messaging to the +// minimum at the cost of a single channel recreation per switching. Needless to say, such an +// allocation can be amortized to be negligible. +mod chained_channel { + use super::*; + + // hide variants by putting this inside newtype + enum ChainedChannelPrivate { + Payload(P), + ContextAndChannel(C, Receiver>), + } + + pub(super) struct ChainedChannel(ChainedChannelPrivate); + + impl ChainedChannel { + fn chain_to_new_channel(context: C, receiver: Receiver) -> Self { + Self(ChainedChannelPrivate::ContextAndChannel(context, receiver)) + } + } + + pub(super) struct ChainedChannelSender { + sender: Sender>, + } + + impl ChainedChannelSender { + fn new(sender: Sender>) -> Self { + Self { sender } + } + + pub(super) fn send_payload( + &self, + payload: P, + ) -> std::result::Result<(), SendError>> { + self.sender + .send(ChainedChannel(ChainedChannelPrivate::Payload(payload))) + } + + pub(super) fn send_chained_channel( + &mut self, + context: C, + count: usize, + ) -> std::result::Result<(), SendError>> { + let (chained_sender, chained_receiver) = crossbeam_channel::unbounded(); + for _ in 0..count { + self.sender.send(ChainedChannel::chain_to_new_channel( + context.clone(), + chained_receiver.clone(), + ))? + } + self.sender = chained_sender; + Ok(()) + } + } + + // P doesn't need to be `: Clone`, yet rustc derive can't handle it. + // see https://github.com/rust-lang/rust/issues/26925 + #[derive(Derivative)] + #[derivative(Clone(bound = "C: Clone"))] + pub(super) struct ChainedChannelReceiver { + receiver: Receiver>, + context: C, + } + + impl ChainedChannelReceiver { + fn new(receiver: Receiver>, initial_context: C) -> Self { + Self { + receiver, + context: initial_context, + } + } + + pub(super) fn context(&self) -> &C { + &self.context + } + + pub(super) fn for_select(&self) -> &Receiver> { + &self.receiver + } + + pub(super) fn after_select(&mut self, message: ChainedChannel) -> Option
<P>
{ + match message.0 { + ChainedChannelPrivate::Payload(payload) => Some(payload), + ChainedChannelPrivate::ContextAndChannel(context, channel) => { + self.context = context; + self.receiver = channel; + None + } + } + } + } + + pub(super) fn unbounded( + initial_context: C, + ) -> (ChainedChannelSender, ChainedChannelReceiver) { + let (sender, receiver) = crossbeam_channel::unbounded(); + ( + ChainedChannelSender::new(sender), + ChainedChannelReceiver::new(receiver, initial_context), + ) + } +} + +fn initialized_result_with_timings() -> ResultWithTimings { + (Ok(()), ExecuteTimings::default()) +} + // Currently, simplest possible implementation (i.e. single-threaded) // this will be replaced with more proper implementation... // not usable at all, especially for mainnet-beta @@ -201,27 +356,306 @@ impl TaskHandler for DefaultTaskHandler { pub struct PooledScheduler { inner: PooledSchedulerInner, context: SchedulingContext, - result_with_timings: Mutex, } #[derive(Debug)] pub struct PooledSchedulerInner, TH: TaskHandler> { - id: SchedulerId, + thread_manager: ThreadManager, +} + +// This type manages the OS threads for scheduling and executing transactions. The term +// `session` is consistently used to mean a group of Tasks scoped under a single SchedulingContext. +// This is equivalent to a particular bank for block verification. However, new terms is introduced +// here to mean some continuous time over multiple continuous banks/slots for the block production, +// which is planned to be implemented in the future. +#[derive(Debug)] +struct ThreadManager, TH: TaskHandler> { + scheduler_id: SchedulerId, pool: Arc>, + handler_count: usize, + new_task_sender: Sender, + new_task_receiver: Receiver, + session_result_sender: Sender>, + session_result_receiver: Receiver>, + session_result_with_timings: Option, + scheduler_thread: Option>, + handler_threads: Vec>, } impl PooledScheduler { fn do_spawn(pool: Arc>, initial_context: SchedulingContext) -> Self { + // we're hard-coding the number of handler thread to 1, meaning this impl is currently + // single-threaded still. 
+ let handler_count = 1; + Self::from_inner( PooledSchedulerInner:: { - id: pool.new_scheduler_id(), - pool, + thread_manager: ThreadManager::new(pool, handler_count), }, initial_context, ) } } +impl, TH: TaskHandler> ThreadManager { + fn new(pool: Arc>, handler_count: usize) -> Self { + let (new_task_sender, new_task_receiver) = unbounded(); + let (session_result_sender, session_result_receiver) = unbounded(); + + Self { + scheduler_id: pool.new_scheduler_id(), + pool, + handler_count, + new_task_sender, + new_task_receiver, + session_result_sender, + session_result_receiver, + session_result_with_timings: None, + scheduler_thread: None, + handler_threads: Vec::with_capacity(handler_count), + } + } + + fn execute_task_with_handler( + bank: &Arc, + executed_task: &mut Box, + handler_context: &HandlerContext, + ) { + debug!("handling task at {:?}", thread::current()); + TH::handle( + &mut executed_task.result_with_timings.0, + &mut executed_task.result_with_timings.1, + bank, + executed_task.task.transaction(), + executed_task.task.task_index(), + handler_context, + ); + } + + fn accumulate_result_with_timings( + (result, timings): &mut ResultWithTimings, + executed_task: Box, + ) { + match executed_task.result_with_timings.0 { + Ok(()) => {} + Err(error) => { + error!("error is detected while accumulating....: {error:?}"); + // Override errors intentionally for simplicity, not retaining the + // first error unlike the block verification in the + // blockstore_processor. This will be addressed with more + // full-fledged impl later. + *result = Err(error); + } + } + timings.accumulate(&executed_task.result_with_timings.1); + } + + fn take_session_result_with_timings(&mut self) -> ResultWithTimings { + self.session_result_with_timings.take().unwrap() + } + + fn put_session_result_with_timings(&mut self, result_with_timings: ResultWithTimings) { + assert_matches!( + self.session_result_with_timings + .replace(result_with_timings), + None + ); + } + + fn start_threads(&mut self, context: &SchedulingContext) { + let (mut runnable_task_sender, runnable_task_receiver) = + chained_channel::unbounded::(context.clone()); + let (finished_task_sender, finished_task_receiver) = unbounded::>(); + + let mut result_with_timings = self.session_result_with_timings.take(); + + // High-level flow of new tasks: + // 1. the replay stage thread send a new task. + // 2. the scheduler thread accepts the task. + // 3. the scheduler thread dispatches the task after proper locking. + // 4. the handler thread processes the dispatched task. + // 5. the handler thread reply back to the scheduler thread as an executed task. + // 6. the scheduler thread post-processes the executed task. + let scheduler_main_loop = || { + let handler_count = self.handler_count; + let session_result_sender = self.session_result_sender.clone(); + let new_task_receiver = self.new_task_receiver.clone(); + + let mut session_ending = false; + let mut active_task_count: usize = 0; + + // Now, this is the main loop for the scheduler thread, which is a special beast. + // + // That's because it could be the most notable bottleneck of throughput in the future + // when there are ~100 handler threads. Unified scheduler's overall throughput is + // largely dependant on its ultra-low latency characteristic, which is the most + // important design goal of the scheduler in order to reduce the transaction + // confirmation latency for end users. 
+ // + // Firstly, the scheduler thread must handle incoming messages from thread(s) owned by + // the replay stage or the banking stage. It also must handle incoming messages from + // the multi-threaded handlers. This heavily-multi-threaded whole processing load must + // be coped just with the single-threaded scheduler, to attain ideal cpu cache + // friendliness and main memory bandwidth saturation with its shared-nothing + // single-threaded account locking implementation. In other words, the per-task + // processing efficiency of the main loop codifies the upper bound of horizontal + // scalability of the unified scheduler. + // + // Moreover, the scheduler is designed to handle tasks without batching at all in the + // pursuit of saturating all of the handler threads with maximally-fine-grained + // concurrency density for throughput as the second design goal. This design goal + // relies on the assumption that there's no considerable penalty arising from the + // unbatched manner of processing. + // + // Note that this assumption isn't true as of writing. The current code path + // underneath execute_batch() isn't optimized for unified scheduler's load pattern (ie. + // batches just with a single transaction) at all. This will be addressed in the + // future. + // + // These two key elements of the design philosophy lead to the rather unforgiving + // implementation burden: Degraded performance would acutely manifest from an even tiny + // amount of individual cpu-bound processing delay in the scheduler thread, like when + // dispatching the next conflicting task after receiving the previous finished one from + // the handler. + // + // Thus, it's fatal for unified scheduler's advertised superiority to squeeze every cpu + // cycles out of the scheduler thread. Thus, any kinds of unessential overhead sources + // like syscalls, VDSO, and even memory (de)allocation should be avoided at all costs + // by design or by means of offloading at the last resort. + move || loop { + let mut is_finished = false; + while !is_finished { + select! { + recv(finished_task_receiver) -> executed_task => { + let executed_task = executed_task.unwrap(); + + active_task_count = active_task_count.checked_sub(1).unwrap(); + let result_with_timings = result_with_timings.as_mut().unwrap(); + Self::accumulate_result_with_timings(result_with_timings, executed_task); + }, + recv(new_task_receiver) -> message => { + assert!(!session_ending); + + match message.unwrap() { + NewTaskPayload::Payload(task) => { + // so, we're NOT scheduling at all here; rather, just execute + // tx straight off. the inter-tx locking deps aren't needed to + // be resolved in the case of single-threaded FIFO like this. + runnable_task_sender + .send_payload(task) + .unwrap(); + active_task_count = active_task_count.checked_add(1).unwrap(); + } + NewTaskPayload::OpenSubchannel(context) => { + // signal about new SchedulingContext to handler threads + runnable_task_sender + .send_chained_channel(context, handler_count) + .unwrap(); + assert_matches!( + result_with_timings.replace(initialized_result_with_timings()), + None + ); + } + NewTaskPayload::CloseSubchannel => { + session_ending = true; + } + } + }, + }; + + // a really simplistic termination condition, which only works under the + // assumption of single handler thread... 
+ is_finished = session_ending && active_task_count == 0; + } + + if session_ending { + session_result_sender + .send(Some( + result_with_timings + .take() + .unwrap_or_else(initialized_result_with_timings), + )) + .unwrap(); + session_ending = false; + } + } + }; + + let handler_main_loop = || { + let pool = self.pool.clone(); + let mut runnable_task_receiver = runnable_task_receiver.clone(); + let finished_task_sender = finished_task_sender.clone(); + + move || loop { + let (task, sender) = select! { + recv(runnable_task_receiver.for_select()) -> message => { + if let Some(task) = runnable_task_receiver.after_select(message.unwrap()) { + (task, &finished_task_sender) + } else { + continue; + } + }, + }; + let mut task = ExecutedTask::new_boxed(task); + Self::execute_task_with_handler( + runnable_task_receiver.context().bank(), + &mut task, + &pool.handler_context, + ); + sender.send(task).unwrap(); + } + }; + + self.scheduler_thread = Some( + thread::Builder::new() + .name("solScheduler".to_owned()) + .spawn(scheduler_main_loop()) + .unwrap(), + ); + + self.handler_threads = (0..self.handler_count) + .map({ + |thx| { + thread::Builder::new() + .name(format!("solScHandler{:02}", thx)) + .spawn(handler_main_loop()) + .unwrap() + } + }) + .collect(); + } + + fn send_task(&self, task: Task) { + debug!("send_task()"); + self.new_task_sender + .send(NewTaskPayload::Payload(task)) + .unwrap() + } + + fn end_session(&mut self) { + if self.session_result_with_timings.is_some() { + debug!("end_session(): already result resides within thread manager.."); + return; + } + debug!("end_session(): will end session..."); + + self.new_task_sender + .send(NewTaskPayload::CloseSubchannel) + .unwrap(); + + if let Some(result_with_timings) = self.session_result_receiver.recv().unwrap() { + self.put_session_result_with_timings(result_with_timings); + } + } + + fn start_session(&mut self, context: &SchedulingContext) { + assert_matches!(self.session_result_with_timings, None); + self.new_task_sender + .send(NewTaskPayload::OpenSubchannel(context.clone())) + .unwrap(); + } +} + pub trait SpawnableScheduler: InstalledScheduler { type Inner: Debug + Send + Sync; @@ -237,29 +671,33 @@ pub trait SpawnableScheduler: InstalledScheduler { impl SpawnableScheduler for PooledScheduler { type Inner = PooledSchedulerInner; - fn into_inner(self) -> (ResultWithTimings, Self::Inner) { - ( - self.result_with_timings.into_inner().expect("not poisoned"), - self.inner, - ) + fn into_inner(mut self) -> (ResultWithTimings, Self::Inner) { + let result_with_timings = { + let manager = &mut self.inner.thread_manager; + manager.end_session(); + manager.take_session_result_with_timings() + }; + (result_with_timings, self.inner) } - fn from_inner(inner: Self::Inner, context: SchedulingContext) -> Self { - Self { - inner, - context, - result_with_timings: Mutex::new((Ok(()), ExecuteTimings::default())), - } + fn from_inner(mut inner: Self::Inner, context: SchedulingContext) -> Self { + inner.thread_manager.start_session(&context); + Self { inner, context } } fn spawn(pool: Arc>, initial_context: SchedulingContext) -> Self { - Self::do_spawn(pool, initial_context) + let mut scheduler = Self::do_spawn(pool, initial_context); + scheduler + .inner + .thread_manager + .start_threads(&scheduler.context); + scheduler } } impl InstalledScheduler for PooledScheduler { fn id(&self) -> SchedulerId { - self.inner.id + self.inner.thread_manager.scheduler_id } fn context(&self) -> &SchedulingContext { @@ -267,23 +705,8 @@ impl InstalledScheduler for 
PooledScheduler { } fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { - let (result, timings) = &mut *self.result_with_timings.lock().expect("not poisoned"); - if result.is_err() { - // just bail out early to short-circuit the processing altogether - return; - } - - // ... so, we're NOT scheduling at all here; rather, just execute tx straight off. the - // inter-tx locking deps aren't needed to be resolved in the case of single-threaded FIFO - // like this. - TH::handle( - result, - timings, - self.context().bank(), - transaction, - index, - &self.inner.pool.handler_context, - ); + let task = Task::create_task(transaction.clone(), index); + self.inner.thread_manager.send_task(task); } fn wait_for_termination( @@ -295,7 +718,7 @@ impl InstalledScheduler for PooledScheduler { } fn pause_for_recent_blockhash(&mut self) { - // not surprisingly, there's nothing to do for this min impl! + self.inner.thread_manager.end_session(); } } @@ -305,7 +728,7 @@ where TH: TaskHandler, { fn return_to_pool(self: Box) { - self.pool.clone().return_scheduler(*self) + self.thread_manager.pool.clone().return_scheduler(*self) } } @@ -544,7 +967,8 @@ mod tests { )); assert_eq!(bank.transaction_count(), 0); scheduler.schedule_execution(&(bad_tx, 0)); - scheduler.pause_for_recent_blockhash(); + // simulate the task-sending thread is stalled for some reason. + std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(bank.transaction_count(), 0); let good_tx_after_bad_tx = @@ -563,7 +987,13 @@ mod tests { scheduler.schedule_execution(&(good_tx_after_bad_tx, 0)); scheduler.pause_for_recent_blockhash(); // transaction_count should remain same as scheduler should be bailing out. - assert_eq!(bank.transaction_count(), 0); + // That's because we're testing the serialized failing execution case in this test. + // However, currently threaded impl can't properly abort in this situtation.. + // so, 1 should be observed, intead of 0. + // Also note that bank.transaction_count() is generally racy by nature, because + // blockstore_processor and unified_scheduler both tend to process non-conflicting batches + // in parallel as part of the normal operation. 
+ assert_eq!(bank.transaction_count(), 1); let bank = BankWithScheduler::new(bank, Some(scheduler)); assert_matches!( @@ -577,8 +1007,10 @@ mod tests { #[derive(Debug)] struct AsyncScheduler( - PooledScheduler, + Mutex, Mutex>>, + SchedulingContext, + Arc>, ); impl AsyncScheduler { @@ -593,7 +1025,7 @@ mod tests { } overall_timings.accumulate(&timings); } - *self.0.result_with_timings.lock().unwrap() = (overall_result, overall_timings); + *self.0.lock().unwrap() = (overall_result, overall_timings); } } @@ -601,17 +1033,17 @@ mod tests { for AsyncScheduler { fn id(&self) -> SchedulerId { - self.0.id() + unimplemented!(); } fn context(&self) -> &SchedulingContext { - self.0.context() + &self.2 } fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { let transaction_and_index = (transaction.clone(), index); let context = self.context().clone(); - let pool = self.0.inner.pool.clone(); + let pool = self.3.clone(); self.1.lock().unwrap().push(std::thread::spawn(move || { // intentionally sleep to simulate race condition where register_recent_blockhash @@ -635,10 +1067,14 @@ mod tests { fn wait_for_termination( self: Box, - is_dropped: bool, + _is_dropped: bool, ) -> (ResultWithTimings, UninstalledSchedulerBox) { self.do_wait(); - Box::new(self.0).wait_for_termination(is_dropped) + let result_with_timings = std::mem::replace( + &mut *self.0.lock().unwrap(), + initialized_result_with_timings(), + ); + (result_with_timings, self) } fn pause_for_recent_blockhash(&mut self) { @@ -651,6 +1087,14 @@ mod tests { } } + impl UninstalledScheduler + for AsyncScheduler + { + fn return_to_pool(self: Box) { + self.3.clone().return_scheduler(*self) + } + } + impl SpawnableScheduler for AsyncScheduler { @@ -658,11 +1102,11 @@ mod tests { type Inner = Self; fn into_inner(self) -> (ResultWithTimings, Self::Inner) { - todo!(); + unimplemented!(); } fn from_inner(_inner: Self::Inner, _context: SchedulingContext) -> Self { - todo!(); + unimplemented!(); } fn spawn( @@ -670,19 +1114,10 @@ mod tests { initial_context: SchedulingContext, ) -> Self { AsyncScheduler::( - PooledScheduler::::from_inner( - PooledSchedulerInner { - id: pool.new_scheduler_id(), - pool: SchedulerPool::new( - pool.handler_context.log_messages_bytes_limit, - pool.handler_context.transaction_status_sender.clone(), - pool.handler_context.replay_vote_sender.clone(), - pool.handler_context.prioritization_fee_cache.clone(), - ), - }, - initial_context, - ), + Mutex::new(initialized_result_with_timings()), Mutex::new(vec![]), + initial_context, + pool, ) } } From b9947bd327ef44e0bcc921c76dcf851f04677e63 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 23 Jan 2024 21:54:06 -0700 Subject: [PATCH 039/401] Support json parsing of epoch-rewards partition data sysvar accounts (#34914) --- account-decoder/src/parse_sysvar.rs | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs index 35746949c7f9ef..3fda8e8560c623 100644 --- a/account-decoder/src/parse_sysvar.rs +++ b/account-decoder/src/parse_sysvar.rs @@ -9,6 +9,7 @@ use { bv::BitVec, solana_sdk::{ clock::{Clock, Epoch, Slot, UnixTimestamp}, + epoch_rewards_partition_data::EpochRewardsPartitionDataVersion, epoch_schedule::EpochSchedule, pubkey::Pubkey, rent::Rent, @@ -96,7 +97,24 @@ pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result(data) + { + let EpochRewardsPartitionDataVersion::V0(partition_data) = + epoch_rewards_partition_data; + 
Some(SysvarAccountType::EpochRewardsPartitionData( + UiEpochRewardsPartitionData { + version: 0, + num_partitions: partition_data.num_partitions as u64, + parent_blockhash: partition_data.parent_blockhash.to_string(), + }, + )) + } else { + None + } } }; parsed_account.ok_or(ParseAccountError::AccountNotParsable( @@ -120,6 +138,7 @@ pub enum SysvarAccountType { StakeHistory(Vec), LastRestartSlot(UiLastRestartSlot), EpochRewards(EpochRewards), + EpochRewardsPartitionData(UiEpochRewardsPartitionData), } #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] @@ -239,6 +258,14 @@ pub struct UiLastRestartSlot { pub last_restart_slot: Slot, } +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] +#[serde(rename_all = "camelCase")] +pub struct UiEpochRewardsPartitionData { + pub version: u32, + pub num_partitions: u64, + pub parent_blockhash: String, +} + #[cfg(test)] mod test { #[allow(deprecated)] From b11d41a3f7f5c291992c10cc3a617bb2f7806f9a Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Wed, 24 Jan 2024 22:27:03 +0900 Subject: [PATCH 040/401] [zk-token-sdk] Use checked arithmetic when processing transfer amount (#34130) * add `try_split_u64` * add `try_combine_lo_hi_u64` * add `try` variants of ciphertext arithmetic functions * use try functions in proof generaiton and verification logic * deprecate non-`try` functions * use try functions in proof generaiton and verification logic * Apply suggestions from code review Co-authored-by: Jon C * cargo fmt --------- Co-authored-by: Jon C --- zk-token-sdk/src/errors.rs | 2 + zk-token-sdk/src/instruction/errors.rs | 4 + zk-token-sdk/src/instruction/transfer/mod.rs | 199 +++++++++++++++++- .../src/instruction/transfer/with_fee.rs | 52 +++-- .../src/instruction/transfer/without_fee.rs | 11 +- zk-token-sdk/src/zk_token_elgamal/ops.rs | 4 +- 6 files changed, 239 insertions(+), 33 deletions(-) diff --git a/zk-token-sdk/src/errors.rs b/zk-token-sdk/src/errors.rs index ad43b680dc9b0d..2dff1121f6a5cb 100644 --- a/zk-token-sdk/src/errors.rs +++ b/zk-token-sdk/src/errors.rs @@ -38,6 +38,8 @@ pub enum ProofVerificationError { ProofContext, #[error("illegal commitment length")] IllegalCommitmentLength, + #[error("illegal amount bit length")] + IllegalAmountBitLength, } #[derive(Clone, Debug, Eq, PartialEq)] diff --git a/zk-token-sdk/src/instruction/errors.rs b/zk-token-sdk/src/instruction/errors.rs index a21ac1cf345459..0d76bf02766c1b 100644 --- a/zk-token-sdk/src/instruction/errors.rs +++ b/zk-token-sdk/src/instruction/errors.rs @@ -8,4 +8,8 @@ pub enum InstructionError { Decryption, #[error("missing ciphertext")] MissingCiphertext, + #[error("illegal amount bit length")] + IllegalAmountBitLength, + #[error("arithmetic overflow")] + Overflow, } diff --git a/zk-token-sdk/src/instruction/transfer/mod.rs b/zk-token-sdk/src/instruction/transfer/mod.rs index 33bc6c08a96a21..5e728ee7c41baa 100644 --- a/zk-token-sdk/src/instruction/transfer/mod.rs +++ b/zk-token-sdk/src/instruction/transfer/mod.rs @@ -4,9 +4,12 @@ mod without_fee; #[cfg(not(target_os = "solana"))] use { - crate::encryption::{ - elgamal::ElGamalCiphertext, - pedersen::{PedersenCommitment, PedersenOpening}, + crate::{ + encryption::{ + elgamal::ElGamalCiphertext, + pedersen::{PedersenCommitment, PedersenOpening}, + }, + instruction::errors::InstructionError, }, curve25519_dalek::scalar::Scalar, }; @@ -33,6 +36,7 @@ pub enum Role { /// Takes in a 64-bit number `amount` and a bit length `bit_length`. 
It returns:
 /// - the `bit_length` low bits of `amount` interpreted as u64
 /// - the (64 - `bit_length`) high bits of `amount` interpreted as u64
+#[deprecated(since = "1.18.0", note = "please use `try_split_u64` instead")]
 #[cfg(not(target_os = "solana"))]
 pub fn split_u64(amount: u64, bit_length: usize) -> (u64, u64) {
     if bit_length == 64 {
@@ -44,6 +48,30 @@ pub fn split_u64(amount: u64, bit_length: usize) -> (u64, u64) {
     }
 }
 
+/// Takes in a 64-bit number `amount` and a bit length `bit_length`. It returns:
+/// - the `bit_length` low bits of `amount` interpreted as u64
+/// - the `(64 - bit_length)` high bits of `amount` interpreted as u64
+#[cfg(not(target_os = "solana"))]
+pub fn try_split_u64(amount: u64, bit_length: usize) -> Result<(u64, u64), InstructionError> {
+    match bit_length {
+        0 => Ok((0, amount)),
+        1..=63 => {
+            let bit_length_complement = u64::BITS.checked_sub(bit_length as u32).unwrap();
+            // shifts are safe as long as `bit_length` and `bit_length_complement` < 64
+            let lo = amount
+                .checked_shl(bit_length_complement) // clear out the high bits
+                .and_then(|amount| amount.checked_shr(bit_length_complement))
+                .unwrap(); // shift back
+            let hi = amount.checked_shr(bit_length as u32).unwrap();
+
+            Ok((lo, hi))
+        }
+        64 => Ok((amount, 0)),
+        _ => Err(InstructionError::IllegalAmountBitLength),
+    }
+}
+
+#[deprecated(since = "1.18.0", note = "please use `try_combine_lo_hi_u64` instead")]
 #[cfg(not(target_os = "solana"))]
 pub fn combine_lo_hi_u64(amount_lo: u64, amount_hi: u64, bit_length: usize) -> u64 {
     if bit_length == 64 {
@@ -53,16 +81,47 @@ pub fn combine_lo_hi_u64(amount_lo: u64, amount_hi: u64, bit_length: usize) -> u
     }
 }
 
+/// Combine two numbers that are interpreted as the low and high bits of a target number. The
+/// `bit_length` parameter specifies the number of bits that `amount_hi` is to be shifted by.
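+/// For example, `try_combine_lo_hi_u64(1, 2, 16)` yields `1 + (2 << 16) = 131_073`,
+/// while a `bit_length` greater than 64 yields `InstructionError::IllegalAmountBitLength`.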
+#[cfg(not(target_os = "solana"))] +pub fn try_combine_lo_hi_u64( + amount_lo: u64, + amount_hi: u64, + bit_length: usize, +) -> Result { + match bit_length { + 0 => Ok(amount_hi), + 1..=63 => { + // shifts are safe as long as `bit_length` < 64 + let amount_hi = amount_hi.checked_shl(bit_length as u32).unwrap(); + let combined = amount_lo + .checked_add(amount_hi) + .ok_or(InstructionError::IllegalAmountBitLength)?; + Ok(combined) + } + 64 => Ok(amount_lo), + _ => Err(InstructionError::IllegalAmountBitLength), + } +} + #[cfg(not(target_os = "solana"))] -fn combine_lo_hi_ciphertexts( +fn try_combine_lo_hi_ciphertexts( ciphertext_lo: &ElGamalCiphertext, ciphertext_hi: &ElGamalCiphertext, bit_length: usize, -) -> ElGamalCiphertext { - let two_power = (1_u64) << bit_length; - ciphertext_lo + &(ciphertext_hi * &Scalar::from(two_power)) +) -> Result { + let two_power = if bit_length < u64::BITS as usize { + 1_u64.checked_shl(bit_length as u32).unwrap() + } else { + return Err(InstructionError::IllegalAmountBitLength); + }; + Ok(ciphertext_lo + &(ciphertext_hi * &Scalar::from(two_power))) } +#[deprecated( + since = "1.18.0", + note = "please use `try_combine_lo_hi_commitments` instead" +)] #[cfg(not(target_os = "solana"))] pub fn combine_lo_hi_commitments( comm_lo: &PedersenCommitment, @@ -73,6 +132,24 @@ pub fn combine_lo_hi_commitments( comm_lo + comm_hi * &Scalar::from(two_power) } +#[cfg(not(target_os = "solana"))] +pub fn try_combine_lo_hi_commitments( + comm_lo: &PedersenCommitment, + comm_hi: &PedersenCommitment, + bit_length: usize, +) -> Result { + let two_power = if bit_length < u64::BITS as usize { + 1_u64.checked_shl(bit_length as u32).unwrap() + } else { + return Err(InstructionError::IllegalAmountBitLength); + }; + Ok(comm_lo + comm_hi * &Scalar::from(two_power)) +} + +#[deprecated( + since = "1.18.0", + note = "please use `try_combine_lo_hi_openings` instead" +)] #[cfg(not(target_os = "solana"))] pub fn combine_lo_hi_openings( opening_lo: &PedersenOpening, @@ -83,6 +160,20 @@ pub fn combine_lo_hi_openings( opening_lo + opening_hi * &Scalar::from(two_power) } +#[cfg(not(target_os = "solana"))] +pub fn try_combine_lo_hi_openings( + opening_lo: &PedersenOpening, + opening_hi: &PedersenOpening, + bit_length: usize, +) -> Result { + let two_power = if bit_length < u64::BITS as usize { + 1_u64.checked_shl(bit_length as u32).unwrap() + } else { + return Err(InstructionError::IllegalAmountBitLength); + }; + Ok(opening_lo + opening_hi * &Scalar::from(two_power)) +} + #[derive(Clone, Copy)] #[repr(C)] pub struct FeeParameters { @@ -91,3 +182,97 @@ pub struct FeeParameters { /// Maximum fee assessed on transfers, expressed as an amount of tokens pub maximum_fee: u64, } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_split_u64() { + assert_eq!((0, 0), try_split_u64(0, 0).unwrap()); + assert_eq!((0, 0), try_split_u64(0, 1).unwrap()); + assert_eq!((0, 0), try_split_u64(0, 5).unwrap()); + assert_eq!((0, 0), try_split_u64(0, 63).unwrap()); + assert_eq!((0, 0), try_split_u64(0, 64).unwrap()); + assert_eq!( + InstructionError::IllegalAmountBitLength, + try_split_u64(0, 65).unwrap_err() + ); + + assert_eq!((0, 1), try_split_u64(1, 0).unwrap()); + assert_eq!((1, 0), try_split_u64(1, 1).unwrap()); + assert_eq!((1, 0), try_split_u64(1, 5).unwrap()); + assert_eq!((1, 0), try_split_u64(1, 63).unwrap()); + assert_eq!((1, 0), try_split_u64(1, 64).unwrap()); + assert_eq!( + InstructionError::IllegalAmountBitLength, + try_split_u64(1, 65).unwrap_err() + ); + + assert_eq!((0, 33), 
try_split_u64(33, 0).unwrap()); + assert_eq!((1, 16), try_split_u64(33, 1).unwrap()); + assert_eq!((1, 1), try_split_u64(33, 5).unwrap()); + assert_eq!((33, 0), try_split_u64(33, 63).unwrap()); + assert_eq!((33, 0), try_split_u64(33, 64).unwrap()); + assert_eq!( + InstructionError::IllegalAmountBitLength, + try_split_u64(33, 65).unwrap_err() + ); + + let amount = u64::MAX; + assert_eq!((0, amount), try_split_u64(amount, 0).unwrap()); + assert_eq!((1, (1 << 63) - 1), try_split_u64(amount, 1).unwrap()); + assert_eq!((31, (1 << 59) - 1), try_split_u64(amount, 5).unwrap()); + assert_eq!(((1 << 63) - 1, 1), try_split_u64(amount, 63).unwrap()); + assert_eq!((amount, 0), try_split_u64(amount, 64).unwrap()); + assert_eq!( + InstructionError::IllegalAmountBitLength, + try_split_u64(amount, 65).unwrap_err() + ); + } + + fn test_split_and_combine(amount: u64, bit_length: usize) { + let (amount_lo, amount_hi) = try_split_u64(amount, bit_length).unwrap(); + assert_eq!( + try_combine_lo_hi_u64(amount_lo, amount_hi, bit_length).unwrap(), + amount + ); + } + + #[test] + fn test_combine_lo_hi_u64() { + test_split_and_combine(0, 0); + test_split_and_combine(0, 1); + test_split_and_combine(0, 5); + test_split_and_combine(0, 63); + test_split_and_combine(0, 64); + + test_split_and_combine(1, 0); + test_split_and_combine(1, 1); + test_split_and_combine(1, 5); + test_split_and_combine(1, 63); + test_split_and_combine(1, 64); + + test_split_and_combine(33, 0); + test_split_and_combine(33, 1); + test_split_and_combine(33, 5); + test_split_and_combine(33, 63); + test_split_and_combine(33, 64); + + test_split_and_combine(u64::MAX, 0); + test_split_and_combine(u64::MAX, 1); + test_split_and_combine(u64::MAX, 5); + test_split_and_combine(u64::MAX, 63); + test_split_and_combine(u64::MAX, 64); + + // illegal amount bit + let err = try_combine_lo_hi_u64(0, 0, 65).unwrap_err(); + assert_eq!(err, InstructionError::IllegalAmountBitLength); + + // overflow + let amount_lo = u64::MAX; + let amount_hi = u64::MAX; + let err = try_combine_lo_hi_u64(amount_lo, amount_hi, 1).unwrap_err(); + assert_eq!(err, InstructionError::IllegalAmountBitLength); + } +} diff --git a/zk-token-sdk/src/instruction/transfer/with_fee.rs b/zk-token-sdk/src/instruction/transfer/with_fee.rs index 4bc9a154376840..4d9caec812dddc 100644 --- a/zk-token-sdk/src/instruction/transfer/with_fee.rs +++ b/zk-token-sdk/src/instruction/transfer/with_fee.rs @@ -9,10 +9,10 @@ use { instruction::{ errors::InstructionError, transfer::{ - combine_lo_hi_ciphertexts, combine_lo_hi_commitments, combine_lo_hi_openings, - combine_lo_hi_u64, encryption::{FeeEncryption, TransferAmountCiphertext}, - split_u64, FeeParameters, Role, + try_combine_lo_hi_ciphertexts, try_combine_lo_hi_commitments, + try_combine_lo_hi_openings, try_combine_lo_hi_u64, try_split_u64, FeeParameters, + Role, }, }, range_proof::RangeProof, @@ -128,7 +128,8 @@ impl TransferWithFeeData { withdraw_withheld_authority_pubkey: &ElGamalPubkey, ) -> Result { // split and encrypt transfer amount - let (amount_lo, amount_hi) = split_u64(transfer_amount, TRANSFER_AMOUNT_LO_BITS); + let (amount_lo, amount_hi) = try_split_u64(transfer_amount, TRANSFER_AMOUNT_LO_BITS) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let (ciphertext_lo, opening_lo) = TransferAmountCiphertext::new( amount_lo, @@ -159,11 +160,12 @@ impl TransferWithFeeData { }; let new_source_ciphertext = old_source_ciphertext - - combine_lo_hi_ciphertexts( + - try_combine_lo_hi_ciphertexts( &transfer_amount_lo_source, 
&transfer_amount_hi_source, TRANSFER_AMOUNT_LO_BITS, - ); + ) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; // calculate fee // @@ -177,7 +179,9 @@ impl TransferWithFeeData { u64::conditional_select(&fee_parameters.maximum_fee, &fee_amount, below_max); // split and encrypt fee - let (fee_to_encrypt_lo, fee_to_encrypt_hi) = split_u64(fee_to_encrypt, FEE_AMOUNT_LO_BITS); + let (fee_to_encrypt_lo, fee_to_encrypt_hi) = + try_split_u64(fee_to_encrypt, FEE_AMOUNT_LO_BITS) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let (fee_ciphertext_lo, opening_fee_lo) = FeeEncryption::new( fee_to_encrypt_lo, @@ -510,23 +514,28 @@ impl TransferWithFeeProof { let pod_claimed_commitment: pod::PedersenCommitment = claimed_commitment.into(); transcript.append_commitment(b"commitment-claimed", &pod_claimed_commitment); - let combined_commitment = combine_lo_hi_commitments( + let combined_commitment = try_combine_lo_hi_commitments( ciphertext_lo.get_commitment(), ciphertext_hi.get_commitment(), TRANSFER_AMOUNT_LO_BITS, - ); + ) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let combined_opening = - combine_lo_hi_openings(opening_lo, opening_hi, TRANSFER_AMOUNT_LO_BITS); + try_combine_lo_hi_openings(opening_lo, opening_hi, TRANSFER_AMOUNT_LO_BITS) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let combined_fee_amount = - combine_lo_hi_u64(fee_amount_lo, fee_amount_hi, TRANSFER_AMOUNT_LO_BITS); - let combined_fee_commitment = combine_lo_hi_commitments( + try_combine_lo_hi_u64(fee_amount_lo, fee_amount_hi, TRANSFER_AMOUNT_LO_BITS) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; + let combined_fee_commitment = try_combine_lo_hi_commitments( fee_ciphertext_lo.get_commitment(), fee_ciphertext_hi.get_commitment(), TRANSFER_AMOUNT_LO_BITS, - ); + ) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let combined_fee_opening = - combine_lo_hi_openings(opening_fee_lo, opening_fee_hi, TRANSFER_AMOUNT_LO_BITS); + try_combine_lo_hi_openings(opening_fee_lo, opening_fee_hi, TRANSFER_AMOUNT_LO_BITS) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; // compute real delta commitment let (delta_commitment, opening_delta) = compute_delta_commitment_and_opening( @@ -561,11 +570,12 @@ impl TransferWithFeeProof { // generate the range proof let opening_claimed_negated = &PedersenOpening::default() - &opening_claimed; - let combined_amount = combine_lo_hi_u64( + let combined_amount = try_combine_lo_hi_u64( transfer_amount_lo, transfer_amount_hi, TRANSFER_AMOUNT_LO_BITS, - ); + ) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let amount_sub_fee = combined_amount .checked_sub(combined_fee_amount) .ok_or(ProofGenerationError::FeeCalculation)?; @@ -680,16 +690,18 @@ impl TransferWithFeeProof { // verify fee sigma proof transcript.append_commitment(b"commitment-claimed", &self.claimed_commitment); - let combined_commitment = combine_lo_hi_commitments( + let combined_commitment = try_combine_lo_hi_commitments( ciphertext_lo.get_commitment(), ciphertext_hi.get_commitment(), TRANSFER_AMOUNT_LO_BITS, - ); - let combined_fee_commitment = combine_lo_hi_commitments( + ) + .map_err(|_| ProofVerificationError::IllegalAmountBitLength)?; + let combined_fee_commitment = try_combine_lo_hi_commitments( fee_ciphertext_lo.get_commitment(), fee_ciphertext_hi.get_commitment(), TRANSFER_AMOUNT_LO_BITS, - ); + ) + .map_err(|_| ProofVerificationError::IllegalAmountBitLength)?; let delta_commitment = compute_delta_commitment( 
&combined_commitment, diff --git a/zk-token-sdk/src/instruction/transfer/without_fee.rs b/zk-token-sdk/src/instruction/transfer/without_fee.rs index 39e28994cb809b..b72d1ebc7432c0 100644 --- a/zk-token-sdk/src/instruction/transfer/without_fee.rs +++ b/zk-token-sdk/src/instruction/transfer/without_fee.rs @@ -9,7 +9,8 @@ use { instruction::{ errors::InstructionError, transfer::{ - combine_lo_hi_ciphertexts, encryption::TransferAmountCiphertext, split_u64, Role, + encryption::TransferAmountCiphertext, try_combine_lo_hi_ciphertexts, try_split_u64, + Role, }, }, range_proof::RangeProof, @@ -96,7 +97,8 @@ impl TransferData { (destination_pubkey, auditor_pubkey): (&ElGamalPubkey, &ElGamalPubkey), ) -> Result { // split and encrypt transfer amount - let (amount_lo, amount_hi) = split_u64(transfer_amount, TRANSFER_AMOUNT_LO_BITS); + let (amount_lo, amount_hi) = try_split_u64(transfer_amount, TRANSFER_AMOUNT_LO_BITS) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; let (ciphertext_lo, opening_lo) = TransferAmountCiphertext::new( amount_lo, @@ -128,11 +130,12 @@ impl TransferData { }; let new_source_ciphertext = ciphertext_old_source - - combine_lo_hi_ciphertexts( + - try_combine_lo_hi_ciphertexts( &transfer_amount_lo_source, &transfer_amount_hi_source, TRANSFER_AMOUNT_LO_BITS, - ); + ) + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; // generate transcript and append all public inputs let pod_transfer_pubkeys = TransferPubkeys { diff --git a/zk-token-sdk/src/zk_token_elgamal/ops.rs b/zk-token-sdk/src/zk_token_elgamal/ops.rs index 10db117c44a5b9..38da19c1c2e7f1 100644 --- a/zk-token-sdk/src/zk_token_elgamal/ops.rs +++ b/zk-token-sdk/src/zk_token_elgamal/ops.rs @@ -134,7 +134,7 @@ mod tests { elgamal::{ElGamalCiphertext, ElGamalKeypair}, pedersen::{Pedersen, PedersenOpening}, }, - instruction::transfer::split_u64, + instruction::transfer::try_split_u64, zk_token_elgamal::{ops, pod}, }, bytemuck::Zeroable, @@ -204,7 +204,7 @@ mod tests { fn test_transfer_arithmetic() { // transfer amount let transfer_amount: u64 = 55; - let (amount_lo, amount_hi) = split_u64(transfer_amount, 16); + let (amount_lo, amount_hi) = try_split_u64(transfer_amount, 16).unwrap(); // generate public keys let source_keypair = ElGamalKeypair::new_rand(); From ef233eaaa7aa20fbdae870fc82e37bd26f0f885d Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 24 Jan 2024 09:43:18 -0500 Subject: [PATCH 041/401] Refactor load_accounts to take a reference to a slice instead of vec (#34919) This is in preparation for further refactoring of Bank::load_and_execute_transactions in a separate commit. 
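Taking `&[TransactionCheckResult]` instead of `Vec<TransactionCheckResult>`
keeps ownership of the check results with the caller, so the same results can
be consulted again after account loading without a clone. A minimal sketch of
the borrowing pattern this enables (simplified stand-in names, not the actual
runtime types):

    type CheckResult = Result<(), String>;

    // modeled loosely after `load_accounts`: reads the results, does not consume them
    fn load_accounts(lock_results: &[CheckResult]) -> usize {
        lock_results.iter().filter(|res| res.is_ok()).count()
    }

    fn main() {
        let check_results: Vec<CheckResult> = vec![Ok(()), Err("blockhash too old".into())];
        let loadable = load_accounts(&check_results); // borrows the slice
        assert_eq!(loadable, 1);
        // the caller still owns `check_results` and can reuse it afterwards
        assert_eq!(check_results.len(), 2);
    }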
--- accounts-db/src/nonce_info.rs | 12 ++++++------ rpc/src/transaction_status_service.rs | 2 +- runtime/src/accounts/mod.rs | 8 ++++---- runtime/src/bank.rs | 2 +- runtime/src/bank/tests.rs | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/nonce_info.rs b/accounts-db/src/nonce_info.rs index 16917be15c3d9a..8a6d3a40fc7ecc 100644 --- a/accounts-db/src/nonce_info.rs +++ b/accounts-db/src/nonce_info.rs @@ -66,7 +66,7 @@ impl NonceFull { } } pub fn from_partial( - partial: NoncePartial, + partial: &NoncePartial, message: &SanitizedMessage, accounts: &[TransactionAccount], rent_debits: &RentDebits, @@ -221,8 +221,8 @@ mod tests { ), ]; - let full = NonceFull::from_partial(partial.clone(), &message, &accounts, &rent_debits) - .unwrap(); + let full = + NonceFull::from_partial(&partial, &message, &accounts, &rent_debits).unwrap(); assert_eq!(*full.address(), nonce_address); assert_eq!(*full.account(), rent_collected_nonce_account); assert_eq!(full.lamports_per_signature(), Some(lamports_per_signature)); @@ -252,8 +252,8 @@ mod tests { ), ]; - let full = NonceFull::from_partial(partial.clone(), &message, &accounts, &rent_debits) - .unwrap(); + let full = + NonceFull::from_partial(&partial, &message, &accounts, &rent_debits).unwrap(); assert_eq!(*full.address(), nonce_address); assert_eq!(*full.account(), nonce_account); assert_eq!(full.lamports_per_signature(), Some(lamports_per_signature)); @@ -264,7 +264,7 @@ mod tests { { let message = new_sanitized_message(&instructions, Some(&nonce_address)); assert_eq!( - NonceFull::from_partial(partial, &message, &[], &RentDebits::default()) + NonceFull::from_partial(&partial, &message, &[], &RentDebits::default()) .unwrap_err(), TransactionError::AccountNotFound, ); diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index eca53c66658766..68640362b2182c 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -365,7 +365,7 @@ pub(crate) mod tests { inner_instructions: None, durable_nonce_fee: Some(DurableNonceFee::from( &NonceFull::from_partial( - rollback_partial, + &rollback_partial, &SanitizedMessage::Legacy(LegacyMessage::new(message)), &[(pubkey, nonce_account)], &rent_debits, diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs index 9c99143416a345..28b1be02283448 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/accounts/mod.rs @@ -49,7 +49,7 @@ pub(super) fn load_accounts( accounts_db: &AccountsDb, ancestors: &Ancestors, txs: &[SanitizedTransaction], - lock_results: Vec, + lock_results: &[TransactionCheckResult], hash_queue: &BlockhashQueue, error_counters: &mut TransactionErrorMetrics, rent_collector: &RentCollector, @@ -123,7 +123,7 @@ pub(super) fn load_accounts( (Ok(loaded_transaction), nonce) } - (_, (Err(e), _nonce)) => (Err(e), None), + (_, (Err(e), _nonce)) => (Err(e.clone()), None), }) .collect() } @@ -544,7 +544,7 @@ mod tests { &accounts.accounts_db, &ancestors, &[sanitized_tx], - vec![(Ok(()), None)], + &[(Ok(()), None)], &hash_queue, error_counters, rent_collector, @@ -1022,7 +1022,7 @@ mod tests { &accounts.accounts_db, &ancestors, &[tx], - vec![(Ok(()), None)], + &[(Ok(()), None)], &hash_queue, &mut error_counters, &rent_collector, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 38de7ff18de594..a032fbcf428c17 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5179,7 +5179,7 @@ impl Bank { &self.rc.accounts.accounts_db, &self.ancestors, sanitized_txs, - check_results, + 
&check_results, &self.blockhash_queue.read().unwrap(), &mut error_counters, &self.rent_collector, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index efe8b1970dfeff..763b8c7db42df7 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -10989,7 +10989,7 @@ fn test_rent_state_list_len() { &bank.accounts().accounts_db, &bank.ancestors, &[sanitized_tx.clone()], - vec![(Ok(()), None)], + &[(Ok(()), None)], &bank.blockhash_queue.read().unwrap(), &mut error_counters, &bank.rent_collector, From 0d92254736936fc9ffa68ebe4867233625a1b415 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Wed, 24 Jan 2024 08:57:50 -0800 Subject: [PATCH 042/401] Add push_heaviest_fork and get_heaviest_fork. (#34892) Add push_get_heaviest_fork and push_get_heaviest_fork. --- gossip/src/cluster_info.rs | 113 ++++++++++++++++++++++++++++++++++++- 1 file changed, 112 insertions(+), 1 deletion(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 56b75010f93fe9..23038b0407d30e 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -39,7 +39,9 @@ use { epoch_slots::EpochSlots, gossip_error::GossipError, ping_pong::{self, PingCache, Pong}, - restart_crds_values::{RestartLastVotedForkSlots, RestartLastVotedForkSlotsError}, + restart_crds_values::{ + RestartHeaviestFork, RestartLastVotedForkSlots, RestartLastVotedForkSlotsError, + }, socketaddr, socketaddr_any, weighted_shuffle::WeightedShuffle, }, @@ -984,6 +986,26 @@ impl ClusterInfo { Ok(()) } + pub fn push_restart_heaviest_fork( + &self, + last_slot: Slot, + last_slot_hash: Hash, + observed_stake: u64, + ) { + let restart_heaviest_fork = RestartHeaviestFork { + from: self.id(), + wallclock: timestamp(), + last_slot, + last_slot_hash, + observed_stake, + shred_version: self.my_shred_version(), + }; + self.push_message(CrdsValue::new_signed( + CrdsData::RestartHeaviestFork(restart_heaviest_fork), + &self.keypair(), + )); + } + fn time_gossip_read_lock<'a>( &'a self, label: &'static str, @@ -1254,6 +1276,21 @@ impl ClusterInfo { .collect() } + pub fn get_restart_heaviest_fork(&self, cursor: &mut Cursor) -> Vec { + let self_shred_version = self.my_shred_version(); + let gossip_crds = self.gossip.crds.read().unwrap(); + gossip_crds + .get_entries(cursor) + .filter_map(|entry| { + let CrdsData::RestartHeaviestFork(fork) = &entry.value.data else { + return None; + }; + (fork.shred_version == self_shred_version).then_some(fork) + }) + .cloned() + .collect() + } + /// Returns duplicate-shreds inserted since the given cursor. 
pub(crate) fn get_duplicate_shreds(&self, cursor: &mut Cursor) -> Vec { let gossip_crds = self.gossip.crds.read().unwrap(); @@ -4603,4 +4640,78 @@ mod tests { assert_eq!(slots[0].from, node_pubkey); assert_eq!(slots[1].from, cluster_info.id()); } + + #[test] + fn test_push_restart_heaviest_fork() { + solana_logger::setup(); + let keypair = Arc::new(Keypair::new()); + let pubkey = keypair.pubkey(); + let contact_info = ContactInfo::new_localhost(&pubkey, 0); + let cluster_info = ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified); + + // make sure empty crds is handled correctly + let mut cursor = Cursor::default(); + let heaviest_forks = cluster_info.get_restart_heaviest_fork(&mut cursor); + assert_eq!(heaviest_forks, vec![]); + + // add new message + let slot1 = 53; + let hash1 = Hash::new_unique(); + let stake1 = 15_000_000; + cluster_info.push_restart_heaviest_fork(slot1, hash1, stake1); + cluster_info.flush_push_queue(); + + let heaviest_forks = cluster_info.get_restart_heaviest_fork(&mut cursor); + assert_eq!(heaviest_forks.len(), 1); + let fork = &heaviest_forks[0]; + assert_eq!(fork.last_slot, slot1); + assert_eq!(fork.last_slot_hash, hash1); + assert_eq!(fork.observed_stake, stake1); + assert_eq!(fork.from, pubkey); + + // Test with different shred versions. + let mut rng = rand::thread_rng(); + let pubkey2 = Pubkey::new_unique(); + let mut new_node = LegacyContactInfo::new_rand(&mut rng, Some(pubkey2)); + new_node.set_shred_version(42); + let slot2 = 54; + let hash2 = Hash::new_unique(); + let stake2 = 23_000_000; + let entries = vec![ + CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new_node)), + CrdsValue::new_unsigned(CrdsData::RestartHeaviestFork(RestartHeaviestFork { + from: pubkey2, + wallclock: timestamp(), + last_slot: slot2, + last_slot_hash: hash2, + observed_stake: stake2, + shred_version: 42, + })), + ]; + { + let mut gossip_crds = cluster_info.gossip.crds.write().unwrap(); + for entry in entries { + assert!(gossip_crds + .insert(entry, /*now=*/ 0, GossipRoute::LocalMessage) + .is_ok()); + } + } + // Should exclude other node's heaviest_fork because of different + // shred-version. + let heaviest_forks = cluster_info.get_restart_heaviest_fork(&mut Cursor::default()); + assert_eq!(heaviest_forks.len(), 1); + assert_eq!(heaviest_forks[0].from, pubkey); + // Match shred versions. + { + let mut node = cluster_info.my_contact_info.write().unwrap(); + node.set_shred_version(42); + } + cluster_info.push_self(); + cluster_info.flush_push_queue(); + + // Should now include the previous heaviest_fork from the other node. 
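+        // Our own RestartHeaviestFork entry was pushed while this node still
+        // had its old shred version, so the shred-version filter now drops it;
+        // only pubkey2's matching entry is returned.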
+ let heaviest_forks = cluster_info.get_restart_heaviest_fork(&mut Cursor::default()); + assert_eq!(heaviest_forks.len(), 1); + assert_eq!(heaviest_forks[0].from, pubkey2); + } } From 662e77feaac1b7aa6c047d496285a2546c93017e Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 24 Jan 2024 14:37:18 -0500 Subject: [PATCH 043/401] Refactor bank's load_and_execute_transaction (#34893) * Refactor bank load_and_execute_transactions * Remove redundant clippy annotation * Report check time where it is done in load_and_execute_transactions --- runtime/src/bank.rs | 71 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index a032fbcf428c17..07677bea4972e1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -336,6 +336,21 @@ pub struct LoadAndExecuteTransactionsOutput { pub error_counters: TransactionErrorMetrics, } +pub struct LoadAndExecuteSanitizedTransactionsOutput { + pub loaded_transactions: Vec, + // Vector of results indicating whether a transaction was executed or could not + // be executed. Note executed transactions can still have failed! + pub execution_results: Vec, + // Total number of transactions that were executed + pub executed_transactions_count: usize, + // Number of non-vote transactions that were executed + pub executed_non_vote_transactions_count: usize, + // Total number of the executed transactions that returned success/not + // an error. + pub executed_with_successful_result_count: usize, + pub signature_count: u64, +} + pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -5157,11 +5172,51 @@ impl Bank { &mut error_counters, ); check_time.stop(); + debug!("check: {}us", check_time.as_us()); + timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_time.as_us()); + + let sanitized_output = self.load_and_execute_sanitized_transactions( + sanitized_txs, + &mut check_results, + &mut error_counters, + enable_cpi_recording, + enable_log_recording, + enable_return_data_recording, + timings, + account_overrides, + log_messages_bytes_limit, + ); + LoadAndExecuteTransactionsOutput { + loaded_transactions: sanitized_output.loaded_transactions, + execution_results: sanitized_output.execution_results, + retryable_transaction_indexes, + executed_transactions_count: sanitized_output.executed_transactions_count, + executed_non_vote_transactions_count: sanitized_output + .executed_non_vote_transactions_count, + executed_with_successful_result_count: sanitized_output + .executed_with_successful_result_count, + signature_count: sanitized_output.signature_count, + error_counters, + } + } + #[allow(clippy::too_many_arguments)] + fn load_and_execute_sanitized_transactions( + &self, + sanitized_txs: &[SanitizedTransaction], + check_results: &mut [TransactionCheckResult], + error_counters: &mut TransactionErrorMetrics, + enable_cpi_recording: bool, + enable_log_recording: bool, + enable_return_data_recording: bool, + timings: &mut ExecuteTimings, + account_overrides: Option<&AccountOverrides>, + log_messages_bytes_limit: Option, + ) -> LoadAndExecuteSanitizedTransactionsOutput { let mut program_accounts_map = self.filter_executable_program_accounts( &self.ancestors, sanitized_txs, - &mut check_results, + check_results, PROGRAM_OWNERS, &self.blockhash_queue.read().unwrap(), ); @@ -5179,9 +5234,9 @@ impl Bank { &self.rc.accounts.accounts_db, &self.ancestors, sanitized_txs, - &check_results, + check_results, 
&self.blockhash_queue.read().unwrap(), - &mut error_counters, + error_counters, &self.rent_collector, &self.feature_set, &self.fee_structure, @@ -5233,7 +5288,7 @@ impl Bank { enable_log_recording, enable_return_data_recording, timings, - &mut error_counters, + error_counters, log_messages_bytes_limit, &programs_loaded_for_tx_batch.borrow(), ); @@ -5269,14 +5324,12 @@ impl Bank { ); debug!( - "check: {}us load: {}us execute: {}us txs_len={}", - check_time.as_us(), + "load: {}us execute: {}us txs_len={}", load_time.as_us(), execution_time.as_us(), sanitized_txs.len(), ); - timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_time.as_us()); timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); @@ -5392,15 +5445,13 @@ impl Bank { *err_count + executed_with_successful_result_count ); } - LoadAndExecuteTransactionsOutput { + LoadAndExecuteSanitizedTransactionsOutput { loaded_transactions, execution_results, - retryable_transaction_indexes, executed_transactions_count, executed_non_vote_transactions_count, executed_with_successful_result_count, signature_count, - error_counters, } } From 5898b9a2f70c6cc7878510f5da0d70d4602a5f30 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 24 Jan 2024 15:31:50 -0500 Subject: [PATCH 044/401] Cleans up stale accounts hash cache files (#34933) --- accounts-db/src/accounts_db.rs | 16 ++-- accounts-db/src/cache_hash_data.rs | 121 ++++++++++++++++++++++++----- 2 files changed, 114 insertions(+), 23 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 493a8b22c9d0ae..4c8d479cd5fc97 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7537,6 +7537,7 @@ impl AccountsDb { config: &CalcAccountsHashConfig<'_>, kind: CalcAccountsHashKind, slot: Slot, + storages_start_slot: Slot, ) -> CacheHashData { let accounts_hash_cache_path = if !config.store_detailed_debug_info_on_failure { accounts_hash_cache_path @@ -7548,7 +7549,10 @@ impl AccountsDb { _ = std::fs::remove_dir_all(&failed_dir); failed_dir }; - CacheHashData::new(accounts_hash_cache_path, kind == CalcAccountsHashKind::Full) + CacheHashData::new( + accounts_hash_cache_path, + (kind == CalcAccountsHashKind::Incremental).then_some(storages_start_slot), + ) } // modeled after calculate_accounts_delta_hash @@ -7607,7 +7611,8 @@ impl AccountsDb { ) -> Result<(AccountsHashKind, u64), AccountsHashVerificationError> { let total_time = Measure::start(""); let _guard = self.active_stats.activate(ActiveStatItem::Hash); - stats.oldest_root = storages.range().start; + let storages_start_slot = storages.range().start; + stats.oldest_root = storages_start_slot; self.mark_old_slots_as_dirty(storages, config.epoch_schedule.slots_per_epoch, &mut stats); @@ -7623,7 +7628,8 @@ impl AccountsDb { accounts_hash_cache_path, config, kind, - slot + slot, + storages_start_slot, )); stats.cache_hash_data_us += cache_hash_data_us; @@ -9769,7 +9775,7 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let accounts_hash_cache_path = temp_dir.path().to_path_buf(); self.scan_snapshot_stores_with_cache( - &CacheHashData::new(accounts_hash_cache_path, true), + &CacheHashData::new(accounts_hash_cache_path, None), storage, stats, bins, @@ -10837,7 +10843,7 @@ pub mod tests { }; let result = accounts_db.scan_account_storage_no_bank( - &CacheHashData::new(accounts_hash_cache_path, true), + &CacheHashData::new(accounts_hash_cache_path, None), 
            &CalcAccountsHashConfig::default(),
            &get_storage_refs(&[storage]),
            test_scan,
diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs
index c839a8338c2fc2..fbd24e19f9bf7b 100644
--- a/accounts-db/src/cache_hash_data.rs
+++ b/accounts-db/src/cache_hash_data.rs
@@ -6,6 +6,7 @@ use {
     bytemuck::{Pod, Zeroable},
     memmap2::MmapMut,
     solana_measure::measure::Measure,
+    solana_sdk::clock::Slot,
     std::{
         collections::HashSet,
         fs::{self, remove_file, File, OpenOptions},
@@ -192,7 +193,8 @@ impl CacheHashDataFile {
 pub(crate) struct CacheHashData {
     cache_dir: PathBuf,
     pre_existing_cache_files: Arc<Mutex<HashSet<PathBuf>>>,
-    should_delete_old_cache_files_on_drop: bool,
+    /// Decides which old cache files to delete. See `delete_old_cache_files()` for more info.
+    storages_start_slot: Option<Slot>,
     pub stats: Arc<CacheHashDataStats>,
 }
 
@@ -204,10 +206,7 @@ impl Drop for CacheHashData {
 }
 
 impl CacheHashData {
-    pub(crate) fn new(
-        cache_dir: PathBuf,
-        should_delete_old_cache_files_on_drop: bool,
-    ) -> CacheHashData {
+    pub(crate) fn new(cache_dir: PathBuf, storages_start_slot: Option<Slot>) -> CacheHashData {
         std::fs::create_dir_all(&cache_dir).unwrap_or_else(|err| {
             panic!("error creating cache dir {}: {err}", cache_dir.display())
         });
 
         let result = CacheHashData {
             cache_dir,
             pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())),
-            should_delete_old_cache_files_on_drop,
+            storages_start_slot,
             stats: Arc::default(),
         };
 
@@ -225,17 +224,35 @@ impl CacheHashData {
 
     /// delete all pre-existing files that will not be used
     pub(crate) fn delete_old_cache_files(&self) {
-        if self.should_delete_old_cache_files_on_drop {
-            let old_cache_files =
-                std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap());
-            if !old_cache_files.is_empty() {
-                self.stats
-                    .unused_cache_files
-                    .fetch_add(old_cache_files.len(), Ordering::Relaxed);
-                for file_name in old_cache_files.iter() {
-                    let result = self.cache_dir.join(file_name);
-                    let _ = fs::remove_file(result);
-                }
+        // all the remaining files in `pre_existing_cache_files` were *not* used for this
+        // accounts hash calculation
+        let mut old_cache_files =
+            std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap());
+
+        // If `storages_start_slot` is None, we're doing a full accounts hash calculation, and thus
+        // all unused cache files can be deleted.
+        // If `storages_start_slot` is Some, we're doing an incremental accounts hash calculation,
+        // and we only want to delete the unused cache files *that IAH considered*.
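+        // For example (hypothetical values): with `storages_start_slot = 100`, a
+        // leftover "50.75.0.65536.<hash>" file is spared, since its slot range
+        // predates the incremental range, while "100.150.0.65536.<hash>" stays in
+        // the deletion list handled below.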
+ if let Some(storages_start_slot) = self.storages_start_slot { + old_cache_files.retain(|old_cache_file| { + let Some(parsed_filename) = parse_filename(old_cache_file) else { + // if parsing the cache filename fails, we *do* want to delete it + return true; + }; + + // if the old cache file is in the incremental accounts hash calculation range, + // then delete it + parsed_filename.slot_range_start >= storages_start_slot + }); + } + + if !old_cache_files.is_empty() { + self.stats + .unused_cache_files + .fetch_add(old_cache_files.len(), Ordering::Relaxed); + for file_name in old_cache_files.iter() { + let result = self.cache_dir.join(file_name); + let _ = fs::remove_file(result); } } } @@ -360,6 +377,39 @@ impl CacheHashData { } } +/// The values of each part of a cache hash data filename +#[derive(Debug)] +pub struct ParsedFilename { + pub slot_range_start: Slot, + pub slot_range_end: Slot, + pub bin_range_start: u64, + pub bin_range_end: u64, + pub hash: u64, +} + +/// Parses a cache hash data filename into its parts +/// +/// Returns None if the filename is invalid +fn parse_filename(cache_filename: impl AsRef) -> Option { + let filename = cache_filename.as_ref().to_string_lossy().to_string(); + let parts: Vec<_> = filename.split('.').collect(); // The parts are separated by a `.` + if parts.len() != 5 { + return None; + } + let slot_range_start = parts.first()?.parse().ok()?; + let slot_range_end = parts.get(1)?.parse().ok()?; + let bin_range_start = parts.get(2)?.parse().ok()?; + let bin_range_end = parts.get(3)?.parse().ok()?; + let hash = u64::from_str_radix(parts.get(4)?, 16).ok()?; // the hash is in hex + Some(ParsedFilename { + slot_range_start, + slot_range_end, + bin_range_start, + bin_range_end, + hash, + }) +} + #[cfg(test)] mod tests { use {super::*, crate::accounts_hash::AccountHash, rand::Rng}; @@ -427,7 +477,7 @@ mod tests { data_this_pass.push(this_bin_data); } } - let cache = CacheHashData::new(cache_dir.clone(), true); + let cache = CacheHashData::new(cache_dir.clone(), None); let file_name = PathBuf::from("test"); cache.save(&file_name, &data_this_pass).unwrap(); cache.get_cache_files(); @@ -517,4 +567,39 @@ mod tests { ct, ) } + + #[test] + fn test_parse_filename() { + let good_filename = "123.456.0.65536.537d65697d9b2baa"; + let parsed_filename = parse_filename(good_filename).unwrap(); + assert_eq!(parsed_filename.slot_range_start, 123); + assert_eq!(parsed_filename.slot_range_end, 456); + assert_eq!(parsed_filename.bin_range_start, 0); + assert_eq!(parsed_filename.bin_range_end, 65536); + assert_eq!(parsed_filename.hash, 0x537d65697d9b2baa); + + let bad_filenames = [ + // bad separator + "123-456-0-65536.537d65697d9b2baa", + // bad values + "abc.456.0.65536.537d65697d9b2baa", + "123.xyz.0.65536.537d65697d9b2baa", + "123.456.?.65536.537d65697d9b2baa", + "123.456.0.@#$%^.537d65697d9b2baa", + "123.456.0.65536.base19shouldfail", + "123.456.0.65536.123456789012345678901234567890", + // missing values + "123.456.0.65536.", + "123.456.0.65536", + // extra junk + "123.456.0.65536.537d65697d9b2baa.42", + "123.456.0.65536.537d65697d9b2baa.", + "123.456.0.65536.537d65697d9b2baa/", + ".123.456.0.65536.537d65697d9b2baa", + "/123.456.0.65536.537d65697d9b2baa", + ]; + for bad_filename in bad_filenames { + assert!(parse_filename(bad_filename).is_none()); + } + } } From 3004eaa9bd527e0ca4731db485ce4dbde368618a Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Thu, 25 Jan 2024 06:27:02 +0900 Subject: [PATCH 045/401] [clap-v3-utils] Add functions to parse directly from 
`SignerSource` (#34678) * add `_from_source` function variants for signer, keypair, and pubkey * make `parse_signer_source` an associated function of `SignerSource` * refactor `SignerSource` into `input_parsers::signer` * make `_from_source` functions public * remove unnecessary import --- clap-v3-utils/src/input_parsers/signer.rs | 273 ++++++++++++++- clap-v3-utils/src/input_validators.rs | 9 +- clap-v3-utils/src/keypair.rs | 405 ++++++---------------- 3 files changed, 386 insertions(+), 301 deletions(-) diff --git a/clap-v3-utils/src/input_parsers/signer.rs b/clap-v3-utils/src/input_parsers/signer.rs index 468e2dfef95238..d71a37b888646a 100644 --- a/clap-v3-utils/src/input_parsers/signer.rs +++ b/clap-v3-utils/src/input_parsers/signer.rs @@ -1,20 +1,162 @@ use { crate::{ input_parsers::{keypair_of, keypairs_of, pubkey_of, pubkeys_of}, - keypair::{ - parse_signer_source, pubkey_from_path, resolve_signer_from_path, signer_from_path, - SignerSource, SignerSourceError, SignerSourceKind, - }, + keypair::{pubkey_from_path, resolve_signer_from_path, signer_from_path, ASK_KEYWORD}, }, clap::{builder::ValueParser, ArgMatches}, - solana_remote_wallet::remote_wallet::RemoteWalletManager, + solana_remote_wallet::{ + locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError}, + remote_wallet::RemoteWalletManager, + }, solana_sdk::{ + derivation_path::{DerivationPath, DerivationPathError}, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }, std::{error, rc::Rc, str::FromStr}, + thiserror::Error, }; +const SIGNER_SOURCE_PROMPT: &str = "prompt"; +const SIGNER_SOURCE_FILEPATH: &str = "file"; +const SIGNER_SOURCE_USB: &str = "usb"; +const SIGNER_SOURCE_STDIN: &str = "stdin"; +const SIGNER_SOURCE_PUBKEY: &str = "pubkey"; + +#[derive(Debug, Error)] +pub enum SignerSourceError { + #[error("unrecognized signer source")] + UnrecognizedSource, + #[error(transparent)] + RemoteWalletLocatorError(#[from] RemoteWalletLocatorError), + #[error(transparent)] + DerivationPathError(#[from] DerivationPathError), + #[error(transparent)] + IoError(#[from] std::io::Error), + #[error("unsupported source")] + UnsupportedSource, +} + +#[derive(Clone)] +pub enum SignerSourceKind { + Prompt, + Filepath(String), + Usb(RemoteWalletLocator), + Stdin, + Pubkey(Pubkey), +} + +impl AsRef for SignerSourceKind { + fn as_ref(&self) -> &str { + match self { + Self::Prompt => SIGNER_SOURCE_PROMPT, + Self::Filepath(_) => SIGNER_SOURCE_FILEPATH, + Self::Usb(_) => SIGNER_SOURCE_USB, + Self::Stdin => SIGNER_SOURCE_STDIN, + Self::Pubkey(_) => SIGNER_SOURCE_PUBKEY, + } + } +} + +impl std::fmt::Debug for SignerSourceKind { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s: &str = self.as_ref(); + write!(f, "{s}") + } +} + +#[derive(Debug, Clone)] +pub struct SignerSource { + pub kind: SignerSourceKind, + pub derivation_path: Option, + pub legacy: bool, +} + +impl SignerSource { + fn new(kind: SignerSourceKind) -> Self { + Self { + kind, + derivation_path: None, + legacy: false, + } + } + + fn new_legacy(kind: SignerSourceKind) -> Self { + Self { + kind, + derivation_path: None, + legacy: true, + } + } + + pub(crate) fn parse>(source: S) -> Result { + let source = source.as_ref(); + let source = { + #[cfg(target_family = "windows")] + { + // trim matched single-quotes since cmd.exe won't + let mut source = source; + while let Some(trimmed) = source.strip_prefix('\'') { + source = if let Some(trimmed) = trimmed.strip_suffix('\'') { + trimmed + } else { + break; + } + } + source.replace('\\', 
"/") + } + #[cfg(not(target_family = "windows"))] + { + source.to_string() + } + }; + match uriparse::URIReference::try_from(source.as_str()) { + Err(_) => Err(SignerSourceError::UnrecognizedSource), + Ok(uri) => { + if let Some(scheme) = uri.scheme() { + let scheme = scheme.as_str().to_ascii_lowercase(); + match scheme.as_str() { + SIGNER_SOURCE_PROMPT => Ok(SignerSource { + kind: SignerSourceKind::Prompt, + derivation_path: DerivationPath::from_uri_any_query(&uri)?, + legacy: false, + }), + SIGNER_SOURCE_FILEPATH => Ok(SignerSource::new( + SignerSourceKind::Filepath(uri.path().to_string()), + )), + SIGNER_SOURCE_USB => Ok(SignerSource { + kind: SignerSourceKind::Usb(RemoteWalletLocator::new_from_uri(&uri)?), + derivation_path: DerivationPath::from_uri_key_query(&uri)?, + legacy: false, + }), + SIGNER_SOURCE_STDIN => Ok(SignerSource::new(SignerSourceKind::Stdin)), + _ => { + #[cfg(target_family = "windows")] + // On Windows, an absolute path's drive letter will be parsed as the URI + // scheme. Assume a filepath source in case of a single character shceme. + if scheme.len() == 1 { + return Ok(SignerSource::new(SignerSourceKind::Filepath(source))); + } + Err(SignerSourceError::UnrecognizedSource) + } + } + } else { + match source.as_str() { + STDOUT_OUTFILE_TOKEN => Ok(SignerSource::new(SignerSourceKind::Stdin)), + ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)), + _ => match Pubkey::from_str(source.as_str()) { + Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))), + Err(_) => std::fs::metadata(source.as_str()) + .map(|_| SignerSource::new(SignerSourceKind::Filepath(source))) + .map_err(|err| err.into()), + }, + } + } + } + } + } +} + // Sentinel value used to indicate to write to screen instead of file pub const STDOUT_OUTFILE_TOKEN: &str = "-"; @@ -72,7 +214,7 @@ impl SignerSourceParserBuilder { pub fn build(self) -> ValueParser { ValueParser::from( move |arg: &str| -> Result { - let signer_source = parse_signer_source(arg)?; + let signer_source = SignerSource::parse(arg)?; if !self.allow_legacy && signer_source.legacy { return Err(SignerSourceError::UnsupportedSource); } @@ -240,11 +382,130 @@ mod tests { super::*, assert_matches::assert_matches, clap::{Arg, Command}, + solana_remote_wallet::locator::Manufacturer, solana_sdk::signature::write_keypair_file, std::fs, tempfile::NamedTempFile, }; + #[test] + fn test_parse_signer_source() { + assert_matches!( + SignerSource::parse(STDOUT_OUTFILE_TOKEN).unwrap(), + SignerSource { + kind: SignerSourceKind::Stdin, + derivation_path: None, + legacy: false, + } + ); + let stdin = "stdin:".to_string(); + assert_matches!( + SignerSource::parse(stdin).unwrap(), + SignerSource { + kind: SignerSourceKind::Stdin, + derivation_path: None, + legacy: false, + } + ); + assert_matches!( + SignerSource::parse(ASK_KEYWORD).unwrap(), + SignerSource { + kind: SignerSourceKind::Prompt, + derivation_path: None, + legacy: true, + } + ); + let pubkey = Pubkey::new_unique(); + assert!( + matches!(SignerSource::parse(pubkey.to_string()).unwrap(), SignerSource { + kind: SignerSourceKind::Pubkey(p), + derivation_path: None, + legacy: false, + } + if p == pubkey) + ); + + // Set up absolute and relative path strs + let file0 = NamedTempFile::new().unwrap(); + let path = file0.path(); + assert!(path.is_absolute()); + let absolute_path_str = path.to_str().unwrap(); + + let file1 = NamedTempFile::new_in(std::env::current_dir().unwrap()).unwrap(); + let path = file1.path().file_name().unwrap().to_str().unwrap(); + let path = 
std::path::Path::new(path); + assert!(path.is_relative()); + let relative_path_str = path.to_str().unwrap(); + + assert!( + matches!(SignerSource::parse(absolute_path_str).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == absolute_path_str) + ); + assert!( + matches!(SignerSource::parse(relative_path_str).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == relative_path_str) + ); + + let usb = "usb://ledger".to_string(); + let expected_locator = RemoteWalletLocator { + manufacturer: Manufacturer::Ledger, + pubkey: None, + }; + assert_matches!(SignerSource::parse(usb).unwrap(), SignerSource { + kind: SignerSourceKind::Usb(u), + derivation_path: None, + legacy: false, + } if u == expected_locator); + let usb = "usb://ledger?key=0/0".to_string(); + let expected_locator = RemoteWalletLocator { + manufacturer: Manufacturer::Ledger, + pubkey: None, + }; + let expected_derivation_path = Some(DerivationPath::new_bip44(Some(0), Some(0))); + assert_matches!(SignerSource::parse(usb).unwrap(), SignerSource { + kind: SignerSourceKind::Usb(u), + derivation_path: d, + legacy: false, + } if u == expected_locator && d == expected_derivation_path); + // Catchall into SignerSource::Filepath fails + let junk = "sometextthatisnotapubkeyorfile".to_string(); + assert!(Pubkey::from_str(&junk).is_err()); + assert_matches!( + SignerSource::parse(&junk), + Err(SignerSourceError::IoError(_)) + ); + + let prompt = "prompt:".to_string(); + assert_matches!( + SignerSource::parse(prompt).unwrap(), + SignerSource { + kind: SignerSourceKind::Prompt, + derivation_path: None, + legacy: false, + } + ); + assert!( + matches!(SignerSource::parse(format!("file:{absolute_path_str}")).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == absolute_path_str) + ); + assert!( + matches!(SignerSource::parse(format!("file:{relative_path_str}")).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == relative_path_str) + ); + } + fn app<'ab>() -> Command<'ab> { Command::new("test") .arg( diff --git a/clap-v3-utils/src/input_validators.rs b/clap-v3-utils/src/input_validators.rs index 7938dec360bdc8..0b3f75e1a6f334 100644 --- a/clap-v3-utils/src/input_validators.rs +++ b/clap-v3-utils/src/input_validators.rs @@ -1,5 +1,8 @@ use { - crate::keypair::{parse_signer_source, SignerSourceKind, ASK_KEYWORD}, + crate::{ + input_parsers::signer::{SignerSource, SignerSourceKind}, + keypair::ASK_KEYWORD, + }, chrono::DateTime, solana_sdk::{ clock::{Epoch, Slot}, @@ -119,7 +122,7 @@ pub fn is_prompt_signer_source(string: &str) -> Result<(), String> { if string == ASK_KEYWORD { return Ok(()); } - match parse_signer_source(string) + match SignerSource::parse(string) .map_err(|err| format!("{err}"))? .kind { @@ -154,7 +157,7 @@ pub fn is_valid_pubkey(string: T) -> Result<(), String> where T: AsRef + Display, { - match parse_signer_source(string.as_ref()) + match SignerSource::parse(string.as_ref()) .map_err(|err| format!("{err}"))? 
.kind { diff --git a/clap-v3-utils/src/keypair.rs b/clap-v3-utils/src/keypair.rs index 8adbfff3631f8b..7e41b3c82fbbb3 100644 --- a/clap-v3-utils/src/keypair.rs +++ b/clap-v3-utils/src/keypair.rs @@ -11,7 +11,7 @@ use { crate::{ - input_parsers::{signer::try_pubkeys_sigs_of, STDOUT_OUTFILE_TOKEN}, + input_parsers::signer::{try_pubkeys_sigs_of, SignerSource, SignerSourceKind}, offline::{SIGNER_ARG, SIGN_ONLY_ARG}, ArgConstant, }, @@ -19,12 +19,11 @@ use { clap::ArgMatches, rpassword::prompt_password, solana_remote_wallet::{ - locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError}, remote_keypair::generate_remote_keypair, remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager}, }, solana_sdk::{ - derivation_path::{DerivationPath, DerivationPathError}, + derivation_path::DerivationPath, hash::Hash, message::Message, pubkey::Pubkey, @@ -37,15 +36,12 @@ use { solana_zk_token_sdk::encryption::{auth_encryption::AeKey, elgamal::ElGamalKeypair}, std::{ cell::RefCell, - convert::TryFrom, error, io::{stdin, stdout, Write}, ops::Deref, process::exit, rc::Rc, - str::FromStr, }, - thiserror::Error, }; pub struct SignOnly { @@ -166,7 +162,7 @@ impl DefaultSigner { fn path(&self) -> Result<&str, Box> { if !self.is_path_checked.borrow().deref() { - parse_signer_source(&self.path) + SignerSource::parse(&self.path) .and_then(|s| { if let SignerSourceKind::Filepath(path) = &s.kind { std::fs::metadata(path).map(|_| ()).map_err(|e| e.into()) @@ -371,148 +367,6 @@ impl DefaultSigner { } } -#[derive(Debug, Clone)] -pub(crate) struct SignerSource { - pub kind: SignerSourceKind, - pub derivation_path: Option, - pub legacy: bool, -} - -impl SignerSource { - fn new(kind: SignerSourceKind) -> Self { - Self { - kind, - derivation_path: None, - legacy: false, - } - } - - fn new_legacy(kind: SignerSourceKind) -> Self { - Self { - kind, - derivation_path: None, - legacy: true, - } - } -} - -const SIGNER_SOURCE_PROMPT: &str = "prompt"; -const SIGNER_SOURCE_FILEPATH: &str = "file"; -const SIGNER_SOURCE_USB: &str = "usb"; -const SIGNER_SOURCE_STDIN: &str = "stdin"; -const SIGNER_SOURCE_PUBKEY: &str = "pubkey"; - -#[derive(Clone)] -pub(crate) enum SignerSourceKind { - Prompt, - Filepath(String), - Usb(RemoteWalletLocator), - Stdin, - Pubkey(Pubkey), -} - -impl AsRef for SignerSourceKind { - fn as_ref(&self) -> &str { - match self { - Self::Prompt => SIGNER_SOURCE_PROMPT, - Self::Filepath(_) => SIGNER_SOURCE_FILEPATH, - Self::Usb(_) => SIGNER_SOURCE_USB, - Self::Stdin => SIGNER_SOURCE_STDIN, - Self::Pubkey(_) => SIGNER_SOURCE_PUBKEY, - } - } -} - -impl std::fmt::Debug for SignerSourceKind { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let s: &str = self.as_ref(); - write!(f, "{s}") - } -} - -#[derive(Debug, Error)] -pub(crate) enum SignerSourceError { - #[error("unrecognized signer source")] - UnrecognizedSource, - #[error(transparent)] - RemoteWalletLocatorError(#[from] RemoteWalletLocatorError), - #[error(transparent)] - DerivationPathError(#[from] DerivationPathError), - #[error(transparent)] - IoError(#[from] std::io::Error), - #[error("unsupported source")] - UnsupportedSource, -} - -pub(crate) fn parse_signer_source>( - source: S, -) -> Result { - let source = source.as_ref(); - let source = { - #[cfg(target_family = "windows")] - { - // trim matched single-quotes since cmd.exe won't - let mut source = source; - while let Some(trimmed) = source.strip_prefix('\'') { - source = if let Some(trimmed) = trimmed.strip_suffix('\'') { - trimmed - } else { 
- break; - } - } - source.replace('\\', "/") - } - #[cfg(not(target_family = "windows"))] - { - source.to_string() - } - }; - match uriparse::URIReference::try_from(source.as_str()) { - Err(_) => Err(SignerSourceError::UnrecognizedSource), - Ok(uri) => { - if let Some(scheme) = uri.scheme() { - let scheme = scheme.as_str().to_ascii_lowercase(); - match scheme.as_str() { - SIGNER_SOURCE_PROMPT => Ok(SignerSource { - kind: SignerSourceKind::Prompt, - derivation_path: DerivationPath::from_uri_any_query(&uri)?, - legacy: false, - }), - SIGNER_SOURCE_FILEPATH => Ok(SignerSource::new(SignerSourceKind::Filepath( - uri.path().to_string(), - ))), - SIGNER_SOURCE_USB => Ok(SignerSource { - kind: SignerSourceKind::Usb(RemoteWalletLocator::new_from_uri(&uri)?), - derivation_path: DerivationPath::from_uri_key_query(&uri)?, - legacy: false, - }), - SIGNER_SOURCE_STDIN => Ok(SignerSource::new(SignerSourceKind::Stdin)), - _ => { - #[cfg(target_family = "windows")] - // On Windows, an absolute path's drive letter will be parsed as the URI - // scheme. Assume a filepath source in case of a single character shceme. - if scheme.len() == 1 { - return Ok(SignerSource::new(SignerSourceKind::Filepath(source))); - } - Err(SignerSourceError::UnrecognizedSource) - } - } - } else { - match source.as_str() { - STDOUT_OUTFILE_TOKEN => Ok(SignerSource::new(SignerSourceKind::Stdin)), - ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)), - _ => match Pubkey::from_str(source.as_str()) { - Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))), - Err(_) => std::fs::metadata(source.as_str()) - .map(|_| SignerSource::new(SignerSourceKind::Filepath(source))) - .map_err(|err| err.into()), - }, - } - } - } - } -} - pub fn presigner_from_pubkey_sigs( pubkey: &Pubkey, signers: &[(Pubkey, Signature)], @@ -697,6 +551,16 @@ pub fn signer_from_path( signer_from_path_with_config(matches, path, keypair_name, wallet_manager, &config) } +pub fn signer_from_source( + matches: &ArgMatches, + source: &SignerSource, + keypair_name: &str, + wallet_manager: &mut Option>, +) -> Result, Box> { + let config = SignerFromPathConfig::default(); + signer_from_source_with_config(matches, source, keypair_name, wallet_manager, &config) +} + /// Loads a [Signer] from one of several possible sources. /// /// The `path` is not strictly a file system path, but is interpreted as various @@ -760,12 +624,23 @@ pub fn signer_from_path_with_config( keypair_name: &str, wallet_manager: &mut Option>, config: &SignerFromPathConfig, +) -> Result, Box> { + let source = SignerSource::parse(path)?; + signer_from_source_with_config(matches, &source, keypair_name, wallet_manager, config) +} + +pub fn signer_from_source_with_config( + matches: &ArgMatches, + source: &SignerSource, + keypair_name: &str, + wallet_manager: &mut Option>, + config: &SignerFromPathConfig, ) -> Result, Box> { let SignerSource { kind, derivation_path, legacy, - } = parse_signer_source(path)?; + } = source; match kind { SignerSourceKind::Prompt => { let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; @@ -773,11 +648,11 @@ pub fn signer_from_path_with_config( keypair_name, skip_validation, false, - derivation_path, - legacy, + derivation_path.clone(), + *legacy, )?)) } - SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { + SignerSourceKind::Filepath(path) => match read_keypair_file(path) { Err(e) => Err(std::io::Error::new( std::io::ErrorKind::Other, format!("could not read keypair file \"{path}\". 
Run \"solana-keygen new\" to create a keypair file: {e}"), @@ -796,8 +671,8 @@ pub fn signer_from_path_with_config( if let Some(wallet_manager) = wallet_manager { let confirm_key = matches.try_contains_id("confirm_key").unwrap_or(false); Ok(Box::new(generate_remote_keypair( - locator, - derivation_path.unwrap_or_default(), + locator.clone(), + derivation_path.clone().unwrap_or_default(), wallet_manager, confirm_key, keypair_name, @@ -809,11 +684,11 @@ pub fn signer_from_path_with_config( SignerSourceKind::Pubkey(pubkey) => { let presigner = try_pubkeys_sigs_of(matches, SIGNER_ARG.name)? .as_ref() - .and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners)); + .and_then(|presigners| presigner_from_pubkey_sigs(pubkey, presigners)); if let Some(presigner) = presigner { Ok(Box::new(presigner)) } else if config.allow_null_signer || matches.try_contains_id(SIGN_ONLY_ARG.name)? { - Ok(Box::new(NullSigner::new(&pubkey))) + Ok(Box::new(NullSigner::new(pubkey))) } else { Err(std::io::Error::new( std::io::ErrorKind::Other, @@ -868,10 +743,19 @@ pub fn pubkey_from_path( keypair_name: &str, wallet_manager: &mut Option>, ) -> Result> { - let SignerSource { kind, .. } = parse_signer_source(path)?; - match kind { + let source = SignerSource::parse(path)?; + pubkey_from_source(matches, &source, keypair_name, wallet_manager) +} + +pub fn pubkey_from_source( + matches: &ArgMatches, + source: &SignerSource, + keypair_name: &str, + wallet_manager: &mut Option>, +) -> Result> { + match source.kind { SignerSourceKind::Pubkey(pubkey) => Ok(pubkey), - _ => Ok(signer_from_path(matches, path, keypair_name, wallet_manager)?.pubkey()), + _ => Ok(signer_from_source(matches, source, keypair_name, wallet_manager)?.pubkey()), } } @@ -880,12 +764,22 @@ pub fn resolve_signer_from_path( path: &str, keypair_name: &str, wallet_manager: &mut Option>, +) -> Result, Box> { + let source = SignerSource::parse(path)?; + resolve_signer_from_source(matches, &source, keypair_name, wallet_manager) +} + +pub fn resolve_signer_from_source( + matches: &ArgMatches, + source: &SignerSource, + keypair_name: &str, + wallet_manager: &mut Option>, ) -> Result, Box> { let SignerSource { kind, derivation_path, legacy, - } = parse_signer_source(path)?; + } = source; match kind { SignerSourceKind::Prompt => { let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; @@ -895,12 +789,12 @@ pub fn resolve_signer_from_path( keypair_name, skip_validation, false, - derivation_path, - legacy, + derivation_path.clone(), + *legacy, ) .map(|_| None) } - SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { + SignerSourceKind::Filepath(path) => match read_keypair_file(path) { Err(e) => Err(std::io::Error::new( std::io::ErrorKind::Other, format!( @@ -924,8 +818,8 @@ pub fn resolve_signer_from_path( if let Some(wallet_manager) = wallet_manager { let confirm_key = matches.try_contains_id("confirm_key").unwrap_or(false); let path = generate_remote_keypair( - locator, - derivation_path.unwrap_or_default(), + locator.clone(), + derivation_path.clone().unwrap_or_default(), wallet_manager, confirm_key, keypair_name, @@ -936,7 +830,7 @@ pub fn resolve_signer_from_path( Err(RemoteWalletError::NoDeviceFound.into()) } } - _ => Ok(Some(path.to_string())), + SignerSourceKind::Pubkey(pubkey) => Ok(Some(pubkey.to_string())), } } @@ -1015,6 +909,20 @@ pub fn keypair_from_path( Ok(keypair) } +pub fn keypair_from_source( + matches: &ArgMatches, + source: &SignerSource, + keypair_name: &str, + confirm_pubkey: bool, +) -> 
Result> { + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; + let keypair = encodable_key_from_source(source, keypair_name, skip_validation)?; + if confirm_pubkey { + confirm_encodable_keypair_pubkey(&keypair, "pubkey"); + } + Ok(keypair) +} + /// Loads an [ElGamalKeypair] from one of several possible sources. /// /// If `confirm_pubkey` is `true` then after deriving the keypair, the user will @@ -1063,6 +971,20 @@ pub fn elgamal_keypair_from_path( Ok(elgamal_keypair) } +pub fn elgamal_keypair_from_source( + matches: &ArgMatches, + source: &SignerSource, + elgamal_keypair_name: &str, + confirm_pubkey: bool, +) -> Result> { + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; + let elgamal_keypair = encodable_key_from_source(source, elgamal_keypair_name, skip_validation)?; + if confirm_pubkey { + confirm_encodable_keypair_pubkey(&elgamal_keypair, "ElGamal pubkey"); + } + Ok(elgamal_keypair) +} + fn confirm_encodable_keypair_pubkey(keypair: &K, pubkey_label: &str) { let pubkey = keypair.encodable_pubkey().to_string(); println!("Recovered {pubkey_label} `{pubkey:?}`. Continue? (y/n): "); @@ -1114,24 +1036,42 @@ pub fn ae_key_from_path( encodable_key_from_path(path, key_name, skip_validation) } +pub fn ae_key_from_source( + matches: &ArgMatches, + source: &SignerSource, + key_name: &str, +) -> Result> { + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; + encodable_key_from_source(source, key_name, skip_validation) +} + fn encodable_key_from_path( path: &str, keypair_name: &str, skip_validation: bool, +) -> Result> { + let source = SignerSource::parse(path)?; + encodable_key_from_source(&source, keypair_name, skip_validation) +} + +fn encodable_key_from_source( + source: &SignerSource, + keypair_name: &str, + skip_validation: bool, ) -> Result> { let SignerSource { kind, derivation_path, legacy, - } = parse_signer_source(path)?; + } = source; match kind { SignerSourceKind::Prompt => Ok(encodable_key_from_seed_phrase( keypair_name, skip_validation, - derivation_path, - legacy, + derivation_path.clone(), + *legacy, )?), - SignerSourceKind::Filepath(path) => match K::read_from_file(&path) { + SignerSourceKind::Filepath(path) => match K::read_from_file(path) { Err(e) => Err(std::io::Error::new( std::io::ErrorKind::Other, format!( @@ -1270,11 +1210,10 @@ mod tests { use { super::*, crate::offline::OfflineArgs, - assert_matches::assert_matches, clap::{Arg, Command}, - solana_remote_wallet::{locator::Manufacturer, remote_wallet::initialize_wallet_manager}, + solana_remote_wallet::remote_wallet::initialize_wallet_manager, solana_sdk::{signer::keypair::write_keypair_file, system_instruction}, - tempfile::{NamedTempFile, TempDir}, + tempfile::TempDir, }; #[test] @@ -1317,124 +1256,6 @@ mod tests { assert_eq!(signer_pubkeys, expect); } - #[test] - fn test_parse_signer_source() { - assert_matches!( - parse_signer_source(STDOUT_OUTFILE_TOKEN).unwrap(), - SignerSource { - kind: SignerSourceKind::Stdin, - derivation_path: None, - legacy: false, - } - ); - let stdin = "stdin:".to_string(); - assert_matches!( - parse_signer_source(stdin).unwrap(), - SignerSource { - kind: SignerSourceKind::Stdin, - derivation_path: None, - legacy: false, - } - ); - assert_matches!( - parse_signer_source(ASK_KEYWORD).unwrap(), - SignerSource { - kind: SignerSourceKind::Prompt, - derivation_path: None, - legacy: true, - } - ); - let pubkey = Pubkey::new_unique(); - assert!( - 
matches!(parse_signer_source(pubkey.to_string()).unwrap(), SignerSource { - kind: SignerSourceKind::Pubkey(p), - derivation_path: None, - legacy: false, - } - if p == pubkey) - ); - - // Set up absolute and relative path strs - let file0 = NamedTempFile::new().unwrap(); - let path = file0.path(); - assert!(path.is_absolute()); - let absolute_path_str = path.to_str().unwrap(); - - let file1 = NamedTempFile::new_in(std::env::current_dir().unwrap()).unwrap(); - let path = file1.path().file_name().unwrap().to_str().unwrap(); - let path = std::path::Path::new(path); - assert!(path.is_relative()); - let relative_path_str = path.to_str().unwrap(); - - assert!( - matches!(parse_signer_source(absolute_path_str).unwrap(), SignerSource { - kind: SignerSourceKind::Filepath(p), - derivation_path: None, - legacy: false, - } if p == absolute_path_str) - ); - assert!( - matches!(parse_signer_source(relative_path_str).unwrap(), SignerSource { - kind: SignerSourceKind::Filepath(p), - derivation_path: None, - legacy: false, - } if p == relative_path_str) - ); - - let usb = "usb://ledger".to_string(); - let expected_locator = RemoteWalletLocator { - manufacturer: Manufacturer::Ledger, - pubkey: None, - }; - assert_matches!(parse_signer_source(usb).unwrap(), SignerSource { - kind: SignerSourceKind::Usb(u), - derivation_path: None, - legacy: false, - } if u == expected_locator); - let usb = "usb://ledger?key=0/0".to_string(); - let expected_locator = RemoteWalletLocator { - manufacturer: Manufacturer::Ledger, - pubkey: None, - }; - let expected_derivation_path = Some(DerivationPath::new_bip44(Some(0), Some(0))); - assert_matches!(parse_signer_source(usb).unwrap(), SignerSource { - kind: SignerSourceKind::Usb(u), - derivation_path: d, - legacy: false, - } if u == expected_locator && d == expected_derivation_path); - // Catchall into SignerSource::Filepath fails - let junk = "sometextthatisnotapubkeyorfile".to_string(); - assert!(Pubkey::from_str(&junk).is_err()); - assert_matches!( - parse_signer_source(&junk), - Err(SignerSourceError::IoError(_)) - ); - - let prompt = "prompt:".to_string(); - assert_matches!( - parse_signer_source(prompt).unwrap(), - SignerSource { - kind: SignerSourceKind::Prompt, - derivation_path: None, - legacy: false, - } - ); - assert!( - matches!(parse_signer_source(format!("file:{absolute_path_str}")).unwrap(), SignerSource { - kind: SignerSourceKind::Filepath(p), - derivation_path: None, - legacy: false, - } if p == absolute_path_str) - ); - assert!( - matches!(parse_signer_source(format!("file:{relative_path_str}")).unwrap(), SignerSource { - kind: SignerSourceKind::Filepath(p), - derivation_path: None, - legacy: false, - } if p == relative_path_str) - ); - } - #[test] fn signer_from_path_with_file() -> Result<(), Box> { let dir = TempDir::new()?; From bbd1fd41acfc7ee2d3eaa9fc62a6777fea2b375d Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 24 Jan 2024 16:25:01 -0700 Subject: [PATCH 046/401] Move EpochRewardsHasher to solana-sdk (#34934) * Move EpochRewardsHasher to solana-sdk * Cargo.lock * Apparently we're allowing arithmetic_side_effects in all of runtime * Move allow stmt to block instead of module * Also allow in test mod --- Cargo.lock | 2 +- programs/sbf/Cargo.lock | 2 +- runtime/Cargo.toml | 1 - runtime/src/epoch_rewards_hasher.rs | 182 +-------------------------- sdk/Cargo.toml | 1 + sdk/src/epoch_rewards_hasher.rs | 186 ++++++++++++++++++++++++++++ sdk/src/lib.rs | 1 + 7 files changed, 192 insertions(+), 183 deletions(-) create mode 100644 sdk/src/epoch_rewards_hasher.rs diff 
--git a/Cargo.lock b/Cargo.lock index a7863f06e62754..c141f501f844b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7052,7 +7052,6 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "siphasher", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", @@ -7148,6 +7147,7 @@ dependencies = [ "serde_with", "sha2 0.10.8", "sha3 0.10.4", + "siphasher", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dcf8c5cc3d597e..30576236c8a568 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5754,7 +5754,6 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "siphasher", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", @@ -6252,6 +6251,7 @@ dependencies = [ "serde_with", "sha2 0.10.8", "sha3 0.10.4", + "siphasher", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index f781067592d7e2..355c858597895f 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -47,7 +47,6 @@ regex = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } serde_json = { workspace = true } -siphasher = { workspace = true } solana-accounts-db = { workspace = true } solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } diff --git a/runtime/src/epoch_rewards_hasher.rs b/runtime/src/epoch_rewards_hasher.rs index 5b7feb6efccded..120bb0c2c98500 100644 --- a/runtime/src/epoch_rewards_hasher.rs +++ b/runtime/src/epoch_rewards_hasher.rs @@ -1,45 +1,8 @@ use { crate::bank::StakeRewards, - siphasher::sip::SipHasher13, - solana_sdk::{hash::Hash, pubkey::Pubkey}, - std::hash::Hasher, + solana_sdk::{epoch_rewards_hasher::EpochRewardsHasher, hash::Hash}, }; -#[derive(Debug, Clone)] -pub(crate) struct EpochRewardsHasher { - hasher: SipHasher13, - partitions: usize, -} - -impl EpochRewardsHasher { - /// Use SipHasher13 keyed on the `seed` for calculating epoch reward partition - pub(crate) fn new(partitions: usize, seed: &Hash) -> Self { - let mut hasher = SipHasher13::new(); - hasher.write(seed.as_ref()); - Self { hasher, partitions } - } - - /// Return partition index (0..partitions) by hashing `address` with the `hasher` - pub(crate) fn hash_address_to_partition(self, address: &Pubkey) -> usize { - let Self { - mut hasher, - partitions, - } = self; - hasher.write(address.as_ref()); - let hash64 = hasher.finish(); - - hash_to_partition(hash64, partitions) - } -} - -/// Compute the partition index by modulo the address hash to number of partitions w.o bias. 
-/// (rand_int * DESIRED_RANGE_MAX) / (RAND_MAX + 1) -fn hash_to_partition(hash: u64, partitions: usize) -> usize { - ((partitions as u128) - .saturating_mul(u128::from(hash)) - .saturating_div(u128::from(u64::MAX).saturating_add(1))) as usize -} - pub(crate) fn hash_rewards_into_partitions( stake_rewards: StakeRewards, parent_blockhash: &Hash, @@ -62,148 +25,7 @@ pub(crate) fn hash_rewards_into_partitions( #[cfg(test)] mod tests { - use { - super::*, - solana_accounts_db::stake_rewards::StakeReward, - std::{collections::HashMap, ops::RangeInclusive}, - }; - - #[test] - fn test_get_equal_partition_range() { - // show how 2 equal partition ranges are 0..=(max/2), (max/2+1)..=max - // the inclusive is tricky to think about - let range = get_equal_partition_range(0, 2); - assert_eq!(*range.start(), 0); - assert_eq!(*range.end(), u64::MAX / 2); - let range = get_equal_partition_range(1, 2); - assert_eq!(*range.start(), u64::MAX / 2 + 1); - assert_eq!(*range.end(), u64::MAX); - } - - #[test] - fn test_hash_to_partitions() { - let partitions = 16; - assert_eq!(hash_to_partition(0, partitions), 0); - assert_eq!(hash_to_partition(u64::MAX / 16, partitions), 0); - assert_eq!(hash_to_partition(u64::MAX / 16 + 1, partitions), 1); - assert_eq!(hash_to_partition(u64::MAX / 16 * 2, partitions), 1); - assert_eq!(hash_to_partition(u64::MAX / 16 * 2 + 1, partitions), 1); - assert_eq!(hash_to_partition(u64::MAX - 1, partitions), partitions - 1); - assert_eq!(hash_to_partition(u64::MAX, partitions), partitions - 1); - } - - fn test_partitions(partition: usize, partitions: usize) { - let partition = partition.min(partitions - 1); - let range = get_equal_partition_range(partition, partitions); - // beginning and end of this partition - assert_eq!(hash_to_partition(*range.start(), partitions), partition); - assert_eq!(hash_to_partition(*range.end(), partitions), partition); - if partition < partitions - 1 { - // first index in next partition - assert_eq!( - hash_to_partition(*range.end() + 1, partitions), - partition + 1 - ); - } else { - assert_eq!(*range.end(), u64::MAX); - } - if partition > 0 { - // last index in previous partition - assert_eq!( - hash_to_partition(*range.start() - 1, partitions), - partition - 1 - ); - } else { - assert_eq!(*range.start(), 0); - } - } - - #[test] - fn test_hash_to_partitions_equal_ranges() { - for partitions in [2, 4, 8, 16, 4096] { - assert_eq!(hash_to_partition(0, partitions), 0); - for partition in [0, 1, 2, partitions - 1] { - test_partitions(partition, partitions); - } - - let range = get_equal_partition_range(0, partitions); - for partition in 1..partitions { - let this_range = get_equal_partition_range(partition, partitions); - assert_eq!( - this_range.end() - this_range.start(), - range.end() - range.start() - ); - } - } - // verify non-evenly divisible partitions (partitions will be different sizes by at most 1 from any other partition) - for partitions in [3, 19, 1019, 4095] { - for partition in [0, 1, 2, partitions - 1] { - test_partitions(partition, partitions); - } - let expected_len_of_partition = - ((u128::from(u64::MAX) + 1) / partitions as u128) as u64; - for partition in 0..partitions { - let this_range = get_equal_partition_range(partition, partitions); - let len = this_range.end() - this_range.start(); - // size is same or 1 less - assert!( - len == expected_len_of_partition || len + 1 == expected_len_of_partition, - "{}, {}, {}, {}", - expected_len_of_partition, - len, - partition, - partitions - ); - } - } - } - - /// return start and end_inclusive of 
`partition` indexes out of from u64::MAX+1 elements in equal `partitions` - /// These will be equal as long as (u64::MAX + 1) divides by `partitions` evenly - fn get_equal_partition_range(partition: usize, partitions: usize) -> RangeInclusive { - let max_inclusive = u128::from(u64::MAX); - let max_plus_1 = max_inclusive + 1; - let partition = partition as u128; - let partitions = partitions as u128; - let mut start = max_plus_1 * partition / partitions; - if partition > 0 && start * partitions / max_plus_1 == partition - 1 { - // partitions don't evenly divide and the start of this partition needs to be 1 greater - start += 1; - } - - let mut end_inclusive = start + max_plus_1 / partitions - 1; - if partition < partitions.saturating_sub(1) { - let next = end_inclusive + 1; - if next * partitions / max_plus_1 == partition { - // this partition is far enough into partitions such that the len of this partition is 1 larger than expected - end_inclusive += 1; - } - } else { - end_inclusive = max_inclusive; - } - RangeInclusive::new(start as u64, end_inclusive as u64) - } - - /// Make sure that each time hash_address_to_partition is called, it uses the initial seed state and that clone correctly copies the initial hasher state. - #[test] - fn test_hasher_copy() { - let seed = Hash::new_unique(); - let partitions = 10; - let hasher = EpochRewardsHasher::new(partitions, &seed); - - let pk = Pubkey::new_unique(); - - let b1 = hasher.clone().hash_address_to_partition(&pk); - let b2 = hasher.hash_address_to_partition(&pk); - assert_eq!(b1, b2); - - // make sure b1 includes the seed's hash - let mut hasher = SipHasher13::new(); - hasher.write(seed.as_ref()); - hasher.write(pk.as_ref()); - let partition = hash_to_partition(hasher.finish(), partitions); - assert_eq!(partition, b1); - } + use {super::*, solana_accounts_db::stake_rewards::StakeReward, std::collections::HashMap}; #[test] fn test_hash_rewards_into_partitions() { diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 061b16cb534b3b..57bf0738fa41eb 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -75,6 +75,7 @@ serde_json = { workspace = true, optional = true } serde_with = { workspace = true, features = ["macros"] } sha2 = { workspace = true } sha3 = { workspace = true, optional = true } +siphasher = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } solana-logger = { workspace = true, optional = true } diff --git a/sdk/src/epoch_rewards_hasher.rs b/sdk/src/epoch_rewards_hasher.rs new file mode 100644 index 00000000000000..6a5d315f0370ba --- /dev/null +++ b/sdk/src/epoch_rewards_hasher.rs @@ -0,0 +1,186 @@ +use { + siphasher::sip::SipHasher13, + solana_sdk::{hash::Hash, pubkey::Pubkey}, + std::hash::Hasher, +}; + +#[derive(Debug, Clone)] +pub struct EpochRewardsHasher { + hasher: SipHasher13, + partitions: usize, +} + +impl EpochRewardsHasher { + /// Use SipHasher13 keyed on the `seed` for calculating epoch reward partition + pub fn new(partitions: usize, seed: &Hash) -> Self { + let mut hasher = SipHasher13::new(); + hasher.write(seed.as_ref()); + Self { hasher, partitions } + } + + /// Return partition index (0..partitions) by hashing `address` with the `hasher` + pub fn hash_address_to_partition(self, address: &Pubkey) -> usize { + let Self { + mut hasher, + partitions, + } = self; + hasher.write(address.as_ref()); + let hash64 = hasher.finish(); + + hash_to_partition(hash64, partitions) + } +} + +/// Compute the partition index by modulo the address hash to number of partitions w.o 
bias. +/// (rand_int * DESIRED_RANGE_MAX) / (RAND_MAX + 1) +// Clippy objects to `u128::from(u64::MAX).saturating_add(1)`, even though it +// can never overflow +#[allow(clippy::arithmetic_side_effects)] +fn hash_to_partition(hash: u64, partitions: usize) -> usize { + ((partitions as u128) + .saturating_mul(u128::from(hash)) + .saturating_div(u128::from(u64::MAX).saturating_add(1))) as usize +} + +#[cfg(test)] +mod tests { + #![allow(clippy::arithmetic_side_effects)] + use {super::*, std::ops::RangeInclusive}; + + #[test] + fn test_get_equal_partition_range() { + // show how 2 equal partition ranges are 0..=(max/2), (max/2+1)..=max + // the inclusive is tricky to think about + let range = get_equal_partition_range(0, 2); + assert_eq!(*range.start(), 0); + assert_eq!(*range.end(), u64::MAX / 2); + let range = get_equal_partition_range(1, 2); + assert_eq!(*range.start(), u64::MAX / 2 + 1); + assert_eq!(*range.end(), u64::MAX); + } + + #[test] + fn test_hash_to_partitions() { + let partitions = 16; + assert_eq!(hash_to_partition(0, partitions), 0); + assert_eq!(hash_to_partition(u64::MAX / 16, partitions), 0); + assert_eq!(hash_to_partition(u64::MAX / 16 + 1, partitions), 1); + assert_eq!(hash_to_partition(u64::MAX / 16 * 2, partitions), 1); + assert_eq!(hash_to_partition(u64::MAX / 16 * 2 + 1, partitions), 1); + assert_eq!(hash_to_partition(u64::MAX - 1, partitions), partitions - 1); + assert_eq!(hash_to_partition(u64::MAX, partitions), partitions - 1); + } + + fn test_partitions(partition: usize, partitions: usize) { + let partition = partition.min(partitions - 1); + let range = get_equal_partition_range(partition, partitions); + // beginning and end of this partition + assert_eq!(hash_to_partition(*range.start(), partitions), partition); + assert_eq!(hash_to_partition(*range.end(), partitions), partition); + if partition < partitions - 1 { + // first index in next partition + assert_eq!( + hash_to_partition(*range.end() + 1, partitions), + partition + 1 + ); + } else { + assert_eq!(*range.end(), u64::MAX); + } + if partition > 0 { + // last index in previous partition + assert_eq!( + hash_to_partition(*range.start() - 1, partitions), + partition - 1 + ); + } else { + assert_eq!(*range.start(), 0); + } + } + + #[test] + fn test_hash_to_partitions_equal_ranges() { + for partitions in [2, 4, 8, 16, 4096] { + assert_eq!(hash_to_partition(0, partitions), 0); + for partition in [0, 1, 2, partitions - 1] { + test_partitions(partition, partitions); + } + + let range = get_equal_partition_range(0, partitions); + for partition in 1..partitions { + let this_range = get_equal_partition_range(partition, partitions); + assert_eq!( + this_range.end() - this_range.start(), + range.end() - range.start() + ); + } + } + // verify non-evenly divisible partitions (partitions will be different sizes by at most 1 from any other partition) + for partitions in [3, 19, 1019, 4095] { + for partition in [0, 1, 2, partitions - 1] { + test_partitions(partition, partitions); + } + let expected_len_of_partition = + ((u128::from(u64::MAX) + 1) / partitions as u128) as u64; + for partition in 0..partitions { + let this_range = get_equal_partition_range(partition, partitions); + let len = this_range.end() - this_range.start(); + // size is same or 1 less + assert!( + len == expected_len_of_partition || len + 1 == expected_len_of_partition, + "{}, {}, {}, {}", + expected_len_of_partition, + len, + partition, + partitions + ); + } + } + } + + /// return start and end_inclusive of `partition` indexes out of from u64::MAX+1 
elements in equal `partitions` + /// These will be equal as long as (u64::MAX + 1) divides by `partitions` evenly + fn get_equal_partition_range(partition: usize, partitions: usize) -> RangeInclusive { + let max_inclusive = u128::from(u64::MAX); + let max_plus_1 = max_inclusive + 1; + let partition = partition as u128; + let partitions = partitions as u128; + let mut start = max_plus_1 * partition / partitions; + if partition > 0 && start * partitions / max_plus_1 == partition - 1 { + // partitions don't evenly divide and the start of this partition needs to be 1 greater + start += 1; + } + + let mut end_inclusive = start + max_plus_1 / partitions - 1; + if partition < partitions.saturating_sub(1) { + let next = end_inclusive + 1; + if next * partitions / max_plus_1 == partition { + // this partition is far enough into partitions such that the len of this partition is 1 larger than expected + end_inclusive += 1; + } + } else { + end_inclusive = max_inclusive; + } + RangeInclusive::new(start as u64, end_inclusive as u64) + } + + /// Make sure that each time hash_address_to_partition is called, it uses the initial seed state and that clone correctly copies the initial hasher state. + #[test] + fn test_hasher_copy() { + let seed = Hash::new_unique(); + let partitions = 10; + let hasher = EpochRewardsHasher::new(partitions, &seed); + + let pk = Pubkey::new_unique(); + + let b1 = hasher.clone().hash_address_to_partition(&pk); + let b2 = hasher.hash_address_to_partition(&pk); + assert_eq!(b1, b2); + + // make sure b1 includes the seed's hash + let mut hasher = SipHasher13::new(); + hasher.write(seed.as_ref()); + hasher.write(pk.as_ref()); + let partition = hash_to_partition(hasher.finish(), partitions); + assert_eq!(partition, b1); + } +} diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 720d5198ab950c..e64d6ddc57d0fd 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -69,6 +69,7 @@ pub mod ed25519_instruction; pub mod entrypoint; pub mod entrypoint_deprecated; pub mod epoch_info; +pub mod epoch_rewards_hasher; pub mod example_mocks; pub mod exit; pub mod feature; From 62e7ebd0cccb4b7c3a9597b76b04ec827667051f Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 24 Jan 2024 15:30:32 -0800 Subject: [PATCH 047/401] BlockProductionMethod::CentralScheduler as default (#34891) --- CHANGELOG.md | 1 + core/src/validator.rs | 2 +- local-cluster/tests/local_cluster.rs | 14 +++++++++++--- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99d52beaa0b83e..dadc45594b80e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ Release channels have their own copy of this changelog: * The default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) * The default for `solana-ledger-tool`, however, remains `always` (#34228) * Added `central-scheduler` option for `--block-production-method` (#33890) + * `central-scheduler` as default option for `--block-production-method` (#34891) * Updated to Borsh v1 * Added allow_commission_decrease_at_any_time feature which will allow commission on a vote account to be decreased even in the second half of epochs when the commission_updates_only_allowed_in_first_half_of_epoch diff --git a/core/src/validator.rs b/core/src/validator.rs index 4e96a3c2b5b4ff..23045aa3557e74 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -168,8 +168,8 @@ impl BlockVerificationMethod { #[derive(Clone, EnumString, EnumVariantNames, Default, IntoStaticStr, Display)] #[strum(serialize_all = "kebab-case")] 
 pub enum BlockProductionMethod {
-    #[default]
     ThreadLocalMultiIterator,
+    #[default]
     CentralScheduler,
 }

diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs
index aa919e75f0366d..5a4ca6e4cb6cc1 100644
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -16,7 +16,7 @@ use {
         },
         optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
         replay_stage::DUPLICATE_THRESHOLD,
-        validator::{BlockVerificationMethod, ValidatorConfig},
+        validator::{BlockProductionMethod, BlockVerificationMethod, ValidatorConfig},
     },
     solana_download_utils::download_snapshot_archive,
     solana_entry::entry::create_ticks,
@@ -349,11 +349,16 @@ fn test_forwarding() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     // Set up a cluster where one node is never the leader, so all txs sent to this node
     // will have to be forwarded in order to be confirmed
+    // Only the ThreadLocalMultiIterator banking stage forwards transactions,
+    // so we must use that block-production method.
     let mut config = ClusterConfig {
         node_stakes: vec![DEFAULT_NODE_STAKE * 100, DEFAULT_NODE_STAKE],
         cluster_lamports: DEFAULT_CLUSTER_LAMPORTS + DEFAULT_NODE_STAKE * 100,
         validator_configs: make_identical_validator_configs(
-            &ValidatorConfig::default_for_test(),
+            &ValidatorConfig {
+                block_production_method: BlockProductionMethod::ThreadLocalMultiIterator,
+                ..ValidatorConfig::default_for_test()
+            },
             2,
         ),
         ..ClusterConfig::default()
@@ -4257,7 +4262,10 @@ fn test_leader_failure_4() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     error!("test_leader_failure_4");
     let num_nodes = 4;
-    let validator_config = ValidatorConfig::default_for_test();
+    let validator_config = ValidatorConfig {
+        block_production_method: BlockProductionMethod::ThreadLocalMultiIterator,
+        ..ValidatorConfig::default_for_test()
+    };
     let mut config = ClusterConfig {
         cluster_lamports: DEFAULT_CLUSTER_LAMPORTS,
         node_stakes: vec![DEFAULT_NODE_STAKE; 4],

From 5b59930782879ea90a78b8a35131d03d966adc4b Mon Sep 17 00:00:00 2001
From: Dmitri Makarov
Date: Wed, 24 Jan 2024 20:28:25 -0500
Subject: [PATCH 048/401] Refactoring internal bank method signature for consistency (#34936)

---
 runtime/src/bank.rs | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 07677bea4972e1..e6f188b66a8bdc 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -4431,9 +4431,9 @@ impl Bank {
         self.rc.accounts.accounts_db.set_shrink_paths(paths);
     }

-    fn check_age<'a>(
+    fn check_age(
         &self,
-        txs: impl Iterator<Item = &'a (impl core::borrow::Borrow<SanitizedTransaction> + 'a)>,
+        sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
         lock_results: &[Result<()>],
         max_age: usize,
         error_counters: &mut TransactionErrorMetrics,
@@ -4442,7 +4442,9 @@ impl Bank {
         let last_blockhash = hash_queue.last_hash();
         let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash);

-        txs.zip(lock_results)
+        sanitized_txs
+            .iter()
+            .zip(lock_results)
             .map(|(tx, lock_res)| match lock_res {
                 Ok(()) => self.check_transaction_age(
                     tx.borrow(),
@@ -4560,9 +4562,8 @@ impl Bank {
         max_age: usize,
         error_counters: &mut TransactionErrorMetrics,
     ) -> Vec<TransactionCheckResult> {
-        let age_results =
-            self.check_age(sanitized_txs.iter(), lock_results, max_age, error_counters);
-        self.check_status_cache(sanitized_txs, age_results, error_counters)
+        let lock_results = self.check_age(sanitized_txs, lock_results, max_age, error_counters);
+        self.check_status_cache(sanitized_txs, lock_results, error_counters)
     }

     pub fn collect_balances(&self, batch: &TransactionBatch) ->
TransactionBalances { From b04765f8b582a3156bf009307708e3c9701cea8d Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 24 Jan 2024 17:35:56 -0800 Subject: [PATCH 049/401] Code cleanup in account_rent_state (#34941) --- runtime/src/accounts/account_rent_state.rs | 109 +++++++++--------- runtime/src/accounts/mod.rs | 7 +- .../bank/transaction_account_state_info.rs | 7 +- 3 files changed, 59 insertions(+), 64 deletions(-) diff --git a/runtime/src/accounts/account_rent_state.rs b/runtime/src/accounts/account_rent_state.rs index 0949e21acfd7d5..3fc71ac6a27686 100644 --- a/runtime/src/accounts/account_rent_state.rs +++ b/runtime/src/accounts/account_rent_state.rs @@ -56,66 +56,67 @@ impl RentState { } } } -} -pub(super) fn submit_rent_state_metrics(pre_rent_state: &RentState, post_rent_state: &RentState) { - match (pre_rent_state, post_rent_state) { - (&RentState::Uninitialized, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_err-new_account", 1); - } - (&RentState::RentPaying { .. }, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_ok-legacy", 1); - } - (_, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_err-other", 1); + fn submit_rent_state_metrics(pre_rent_state: &Self, post_rent_state: &Self) { + match (pre_rent_state, post_rent_state) { + (&RentState::Uninitialized, &RentState::RentPaying { .. }) => { + inc_new_counter_info!("rent_paying_err-new_account", 1); + } + (&RentState::RentPaying { .. }, &RentState::RentPaying { .. }) => { + inc_new_counter_info!("rent_paying_ok-legacy", 1); + } + (_, &RentState::RentPaying { .. }) => { + inc_new_counter_info!("rent_paying_err-other", 1); + } + _ => {} } - _ => {} } -} -pub(crate) fn check_rent_state( - pre_rent_state: Option<&RentState>, - post_rent_state: Option<&RentState>, - transaction_context: &TransactionContext, - index: IndexOfAccount, -) -> Result<()> { - if let Some((pre_rent_state, post_rent_state)) = pre_rent_state.zip(post_rent_state) { - let expect_msg = "account must exist at TransactionContext index if rent-states are Some"; - check_rent_state_with_account( - pre_rent_state, - post_rent_state, - transaction_context - .get_key_of_account_at_index(index) - .expect(expect_msg), - &transaction_context - .get_account_at_index(index) - .expect(expect_msg) - .borrow(), - index, - )?; + pub(crate) fn check_rent_state( + pre_rent_state: Option<&Self>, + post_rent_state: Option<&Self>, + transaction_context: &TransactionContext, + index: IndexOfAccount, + ) -> Result<()> { + if let Some((pre_rent_state, post_rent_state)) = pre_rent_state.zip(post_rent_state) { + let expect_msg = + "account must exist at TransactionContext index if rent-states are Some"; + Self::check_rent_state_with_account( + pre_rent_state, + post_rent_state, + transaction_context + .get_key_of_account_at_index(index) + .expect(expect_msg), + &transaction_context + .get_account_at_index(index) + .expect(expect_msg) + .borrow(), + index, + )?; + } + Ok(()) } - Ok(()) -} -pub(super) fn check_rent_state_with_account( - pre_rent_state: &RentState, - post_rent_state: &RentState, - address: &Pubkey, - account_state: &AccountSharedData, - account_index: IndexOfAccount, -) -> Result<()> { - submit_rent_state_metrics(pre_rent_state, post_rent_state); - if !solana_sdk::incinerator::check_id(address) - && !post_rent_state.transition_allowed_from(pre_rent_state) - { - debug!( - "Account {} not rent exempt, state {:?}", - address, account_state, - ); - let account_index = account_index as u8; - 
Err(TransactionError::InsufficientFundsForRent { account_index }) - } else { - Ok(()) + pub(super) fn check_rent_state_with_account( + pre_rent_state: &Self, + post_rent_state: &Self, + address: &Pubkey, + account_state: &AccountSharedData, + account_index: IndexOfAccount, + ) -> Result<()> { + Self::submit_rent_state_metrics(pre_rent_state, post_rent_state); + if !solana_sdk::incinerator::check_id(address) + && !post_rent_state.transition_allowed_from(pre_rent_state) + { + debug!( + "Account {} not rent exempt, state {:?}", + address, account_state, + ); + let account_index = account_index as u8; + Err(TransactionError::InsufficientFundsForRent { account_index }) + } else { + Ok(()) + } } } diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs index 28b1be02283448..4bf09d94ffb7a1 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/accounts/mod.rs @@ -1,10 +1,7 @@ pub mod account_rent_state; use { - crate::{ - accounts::account_rent_state::{check_rent_state_with_account, RentState}, - bank::RewardInterval, - }, + crate::{accounts::account_rent_state::RentState, bank::RewardInterval}, itertools::Itertools, log::warn, solana_accounts_db::{ @@ -476,7 +473,7 @@ pub fn validate_fee_payer( .map_err(|_| TransactionError::InsufficientFundsForFee)?; let payer_post_rent_state = RentState::from_account(payer_account, &rent_collector.rent); - check_rent_state_with_account( + RentState::check_rent_state_with_account( &payer_pre_rent_state, &payer_post_rent_state, payer_address, diff --git a/runtime/src/bank/transaction_account_state_info.rs b/runtime/src/bank/transaction_account_state_info.rs index 4e5f58d85fffc8..c09127a6f32bb3 100644 --- a/runtime/src/bank/transaction_account_state_info.rs +++ b/runtime/src/bank/transaction_account_state_info.rs @@ -1,8 +1,5 @@ use { - crate::{ - accounts::account_rent_state::{check_rent_state, RentState}, - bank::Bank, - }, + crate::{accounts::account_rent_state::RentState, bank::Bank}, solana_sdk::{ account::ReadableAccount, message::SanitizedMessage, @@ -63,7 +60,7 @@ impl Bank { for (i, (pre_state_info, post_state_info)) in pre_state_infos.iter().zip(post_state_infos).enumerate() { - check_rent_state( + RentState::check_rent_state( pre_state_info.rent_state.as_ref(), post_state_info.rent_state.as_ref(), transaction_context, From b161f6ce08342d5dab8b17f706a442d8dba9ac6e Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Thu, 25 Jan 2024 06:20:00 -0800 Subject: [PATCH 050/401] Create SVM folder as a placeholder for the relevant code (#34942) --- core/src/banking_stage/consumer.rs | 2 +- runtime/src/bank.rs | 2 +- runtime/src/bank/fee_distribution.rs | 2 +- runtime/src/bank/transaction_account_state_info.rs | 2 +- runtime/src/lib.rs | 2 +- runtime/src/{accounts/mod.rs => svm/account_loader.rs} | 6 ++---- runtime/src/{accounts => svm}/account_rent_state.rs | 0 runtime/src/svm/mod.rs | 2 ++ 8 files changed, 9 insertions(+), 9 deletions(-) rename runtime/src/{accounts/mod.rs => svm/account_loader.rs} (99%) rename runtime/src/{accounts => svm}/account_rent_state.rs (100%) create mode 100644 runtime/src/svm/mod.rs diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 64b68889747633..d5dccca98a0fae 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -23,8 +23,8 @@ use { compute_budget_processor::process_compute_budget_instructions, timings::ExecuteTimings, }, solana_runtime::{ - accounts::validate_fee_payer, bank::{Bank, LoadAndExecuteTransactionsOutput}, + 
svm::account_loader::validate_fee_payer, transaction_batch::TransactionBatch, }, solana_sdk::{ diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e6f188b66a8bdc..ecca773a401d09 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -59,6 +59,7 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, + svm::account_loader::load_accounts, transaction_batch::TransactionBatch, }, byteorder::{ByteOrder, LittleEndian}, @@ -282,7 +283,6 @@ pub struct BankRc { pub(crate) bank_id_generator: Arc, } -use crate::accounts::load_accounts; #[cfg(RUSTC_WITH_SPECIALIZATION)] use solana_frozen_abi::abi_example::AbiExample; diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 0ad70efbf9ca6e..85d68c07fd7448 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,6 +1,6 @@ use { super::Bank, - crate::accounts::account_rent_state::RentState, + crate::svm::account_rent_state::RentState, log::{debug, warn}, solana_accounts_db::stake_rewards::RewardInfo, solana_sdk::{ diff --git a/runtime/src/bank/transaction_account_state_info.rs b/runtime/src/bank/transaction_account_state_info.rs index c09127a6f32bb3..259cd5142cf3ec 100644 --- a/runtime/src/bank/transaction_account_state_info.rs +++ b/runtime/src/bank/transaction_account_state_info.rs @@ -1,5 +1,5 @@ use { - crate::{accounts::account_rent_state::RentState, bank::Bank}, + crate::{bank::Bank, svm::account_rent_state::RentState}, solana_sdk::{ account::ReadableAccount, message::SanitizedMessage, diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index b0884a6f185c20..0612ac0cca74d2 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -4,7 +4,6 @@ #[macro_use] extern crate lazy_static; -pub mod accounts; pub mod accounts_background_service; pub mod bank; pub mod bank_client; @@ -37,6 +36,7 @@ pub mod stake_weighted_timestamp; pub mod stakes; pub mod static_ids; pub mod status_cache; +pub mod svm; pub mod transaction_batch; pub mod transaction_priority_details; diff --git a/runtime/src/accounts/mod.rs b/runtime/src/svm/account_loader.rs similarity index 99% rename from runtime/src/accounts/mod.rs rename to runtime/src/svm/account_loader.rs index 4bf09d94ffb7a1..8fa432db1556dc 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/svm/account_loader.rs @@ -1,7 +1,5 @@ -pub mod account_rent_state; - use { - crate::{accounts::account_rent_state::RentState, bank::RewardInterval}, + crate::{bank::RewardInterval, svm::account_rent_state::RentState}, itertools::Itertools, log::warn, solana_accounts_db::{ @@ -42,7 +40,7 @@ use { }; #[allow(clippy::too_many_arguments)] -pub(super) fn load_accounts( +pub(crate) fn load_accounts( accounts_db: &AccountsDb, ancestors: &Ancestors, txs: &[SanitizedTransaction], diff --git a/runtime/src/accounts/account_rent_state.rs b/runtime/src/svm/account_rent_state.rs similarity index 100% rename from runtime/src/accounts/account_rent_state.rs rename to runtime/src/svm/account_rent_state.rs diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs new file mode 100644 index 00000000000000..a863d370802e0e --- /dev/null +++ b/runtime/src/svm/mod.rs @@ -0,0 +1,2 @@ +pub mod account_loader; +pub mod account_rent_state; From b18f7383719892236b94200199c480cf144a83f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:52:40 +0800 Subject: [PATCH 051/401] build(deps): bump bytemuck from 1.14.0 to 
 1.14.1 (#34945)

* build(deps): bump bytemuck from 1.14.0 to 1.14.1

Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.14.0 to 1.14.1.
- [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md)
- [Commits](https://github.com/Lokathor/bytemuck/compare/v1.14.0...v1.14.1)

---
updated-dependencies:
- dependency-name: bytemuck
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]

* [auto-commit] Update all Cargo lock files

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite
---
 Cargo.lock              | 4 ++--
 Cargo.toml              | 2 +-
 programs/sbf/Cargo.lock | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index c141f501f844b4..5378580c3c0162 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -908,9 +908,9 @@ checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205"

 [[package]]
 name = "bytemuck"
-version = "1.14.0"
+version = "1.14.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6"
+checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9"
 dependencies = [
  "bytemuck_derive",
 ]

diff --git a/Cargo.toml b/Cargo.toml
index ba7d88e75dc724..71ab138fd93921 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -162,7 +162,7 @@ bs58 = "0.4.0"
 bv = "0.11.1"
 byte-unit = "4.0.19"
 bytecount = "0.6.7"
-bytemuck = "1.14.0"
+bytemuck = "1.14.1"
 byteorder = "1.5.0"
 bytes = "1.5"
 bzip2 = "0.4.4"

diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 30576236c8a568..c1d80f5a3b1ad2 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -840,9 +840,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"

 [[package]]
 name = "bytemuck"
-version = "1.14.0"
+version = "1.14.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6"
+checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9"
 dependencies = [
  "bytemuck_derive",
 ]

From 0c2d9d25fdd8dab9b2db29f6b7922f10c8e8b757 Mon Sep 17 00:00:00 2001
From: hana <81144685+2501babe@users.noreply.github.com>
Date: Thu, 25 Jan 2024 08:51:41 -0800
Subject: [PATCH 052/401] solana-program: VoteState::deserialize() (#34829)

* implement a custom parser for `VoteState` which is usable in a bpf context

* derive or impl `Arbitrary` for `VoteStateVersions` and its component
  types, for test builds only
---
 Cargo.lock                                    |  21 +++
 Cargo.toml                                    |   1 +
 sdk/program/Cargo.toml                        |   1 +
 sdk/program/src/pubkey.rs                     |   4 +
 sdk/program/src/serialize_utils/cursor.rs     | 133 ++++++++++++++++++
 .../mod.rs}                                   |   2 +
 sdk/program/src/vote/authorized_voters.rs     |   3 +
 sdk/program/src/vote/state/mod.rs             | 120 +++++++++++++++-
 .../src/vote/state/vote_state_1_14_11.rs      |   3 +
 .../src/vote/state/vote_state_deserialize.rs  | 129 +++++++++++++++++
 .../src/vote/state/vote_state_versions.rs     |  14 ++
 11 files changed, 424 insertions(+), 7 deletions(-)
 create mode 100644 sdk/program/src/serialize_utils/cursor.rs
 rename sdk/program/src/{serialize_utils.rs => serialize_utils/mod.rs} (99%)
 create mode 100644 sdk/program/src/vote/state/vote_state_deserialize.rs

diff --git a/Cargo.lock b/Cargo.lock
index 5378580c3c0162..d7c2a2405e8e69 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -181,6 +181,15 @@ dependencies = [
 "syn 1.0.109",
 ]

+[[package]]
+name =
"arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] + [[package]] name = "arc-swap" version = "1.5.0" @@ -1612,6 +1621,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "derive_more" version = "0.99.16" @@ -6663,6 +6683,7 @@ name = "solana-program" version = "1.18.0" dependencies = [ "anyhow", + "arbitrary", "ark-bn254", "ark-ec", "ark-ff", diff --git a/Cargo.toml b/Cargo.toml index 71ab138fd93921..242dfa13d032f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,6 +139,7 @@ aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" ahash = "0.8.7" anyhow = "1.0.79" +arbitrary = "1.3.2" ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index ccd18701eefcc4..7bc414472f525f 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -66,6 +66,7 @@ wasm-bindgen = { workspace = true } zeroize = { workspace = true, features = ["default", "zeroize_derive"] } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] +arbitrary = { workspace = true, features = ["derive"] } solana-logger = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 04fcc69dc9185a..728a5cd252d89f 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -1,6 +1,9 @@ //! Solana account addresses. 
#![allow(clippy::arithmetic_side_effects)] + +#[cfg(test)] +use arbitrary::Arbitrary; use { crate::{decode_error::DecodeError, hash::hashv, wasm_bindgen}, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, @@ -85,6 +88,7 @@ impl From for PubkeyError { Zeroable, )] #[borsh(crate = "borsh")] +#[cfg_attr(test, derive(Arbitrary))] pub struct Pubkey(pub(crate) [u8; 32]); impl crate::sanitize::Sanitize for Pubkey {} diff --git a/sdk/program/src/serialize_utils/cursor.rs b/sdk/program/src/serialize_utils/cursor.rs new file mode 100644 index 00000000000000..0066737382ab29 --- /dev/null +++ b/sdk/program/src/serialize_utils/cursor.rs @@ -0,0 +1,133 @@ +use { + crate::{instruction::InstructionError, pubkey::Pubkey}, + std::io::{Cursor, Read}, +}; + +pub(crate) fn read_u8>(cursor: &mut Cursor) -> Result { + let mut buf = [0; 1]; + cursor + .read_exact(&mut buf) + .map_err(|_| InstructionError::InvalidAccountData)?; + + Ok(buf[0]) +} + +pub(crate) fn read_u32>(cursor: &mut Cursor) -> Result { + let mut buf = [0; 4]; + cursor + .read_exact(&mut buf) + .map_err(|_| InstructionError::InvalidAccountData)?; + + Ok(u32::from_le_bytes(buf)) +} + +pub(crate) fn read_u64>(cursor: &mut Cursor) -> Result { + let mut buf = [0; 8]; + cursor + .read_exact(&mut buf) + .map_err(|_| InstructionError::InvalidAccountData)?; + + Ok(u64::from_le_bytes(buf)) +} + +pub(crate) fn read_option_u64>( + cursor: &mut Cursor, +) -> Result, InstructionError> { + let variant = read_u8(cursor)?; + match variant { + 0 => Ok(None), + 1 => read_u64(cursor).map(Some), + _ => Err(InstructionError::InvalidAccountData), + } +} + +pub(crate) fn read_i64>(cursor: &mut Cursor) -> Result { + let mut buf = [0; 8]; + cursor + .read_exact(&mut buf) + .map_err(|_| InstructionError::InvalidAccountData)?; + + Ok(i64::from_le_bytes(buf)) +} + +pub(crate) fn read_pubkey>( + cursor: &mut Cursor, +) -> Result { + let mut buf = [0; 32]; + cursor + .read_exact(&mut buf) + .map_err(|_| InstructionError::InvalidAccountData)?; + + Ok(Pubkey::from(buf)) +} + +#[cfg(test)] +mod test { + use {super::*, rand::Rng, std::fmt::Debug}; + + #[test] + fn test_read_u8() { + for _ in 0..100 { + let test_value = rand::random::(); + test_read(read_u8, test_value); + } + } + + #[test] + fn test_read_u32() { + for _ in 0..100 { + let test_value = rand::random::(); + test_read(read_u32, test_value); + } + } + + #[test] + fn test_read_u64() { + for _ in 0..100 { + let test_value = rand::random::(); + test_read(read_u64, test_value); + } + } + + #[test] + fn test_read_option_u64() { + for _ in 0..100 { + let test_value = rand::random::>(); + test_read(read_option_u64, test_value); + } + } + + #[test] + fn test_read_i64() { + for _ in 0..100 { + let test_value = rand::random::(); + test_read(read_i64, test_value); + } + } + + #[test] + fn test_read_pubkey() { + for _ in 0..100 { + let mut buf = [0; 32]; + rand::thread_rng().fill(&mut buf); + let test_value = Pubkey::from(buf); + test_read(read_pubkey, test_value); + } + } + + fn test_read( + reader: fn(&mut Cursor>) -> Result, + test_value: T, + ) { + let bincode_bytes = bincode::serialize(&test_value).unwrap(); + let mut cursor = Cursor::new(bincode_bytes); + let bincode_read = reader(&mut cursor).unwrap(); + + let borsh_bytes = borsh0_10::to_vec(&test_value).unwrap(); + let mut cursor = Cursor::new(borsh_bytes); + let borsh_read = reader(&mut cursor).unwrap(); + + assert_eq!(test_value, bincode_read); + assert_eq!(test_value, borsh_read); + } +} diff --git a/sdk/program/src/serialize_utils.rs 
b/sdk/program/src/serialize_utils/mod.rs similarity index 99% rename from sdk/program/src/serialize_utils.rs rename to sdk/program/src/serialize_utils/mod.rs index d57095ce7a98ea..1e335483f922cf 100644 --- a/sdk/program/src/serialize_utils.rs +++ b/sdk/program/src/serialize_utils/mod.rs @@ -3,6 +3,8 @@ #![allow(clippy::arithmetic_side_effects)] use crate::{pubkey::Pubkey, sanitize::SanitizeError}; +pub mod cursor; + pub fn append_u16(buf: &mut Vec, data: u16) { let start = buf.len(); buf.resize(buf.len() + 2, 0); diff --git a/sdk/program/src/vote/authorized_voters.rs b/sdk/program/src/vote/authorized_voters.rs index f361be237d219a..9920391146b2c2 100644 --- a/sdk/program/src/vote/authorized_voters.rs +++ b/sdk/program/src/vote/authorized_voters.rs @@ -1,3 +1,5 @@ +#[cfg(test)] +use arbitrary::Arbitrary; use { crate::{clock::Epoch, pubkey::Pubkey}, serde_derive::{Deserialize, Serialize}, @@ -5,6 +7,7 @@ use { }; #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] +#[cfg_attr(test, derive(Arbitrary))] pub struct AuthorizedVoters { authorized_voters: BTreeMap, } diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 6d77d3ab5d9dda..9eddce4d948cab 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -1,9 +1,12 @@ //! Vote state -#[cfg(test)] -use crate::epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET; #[cfg(not(target_os = "solana"))] use bincode::deserialize; +#[cfg(test)] +use { + crate::epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, + arbitrary::{Arbitrary, Unstructured}, +}; use { crate::{ clock::{Epoch, Slot, UnixTimestamp}, @@ -11,17 +14,20 @@ use { instruction::InstructionError, pubkey::Pubkey, rent::Rent, + serialize_utils::cursor::read_u32, sysvar::clock::Clock, vote::{authorized_voters::AuthorizedVoters, error::VoteError}, }, bincode::{serialize_into, ErrorKind}, serde_derive::{Deserialize, Serialize}, - std::{collections::VecDeque, fmt::Debug}, + std::{collections::VecDeque, fmt::Debug, io::Cursor}, }; mod vote_state_0_23_5; pub mod vote_state_1_14_11; pub use vote_state_1_14_11::*; +mod vote_state_deserialize; +use vote_state_deserialize::deserialize_vote_state_into; pub mod vote_state_versions; pub use vote_state_versions::*; @@ -67,6 +73,7 @@ impl Vote { } #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)] +#[cfg_attr(test, derive(Arbitrary))] pub struct Lockout { slot: Slot, confirmation_count: u32, @@ -114,6 +121,7 @@ impl Lockout { } #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)] +#[cfg_attr(test, derive(Arbitrary))] pub struct LandedVote { // Latency is the difference in slot number between the slot that was voted on (lockout.slot) and the slot in // which the vote that added this Lockout landed. 
For votes which were cast before versions of the validator @@ -226,6 +234,7 @@ pub struct VoteAuthorizeCheckedWithSeedArgs { } #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] +#[cfg_attr(test, derive(Arbitrary))] pub struct BlockTimestamp { pub slot: Slot, pub timestamp: UnixTimestamp, @@ -280,8 +289,26 @@ impl CircBuf { } } +#[cfg(test)] +impl<'a, I: Default + Copy> Arbitrary<'a> for CircBuf +where + I: Arbitrary<'a>, +{ + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let mut circbuf = Self::default(); + + let len = u.arbitrary_len::()?; + for _ in 0..len { + circbuf.append(I::arbitrary(u)?); + } + + Ok(circbuf) + } +} + #[frozen_abi(digest = "EeenjJaSrm9hRM39gK6raRNtzG61hnk7GciUCJJRDUSQ")] #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] +#[cfg_attr(test, derive(Arbitrary))] pub struct VoteState { /// the node that votes in this account pub node_pubkey: Pubkey, @@ -347,16 +374,43 @@ impl VoteState { 3762 // see test_vote_state_size_of. } - #[allow(clippy::used_underscore_binding)] - pub fn deserialize(_input: &[u8]) -> Result { + // we retain bincode deserialize for not(target_os = "solana") + // because the hand-written parser does not support V0_23_5 + pub fn deserialize(input: &[u8]) -> Result { #[cfg(not(target_os = "solana"))] { - deserialize::(_input) + deserialize::(input) .map(|versioned| versioned.convert_to_current()) .map_err(|_| InstructionError::InvalidAccountData) } #[cfg(target_os = "solana")] - unimplemented!(); + { + let mut vote_state = Self::default(); + Self::deserialize_into(input, &mut vote_state)?; + Ok(vote_state) + } + } + + /// Deserializes the input buffer into the provided `VoteState` + /// + /// This function exists to deserialize `VoteState` in a BPF context without going above + /// the compute limit, and must be kept up to date with `bincode::deserialize`. + pub fn deserialize_into( + input: &[u8], + vote_state: &mut VoteState, + ) -> Result<(), InstructionError> { + let mut cursor = Cursor::new(input); + + let variant = read_u32(&mut cursor)?; + match variant { + // V0_23_5. not supported; these should not exist on mainnet + 0 => Err(InstructionError::InvalidAccountData), + // V1_14_11. substantially different layout and data from V0_23_5 + 1 => deserialize_vote_state_into(&mut cursor, vote_state, false), + // Current. 
the only difference from V1_14_11 is the addition of a slot-latency to each vote + 2 => deserialize_vote_state_into(&mut cursor, vote_state, true), + _ => Err(InstructionError::InvalidAccountData), + } } pub fn serialize( @@ -818,6 +872,58 @@ mod tests { ); } + #[test] + fn test_vote_deserialize_into() { + // base case + let target_vote_state = VoteState::default(); + let vote_state_buf = + bincode::serialize(&VoteStateVersions::new_current(target_vote_state.clone())).unwrap(); + + let mut test_vote_state = VoteState::default(); + VoteState::deserialize_into(&vote_state_buf, &mut test_vote_state).unwrap(); + + assert_eq!(target_vote_state, test_vote_state); + + // variant + // provide 4x the minimum struct size in bytes to ensure we typically touch every field + let struct_bytes_x4 = std::mem::size_of::() * 4; + for _ in 0..1000 { + let raw_data: Vec = (0..struct_bytes_x4).map(|_| rand::random::()).collect(); + let mut unstructured = Unstructured::new(&raw_data); + + let target_vote_state_versions = + VoteStateVersions::arbitrary(&mut unstructured).unwrap(); + let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); + let target_vote_state = target_vote_state_versions.convert_to_current(); + + let mut test_vote_state = VoteState::default(); + VoteState::deserialize_into(&vote_state_buf, &mut test_vote_state).unwrap(); + + assert_eq!(target_vote_state, test_vote_state); + } + } + + #[test] + fn test_vote_deserialize_into_nopanic() { + // base case + let mut test_vote_state = VoteState::default(); + let e = VoteState::deserialize_into(&[], &mut test_vote_state).unwrap_err(); + assert_eq!(e, InstructionError::InvalidAccountData); + + // variant + let serialized_len_x4 = bincode::serialized_size(&test_vote_state).unwrap() * 4; + let mut rng = rand::thread_rng(); + for _ in 0..1000 { + let raw_data_length = rng.gen_range(1..serialized_len_x4); + let raw_data: Vec = (0..raw_data_length).map(|_| rng.gen::()).collect(); + + // it is extremely improbable, though theoretically possible, for random bytes to be syntactically valid + // so we only check that the deserialize function does not panic + let mut test_vote_state = VoteState::default(); + let _ = VoteState::deserialize_into(&raw_data, &mut test_vote_state); + } + } + #[test] fn test_vote_state_commission_split() { let vote_state = VoteState::default(); diff --git a/sdk/program/src/vote/state/vote_state_1_14_11.rs b/sdk/program/src/vote/state/vote_state_1_14_11.rs index 2e2f17484cb21c..4b68ced36524d6 100644 --- a/sdk/program/src/vote/state/vote_state_1_14_11.rs +++ b/sdk/program/src/vote/state/vote_state_1_14_11.rs @@ -1,10 +1,13 @@ use super::*; +#[cfg(test)] +use arbitrary::Arbitrary; // Offset used for VoteState version 1_14_11 const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82; #[frozen_abi(digest = "CZTgLymuevXjAx6tM8X8T5J3MCx9AkEsFSmu4FJrEpkG")] #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] +#[cfg_attr(test, derive(Arbitrary))] pub struct VoteState1_14_11 { /// the node that votes in this account pub node_pubkey: Pubkey, diff --git a/sdk/program/src/vote/state/vote_state_deserialize.rs b/sdk/program/src/vote/state/vote_state_deserialize.rs new file mode 100644 index 00000000000000..b93f1c7442d10f --- /dev/null +++ b/sdk/program/src/vote/state/vote_state_deserialize.rs @@ -0,0 +1,129 @@ +use { + crate::{ + instruction::InstructionError, + pubkey::Pubkey, + serialize_utils::cursor::*, + vote::state::{BlockTimestamp, LandedVote, Lockout, VoteState, MAX_ITEMS}, + }, + std::io::Cursor, 
+}; + +pub(super) fn deserialize_vote_state_into( + cursor: &mut Cursor<&[u8]>, + vote_state: &mut VoteState, + has_latency: bool, +) -> Result<(), InstructionError> { + vote_state.node_pubkey = read_pubkey(cursor)?; + vote_state.authorized_withdrawer = read_pubkey(cursor)?; + vote_state.commission = read_u8(cursor)?; + read_votes_into(cursor, vote_state, has_latency)?; + vote_state.root_slot = read_option_u64(cursor)?; + read_authorized_voters_into(cursor, vote_state)?; + read_prior_voters_into(cursor, vote_state)?; + read_epoch_credits_into(cursor, vote_state)?; + read_last_timestamp_into(cursor, vote_state)?; + + Ok(()) +} + +fn read_votes_into>( + cursor: &mut Cursor, + vote_state: &mut VoteState, + has_latency: bool, +) -> Result<(), InstructionError> { + let vote_count = read_u64(cursor)?; + + for _ in 0..vote_count { + let latency = if has_latency { read_u8(cursor)? } else { 0 }; + + let slot = read_u64(cursor)?; + let confirmation_count = read_u32(cursor)?; + let lockout = Lockout::new_with_confirmation_count(slot, confirmation_count); + + vote_state.votes.push_back(LandedVote { latency, lockout }); + } + + Ok(()) +} + +fn read_authorized_voters_into>( + cursor: &mut Cursor, + vote_state: &mut VoteState, +) -> Result<(), InstructionError> { + let authorized_voter_count = read_u64(cursor)?; + + for _ in 0..authorized_voter_count { + let epoch = read_u64(cursor)?; + let authorized_voter = read_pubkey(cursor)?; + + vote_state.authorized_voters.insert(epoch, authorized_voter); + } + + Ok(()) +} + +fn read_prior_voters_into>( + cursor: &mut Cursor, + vote_state: &mut VoteState, +) -> Result<(), InstructionError> { + let mut encountered_null_voter = false; + for i in 0..MAX_ITEMS { + let prior_voter = read_pubkey(cursor)?; + let from_epoch = read_u64(cursor)?; + let until_epoch = read_u64(cursor)?; + let item = (prior_voter, from_epoch, until_epoch); + + if item == (Pubkey::default(), 0, 0) { + encountered_null_voter = true; + } else if encountered_null_voter { + // `prior_voters` should never be sparse + return Err(InstructionError::InvalidAccountData); + } else { + vote_state.prior_voters.buf[i] = item; + } + } + + let idx = read_u64(cursor)? 
as usize; + vote_state.prior_voters.idx = idx; + + let is_empty_byte = read_u8(cursor)?; + let is_empty = match is_empty_byte { + 0 => false, + 1 => true, + _ => return Err(InstructionError::InvalidAccountData), + }; + vote_state.prior_voters.is_empty = is_empty; + + Ok(()) +} + +fn read_epoch_credits_into>( + cursor: &mut Cursor, + vote_state: &mut VoteState, +) -> Result<(), InstructionError> { + let epoch_credit_count = read_u64(cursor)?; + + for _ in 0..epoch_credit_count { + let epoch = read_u64(cursor)?; + let credits = read_u64(cursor)?; + let prev_credits = read_u64(cursor)?; + + vote_state + .epoch_credits + .push((epoch, credits, prev_credits)); + } + + Ok(()) +} + +fn read_last_timestamp_into>( + cursor: &mut Cursor, + vote_state: &mut VoteState, +) -> Result<(), InstructionError> { + let slot = read_u64(cursor)?; + let timestamp = read_i64(cursor)?; + + vote_state.last_timestamp = BlockTimestamp { slot, timestamp }; + + Ok(()) +} diff --git a/sdk/program/src/vote/state/vote_state_versions.rs b/sdk/program/src/vote/state/vote_state_versions.rs index 7c4939d36928bc..58d63d15def379 100644 --- a/sdk/program/src/vote/state/vote_state_versions.rs +++ b/sdk/program/src/vote/state/vote_state_versions.rs @@ -1,4 +1,6 @@ use super::{vote_state_0_23_5::VoteState0_23_5, vote_state_1_14_11::VoteState1_14_11, *}; +#[cfg(test)] +use arbitrary::{Arbitrary, Unstructured}; #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] pub enum VoteStateVersions { @@ -90,3 +92,15 @@ impl VoteStateVersions { || VoteState1_14_11::is_correct_size_and_initialized(data) } } + +#[cfg(test)] +impl Arbitrary<'_> for VoteStateVersions { + fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result { + let variant = u.choose_index(2)?; + match variant { + 0 => Ok(Self::Current(Box::new(VoteState::arbitrary(u)?))), + 1 => Ok(Self::V1_14_11(Box::new(VoteState1_14_11::arbitrary(u)?))), + _ => unreachable!(), + } + } +} From 2c98399afd48a00b74492f020231550385c41f89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 25 Jan 2024 18:02:38 +0100 Subject: [PATCH 053/401] Docs - for loaded_programs.rs (#34715) Adds doc comments to loaded_programs.rs --- program-runtime/src/loaded_programs.rs | 74 +++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 6 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index b7b92a0409c800..a92da7bd001bbe 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -60,20 +60,31 @@ pub trait ForkGraph { } } +/// Actual payload of [LoadedProgram]. #[derive(Default)] pub enum LoadedProgramType { - /// Tombstone for undeployed, closed or unloadable programs + /// Tombstone for programs which did not pass the verifier. + /// + /// These can potentially come back alive if the environment changes. FailedVerification(ProgramRuntimeEnvironment), + /// Tombstone for programs which were explicitly undeployoed / closed. #[default] Closed, + /// Tombstone for programs which have recently been modified but the new version is not visible yet. DelayVisibility, - /// Successfully verified but not currently compiled, used to track usage statistics when a compiled program is evicted from memory. + /// Successfully verified but not currently compiled. + /// + /// It continues to track usage statistics even when the compiled executable of the program is evicted from memory. 
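+    /// Such an entry is re-verified and re-compiled on its next use, at which point the retained counters carry over.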
     Unloaded(ProgramRuntimeEnvironment),
+    /// Verified and compiled program of loader-v1 or loader-v2
     LegacyV0(Executable<InvokeContext<'static>>),
+    /// Verified and compiled program of loader-v3 (aka upgradable loader)
     LegacyV1(Executable<InvokeContext<'static>>),
+    /// Verified and compiled program of loader-v4
     Typed(Executable<InvokeContext<'static>>),
     #[cfg(test)]
     TestLoaded(ProgramRuntimeEnvironment),
+    /// A built-in program which is not stored on-chain but baked into and distributed with the validator
     Builtin(BuiltinProgram<InvokeContext<'static>>),
 }

@@ -113,6 +124,9 @@ impl LoadedProgramType {
     }
 }

+/// Holds a program version at a specific address and on a specific slot / fork.
+///
+/// It contains the actual program in [LoadedProgramType] and a bunch of meta-data.
 #[derive(Debug, Default)]
 pub struct LoadedProgram {
     /// The program of this entry
@@ -133,17 +147,28 @@ pub struct LoadedProgram {
     pub latest_access_slot: AtomicU64,
 }

+/// Global cache statistics for [LoadedPrograms].
 #[derive(Debug, Default)]
 pub struct Stats {
+    /// a program was requested
     pub hits: AtomicU64,
+    /// a program was polled during cooperative loading
     pub misses: AtomicU64,
+    /// a compiled executable was unloaded
     pub evictions: HashMap<Pubkey, u64>,
+    /// a program was loaded
     pub insertions: AtomicU64,
+    /// a program was reloaded or redeployed
     pub replacements: AtomicU64,
+    /// a program was only used once before being unloaded
     pub one_hit_wonders: AtomicU64,
+    /// a program became unreachable in the fork graph because of rerooting
     pub prunes_orphan: AtomicU64,
+    /// a program got pruned because its expiration slot passed
     pub prunes_expired: AtomicU64,
+    /// a program got pruned because it was not recompiled for the next epoch
     pub prunes_environment: AtomicU64,
+    /// the [SecondLevel] was empty because all slot versions got pruned
     pub empty_entries: AtomicU64,
 }

@@ -203,12 +228,18 @@ impl Stats {
     }
 }

+/// Time measurements for loading a single [LoadedProgram].
 #[derive(Debug, Default)]
 pub struct LoadProgramMetrics {
+    /// Program address, but as text
     pub program_id: String,
+    /// Microseconds it took to `create_program_runtime_environment`
     pub register_syscalls_us: u64,
+    /// Microseconds it took to `Executable::<InvokeContext>::load`
     pub load_elf_us: u64,
+    /// Microseconds it took to `executable.verify::<RequisiteVerifier>()`
     pub verify_code_us: u64,
+    /// Microseconds it took to `executable.jit_compile`
     pub jit_compile_us: u64,
 }

@@ -434,11 +465,14 @@ impl LoadedProgram {
     }
 }

+/// Globally shared RBPF config and syscall registry
+///
+/// This is only valid in an epoch range as long as no feature affecting RBPF is activated.
 #[derive(Clone, Debug)]
 pub struct ProgramRuntimeEnvironments {
-    /// Globally shared RBPF config and syscall registry for runtime V1
+    /// For program runtime V1
     pub program_runtime_v1: ProgramRuntimeEnvironment,
-    /// Globally shared RBPF config and syscall registry for runtime V2
+    /// For program runtime V2
     pub program_runtime_v2: ProgramRuntimeEnvironment,
 }

@@ -469,7 +503,7 @@ impl LoadingTaskCookie {
     }
 }

-/// Prevents excessive polling during cooperative loading
+/// Suspends the thread in case no cooperative loading task was assigned
 #[derive(Debug, Default)]
 pub struct LoadingTaskWaiter {
     cookie: Mutex<LoadingTaskCookie>,
@@ -503,13 +537,33 @@ impl LoadingTaskWaiter {
     }
 }

+/// Contains all the program versions at a specific address.
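+/// This is the second level of the two-level index in [LoadedPrograms]; the first level is keyed by the program address.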
#[derive(Debug, Default)]
 struct SecondLevel {
+    /// List of all versions (across all forks) of a program sorted by the slot in which they were modified
     slot_versions: Vec<Arc<LoadedProgram>>,
-    /// Contains the bank and TX batch a program at this address is currently being loaded
+    /// `Some` if there is currently a cooperative loading task for this program address
+    ///
+    /// It is possible that multiple TX batches from different slots need different versions of a program.
+    /// However, that can only be figured out once a program is loaded and its deployment slot is known.
     cooperative_loading_lock: Option<(Slot, std::thread::ThreadId)>,
 }

+/// This structure is the global cache of loaded, verified and compiled programs.
+///
+/// It ...
+/// - is validator global and fork graph aware, so it can optimize the commonalities across banks.
+/// - handles the visibility rules of un/re/deployments.
+/// - stores the usage statistics and verification status of each program.
+/// - is elastic and uses a probabilistic eviction strategy based on the usage statistics.
+/// - also keeps the compiled executables around, but only for the most used programs.
+/// - supports various kinds of tombstones to avoid loading programs which cannot be loaded.
+/// - cleans up entries on orphan branches when the block store is rerooted.
+/// - supports the recompilation phase before feature activations which can change cached programs.
+/// - manages the environments of the programs and upcoming environments for the next epoch.
+/// - allows for cooperative loading of TX batches which hit the same missing programs simultaneously.
+/// - enforces that all programs used in a batch are eagerly loaded ahead of execution.
+/// - is not persisted to disk or a snapshot, so it needs to cold start and warm up first.
 pub struct LoadedPrograms<FG: ForkGraph> {
     /// A two level index:
     ///
@@ -529,8 +583,11 @@ pub struct LoadedPrograms<FG: ForkGraph> {
     pub upcoming_environments: Option<ProgramRuntimeEnvironments>,
     /// List of loaded programs which should be recompiled before the next epoch (but don't have to).
     pub programs_to_recompile: Vec<(Pubkey, Arc<LoadedProgram>)>,
+    /// Statistics counters
     pub stats: Stats,
+    /// Reference to the block store
     pub fork_graph: Option<Arc<RwLock<FG>>>,
+    /// Coordinates TX batches waiting for others to complete their task during cooperative loading
     pub loading_task_waiter: Arc<LoadingTaskWaiter>,
 }

@@ -545,6 +602,11 @@ impl<FG: ForkGraph> Debug for LoadedPrograms<FG> {
     }
 }

+/// Local view into [LoadedPrograms] which was extracted for a specific TX batch.
+///
+/// This isolation enables the global [LoadedPrograms] to continue to evolve (e.g. evictions),
+/// while the TX batch is guaranteed it will continue to find all the programs it requires.
+/// For program management instructions, it also buffers them before they are merged back into the global [LoadedPrograms].
 #[derive(Clone, Debug, Default)]
 pub struct LoadedProgramsForTxBatch {
     /// Pubkey is the address of a program.

From 26d62b9516b4c079f17f662a03874d85c0719176 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Thu, 25 Jan 2024 09:38:06 -0800
Subject: [PATCH 054/401] [TieredStorage] writing hot account blocks and index
 blocks (#34828)

#### Problem
The implementation of write_accounts() for HotAccountStorage is missing.
It consists of the writing of account blocks, index block, and owners
block.

#### Summary of Changes
This PR completes part of the HotStorageWriter::write_accounts().
Specifically, it finishes the writing of account blocks and index block.
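
For orientation, the alignment rule the writer relies on can be sketched standalone. This is illustrative only and not part of the patch; `ALIGNMENT` stands in for the 8-byte HOT_ACCOUNT_ALIGNMENT / HOT_BLOCK_ALIGNMENT constants introduced in the diff below:

```rust
/// Standalone sketch of the padding arithmetic assumed by the hot-storage
/// writer; `ALIGNMENT` stands in for the crate's 8-byte alignment constants.
const ALIGNMENT: usize = 8;

/// Zero bytes required to round `data_len` up to the next 8-byte boundary
/// (0 when the length is already aligned).
fn padding_bytes(data_len: usize) -> usize {
    (ALIGNMENT - (data_len % ALIGNMENT)) % ALIGNMENT
}

fn main() {
    // 13 bytes of account data need 3 bytes of padding: 13 + 3 = 16.
    assert_eq!(padding_bytes(13), 3);
    // Already-aligned data needs no padding at all.
    assert_eq!(padding_bytes(16), 0);
    // An index block holding an odd number of 4-byte entries ends 4 bytes
    // short of an 8-byte boundary, which is why a single u32 pad suffices.
    let odd_entries = 5usize;
    assert_eq!((odd_entries * 4) % ALIGNMENT, 4);
}
```
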
#### Test Plan
A new unit-test is added to verify the correctness of the
work-in-progress HotStorageWriter::write_accounts().
---
 accounts-db/src/tiered_storage/hot.rs      | 285 +++++++++++++++++++--
 accounts-db/src/tiered_storage/readable.rs |   5 +-
 2 files changed, 265 insertions(+), 25 deletions(-)

diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs
index 3bb5f54e470b3c..8652b0b2aa9514 100644
--- a/accounts-db/src/tiered_storage/hot.rs
+++ b/accounts-db/src/tiered_storage/hot.rs
@@ -9,19 +9,20 @@ use {
             byte_block,
             file::TieredStorageFile,
             footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter},
-            index::{AccountOffset, IndexBlockFormat, IndexOffset},
+            index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset},
             meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta},
             mmap_utils::{get_pod, get_slice},
             owners::{OwnerOffset, OwnersBlockFormat},
             readable::TieredReadableAccount,
-            TieredStorageError, TieredStorageFormat, TieredStorageResult,
+            StorableAccounts, StorableAccountsWithHashesAndWriteVersions, TieredStorageError,
+            TieredStorageFormat, TieredStorageResult,
         },
     },
     bytemuck::{Pod, Zeroable},
     memmap2::{Mmap, MmapOptions},
     modular_bitfield::prelude::*,
-    solana_sdk::{pubkey::Pubkey, stake_history::Epoch},
-    std::{fs::OpenOptions, option::Option, path::Path},
+    solana_sdk::{account::ReadableAccount, pubkey::Pubkey, stake_history::Epoch},
+    std::{borrow::Borrow, fs::OpenOptions, option::Option, path::Path},
 };

 pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat {
@@ -45,9 +46,6 @@ fn new_hot_footer() -> TieredStorageFooter {
     }
 }

-/// The maximum number of padding bytes used in a hot account entry.
-const MAX_HOT_PADDING: u8 = 7;
-
 /// The maximum allowed value for the owner index of a hot account.
 const MAX_HOT_OWNER_OFFSET: OwnerOffset = OwnerOffset((1 << 29) - 1);

@@ -58,9 +56,26 @@ const MAX_HOT_OWNER_OFFSET: OwnerOffset = OwnerOffset((1 << 29) - 1);
 /// bytes in HotAccountOffset.
 pub(crate) const HOT_ACCOUNT_ALIGNMENT: usize = 8;

+/// The alignment for the blocks inside a hot accounts file. A hot accounts
+/// file consists of accounts block, index block, owners block, and footer.
+/// This requirement allows the offset of each block to be properly aligned so
+/// that they can be read under mmap.
+pub(crate) const HOT_BLOCK_ALIGNMENT: usize = 8;
+
 /// The maximum supported offset for hot accounts storage.
 const MAX_HOT_ACCOUNT_OFFSET: usize = u32::MAX as usize * HOT_ACCOUNT_ALIGNMENT;

+// returns the required number of padding bytes
+fn padding_bytes(data_len: usize) -> u8 {
+    ((HOT_ACCOUNT_ALIGNMENT - (data_len % HOT_ACCOUNT_ALIGNMENT)) % HOT_ACCOUNT_ALIGNMENT) as u8
+}
+
+/// The maximum number of padding bytes used in a hot account entry.
+const MAX_HOT_PADDING: u8 = 7;
+
+/// The buffer that is used for padding.
+const PADDING_BUFFER: [u8; 8] = [0u8; HOT_ACCOUNT_ALIGNMENT];
+
 #[bitfield(bits = 32)]
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Pod, Zeroable)]
@@ -444,6 +459,23 @@ impl HotStorageReader {
     }
 }

+fn write_optional_fields(
+    file: &TieredStorageFile,
+    opt_fields: &AccountMetaOptionalFields,
+) -> TieredStorageResult<usize> {
+    let mut size = 0;
+    if let Some(rent_epoch) = opt_fields.rent_epoch {
+        size += file.write_pod(&rent_epoch)?;
+    }
+    if let Some(hash) = opt_fields.account_hash {
+        size += file.write_pod(&hash)?;
+    }
+
+    debug_assert_eq!(size, opt_fields.size());
+
+    Ok(size)
+}
+
 /// The writer that creates a hot accounts file.
#[derive(Debug)]
 pub struct HotStorageWriter {
@@ -457,25 +489,144 @@ impl HotStorageWriter {
             storage: TieredStorageFile::new_writable(file_path)?,
         })
     }
+
+    /// Persists an account with the specified information and returns
+    /// the stored size of the account.
+    fn write_account(
+        &self,
+        lamports: u64,
+        account_data: &[u8],
+        executable: bool,
+        rent_epoch: Option<Epoch>,
+        account_hash: Option<AccountHash>,
+    ) -> TieredStorageResult<usize> {
+        let optional_fields = AccountMetaOptionalFields {
+            rent_epoch,
+            account_hash,
+        };
+
+        let mut flags = AccountMetaFlags::new_from(&optional_fields);
+        flags.set_executable(executable);
+
+        let padding_len = padding_bytes(account_data.len());
+        let meta = HotAccountMeta::new()
+            .with_lamports(lamports)
+            .with_account_data_size(account_data.len() as u64)
+            .with_account_data_padding(padding_len)
+            .with_flags(&flags);
+
+        let mut stored_size = 0;
+
+        stored_size += self.storage.write_pod(&meta)?;
+        stored_size += self.storage.write_bytes(account_data)?;
+        stored_size += self
+            .storage
+            .write_bytes(&PADDING_BUFFER[0..(padding_len as usize)])?;
+        stored_size += write_optional_fields(&self.storage, &optional_fields)?;
+
+        Ok(stored_size)
+    }
+
+    /// A work-in-progress function that will eventually implement
+    /// AccountsFile::append_accounts()
+    pub fn write_accounts<
+        'a,
+        'b,
+        T: ReadableAccount + Sync,
+        U: StorableAccounts<'a, T>,
+        V: Borrow<AccountHash>,
+    >(
+        &self,
+        accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>,
+        skip: usize,
+    ) -> TieredStorageResult<()> {
+        let mut footer = new_hot_footer();
+        let mut index = vec![];
+        let mut cursor = 0;
+
+        // writing accounts blocks
+        let len = accounts.accounts.len();
+        for i in skip..len {
+            let (account, address, account_hash, _write_version) = accounts.get(i);
+            let index_entry = AccountIndexWriterEntry {
+                address,
+                offset: HotAccountOffset::new(cursor)?,
+            };
+
+            // Obtain necessary fields from the account, or default fields
+            // for a zero-lamport account in the None case.
+            let (lamports, data, executable, rent_epoch, account_hash) = account
+                .map(|acc| {
+                    (
+                        acc.lamports(),
+                        acc.data(),
+                        acc.executable(),
+                        // only persist rent_epoch for those non-rent-exempt accounts
+                        (acc.rent_epoch() != Epoch::MAX).then_some(acc.rent_epoch()),
+                        Some(*account_hash),
+                    )
+                })
+                .unwrap_or((0, &[], false, None, None));
+
+            cursor += self.write_account(lamports, data, executable, rent_epoch, account_hash)?;
+            index.push(index_entry);
+        }
+        footer.account_entry_count = (len - skip) as u32;
+
+        // writing index block
+        // expect the offset of each block to be aligned.
+        assert!(cursor % HOT_BLOCK_ALIGNMENT == 0);
+        footer.index_block_offset = cursor as u64;
+        cursor += footer
+            .index_block_format
+            .write_index_block(&self.storage, &index)?;
+        if cursor % HOT_BLOCK_ALIGNMENT != 0 {
+            // In case it is not yet aligned, it is due to the fact that
+            // the index block has an odd number of entries. In such a case,
+            // we expect the offset to be off by exactly 4 bytes.
+            assert_eq!(cursor % HOT_BLOCK_ALIGNMENT, 4);
+            cursor += self.storage.write_pod(&0u32)?;
+        }
+
+        // TODO: owner block will be implemented in the follow-up PRs
+        // expect the offset of each block to be aligned.
+ assert!(cursor % HOT_BLOCK_ALIGNMENT == 0); + footer.owners_block_offset = cursor as u64; + footer.owner_count = 0; + + footer.write_footer_block(&self.storage)?; + + Ok(()) + } } #[cfg(test)] pub mod tests { use { super::*, - crate::tiered_storage::{ - byte_block::ByteBlockWriter, - file::TieredStorageFile, - footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE}, - hot::{HotAccountMeta, HotStorageReader}, - index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - owners::{OwnersBlockFormat, OwnersTable}, + crate::{ + account_storage::meta::StoredMeta, + rent_collector::RENT_EXEMPT_RENT_EPOCH, + tiered_storage::{ + byte_block::ByteBlockWriter, + file::TieredStorageFile, + footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE}, + hot::{HotAccountMeta, HotStorageReader}, + index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, + meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + owners::{OwnersBlockFormat, OwnersTable}, + }, }, assert_matches::assert_matches, memoffset::offset_of, rand::{seq::SliceRandom, Rng}, - solana_sdk::{account::ReadableAccount, hash::Hash, pubkey::Pubkey, stake_history::Epoch}, + solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount}, + hash::Hash, + pubkey::Pubkey, + slot_history::Slot, + stake_history::Epoch, + }, tempfile::TempDir, }; @@ -957,11 +1108,6 @@ pub mod tests { } } - // returns the required number of padding - fn padding_bytes(data_len: usize) -> u8 { - ((HOT_ACCOUNT_ALIGNMENT - (data_len % HOT_ACCOUNT_ALIGNMENT)) % HOT_ACCOUNT_ALIGNMENT) as u8 - } - #[test] fn test_hot_storage_get_account() { // Generate a new temp path that is guaranteed to NOT already have a file. @@ -1088,4 +1234,101 @@ pub mod tests { // HotStorageWriter only writes once. assert_matches!(HotStorageWriter::new(&path), Err(_)); } + + /// Create a test account based on the specified seed. + /// The created test account might have default rent_epoch + /// and write_version. 
+ fn create_test_account(seed: u64) -> (StoredMeta, AccountSharedData) { + let data_byte = seed as u8; + let account = Account { + lamports: seed + 1, + data: std::iter::repeat(data_byte).take(seed as usize).collect(), + owner: Pubkey::new_unique(), + executable: seed % 2 > 0, + rent_epoch: if seed % 3 > 0 { + seed + } else { + RENT_EXEMPT_RENT_EPOCH + }, + }; + + let stored_meta = StoredMeta { + write_version_obsolete: u64::MAX, + pubkey: Pubkey::new_unique(), + data_len: seed, + }; + (stored_meta, AccountSharedData::from(account)) + } + + #[test] + fn test_write_account_and_index_blocks() { + let account_data_sizes = &[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1000, 2000, 3000, 4000, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, + ]; + + let accounts: Vec<_> = account_data_sizes + .iter() + .map(|size| create_test_account(*size)) + .collect(); + + let account_refs: Vec<_> = accounts + .iter() + .map(|account| (&account.0.pubkey, &account.1)) + .collect(); + + // Slot information is not used here + let account_data = (Slot::MAX, &account_refs[..]); + let hashes: Vec<_> = std::iter::repeat_with(|| AccountHash(Hash::new_unique())) + .take(account_data_sizes.len()) + .collect(); + + let write_versions: Vec<_> = accounts + .iter() + .map(|account| account.0.write_version_obsolete) + .collect(); + + let storable_accounts = + StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &account_data, + hashes.clone(), + write_versions.clone(), + ); + + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_write_account_and_index_blocks"); + + { + let writer = HotStorageWriter::new(&path).unwrap(); + writer.write_accounts(&storable_accounts, 0).unwrap(); + } + + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + + let num_accounts = account_data_sizes.len(); + + for i in 0..num_accounts { + let (stored_meta, next) = hot_storage + .get_account(IndexOffset(i as u32)) + .unwrap() + .unwrap(); + + let (account, address, hash, _write_version) = storable_accounts.get(i); + let account = account.unwrap(); + + assert_eq!(stored_meta.lamports(), account.lamports()); + assert_eq!(stored_meta.data().len(), account.data().len()); + assert_eq!(stored_meta.data(), account.data()); + assert_eq!(stored_meta.executable(), account.executable()); + assert_eq!(stored_meta.pubkey(), address); + assert_eq!(stored_meta.hash(), hash); + + assert_eq!(i + 1, next); + } + // Make sure it returns None on NUM_ACCOUNTS to allow termination on + // while loop in actual accounts-db read case. + assert_matches!( + hot_storage.get_account(IndexOffset(num_accounts as u32)), + Ok(None) + ); + } } diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 629f08fa1d3fe6..aff29a79fb03ab 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -64,11 +64,8 @@ impl<'accounts_file, M: TieredAccountMeta> ReadableAccount } /// Returns true if the data associated to this account is executable. - /// - /// Temporarily unimplemented!() as program runtime v2 will use - /// a different API for executable. 
fn executable(&self) -> bool { - unimplemented!(); + self.meta.flags().executable() } /// Returns the epoch that this account will next owe rent by parsing From 29737ab5e4ff5044d5d060aac913d17b6b164a46 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 25 Jan 2024 10:22:27 -0800 Subject: [PATCH 055/401] Use ThreadLocalMultiIterator for tests (#34947) * Use ThreadLocalMultiIterator for tests * some validator config was not using default_for_test --- core/src/validator.rs | 1 + local-cluster/tests/local_cluster.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 23045aa3557e74..f624dae1e08021 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -343,6 +343,7 @@ impl ValidatorConfig { Self { enforce_ulimit_nofile: false, rpc_config: JsonRpcConfig::default_for_test(), + block_production_method: BlockProductionMethod::ThreadLocalMultiIterator, ..Self::default() } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 5a4ca6e4cb6cc1..b79a1c4e309f26 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2268,7 +2268,7 @@ fn test_hard_fork_with_gap_in_roots() { let validator_config = ValidatorConfig { snapshot_config: LocalCluster::create_dummy_load_only_snapshot_config(), - ..ValidatorConfig::default() + ..ValidatorConfig::default_for_test() }; let mut config = ClusterConfig { cluster_lamports: 100_000, @@ -5236,7 +5236,7 @@ fn test_duplicate_shreds_switch_failure() { validator_keypair, validator_config: ValidatorConfig { voting_disabled, - ..ValidatorConfig::default() + ..ValidatorConfig::default_for_test() }, in_genesis, } From 6c4746dc14cddd4161e684048e46f3c367da45c5 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 25 Jan 2024 12:46:43 -0700 Subject: [PATCH 056/401] accounts-cluster-bench: Break early when a max-accounts limit is reached (#34922) * Improve help text * Add max_accounts cli arg and halt loop accordingly * Log totals before max_account exit * Add test of early end on max_accounts * Add logging when account-creation loop breaks --- accounts-cluster-bench/src/main.rs | 88 +++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 3 deletions(-) diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs index 13251b4a45a9e9..1a945090b39777 100644 --- a/accounts-cluster-bench/src/main.rs +++ b/accounts-cluster-bench/src/main.rs @@ -509,6 +509,7 @@ fn run_accounts_bench( close_nth_batch: u64, maybe_lamports: Option, num_instructions: usize, + max_accounts: Option, mint: Option, reclaim_accounts: bool, rpc_benches: Option>, @@ -682,7 +683,15 @@ fn run_accounts_bench( } count += 1; - if last_log.elapsed().as_millis() > 3000 || (count >= iterations && iterations != 0) { + let max_accounts_met = if let Some(max_accounts) = max_accounts { + total_accounts_created >= max_accounts + } else { + false + }; + if last_log.elapsed().as_millis() > 3000 + || (count >= iterations && iterations != 0) + || max_accounts_met + { info!( "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}", total_accounts_created, total_accounts_closed, tx_sent_count, count, balances @@ -690,6 +699,14 @@ fn run_accounts_bench( last_log = Instant::now(); } if iterations != 0 && count >= iterations { + info!("{iterations} iterations reached"); + break; + } + if max_accounts_met { + info!( + "Max account limit of {:?} reached", + max_accounts.unwrap_or_default() + ); 
break; } if executor.num_outstanding() >= batch_size { @@ -873,16 +890,23 @@ fn main() { Arg::with_name("num_instructions") .long("num-instructions") .takes_value(true) - .value_name("NUM") + .value_name("NUM_INSTRUCTIONS") .help("Number of accounts to create on each transaction"), ) .arg( Arg::with_name("iterations") .long("iterations") .takes_value(true) - .value_name("NUM") + .value_name("NUM_ITERATIONS") .help("Number of iterations to make. 0 = unlimited iterations."), ) + .arg( + Arg::with_name("max_accounts") + .long("max-accounts") + .takes_value(true) + .value_name("NUM_ACCOUNTS") + .help("Halt after client has created this number of accounts. Does not count closed accounts."), + ) .arg( Arg::with_name("check_gossip") .long("check-gossip") @@ -892,6 +916,7 @@ fn main() { Arg::with_name("mint") .long("mint") .takes_value(true) + .value_name("MINT_ADDRESS") .help("Mint address to initialize account"), ) .arg( @@ -904,12 +929,14 @@ fn main() { Arg::with_name("num_rpc_bench_threads") .long("num-rpc-bench-threads") .takes_value(true) + .value_name("NUM_THREADS") .help("Spawn this many RPC benching threads for each type passed by --rpc-bench"), ) .arg( Arg::with_name("rpc_bench") .long("rpc-bench") .takes_value(true) + .value_name("RPC_BENCH_TYPE(S)") .multiple(true) .help("Spawn a thread which calls a specific RPC method in a loop to benchmark it"), ) @@ -922,6 +949,7 @@ fn main() { let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4); let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0); let iterations = value_t!(matches, "iterations", usize).unwrap_or(10); + let max_accounts = value_t!(matches, "max_accounts", usize).ok(); let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1); if num_instructions == 0 || num_instructions > 500 { eprintln!("bad num_instructions: {num_instructions}"); @@ -1015,6 +1043,7 @@ fn main() { close_nth_batch, lamports, num_instructions, + max_accounts, mint, matches.is_present("reclaim_accounts"), rpc_benches, @@ -1091,6 +1120,58 @@ pub mod test { close_nth_batch, maybe_lamports, num_instructions, + None, + mint, + reclaim_accounts, + Some(vec![RpcBench::ProgramAccounts]), + 1, + ); + let post_txs = client.get_transaction_count().unwrap(); + start.stop(); + info!("{} pre {} post {}", start, pre_txs, post_txs); + } + + #[test] + fn test_halt_accounts_creation_at_max() { + solana_logger::setup(); + let mut validator_config = ValidatorConfig::default_for_test(); + let num_nodes = 1; + add_secondary_indexes(&mut validator_config.account_indexes); + add_secondary_indexes(&mut validator_config.rpc_config.account_indexes); + let mut config = ClusterConfig { + cluster_lamports: 10_000_000, + poh_config: PohConfig::new_sleep(Duration::from_millis(50)), + node_stakes: vec![100; num_nodes], + validator_configs: make_identical_validator_configs(&validator_config, num_nodes), + ..ClusterConfig::default() + }; + + let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); + let iterations = 100; + let maybe_space = None; + let batch_size = 20; + let close_nth_batch = 0; + let maybe_lamports = None; + let num_instructions = 2; + let mut start = Measure::start("total accounts run"); + let rpc_addr = cluster.entry_point_info.rpc().unwrap(); + let client = Arc::new(RpcClient::new_socket_with_commitment( + rpc_addr, + CommitmentConfig::confirmed(), + )); + let mint = None; + let reclaim_accounts = false; + let pre_txs = client.get_transaction_count().unwrap(); + run_accounts_bench( + 
client.clone(),
+            &[&cluster.funding_keypair],
+            iterations,
+            maybe_space,
+            batch_size,
+            close_nth_batch,
+            maybe_lamports,
+            num_instructions,
+            Some(90),
+            mint,
+            reclaim_accounts,
+            Some(vec![RpcBench::ProgramAccounts]),
+            1,
+        );
+        let post_txs = client.get_transaction_count().unwrap();
+        start.stop();
+        info!("{} pre {} post {}", start, pre_txs, post_txs);
+    }
@@ -1190,6 +1271,7 @@ pub mod test {
             close_nth_batch,
             Some(minimum_balance),
             num_instructions,
+            None,
             Some(spl_mint_keypair.pubkey()),
             true,
             None,

From 22500c23dbb2e36c9b7c67a21c0172e509c94ca2 Mon Sep 17 00:00:00 2001
From: Tyera
Date: Thu, 25 Jan 2024 13:39:06 -0700
Subject: [PATCH 057/401] Add rpc support for partitioned rewards (#34773)

* Check feature_set for enable_partitioned_epoch_reward

* Keep common variable outside if case

* Keep common early return out of if case, since the first_slot_in_epoch
  must exist for partition PDA to exist

* Get and parse epoch partition data PDA

* Find partition index for all addresses

* Pull relevant blocks and get rewards

* Reuse ordering and reformatting

* Remove feature deactivation from TestValidator

* Restore rewards iteration in first block in epoch for feature case to
  catch Voting rewards

* Add fn get_reward_map helper to dedupe code

* No need to start 2nd get_block_with_limit call with first block again

* Replace filter_map to parameterize RewardType filter expression

* Weird thing to make clippy and compiler agree
  (https://github.com/rust-lang/rust-clippy/issues/8098)

* Use activated_slot to ensure the right approach for past rewards epochs
---
 rpc/src/rpc.rs                             | 157 ++++++++++++++++-----
 validator/src/bin/solana-test-validator.rs |   5 +-
 2 files changed, 124 insertions(+), 38 deletions(-)

diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 5cc5b82344e0d1..16d78a913bc90b 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -7,7 +7,7 @@ use {
     base64::{prelude::BASE64_STANDARD, Engine},
     bincode::{config::Options, serialize},
     crossbeam_channel::{unbounded, Receiver, Sender},
-    jsonrpc_core::{futures::future, types::error, BoxFuture, Error, Metadata, Result},
+    jsonrpc_core::{futures::future, types::error, BoxFuture, Error, ErrorCode, Metadata, Result},
     jsonrpc_derive::rpc,
     solana_account_decoder::{
         parse_token::{is_known_spl_token_id, token_amount_to_ui_amount, UiTokenAmount},
@@ -62,6 +62,10 @@ use {
         clock::{Slot, UnixTimestamp, MAX_RECENT_BLOCKHASHES},
         commitment_config::{CommitmentConfig, CommitmentLevel},
         epoch_info::EpochInfo,
+        epoch_rewards_hasher::EpochRewardsHasher,
+        epoch_rewards_partition_data::{
+            get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion,
+        },
         epoch_schedule::EpochSchedule,
         exit::Exit,
         feature_set,
@@ -519,6 +523,38 @@ impl JsonRpcRequestProcessor {
         })
     }

+    async fn get_reward_map<F>(
+        &self,
+        slot: Slot,
+        addresses: &[String],
+        reward_type_filter: &F,
+        config: &RpcEpochConfig,
+    ) -> Result<HashMap<String, (Reward, Slot)>>
+    where
+        F: Fn(RewardType) -> bool,
+    {
+        let Ok(Some(block)) = self
+            .get_block(
+                slot,
+                Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()),
+            )
+            .await
+        else {
+            return Err(RpcCustomError::BlockNotAvailable { slot }.into());
+        };
+
+        Ok(block
+            .rewards
+            .unwrap_or_default()
+            .into_iter()
+            .filter(|reward| {
+                reward.reward_type.is_some_and(reward_type_filter)
+                    && addresses.contains(&reward.pubkey)
+            })
+            .map(|reward| (reward.clone().pubkey, (reward, slot)))
+            .collect())
+    }
+
     pub async fn get_inflation_reward(
         &self,
         addresses: Vec<Pubkey>,
@@ -527,18 +563,20 @@ impl JsonRpcRequestProcessor {
         let config = config.unwrap_or_default();
         let epoch_schedule = self.get_epoch_schedule();
         let first_available_block = self.get_first_available_block().await;
+        let slot_context =
RpcContextConfig { + commitment: config.commitment, + min_context_slot: config.min_context_slot, + }; let epoch = match config.epoch { Some(epoch) => epoch, None => epoch_schedule - .get_epoch(self.get_slot(RpcContextConfig { - commitment: config.commitment, - min_context_slot: config.min_context_slot, - })?) + .get_epoch(self.get_slot(slot_context)?) .saturating_sub(1), }; - // Rewards for this epoch are found in the first confirmed block of the next epoch - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch.saturating_add(1)); + let rewards_epoch = epoch.saturating_add(1); + let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(rewards_epoch); + if first_slot_in_epoch < first_available_block { if self.bigtable_ledger_storage.is_some() { return Err(RpcCustomError::LongTermStorageSlotSkipped { @@ -554,6 +592,8 @@ impl JsonRpcRequestProcessor { } } + let bank = self.get_bank_with_config(slot_context)?; + let first_confirmed_block_in_epoch = *self .get_blocks_with_limit(first_slot_in_epoch, 1, config.commitment) .await? @@ -561,44 +601,94 @@ impl JsonRpcRequestProcessor { .ok_or(RpcCustomError::BlockNotAvailable { slot: first_slot_in_epoch, })?; + let partitioned_epoch_reward_enabled_slot = bank + .feature_set + .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()); + let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot + .map(|slot| slot <= first_confirmed_block_in_epoch) + .unwrap_or(false); - let Ok(Some(first_confirmed_block)) = self - .get_block( + let mut reward_map: HashMap = { + let addresses: Vec = + addresses.iter().map(|pubkey| pubkey.to_string()).collect(); + + self.get_reward_map( first_confirmed_block_in_epoch, - Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), + &addresses, + &|reward_type| -> bool { + reward_type == RewardType::Voting + || (!partitioned_epoch_reward_enabled && reward_type == RewardType::Staking) + }, + &config, ) - .await - else { - return Err(RpcCustomError::BlockNotAvailable { - slot: first_confirmed_block_in_epoch, - } - .into()); + .await? }; - let addresses: Vec = addresses - .into_iter() - .map(|pubkey| pubkey.to_string()) - .collect(); + if partitioned_epoch_reward_enabled { + let partition_data_address = get_epoch_rewards_partition_data_address(rewards_epoch); + let partition_data_account = + bank.get_account(&partition_data_address) + .ok_or_else(|| Error { + code: ErrorCode::InternalError, + message: format!( + "Partition data account not found for epoch {:?} at {:?}", + epoch, partition_data_address + ), + data: None, + })?; + let EpochRewardsPartitionDataVersion::V0(partition_data) = + bincode::deserialize(partition_data_account.data()) + .map_err(|_| Error::internal_error())?; + let hasher = EpochRewardsHasher::new( + partition_data.num_partitions, + &partition_data.parent_blockhash, + ); + let mut partition_index_addresses: HashMap> = HashMap::new(); + for address in addresses.iter() { + let address_string = address.to_string(); + // Skip this address if (Voting) rewards were already found in + // the first block of the epoch + if !reward_map.contains_key(&address_string) { + let partition_index = hasher.clone().hash_address_to_partition(address); + partition_index_addresses + .entry(partition_index) + .and_modify(|list| list.push(address_string.clone())) + .or_insert(vec![address_string]); + } + } - let reward_hash: HashMap = first_confirmed_block - .rewards - .unwrap_or_default() - .into_iter() - .filter_map(|reward| match reward.reward_type? 
{ - RewardType::Staking | RewardType::Voting => addresses - .contains(&reward.pubkey) - .then(|| (reward.clone().pubkey, reward)), - _ => None, - }) - .collect(); + let block_list = self + .get_blocks_with_limit( + first_confirmed_block_in_epoch + 1, + partition_data.num_partitions, + config.commitment, + ) + .await?; + + for (partition_index, addresses) in partition_index_addresses.iter() { + let slot = *block_list + .get(*partition_index) + .ok_or_else(Error::internal_error)?; + + let index_reward_map = self + .get_reward_map( + slot, + addresses, + &|reward_type| -> bool { reward_type == RewardType::Staking }, + &config, + ) + .await?; + reward_map.extend(index_reward_map); + } + } let rewards = addresses .iter() .map(|address| { - if let Some(reward) = reward_hash.get(address) { + if let Some((reward, slot)) = reward_map.get(&address.to_string()) { return Some(RpcInflationReward { epoch, - effective_slot: first_confirmed_block_in_epoch, + effective_slot: *slot, amount: reward.lamports.unsigned_abs(), post_balance: reward.post_balance, commission: reward.commission, @@ -607,7 +697,6 @@ impl JsonRpcRequestProcessor { None }) .collect(); - Ok(rewards) } diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index aee5fc039df410..3c851e7788e2c3 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -19,7 +19,6 @@ use { account::AccountSharedData, clock::Slot, epoch_schedule::EpochSchedule, - feature_set, native_token::sol_to_lamports, pubkey::Pubkey, rent::Rent, @@ -349,9 +348,7 @@ fn main() { exit(1); }); - let mut features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); - // Remove this when client support is ready for the enable_partitioned_epoch_reward feature - features_to_deactivate.push(feature_set::enable_partitioned_epoch_reward::id()); + let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); if TestValidatorGenesis::ledger_exists(&ledger_path) { for (name, long) in &[ From 1e68ba5f8ab5eda5e29991ccbf31963adab66adb Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Thu, 25 Jan 2024 13:56:43 -0800 Subject: [PATCH 058/401] Move counter and logging code back to load_and_execute_transactions() (#34951) --- runtime/src/bank.rs | 251 +++++++++++++++++++++----------------------- 1 file changed, 120 insertions(+), 131 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ecca773a401d09..e00b04354697cf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -341,14 +341,6 @@ pub struct LoadAndExecuteSanitizedTransactionsOutput { // Vector of results indicating whether a transaction was executed or could not // be executed. Note executed transactions can still have failed! pub execution_results: Vec, - // Total number of transactions that were executed - pub executed_transactions_count: usize, - // Number of non-vote transactions that were executed - pub executed_non_vote_transactions_count: usize, - // Total number of the executed transactions that returned success/not - // an error. 
- pub executed_with_successful_result_count: usize, - pub signature_count: u64, } pub struct TransactionSimulationResult { @@ -5187,16 +5179,130 @@ impl Bank { account_overrides, log_messages_bytes_limit, ); + + let mut signature_count = 0; + + let mut executed_transactions_count: usize = 0; + let mut executed_non_vote_transactions_count: usize = 0; + let mut executed_with_successful_result_count: usize = 0; + let err_count = &mut error_counters.total; + let transaction_log_collector_config = + self.transaction_log_collector_config.read().unwrap(); + + let mut collect_logs_time = Measure::start("collect_logs_time"); + for (execution_result, tx) in sanitized_output.execution_results.iter().zip(sanitized_txs) { + if let Some(debug_keys) = &self.transaction_debug_keys { + for key in tx.message().account_keys().iter() { + if debug_keys.contains(key) { + let result = execution_result.flattened_result(); + info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx); + break; + } + } + } + + let is_vote = tx.is_simple_vote_transaction(); + + if execution_result.was_executed() // Skip log collection for unprocessed transactions + && transaction_log_collector_config.filter != TransactionLogCollectorFilter::None + { + let mut filtered_mentioned_addresses = Vec::new(); + if !transaction_log_collector_config + .mentioned_addresses + .is_empty() + { + for key in tx.message().account_keys().iter() { + if transaction_log_collector_config + .mentioned_addresses + .contains(key) + { + filtered_mentioned_addresses.push(*key); + } + } + } + + let store = match transaction_log_collector_config.filter { + TransactionLogCollectorFilter::All => { + !is_vote || !filtered_mentioned_addresses.is_empty() + } + TransactionLogCollectorFilter::AllWithVotes => true, + TransactionLogCollectorFilter::None => false, + TransactionLogCollectorFilter::OnlyMentionedAddresses => { + !filtered_mentioned_addresses.is_empty() + } + }; + + if store { + if let Some(TransactionExecutionDetails { + status, + log_messages: Some(log_messages), + .. 
+ }) = execution_result.details() + { + let mut transaction_log_collector = + self.transaction_log_collector.write().unwrap(); + let transaction_log_index = transaction_log_collector.logs.len(); + + transaction_log_collector.logs.push(TransactionLogInfo { + signature: *tx.signature(), + result: status.clone(), + is_vote, + log_messages: log_messages.clone(), + }); + for key in filtered_mentioned_addresses.into_iter() { + transaction_log_collector + .mentioned_address_map + .entry(key) + .or_default() + .push(transaction_log_index); + } + } + } + } + + if execution_result.was_executed() { + // Signature count must be accumulated only if the transaction + // is executed, otherwise a mismatched count between banking and + // replay could occur + signature_count += u64::from(tx.message().header().num_required_signatures); + executed_transactions_count += 1; + } + + match execution_result.flattened_result() { + Ok(()) => { + if !is_vote { + executed_non_vote_transactions_count += 1; + } + executed_with_successful_result_count += 1; + } + Err(err) => { + if *err_count == 0 { + debug!("tx error: {:?} {:?}", err, tx); + } + *err_count += 1; + } + } + } + collect_logs_time.stop(); + timings + .saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_time.as_us()); + + if *err_count > 0 { + debug!( + "{} errors of {} txs", + *err_count, + *err_count + executed_with_successful_result_count + ); + } + LoadAndExecuteTransactionsOutput { loaded_transactions: sanitized_output.loaded_transactions, execution_results: sanitized_output.execution_results, retryable_transaction_indexes, - executed_transactions_count: sanitized_output.executed_transactions_count, - executed_non_vote_transactions_count: sanitized_output - .executed_non_vote_transactions_count, - executed_with_successful_result_count: sanitized_output - .executed_with_successful_result_count, - signature_count: sanitized_output.signature_count, + executed_transactions_count, + executed_non_vote_transactions_count, + executed_with_successful_result_count, + signature_count, error_counters, } } @@ -5250,7 +5356,6 @@ impl Bank { load_time.stop(); let mut execution_time = Measure::start("execution_time"); - let mut signature_count: u64 = 0; let execution_results: Vec = loaded_transactions .iter_mut() @@ -5334,125 +5439,9 @@ impl Bank { timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); - let mut executed_transactions_count: usize = 0; - let mut executed_non_vote_transactions_count: usize = 0; - let mut executed_with_successful_result_count: usize = 0; - let err_count = &mut error_counters.total; - let transaction_log_collector_config = - self.transaction_log_collector_config.read().unwrap(); - - let mut collect_logs_time = Measure::start("collect_logs_time"); - for (execution_result, tx) in execution_results.iter().zip(sanitized_txs) { - if let Some(debug_keys) = &self.transaction_debug_keys { - for key in tx.message().account_keys().iter() { - if debug_keys.contains(key) { - let result = execution_result.flattened_result(); - info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx); - break; - } - } - } - - let is_vote = tx.is_simple_vote_transaction(); - - if execution_result.was_executed() // Skip log collection for unprocessed transactions - && transaction_log_collector_config.filter != TransactionLogCollectorFilter::None - { - let mut filtered_mentioned_addresses = Vec::new(); - if 
!transaction_log_collector_config - .mentioned_addresses - .is_empty() - { - for key in tx.message().account_keys().iter() { - if transaction_log_collector_config - .mentioned_addresses - .contains(key) - { - filtered_mentioned_addresses.push(*key); - } - } - } - - let store = match transaction_log_collector_config.filter { - TransactionLogCollectorFilter::All => { - !is_vote || !filtered_mentioned_addresses.is_empty() - } - TransactionLogCollectorFilter::AllWithVotes => true, - TransactionLogCollectorFilter::None => false, - TransactionLogCollectorFilter::OnlyMentionedAddresses => { - !filtered_mentioned_addresses.is_empty() - } - }; - - if store { - if let Some(TransactionExecutionDetails { - status, - log_messages: Some(log_messages), - .. - }) = execution_result.details() - { - let mut transaction_log_collector = - self.transaction_log_collector.write().unwrap(); - let transaction_log_index = transaction_log_collector.logs.len(); - - transaction_log_collector.logs.push(TransactionLogInfo { - signature: *tx.signature(), - result: status.clone(), - is_vote, - log_messages: log_messages.clone(), - }); - for key in filtered_mentioned_addresses.into_iter() { - transaction_log_collector - .mentioned_address_map - .entry(key) - .or_default() - .push(transaction_log_index); - } - } - } - } - - if execution_result.was_executed() { - // Signature count must be accumulated only if the transaction - // is executed, otherwise a mismatched count between banking and - // replay could occur - signature_count += u64::from(tx.message().header().num_required_signatures); - executed_transactions_count += 1; - } - - match execution_result.flattened_result() { - Ok(()) => { - if !is_vote { - executed_non_vote_transactions_count += 1; - } - executed_with_successful_result_count += 1; - } - Err(err) => { - if *err_count == 0 { - debug!("tx error: {:?} {:?}", err, tx); - } - *err_count += 1; - } - } - } - collect_logs_time.stop(); - timings - .saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_time.as_us()); - - if *err_count > 0 { - debug!( - "{} errors of {} txs", - *err_count, - *err_count + executed_with_successful_result_count - ); - } LoadAndExecuteSanitizedTransactionsOutput { loaded_transactions, execution_results, - executed_transactions_count, - executed_non_vote_transactions_count, - executed_with_successful_result_count, - signature_count, } } From e155d9c44580141d7cb8d9f8120d67254f3eb588 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 25 Jan 2024 16:58:56 -0500 Subject: [PATCH 059/401] Adds cache hash data deletion policy enum (#34956) --- accounts-db/src/accounts_db.rs | 19 ++++++---- accounts-db/src/cache_hash_data.rs | 56 +++++++++++++++++++----------- 2 files changed, 48 insertions(+), 27 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4c8d479cd5fc97..ded01efa8884ab 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -58,7 +58,9 @@ use { append_vec::{ aligned_stored_size, AppendVec, APPEND_VEC_MMAPPED_FILES_OPEN, STORE_META_OVERHEAD, }, - cache_hash_data::{CacheHashData, CacheHashDataFileReference}, + cache_hash_data::{ + CacheHashData, CacheHashDataFileReference, DeletionPolicy as CacheHashDeletionPolicy, + }, contains::Contains, epoch_accounts_hash::EpochAccountsHashManager, in_mem_accounts_index::StartupStats, @@ -7549,10 +7551,13 @@ impl AccountsDb { _ = std::fs::remove_dir_all(&failed_dir); failed_dir }; - CacheHashData::new( - accounts_hash_cache_path, - (kind == 
CalcAccountsHashKind::Incremental).then_some(storages_start_slot), - ) + let deletion_policy = match kind { + CalcAccountsHashKind::Full => CacheHashDeletionPolicy::AllUnused, + CalcAccountsHashKind::Incremental => { + CacheHashDeletionPolicy::UnusedAtLeast(storages_start_slot) + } + }; + CacheHashData::new(accounts_hash_cache_path, deletion_policy) } // modeled after calculate_accounts_delta_hash @@ -9775,7 +9780,7 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let accounts_hash_cache_path = temp_dir.path().to_path_buf(); self.scan_snapshot_stores_with_cache( - &CacheHashData::new(accounts_hash_cache_path, None), + &CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused), storage, stats, bins, @@ -10843,7 +10848,7 @@ pub mod tests { }; let result = accounts_db.scan_account_storage_no_bank( - &CacheHashData::new(accounts_hash_cache_path, None), + &CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused), &CalcAccountsHashConfig::default(), &get_storage_refs(&[storage]), test_scan, diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index fbd24e19f9bf7b..e9675b9fd22798 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -193,8 +193,7 @@ impl CacheHashDataFile { pub(crate) struct CacheHashData { cache_dir: PathBuf, pre_existing_cache_files: Arc>>, - /// Decides which old cache files to delete. See `delete_old_cache_files()` for more info. - storages_start_slot: Option, + deletion_policy: DeletionPolicy, pub stats: Arc, } @@ -206,7 +205,7 @@ impl Drop for CacheHashData { } impl CacheHashData { - pub(crate) fn new(cache_dir: PathBuf, storages_start_slot: Option) -> CacheHashData { + pub(crate) fn new(cache_dir: PathBuf, deletion_policy: DeletionPolicy) -> CacheHashData { std::fs::create_dir_all(&cache_dir).unwrap_or_else(|err| { panic!("error creating cache dir {}: {err}", cache_dir.display()) }); @@ -214,7 +213,7 @@ impl CacheHashData { let result = CacheHashData { cache_dir, pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())), - storages_start_slot, + deletion_policy, stats: Arc::default(), }; @@ -229,21 +228,24 @@ impl CacheHashData { let mut old_cache_files = std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap()); - // If `storages_start_slot` is None, we're doing a full accounts hash calculation, and thus - // all unused cache files can be deleted. - // If `storages_start_slot` is Some, we're doing an incremental accounts hash calculation, - // and we only want to delete the unused cache files *that IAH considered*. 
- if let Some(storages_start_slot) = self.storages_start_slot { - old_cache_files.retain(|old_cache_file| { - let Some(parsed_filename) = parse_filename(old_cache_file) else { - // if parsing the cache filename fails, we *do* want to delete it - return true; - }; - - // if the old cache file is in the incremental accounts hash calculation range, - // then delete it - parsed_filename.slot_range_start >= storages_start_slot - }); + match self.deletion_policy { + DeletionPolicy::AllUnused => { + // no additional work to do here; we will delete everything in `old_cache_files` + } + DeletionPolicy::UnusedAtLeast(storages_start_slot) => { + // when calculating an incremental accounts hash, we only want to delete the unused + // cache files *that IAH considered* + old_cache_files.retain(|old_cache_file| { + let Some(parsed_filename) = parse_filename(old_cache_file) else { + // if parsing the cache filename fails, we *do* want to delete it + return true; + }; + + // if the old cache file is in the incremental accounts hash calculation range, + // then delete it + parsed_filename.slot_range_start >= storages_start_slot + }); + } } if !old_cache_files.is_empty() { @@ -410,6 +412,19 @@ fn parse_filename(cache_filename: impl AsRef) -> Option { }) } +/// Decides which old cache files to delete +/// +/// See `delete_old_cache_files()` for more info. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum DeletionPolicy { + /// Delete *all* the unused cache files + /// Should be used when calculating full accounts hash + AllUnused, + /// Delete *only* the unused cache files with starting slot range *at least* this slot + /// Should be used when calculating incremental accounts hash + UnusedAtLeast(Slot), +} + #[cfg(test)] mod tests { use {super::*, crate::accounts_hash::AccountHash, rand::Rng}; @@ -477,7 +492,8 @@ mod tests { data_this_pass.push(this_bin_data); } } - let cache = CacheHashData::new(cache_dir.clone(), None); + let cache = + CacheHashData::new(cache_dir.clone(), DeletionPolicy::AllUnused); let file_name = PathBuf::from("test"); cache.save(&file_name, &data_this_pass).unwrap(); cache.get_cache_files(); From 9e09524595705320255aaa50d5e5b977508d4cf4 Mon Sep 17 00:00:00 2001 From: ellttBen Date: Thu, 25 Jan 2024 23:25:03 +0100 Subject: [PATCH 060/401] Cli: Return final transaction signature in solana program deploy (#34931) --- cli-output/src/cli_output.rs | 8 +++++++- cli/src/program.rs | 35 ++++++++++++++++++++--------------- cli/src/program_v4.rs | 3 +++ 3 files changed, 30 insertions(+), 16 deletions(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 7e51a05786fcfa..0eca9cde5c1a52 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -2075,6 +2075,7 @@ impl fmt::Display for CliTokenAccount { #[serde(rename_all = "camelCase")] pub struct CliProgramId { pub program_id: String, + pub signature: Option, } impl QuietDisplay for CliProgramId {} @@ -2082,7 +2083,12 @@ impl VerboseDisplay for CliProgramId {} impl fmt::Display for CliProgramId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - writeln_name_value(f, "Program Id:", &self.program_id) + writeln_name_value(f, "Program Id:", &self.program_id)?; + if let Some(ref signature) = self.signature { + writeln!(f)?; + writeln_name_value(f, "Signature:", signature)?; + } + Ok(()) } } diff --git a/cli/src/program.rs b/cli/src/program.rs index f4f31b72d0e83f..0d59f6a2564264 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -1337,11 +1337,12 @@ fn process_program_upgrade( let 
mut tx = Transaction::new_unsigned(message); let signers = &[fee_payer_signer, upgrade_authority_signer]; tx.try_sign(signers, blockhash)?; - rpc_client + let final_tx_sig = rpc_client .send_and_confirm_transaction_with_spinner(&tx) .map_err(|e| format!("Upgrading program failed: {e}"))?; let program_id = CliProgramId { program_id: program_id.to_string(), + signature: Some(final_tx_sig.to_string()), }; Ok(config.output_format.formatted_string(&program_id)) } @@ -2263,7 +2264,7 @@ fn do_process_program_write_and_deploy( )?; } - send_deploy_messages( + let final_tx_sig = send_deploy_messages( rpc_client, config, &initial_message, @@ -2278,6 +2279,7 @@ fn do_process_program_write_and_deploy( if let Some(program_signers) = program_signers { let program_id = CliProgramId { program_id: program_signers[0].pubkey().to_string(), + signature: final_tx_sig.as_ref().map(ToString::to_string), }; Ok(config.output_format.formatted_string(&program_id)) } else { @@ -2396,7 +2398,7 @@ fn do_process_program_upgrade( )?; } - send_deploy_messages( + let final_tx_sig = send_deploy_messages( rpc_client, config, &initial_message, @@ -2410,6 +2412,7 @@ fn do_process_program_upgrade( let program_id = CliProgramId { program_id: program_id.to_string(), + signature: final_tx_sig.as_ref().map(ToString::to_string), }; Ok(config.output_format.formatted_string(&program_id)) } @@ -2534,7 +2537,7 @@ fn send_deploy_messages( initial_signer: Option<&dyn Signer>, write_signer: Option<&dyn Signer>, final_signers: Option<&[&dyn Signer]>, -) -> Result<(), Box> { +) -> Result, Box> { if let Some(message) = initial_message { if let Some(initial_signer) = initial_signer { trace!("Preparing the required accounts"); @@ -2626,20 +2629,22 @@ fn send_deploy_messages( let mut signers = final_signers.to_vec(); signers.push(fee_payer_signer); final_tx.try_sign(&signers, blockhash)?; - rpc_client - .send_and_confirm_transaction_with_spinner_and_config( - &final_tx, - config.commitment, - RpcSendTransactionConfig { - preflight_commitment: Some(config.commitment.commitment), - ..RpcSendTransactionConfig::default() - }, - ) - .map_err(|e| format!("Deploying program failed: {e}"))?; + return Ok(Some( + rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &final_tx, + config.commitment, + RpcSendTransactionConfig { + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ) + .map_err(|e| format!("Deploying program failed: {e}"))?, + )); } } - Ok(()) + Ok(None) } fn create_ephemeral_keypair( diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index a96b227ef85312..1f76c0594e6e7a 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -636,6 +636,7 @@ pub fn process_deploy_program( let program_id = CliProgramId { program_id: program_address.to_string(), + signature: None, }; Ok(config.output_format.formatted_string(&program_id)) } @@ -690,6 +691,7 @@ fn process_undeploy_program( let program_id = CliProgramId { program_id: program_address.to_string(), + signature: None, }; Ok(config.output_format.formatted_string(&program_id)) } @@ -716,6 +718,7 @@ fn process_finalize_program( let program_id = CliProgramId { program_id: program_address.to_string(), + signature: None, }; Ok(config.output_format.formatted_string(&program_id)) } From f0d67d7f28eb4bccfab0745df5c89cad3073d65d Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 25 Jan 2024 17:27:45 -0500 Subject: [PATCH 061/401] Adds hit/miss stats for cache hash data (#34954) --- accounts-db/src/accounts_db.rs | 23 
+++++++++++++++++++++++
 accounts-db/src/cache_hash_data_stats.rs |  6 ++++++
 2 files changed, 29 insertions(+)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index ded01efa8884ab..b1103cb17248fe 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -7151,6 +7151,29 @@ impl AccountsDb {
                 })
                 .collect::<Vec<_>>();

+        // Calculate the hits and misses of the hash data files cache.
+        // This is outside of the parallel loop above so that we only need to
+        // update each atomic stat value once.
+        // There are approximately 173 items in the cache files list,
+        // so it should be very fast to iterate and compute.
+        // (173 cache files == 432,000 slots / 2,500 slots-per-cache-file)
+        let mut hits = 0;
+        let mut misses = 0;
+        for cache_file in &cache_files {
+            match cache_file {
+                ScanAccountStorageResult::CacheFileAlreadyExists(_) => hits += 1,
+                ScanAccountStorageResult::CacheFileNeedsToBeCreated(_) => misses += 1,
+            };
+        }
+        cache_hash_data
+            .stats
+            .hits
+            .fetch_add(hits, Ordering::Relaxed);
+        cache_hash_data
+            .stats
+            .misses
+            .fetch_add(misses, Ordering::Relaxed);
+
         // deletes the old files that will not be used before creating new ones
         cache_hash_data.delete_old_cache_files();

diff --git a/accounts-db/src/cache_hash_data_stats.rs b/accounts-db/src/cache_hash_data_stats.rs
index f8d3364f8f81e0..ba134dc226a288 100644
--- a/accounts-db/src/cache_hash_data_stats.rs
+++ b/accounts-db/src/cache_hash_data_stats.rs
@@ -15,6 +15,10 @@ pub struct CacheHashDataStats {
     pub load_us: AtomicU64,
     pub read_us: AtomicU64,
     pub unused_cache_files: AtomicUsize,
+    /// the number of hash data files that were found in the cache and reused
+    pub hits: AtomicUsize,
+    /// the number of hash data files that were not found in the cache
+    pub misses: AtomicUsize,
 }

 impl CacheHashDataStats {
@@ -69,6 +73,8 @@ impl CacheHashDataStats {
                 self.unused_cache_files.load(Ordering::Relaxed),
                 i64
             ),
+            ("hits", self.hits.load(Ordering::Relaxed), i64),
+            ("misses", self.misses.load(Ordering::Relaxed), i64),
         );
     }
 }

From 51c0649af8cce3066e499cab1a47a8d1be6c6e6c Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Thu, 25 Jan 2024 17:17:24 -0800
Subject: [PATCH 062/401] [TieredStorage] Use RENT_EXEMPT_RENT_EPOCH in
 HotStorageWriter (#34950)

#### Problem
In HotStorageWriter::write_accounts, it skips storing rent-epoch when
the rent-epoch equals Epoch::MAX.  While the value is correct, it is
more suitable to use RENT_EXEMPT_RENT_EPOCH instead, as the goal here
is to save bytes for rent-exempt accounts.

#### Summary of Changes
Replace Epoch::MAX with RENT_EXEMPT_RENT_EPOCH when checking whether to
skip storing rent-epoch in HotStorageWriter.
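
The intended behavior is easy to state in isolation. A minimal sketch follows; it is illustrative only, with the constant inlined so the snippet is self-contained (in the tree it lives in accounts-db's rent_collector module):

```rust
// Sketch only: mirrors the check this patch touches, with the constant
// inlined for a self-contained example.
type Epoch = u64;
const RENT_EXEMPT_RENT_EPOCH: Epoch = Epoch::MAX;

/// Rent-exempt accounts return None so the writer can skip persisting the
/// rent_epoch field entirely and save those bytes on disk.
fn rent_epoch_to_store(rent_epoch: Epoch) -> Option<Epoch> {
    (rent_epoch != RENT_EXEMPT_RENT_EPOCH).then_some(rent_epoch)
}

fn main() {
    // Non-exempt accounts still persist their rent epoch.
    assert_eq!(rent_epoch_to_store(5), Some(5));
    // Rent-exempt accounts elide the field.
    assert_eq!(rent_epoch_to_store(RENT_EXEMPT_RENT_EPOCH), None);
}
```
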
---
 accounts-db/src/tiered_storage/hot.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs
index 8652b0b2aa9514..f87b1c2c25df7a 100644
--- a/accounts-db/src/tiered_storage/hot.rs
+++ b/accounts-db/src/tiered_storage/hot.rs
@@ -5,6 +5,7 @@ use {
         account_storage::meta::StoredAccountMeta,
         accounts_file::MatchAccountOwnerError,
         accounts_hash::AccountHash,
+        rent_collector::RENT_EXEMPT_RENT_EPOCH,
         tiered_storage::{
             byte_block,
             file::TieredStorageFile,
@@ -562,7 +563,7 @@ impl HotStorageWriter {
                         acc.data(),
                         acc.executable(),
                         // only persist rent_epoch for those non-rent-exempt accounts
-                        (acc.rent_epoch() != Epoch::MAX).then_some(acc.rent_epoch()),
+                        (acc.rent_epoch() != RENT_EXEMPT_RENT_EPOCH).then_some(acc.rent_epoch()),
                         Some(*account_hash),
                     )
                 })
@@ -606,7 +607,6 @@ pub mod tests {
         super::*,
         crate::{
             account_storage::meta::StoredMeta,
-            rent_collector::RENT_EXEMPT_RENT_EPOCH,
             tiered_storage::{
                 byte_block::ByteBlockWriter,
                 file::TieredStorageFile,

From 89fd6acb8fc76a51424b2e20c4d2683ab15a00f1 Mon Sep 17 00:00:00 2001
From: steviez
Date: Thu, 25 Jan 2024 22:57:26 -0600
Subject: [PATCH 063/401] ledger-tool: Minor cleanup on
 --ignore-ulimit-nofile-error flag (#34944)

This argument is a flag and doesn't take a value; however, it had the
.value_name() modifier set with "FORMAT". This could be confusing, so
remove .value_name() and add .takes_value(false).
---
 ledger-tool/src/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index d81d9c212ba432..d4fd8a3b2588a9 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -843,7 +843,7 @@ fn main() {
         .arg(
             Arg::with_name("ignore_ulimit_nofile_error")
                 .long("ignore-ulimit-nofile-error")
-                .value_name("FORMAT")
+                .takes_value(false)
                 .global(true)
                 .help(
                     "Allow opening the blockstore to succeed even if the desired open file \

From 3add40fc07bd5a922ed443023264d14fa0a9d41f Mon Sep 17 00:00:00 2001
From: steviez
Date: Fri, 26 Jan 2024 00:55:05 -0600
Subject: [PATCH 064/401] ledger-tool: Refactor accounts subcommand output code
 (#34915)

The accounts command currently dumps every single account in the
AccountsDb. This is obviously a lot of output, so a previous change
streamed the accounts instead of collecting and dumping at the end.
The streaming approach is much more performant, but the implementation
is non-trivial.

This change
- Moves the accounts output code to output.rs
- Refactors the logic into several objects that implement the functionality
- Adjusts the json output to also include the summary

This change lays the groundwork for cleanly adding several more flags
that will allow for querying different subsets of accounts.
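
The shape of the new interface can be sketched with simplified types. The names follow the patch, but the field set and bodies below are illustrative stubs; the real AccountsOutputStreamer in output.rs also carries the bank and output format:

```rust
// Illustrative stub of the facade this change introduces; not the actual
// implementation from output.rs.
struct AccountsOutputConfig {
    include_sysvars: bool,
    include_account_contents: bool,
    include_account_data: bool,
}

struct AccountsOutputStreamer {
    config: AccountsOutputConfig,
}

impl AccountsOutputStreamer {
    fn new(config: AccountsOutputConfig) -> Self {
        Self { config }
    }

    /// Streams accounts one at a time instead of collecting them first,
    /// so memory usage stays flat no matter how large the AccountsDb is.
    fn output(&self) -> Result<(), String> {
        // The real implementation scans the bank and hands each account to
        // either a JSON serializer or the pretty-printer, then appends the
        // summary totals.
        let _ = &self.config;
        Ok(())
    }
}

fn main() {
    let streamer = AccountsOutputStreamer::new(AccountsOutputConfig {
        include_sysvars: false,
        include_account_contents: true,
        include_account_data: true,
    });
    streamer.output().unwrap();
}
```
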
--- ledger-tool/src/main.rs | 148 ++++++++----------------------- ledger-tool/src/output.rs | 181 +++++++++++++++++++++++++++++++++++++- runtime/src/bank.rs | 3 +- 3 files changed, 218 insertions(+), 114 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index d4fd8a3b2588a9..50bc3a40263743 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1,19 +1,24 @@ #![allow(clippy::arithmetic_side_effects)] use { - crate::{args::*, bigtable::*, blockstore::*, ledger_path::*, ledger_utils::*, program::*}, + crate::{ + args::*, + bigtable::*, + blockstore::*, + ledger_path::*, + ledger_utils::*, + output::{output_account, AccountsOutputConfig, AccountsOutputStreamer}, + program::*, + }, clap::{ crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, }, dashmap::DashMap, log::*, - serde::{ - ser::{SerializeSeq, Serializer}, - Serialize, - }, - solana_account_decoder::{UiAccount, UiAccountData, UiAccountEncoding}, + serde::Serialize, + solana_account_decoder::UiAccountEncoding, solana_accounts_db::{ - accounts::Accounts, accounts_db::CalcAccountsHashDataSource, accounts_index::ScanConfig, + accounts_db::CalcAccountsHashDataSource, accounts_index::ScanConfig, hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, }, solana_clap_utils::{ @@ -25,7 +30,7 @@ use { validate_maximum_incremental_snapshot_archives_to_retain, }, }, - solana_cli_output::{CliAccount, CliAccountNewConfig, OutputFormat}, + solana_cli_output::OutputFormat, solana_core::{ system_monitor_service::{SystemMonitorService, SystemMonitorStatsReportConfig}, validator::BlockVerificationMethod, @@ -38,7 +43,7 @@ use { }, solana_measure::{measure, measure::Measure}, solana_runtime::{ - bank::{bank_hash_details, Bank, RewardCalculationEvent, TotalAccountsStats}, + bank::{bank_hash_details, Bank, RewardCalculationEvent}, bank_forks::BankForks, snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_bank_utils, @@ -73,7 +78,7 @@ use { collections::{HashMap, HashSet}, ffi::OsStr, fs::File, - io::{self, stdout, Write}, + io::{self, Write}, num::NonZeroUsize, path::{Path, PathBuf}, process::{exit, Command, Stdio}, @@ -102,44 +107,6 @@ fn parse_encoding_format(matches: &ArgMatches<'_>) -> UiAccountEncoding { } } -fn output_account( - pubkey: &Pubkey, - account: &AccountSharedData, - modified_slot: Option, - print_account_data: bool, - encoding: UiAccountEncoding, -) { - println!("{pubkey}:"); - println!(" balance: {} SOL", lamports_to_sol(account.lamports())); - println!(" owner: '{}'", account.owner()); - println!(" executable: {}", account.executable()); - if let Some(slot) = modified_slot { - println!(" slot: {slot}"); - } - println!(" rent_epoch: {}", account.rent_epoch()); - println!(" data_len: {}", account.data().len()); - if print_account_data { - let account_data = UiAccount::encode(pubkey, account, encoding, None, None).data; - match account_data { - UiAccountData::Binary(data, data_encoding) => { - println!(" data: '{data}'"); - println!( - " encoding: {}", - serde_json::to_string(&data_encoding).unwrap() - ); - } - UiAccountData::Json(account_data) => { - println!( - " data: '{}'", - serde_json::to_string(&account_data).unwrap() - ); - println!(" encoding: \"jsonParsed\""); - } - UiAccountData::LegacyBinary(_) => {} - }; - } -} - fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result<()> { let mut child = Command::new("dot") .arg(format!("-T{output_format}")) @@ -2192,7 +2159,6 @@ fn main() { ("accounts", 
Some(arg_matches)) => { let process_options = parse_process_options(&ledger_path, arg_matches); let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let include_sysvars = arg_matches.is_present("include_sysvars"); let blockstore = open_blockstore( &ledger_path, arg_matches, @@ -2206,70 +2172,30 @@ fn main() { snapshot_archive_path, incremental_snapshot_archive_path, ); - let bank = bank_forks.read().unwrap().working_bank(); - let mut serializer = serde_json::Serializer::new(stdout()); - let (summarize, mut json_serializer) = - match OutputFormat::from_matches(arg_matches, "output_format", false) { - OutputFormat::Json | OutputFormat::JsonCompact => { - (false, Some(serializer.serialize_seq(None).unwrap())) - } - _ => (true, None), - }; - let mut total_accounts_stats = TotalAccountsStats::default(); - let rent_collector = bank.rent_collector(); - let print_account_contents = !arg_matches.is_present("no_account_contents"); - let print_account_data = !arg_matches.is_present("no_account_data"); - let data_encoding = parse_encoding_format(arg_matches); - let cli_account_new_config = CliAccountNewConfig { - data_encoding, - ..CliAccountNewConfig::default() - }; - let scan_func = - |some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>| { - if let Some((pubkey, account, slot)) = some_account_tuple - .filter(|(_, account, _)| Accounts::is_loadable(account.lamports())) - { - if !include_sysvars && solana_sdk::sysvar::is_sysvar_id(pubkey) { - return; - } - total_accounts_stats.accumulate_account( - pubkey, - &account, - rent_collector, - ); - - if print_account_contents { - if let Some(json_serializer) = json_serializer.as_mut() { - let cli_account = CliAccount::new_with_config( - pubkey, - &account, - &cli_account_new_config, - ); - json_serializer.serialize_element(&cli_account).unwrap(); - } else { - output_account( - pubkey, - &account, - Some(slot), - print_account_data, - data_encoding, - ); - } - } - } - }; - let mut measure = Measure::start("scanning accounts"); - bank.scan_all_accounts(scan_func).unwrap(); - measure.stop(); - info!("{}", measure); - if let Some(json_serializer) = json_serializer { - json_serializer.end().unwrap(); - } - if summarize { - println!("\n{total_accounts_stats:#?}"); - } + let include_sysvars = arg_matches.is_present("include_sysvars"); + let include_account_contents = !arg_matches.is_present("no_account_contents"); + let include_account_data = !arg_matches.is_present("no_account_data"); + let account_data_encoding = parse_encoding_format(arg_matches); + let config = AccountsOutputConfig { + include_sysvars, + include_account_contents, + include_account_data, + account_data_encoding, + }; + let output_format = + OutputFormat::from_matches(arg_matches, "output_format", false); + + let accounts_streamer = + AccountsOutputStreamer::new(bank, output_format, config); + let (_, scan_time) = measure!( + accounts_streamer + .output() + .map_err(|err| error!("Error while outputting accounts: {err}")), + "accounts scan" + ); + info!("{scan_time}"); } ("capitalization", Some(arg_matches)) => { let process_options = parse_process_options(&ledger_path, arg_matches); diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 4c953b37baa0f2..e21676771d598f 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -1,11 +1,20 @@ use { crate::ledger_utils::get_program_ids, chrono::{Local, TimeZone}, - serde::{Deserialize, Serialize}, - solana_cli_output::{display::writeln_transaction, OutputFormat, QuietDisplay, 
VerboseDisplay},
+ serde::{
+ ser::{Impossible, SerializeSeq, SerializeStruct, Serializer},
+ Deserialize, Serialize,
+ },
+ solana_account_decoder::{UiAccount, UiAccountData, UiAccountEncoding},
+ solana_cli_output::{
+ display::writeln_transaction, CliAccount, CliAccountNewConfig, OutputFormat, QuietDisplay,
+ VerboseDisplay,
+ },
 solana_entry::entry::Entry,
 solana_ledger::blockstore::Blockstore,
+ solana_runtime::bank::{Bank, TotalAccountsStats},
 solana_sdk::{
+ account::{AccountSharedData, ReadableAccount},
 clock::{Slot, UnixTimestamp},
 hash::Hash,
 native_token::lamports_to_sol,
@@ -15,10 +24,13 @@ use {
 EncodedConfirmedBlock, EncodedTransactionWithStatusMeta, EntrySummary, Rewards,
 },
 std::{
+ cell::RefCell,
 collections::HashMap,
 fmt::{self, Display, Formatter},
 io::{stdout, Write},
+ rc::Rc,
 result::Result,
+ sync::Arc,
 },
 };

@@ -548,3 +560,168 @@ pub fn output_sorted_program_ids(program_ids: HashMap<Pubkey, u64>) {
 println!("{:<44}: {}", program_id.to_string(), count);
 }
 }
+
+/// A type to facilitate streaming account information to an output destination
+///
+/// This type scans every account, so streaming is preferred over the simpler
+/// approach of accumulating all the accounts into a Vec and printing or
+/// serializing the Vec directly.
+pub struct AccountsOutputStreamer {
+ account_scanner: AccountsScanner,
+ total_accounts_stats: Rc<RefCell<TotalAccountsStats>>,
+ output_format: OutputFormat,
+}
+
+pub struct AccountsOutputConfig {
+ pub include_sysvars: bool,
+ pub include_account_contents: bool,
+ pub include_account_data: bool,
+ pub account_data_encoding: UiAccountEncoding,
+}
+
+impl AccountsOutputStreamer {
+ pub fn new(bank: Arc<Bank>, output_format: OutputFormat, config: AccountsOutputConfig) -> Self {
+ let total_accounts_stats = Rc::new(RefCell::new(TotalAccountsStats::default()));
+ let account_scanner = AccountsScanner {
+ bank,
+ total_accounts_stats: total_accounts_stats.clone(),
+ config,
+ };
+ Self {
+ account_scanner,
+ total_accounts_stats,
+ output_format,
+ }
+ }
+
+ pub fn output(&self) -> Result<(), String> {
+ match self.output_format {
+ OutputFormat::Json | OutputFormat::JsonCompact => {
+ let mut serializer = serde_json::Serializer::new(stdout());
+ let mut struct_serializer = serializer
+ .serialize_struct("accountInfo", 2)
+ .map_err(|err| format!("unable to start serialization: {err}"))?;
+ struct_serializer
+ .serialize_field("accounts", &self.account_scanner)
+ .map_err(|err| format!("unable to serialize accounts scanner: {err}"))?;
+ struct_serializer
+ .serialize_field("summary", &*self.total_accounts_stats.borrow())
+ .map_err(|err| format!("unable to serialize accounts summary: {err}"))?;
+ SerializeStruct::end(struct_serializer)
+ .map_err(|err| format!("unable to end serialization: {err}"))
+ }
+ _ => {
+ // The compiler needs a placeholder type to satisfy the generic
+ // SerializeSeq trait on AccountScanner::output().
The type
+ // doesn't really matter since we're passing None, so just use
+ // serde::ser::Impossible as it already implements SerializeSeq
+ self.account_scanner
+ .output::<Impossible<(), serde_json::Error>>(&mut None);
+ println!("\n{:#?}", self.total_accounts_stats.borrow());
+ Ok(())
+ }
+ }
+ }
+}
+
+struct AccountsScanner {
+ bank: Arc<Bank>,
+ total_accounts_stats: Rc<RefCell<TotalAccountsStats>>,
+ config: AccountsOutputConfig,
+}
+
+impl AccountsScanner {
+ /// Returns true if this account should be included in the output
+ fn should_process_account(&self, account: &AccountSharedData, pubkey: &Pubkey) -> bool {
+ solana_accounts_db::accounts::Accounts::is_loadable(account.lamports())
+ && (self.config.include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey))
+ }
+
+ pub fn output<S>(&self, seq_serializer: &mut Option<S>)
+ where
+ S: SerializeSeq,
+ {
+ let mut total_accounts_stats = self.total_accounts_stats.borrow_mut();
+ let rent_collector = self.bank.rent_collector();
+
+ let cli_account_new_config = CliAccountNewConfig {
+ data_encoding: self.config.account_data_encoding,
+ ..CliAccountNewConfig::default()
+ };
+
+ let scan_func = |account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>| {
+ if let Some((pubkey, account, slot)) = account_tuple
+ .filter(|(pubkey, account, _)| self.should_process_account(account, pubkey))
+ {
+ total_accounts_stats.accumulate_account(pubkey, &account, rent_collector);
+
+ if self.config.include_account_contents {
+ if let Some(serializer) = seq_serializer {
+ let cli_account =
+ CliAccount::new_with_config(pubkey, &account, &cli_account_new_config);
+ serializer.serialize_element(&cli_account).unwrap();
+ } else {
+ output_account(
+ pubkey,
+ &account,
+ Some(slot),
+ self.config.include_account_data,
+ self.config.account_data_encoding,
+ );
+ }
+ }
+ }
+ };
+
+ self.bank.scan_all_accounts(scan_func).unwrap();
+ }
+}
+
+impl Serialize for AccountsScanner {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut seq_serializer = Some(serializer.serialize_seq(None)?);
+ self.output(&mut seq_serializer);
+ seq_serializer.unwrap().end()
+ }
+}
+
+pub fn output_account(
+ pubkey: &Pubkey,
+ account: &AccountSharedData,
+ modified_slot: Option<Slot>,
+ print_account_data: bool,
+ encoding: UiAccountEncoding,
+) {
+ println!("{pubkey}:");
+ println!(" balance: {} SOL", lamports_to_sol(account.lamports()));
+ println!(" owner: '{}'", account.owner());
+ println!(" executable: {}", account.executable());
+ if let Some(slot) = modified_slot {
+ println!(" slot: {slot}");
+ }
+ println!(" rent_epoch: {}", account.rent_epoch());
+ println!(" data_len: {}", account.data().len());
+ if print_account_data {
+ let account_data = UiAccount::encode(pubkey, account, encoding, None, None).data;
+ match account_data {
+ UiAccountData::Binary(data, data_encoding) => {
+ println!(" data: '{data}'");
+ println!(
+ " encoding: {}",
+ serde_json::to_string(&data_encoding).unwrap()
+ );
+ }
+ UiAccountData::Json(account_data) => {
+ println!(
+ " data: '{}'",
+ serde_json::to_string(&account_data).unwrap()
+ );
+ println!(" encoding: \"jsonParsed\"");
+ }
+ UiAccountData::LegacyBinary(_) => {}
+ };
+ }
+}

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index e00b04354697cf..03971724438dc9 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -72,6 +72,7 @@ use {
 slice::ParallelSlice,
 ThreadPool, ThreadPoolBuilder,
 },
+ serde::Serialize,
 solana_accounts_db::{
 account_overrides::AccountOverrides,
 accounts::{
@@ -8485,7 +8486,7 @@ impl CollectRentInPartitionInfo {
 }

 /// Struct to collect stats
when scanning all accounts in `get_total_accounts_stats()`
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, Serialize)]
 pub struct TotalAccountsStats {
 /// Total number of accounts
 pub num_accounts: usize,

From 5da06c5f7da146b72dfa7e9dd5a96d5ebd7b5c63 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Jan 2024 20:33:48 +0800
Subject: [PATCH 065/401] build(deps): bump hidapi from 2.4.1 to 2.5.0 (#34965)

Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 2.4.1 to 2.5.0.
- [Release notes](https://github.com/ruabmbua/hidapi-rs/releases)
- [Commits](https://github.com/ruabmbua/hidapi-rs/commits)

---
updated-dependencies:
- dependency-name: hidapi
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock | 7 ++++---
 Cargo.toml | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index d7c2a2405e8e69..c98ae60cf9cf14 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2428,14 +2428,15 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

 [[package]]
 name = "hidapi"
-version = "2.4.1"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "723777263b0dcc5730aec947496bd8c3940ba63c15f5633b288cc615f4f6af79"
+checksum = "3b125253e27c9fd67beac20665348f4bfc5b488b5c8a1020610eeb7e6d205cde"
 dependencies = [
 "cc",
+ "cfg-if 1.0.0",
 "libc",
 "pkg-config",
- "winapi 0.3.9",
+ "windows-sys 0.48.0",
 ]

 [[package]]

diff --git a/Cargo.toml b/Cargo.toml
index 242dfa13d032f6..2aa92016e9a22f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -212,7 +212,7 @@ gethostname = "0.2.3"
 getrandom = "0.2.10"
 goauth = "0.13.1"
 hex = "0.4.3"
-hidapi = { version = "2.4.1", default-features = false }
+hidapi = { version = "2.5.0", default-features = false }
 histogram = "0.6.9"
 hmac = "0.12.1"
 http = "0.2.11"

From 5ecc47ec5a8867ca04b8b616c72749329e6a3aed Mon Sep 17 00:00:00 2001
From: Tao Zhu <82401714+tao-stones@users.noreply.github.com>
Date: Fri, 26 Jan 2024 08:24:45 -0600
Subject: [PATCH 066/401] separate priority fee and transaction fee from fee calculation (#34757)

Add function calculate_fee_details() that returns FeeDetails with base and priority fee separated.
---
 sdk/src/fee.rs | 47 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 8 deletions(-)

diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs
index 2fb045aba5d73e..bd3af75e70da18 100644
--- a/sdk/src/fee.rs
+++ b/sdk/src/fee.rs
@@ -31,6 +31,19 @@ pub struct FeeStructure {
 pub compute_fee_bins: Vec<FeeBin>,
 }

+/// Return type of calculate_fee(...)
+#[derive(Debug, Default, Clone, Eq, PartialEq)]
+pub struct FeeDetails {
+ transaction_fee: u64,
+ prioritization_fee: u64,
+}
+
+impl FeeDetails {
+ pub fn total_fee(&self) -> u64 {
+ self.transaction_fee.saturating_add(self.prioritization_fee)
+ }
+}
+
 pub const ACCOUNT_DATA_COST_PAGE_SIZE: u64 = 32_u64.saturating_mul(1024);

 impl FeeStructure {
@@ -75,15 +88,32 @@ impl FeeStructure {
 .saturating_mul(heap_cost)
 }

- /// Calculate fee for `SanitizedMessage`
 #[cfg(not(target_os = "solana"))]
 pub fn calculate_fee(
 &self,
 message: &SanitizedMessage,
- _lamports_per_signature: u64,
+ lamports_per_signature: u64,
 budget_limits: &FeeBudgetLimits,
 include_loaded_account_data_size_in_fee: bool,
 ) -> u64 {
+ self.calculate_fee_details(
+ message,
+ lamports_per_signature,
+ budget_limits,
+ include_loaded_account_data_size_in_fee,
+ )
+ .total_fee()
+ }
+
+ /// Calculate fee details for `SanitizedMessage`
+ #[cfg(not(target_os = "solana"))]
+ pub fn calculate_fee_details(
+ &self,
+ message: &SanitizedMessage,
+ _lamports_per_signature: u64,
+ budget_limits: &FeeBudgetLimits,
+ include_loaded_account_data_size_in_fee: bool,
+ ) -> FeeDetails {
 let signature_fee = message
 .num_signatures()
 .saturating_mul(self.lamports_per_signature);
@@ -115,12 +145,13 @@ impl FeeStructure {
 .unwrap_or_default()
 });

- (budget_limits
- .prioritization_fee
- .saturating_add(signature_fee)
- .saturating_add(write_lock_fee)
- .saturating_add(compute_fee) as f64)
- .round() as u64
+ FeeDetails {
+ transaction_fee: (signature_fee
+ .saturating_add(write_lock_fee)
+ .saturating_add(compute_fee) as f64)
+ .round() as u64,
+ prioritization_fee: budget_limits.prioritization_fee,
+ }
 }
 }

From 663a1bb8f44386f903419b437cc59c0ad6a2811a Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Fri, 26 Jan 2024 06:48:28 -0800
Subject: [PATCH 067/401] [TieredStorage] Write owners block for HotAccountStorage (#34927)

#### Problem
So far, HotStorageWriter::write_accounts() only writes the accounts blocks and the index block.

#### Summary of Changes
This PR additionally writes the owners block in HotStorageWriter::write_accounts().

#### Test Plan
Extended the existing test for HotStorageWriter to cover the owners block.
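The core of an owners block is owner deduplication: each unique owner pubkey is written once, and every account meta records only a small `OwnerOffset` into that list. A rough sketch of the idea (a simplified stand-in, not the tree's actual `OwnersTable`; `[u8; 32]` stands in for `Pubkey`):

```rust
use std::collections::HashMap;

#[derive(Default)]
struct OwnersTableSketch {
    owners: Vec<[u8; 32]>,           // unique owners, in insertion order
    offsets: HashMap<[u8; 32], u32>, // owner address -> offset into `owners`
}

impl OwnersTableSketch {
    /// Returns the offset for `owner`, inserting it on first sight, so
    /// repeated owners cost a 4-byte offset per account instead of 32 bytes.
    fn insert(&mut self, owner: &[u8; 32]) -> u32 {
        if let Some(&offset) = self.offsets.get(owner) {
            return offset;
        }
        let offset = self.owners.len() as u32;
        self.owners.push(*owner);
        self.offsets.insert(*owner, offset);
        offset
    }
}
```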
--- accounts-db/src/tiered_storage/hot.rs | 72 +++++++++++++++++------- accounts-db/src/tiered_storage/owners.rs | 14 +++++ 2 files changed, 67 insertions(+), 19 deletions(-) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index f87b1c2c25df7a..54f62294b6a559 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -13,7 +13,7 @@ use { index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, mmap_utils::{get_pod, get_slice}, - owners::{OwnerOffset, OwnersBlockFormat}, + owners::{OwnerOffset, OwnersBlockFormat, OwnersTable, OWNER_NO_OWNER}, readable::TieredReadableAccount, StorableAccounts, StorableAccountsWithHashesAndWriteVersions, TieredStorageError, TieredStorageFormat, TieredStorageResult, @@ -496,6 +496,7 @@ impl HotStorageWriter { fn write_account( &self, lamports: u64, + owner_offset: OwnerOffset, account_data: &[u8], executable: bool, rent_epoch: Option, @@ -512,6 +513,7 @@ impl HotStorageWriter { let padding_len = padding_bytes(account_data.len()); let meta = HotAccountMeta::new() .with_lamports(lamports) + .with_owner_offset(owner_offset) .with_account_data_size(account_data.len() as u64) .with_account_data_padding(padding_len) .with_flags(&flags); @@ -528,8 +530,9 @@ impl HotStorageWriter { Ok(stored_size) } - /// A work-in-progress function that will eventually implements - /// AccountsFile::appends_account() + /// Persists `accounts` into the underlying hot accounts file associated + /// with this HotStorageWriter. The first `skip` number of accounts are + /// *not* persisted. pub fn write_accounts< 'a, 'b, @@ -543,6 +546,7 @@ impl HotStorageWriter { ) -> TieredStorageResult<()> { let mut footer = new_hot_footer(); let mut index = vec![]; + let mut owners_table = OwnersTable::default(); let mut cursor = 0; // writing accounts blocks @@ -556,10 +560,11 @@ impl HotStorageWriter { // Obtain necessary fields from the account, or default fields // for a zero-lamport account in the None case. - let (lamports, data, executable, rent_epoch, account_hash) = account + let (lamports, owner, data, executable, rent_epoch, account_hash) = account .map(|acc| { ( acc.lamports(), + acc.owner(), acc.data(), acc.executable(), // only persist rent_epoch for those non-rent-exempt accounts @@ -567,9 +572,16 @@ impl HotStorageWriter { Some(*account_hash), ) }) - .unwrap_or((0, &[], false, None, None)); - - cursor += self.write_account(lamports, data, executable, rent_epoch, account_hash)?; + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None, None)); + let owner_offset = owners_table.insert(owner); + cursor += self.write_account( + lamports, + owner_offset, + data, + executable, + rent_epoch, + account_hash, + )?; index.push(index_entry); } footer.account_entry_count = (len - skip) as u32; @@ -589,11 +601,13 @@ impl HotStorageWriter { cursor += self.storage.write_pod(&0u32)?; } - // TODO: owner block will be implemented in the follow-up PRs - // expect the offset of each block aligned. + // writing owners block assert!(cursor % HOT_BLOCK_ALIGNMENT == 0); footer.owners_block_offset = cursor as u64; - footer.owner_count = 0; + footer.owner_count = owners_table.len() as u32; + footer + .owners_block_format + .write_owners_block(&self.storage, &owners_table)?; footer.write_footer_block(&self.storage)?; @@ -1238,12 +1252,17 @@ pub mod tests { /// Create a test account based on the specified seed. 
/// The created test account might have default rent_epoch /// and write_version. + /// + /// When the seed is zero, then a zero-lamport test account will be + /// created. fn create_test_account(seed: u64) -> (StoredMeta, AccountSharedData) { let data_byte = seed as u8; + let owner_byte = u8::MAX - data_byte; let account = Account { - lamports: seed + 1, + lamports: seed, data: std::iter::repeat(data_byte).take(seed as usize).collect(), - owner: Pubkey::new_unique(), + // this will allow some test account sharing the same owner. + owner: [owner_byte; 32].into(), executable: seed % 2 > 0, rent_epoch: if seed % 3 > 0 { seed @@ -1312,15 +1331,30 @@ pub mod tests { .unwrap() .unwrap(); - let (account, address, hash, _write_version) = storable_accounts.get(i); - let account = account.unwrap(); + let (account, address, account_hash, _write_version) = storable_accounts.get(i); + let (lamports, owner, data, executable, account_hash) = account + .map(|acc| { + ( + acc.lamports(), + acc.owner(), + acc.data(), + acc.executable(), + // only persist rent_epoch for those non-rent-exempt accounts + Some(*account_hash), + ) + }) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); - assert_eq!(stored_meta.lamports(), account.lamports()); - assert_eq!(stored_meta.data().len(), account.data().len()); - assert_eq!(stored_meta.data(), account.data()); - assert_eq!(stored_meta.executable(), account.executable()); + assert_eq!(stored_meta.lamports(), lamports); + assert_eq!(stored_meta.data().len(), data.len()); + assert_eq!(stored_meta.data(), data); + assert_eq!(stored_meta.executable(), executable); + assert_eq!(stored_meta.owner(), owner); assert_eq!(stored_meta.pubkey(), address); - assert_eq!(stored_meta.hash(), hash); + assert_eq!( + *stored_meta.hash(), + account_hash.unwrap_or(AccountHash(Hash::default())) + ); assert_eq!(i + 1, next); } diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index 45bfafe1645430..ebe60cc6f8ed0f 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -16,6 +16,10 @@ use { #[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)] pub struct OwnerOffset(pub u32); +lazy_static! { + pub static ref OWNER_NO_OWNER: Pubkey = Pubkey::default(); +} + /// Owner block holds a set of unique addresses of account owners, /// and an account meta has a owner_offset field for accessing /// it's owner address. @@ -97,6 +101,16 @@ impl<'a> OwnersTable<'a> { OwnerOffset(offset as u32) } + + /// Returns the number of unique owner addresses in the table. + pub fn len(&self) -> usize { + self.owners_set.len() + } + + /// Returns true if the OwnersTable is empty + pub fn is_empty(&self) -> bool { + self.len() == 0 + } } #[cfg(test)] From 93271d91b0a379a3144c68a8baed6bc6eb33af69 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Fri, 26 Jan 2024 07:58:37 -0800 Subject: [PATCH 068/401] gossip: notify state machine of duplicate proofs (#32963) * gossip: notify state machine of duplicate proofs * Add feature flag for ingesting duplicate proofs from Gossip. * Use the Epoch the shred is in instead of the root bank epoch. * Fix unittest by activating the feature. * Add a test for feature disabled case. * EpochSchedule is now not copyable, clone it explicitly. 
* pr feedback: read epoch schedule on startup, add guard for ff recache * pr feedback: bank_forks lock, -cached_slots_in_epoch, init ff * pr feedback: bank.forks_try_read() -> read() * pr feedback: fix local-cluster setup * local-cluster: do not expose gossip internals, use retry mechanism instead * local-cluster: split out case 4b into separate test and ignore * pr feedback: avoid taking lock if ff is already found * pr feedback: do not cache ff epoch * pr feedback: bank_forks lock, revert to cached_slots_in_epoch * pr feedback: move local variable into helper function * pr feedback: use let else, remove epoch 0 hack --------- Co-authored-by: Wen --- core/src/tvu.rs | 3 +- gossip/src/duplicate_shred.rs | 2 + gossip/src/duplicate_shred_handler.rs | 147 ++++++++++++++++++++++---- local-cluster/src/cluster_tests.rs | 48 ++++++--- local-cluster/tests/local_cluster.rs | 48 ++++++--- sdk/src/feature_set.rs | 5 + vote/src/vote_transaction.rs | 2 +- 7 files changed, 206 insertions(+), 49 deletions(-) diff --git a/core/src/tvu.rs b/core/src/tvu.rs index c3f94efe0dc911..d498ab405d39aa 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -228,7 +228,7 @@ impl Tvu { leader_schedule_cache.clone(), verified_vote_receiver, completed_data_sets_sender, - duplicate_slots_sender, + duplicate_slots_sender.clone(), ancestor_hashes_replay_update_receiver, dumped_slots_receiver, popular_pruned_forks_sender, @@ -337,6 +337,7 @@ impl Tvu { blockstore, leader_schedule_cache.clone(), bank_forks.clone(), + duplicate_slots_sender, ), ); diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 70e56d35e82334..85f4f4fa0cf149 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -56,6 +56,8 @@ pub enum Error { BlockstoreInsertFailed(#[from] BlockstoreError), #[error("data chunk mismatch")] DataChunkMismatch, + #[error("unable to send duplicate slot to state machine")] + DuplicateSlotSenderFailure, #[error("invalid chunk_index: {chunk_index}, num_chunks: {num_chunks}")] InvalidChunkIndex { chunk_index: u8, num_chunks: u8 }, #[error("invalid duplicate shreds")] diff --git a/gossip/src/duplicate_shred_handler.rs b/gossip/src/duplicate_shred_handler.rs index 1410e8262f027d..e7b4cd0466fe75 100644 --- a/gossip/src/duplicate_shred_handler.rs +++ b/gossip/src/duplicate_shred_handler.rs @@ -3,11 +3,13 @@ use { duplicate_shred::{self, DuplicateShred, Error}, duplicate_shred_listener::DuplicateShredHandlerTrait, }, + crossbeam_channel::Sender, log::error, solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, solana_runtime::bank_forks::BankForks, solana_sdk::{ clock::{Epoch, Slot}, + feature_set, pubkey::Pubkey, }, std::{ @@ -44,6 +46,8 @@ pub struct DuplicateShredHandler { cached_on_epoch: Epoch, cached_staked_nodes: Arc>, cached_slots_in_epoch: u64, + // Used to notify duplicate consensus state machine + duplicate_slots_sender: Sender, } impl DuplicateShredHandlerTrait for DuplicateShredHandler { @@ -63,6 +67,7 @@ impl DuplicateShredHandler { blockstore: Arc, leader_schedule_cache: Arc, bank_forks: Arc>, + duplicate_slots_sender: Sender, ) -> Self { Self { buffer: HashMap::<(Slot, Pubkey), BufferEntry>::default(), @@ -74,6 +79,7 @@ impl DuplicateShredHandler { blockstore, leader_schedule_cache, bank_forks, + duplicate_slots_sender, } } @@ -131,12 +137,30 @@ impl DuplicateShredHandler { shred1.into_payload(), shred2.into_payload(), )?; + if self.should_notify_state_machine(slot) { + // Notify duplicate consensus state machine + 
self.duplicate_slots_sender + .send(slot) + .map_err(|_| Error::DuplicateSlotSenderFailure)?; + } } self.consumed.insert(slot, true); } Ok(()) } + fn should_notify_state_machine(&self, slot: Slot) -> bool { + let root_bank = self.bank_forks.read().unwrap().root_bank(); + let Some(activated_slot) = root_bank + .feature_set + .activated_slot(&feature_set::enable_gossip_duplicate_proof_ingestion::id()) + else { + return false; + }; + root_bank.epoch_schedule().get_epoch(slot) + > root_bank.epoch_schedule().get_epoch(activated_slot) + } + fn should_consume_slot(&mut self, slot: Slot) -> bool { slot > self.last_root && slot < self.last_root.saturating_add(self.cached_slots_in_epoch) @@ -211,12 +235,14 @@ mod tests { cluster_info::DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, duplicate_shred::{from_shred, tests::new_rand_shred}, }, + crossbeam_channel::unbounded, + itertools::Itertools, solana_ledger::{ genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, get_tmp_ledger_path_auto_delete, shred::Shredder, }, - solana_runtime::bank::Bank, + solana_runtime::{accounts_background_service::AbsRequestSender, bank::Bank}, solana_sdk::{ signature::{Keypair, Signer}, timing::timestamp, @@ -271,16 +297,34 @@ mod tests { let my_pubkey = my_keypair.pubkey(); let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; - let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.activate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id()); + let slots_in_epoch = bank.get_epoch_info().slots_in_epoch; + let bank_forks_arc = BankForks::new_rw_arc(bank); + { + let mut bank_forks = bank_forks_arc.write().unwrap(); + let bank0 = bank_forks.get(0).unwrap(); + bank_forks.insert(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 9)); + bank_forks.set_root(9, &AbsRequestSender::default(), None); + } + blockstore.set_roots([0, 9].iter()).unwrap(); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank( - &bank_forks.read().unwrap().working_bank(), + &bank_forks_arc.read().unwrap().working_bank(), )); - let mut duplicate_shred_handler = - DuplicateShredHandler::new(blockstore.clone(), leader_schedule_cache, bank_forks); + let (sender, receiver) = unbounded(); + // The feature will only be activated at Epoch 1. + let start_slot: Slot = slots_in_epoch + 1; + + let mut duplicate_shred_handler = DuplicateShredHandler::new( + blockstore.clone(), + leader_schedule_cache, + bank_forks_arc, + sender, + ); let chunks = create_duplicate_proof( my_keypair.clone(), None, - 1, + start_slot, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, ) @@ -288,20 +332,24 @@ mod tests { let chunks1 = create_duplicate_proof( my_keypair.clone(), None, - 2, + start_slot + 1, None, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, ) .unwrap(); - assert!(!blockstore.has_duplicate_shreds_in_slot(1)); - assert!(!blockstore.has_duplicate_shreds_in_slot(2)); + assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot)); + assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot + 1)); // Test that two proofs are mixed together, but we can store the proofs fine. 
for (chunk1, chunk2) in chunks.zip(chunks1) { duplicate_shred_handler.handle(chunk1); duplicate_shred_handler.handle(chunk2); } - assert!(blockstore.has_duplicate_shreds_in_slot(1)); - assert!(blockstore.has_duplicate_shreds_in_slot(2)); + assert!(blockstore.has_duplicate_shreds_in_slot(start_slot)); + assert!(blockstore.has_duplicate_shreds_in_slot(start_slot + 1)); + assert_eq!( + receiver.try_iter().collect_vec(), + vec![start_slot, start_slot + 1] + ); // Test all kinds of bad proofs. for error in [ @@ -312,7 +360,7 @@ mod tests { match create_duplicate_proof( my_keypair.clone(), None, - 3, + start_slot + 2, Some(error), DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, ) { @@ -321,7 +369,8 @@ mod tests { for chunk in chunks { duplicate_shred_handler.handle(chunk); } - assert!(!blockstore.has_duplicate_shreds_in_slot(3)); + assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot + 2)); + assert!(receiver.is_empty()); } } } @@ -337,13 +386,29 @@ mod tests { let my_pubkey = my_keypair.pubkey(); let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; - let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.activate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id()); + let slots_in_epoch = bank.get_epoch_info().slots_in_epoch; + let bank_forks_arc = BankForks::new_rw_arc(bank); + { + let mut bank_forks = bank_forks_arc.write().unwrap(); + let bank0 = bank_forks.get(0).unwrap(); + bank_forks.insert(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 9)); + bank_forks.set_root(9, &AbsRequestSender::default(), None); + } + blockstore.set_roots([0, 9].iter()).unwrap(); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank( - &bank_forks.read().unwrap().working_bank(), + &bank_forks_arc.read().unwrap().working_bank(), )); - let mut duplicate_shred_handler = - DuplicateShredHandler::new(blockstore.clone(), leader_schedule_cache, bank_forks); - let start_slot: Slot = 1; + let (sender, receiver) = unbounded(); + let mut duplicate_shred_handler = DuplicateShredHandler::new( + blockstore.clone(), + leader_schedule_cache, + bank_forks_arc, + sender, + ); + // The feature will only be activated at Epoch 1. + let start_slot: Slot = slots_in_epoch + 1; // This proof will not be accepted because num_chunks is too large. let chunks = create_duplicate_proof( @@ -358,6 +423,7 @@ mod tests { duplicate_shred_handler.handle(chunk); } assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot)); + assert!(receiver.is_empty()); // This proof will be rejected because the slot is too far away in the future. let future_slot = @@ -374,6 +440,7 @@ mod tests { duplicate_shred_handler.handle(chunk); } assert!(!blockstore.has_duplicate_shreds_in_slot(future_slot)); + assert!(receiver.is_empty()); // Send in two proofs, the first proof showing up will be accepted, the following // proofs will be discarded. @@ -388,10 +455,54 @@ mod tests { // handle chunk 0 of the first proof. duplicate_shred_handler.handle(chunks.next().unwrap()); assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot)); + assert!(receiver.is_empty()); // Now send in the rest of the first proof, it will succeed. 
for chunk in chunks { duplicate_shred_handler.handle(chunk); } assert!(blockstore.has_duplicate_shreds_in_slot(start_slot)); + assert_eq!(receiver.try_iter().collect_vec(), vec![start_slot]); + } + + #[test] + fn test_feature_disabled() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + let my_keypair = Arc::new(Keypair::new()); + let my_pubkey = my_keypair.pubkey(); + let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); + let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; + let mut bank = Bank::new_for_tests(&genesis_config); + bank.deactivate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id()); + assert!(!bank + .feature_set + .is_active(&feature_set::enable_gossip_duplicate_proof_ingestion::id())); + let bank_forks_arc = BankForks::new_rw_arc(bank); + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank( + &bank_forks_arc.read().unwrap().working_bank(), + )); + let (sender, receiver) = unbounded(); + + let mut duplicate_shred_handler = DuplicateShredHandler::new( + blockstore.clone(), + leader_schedule_cache, + bank_forks_arc, + sender, + ); + let chunks = create_duplicate_proof( + my_keypair.clone(), + None, + 1, + None, + DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, + ) + .unwrap(); + assert!(!blockstore.has_duplicate_shreds_in_slot(1)); + for chunk in chunks { + duplicate_shred_handler.handle(chunk); + } + // If feature disabled, blockstore gets signal but state machine doesn't see it. + assert!(blockstore.has_duplicate_shreds_in_slot(1)); + assert!(receiver.try_iter().collect_vec().is_empty()); } } diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index b410585396f8f0..90337bb272460f 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -41,7 +41,7 @@ use { solana_vote_program::vote_transaction, std::{ borrow::Borrow, - collections::{HashMap, HashSet}, + collections::{HashMap, HashSet, VecDeque}, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, path::Path, sync::{ @@ -489,6 +489,9 @@ pub fn start_gossip_voter( + std::marker::Send + 'static, sleep_ms: u64, + num_expected_peers: usize, + refresh_ms: u64, + max_votes_to_refresh: usize, ) -> GossipVoter { let exit = Arc::new(AtomicBool::new(false)); let (gossip_service, tcp_listener, cluster_info) = gossip_service::make_gossip_node( @@ -503,6 +506,15 @@ pub fn start_gossip_voter( SocketAddrSpace::Unspecified, ); + // Wait for peer discovery + while cluster_info.gossip_peers().len() < num_expected_peers { + sleep(Duration::from_millis(sleep_ms)); + } + + let mut latest_voted_slot = 0; + let mut refreshable_votes: VecDeque<(Transaction, VoteTransaction)> = VecDeque::new(); + let mut latest_push_attempt = Instant::now(); + let t_voter = { let exit = exit.clone(); let cluster_info = cluster_info.clone(); @@ -514,6 +526,18 @@ pub fn start_gossip_voter( } let (labels, votes) = cluster_info.get_votes_with_labels(&mut cursor); + if labels.is_empty() { + if latest_push_attempt.elapsed() > Duration::from_millis(refresh_ms) { + for (leader_vote_tx, parsed_vote) in refreshable_votes.iter().rev() { + let vote_slot = parsed_vote.last_voted_slot().unwrap(); + info!("gossip voter refreshing vote {}", vote_slot); + process_vote_tx(vote_slot, leader_vote_tx, parsed_vote, &cluster_info); + latest_push_attempt = Instant::now(); + } + } + sleep(Duration::from_millis(sleep_ms)); + continue; + } let mut parsed_vote_iter: 
Vec<_> = labels .into_iter() .zip(votes) @@ -527,22 +551,20 @@ pub fn start_gossip_voter( }); for (parsed_vote, leader_vote_tx) in &parsed_vote_iter { - if let Some(latest_vote_slot) = parsed_vote.last_voted_slot() { - info!("received vote for {}", latest_vote_slot); - process_vote_tx( - latest_vote_slot, - leader_vote_tx, - parsed_vote, - &cluster_info, - ) + if let Some(vote_slot) = parsed_vote.last_voted_slot() { + info!("received vote for {}", vote_slot); + if vote_slot > latest_voted_slot { + latest_voted_slot = vote_slot; + refreshable_votes + .push_front((leader_vote_tx.clone(), parsed_vote.clone())); + refreshable_votes.truncate(max_votes_to_refresh); + } + process_vote_tx(vote_slot, leader_vote_tx, parsed_vote, &cluster_info); + latest_push_attempt = Instant::now(); } // Give vote some time to propagate sleep(Duration::from_millis(sleep_ms)); } - - if parsed_vote_iter.is_empty() { - sleep(Duration::from_millis(sleep_ms)); - } } }) }; diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index b79a1c4e309f26..752160e5ada970 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2745,6 +2745,9 @@ fn test_oc_bad_signatures() { } }, voter_thread_sleep_ms as u64, + cluster.validators.len().saturating_sub(1), + 0, + 0, ); let (mut block_subscribe_client, receiver) = PubsubClient::block_subscribe( @@ -3745,6 +3748,18 @@ fn test_kill_partition_switch_threshold_progress() { #[serial] #[allow(unused_attributes)] fn test_duplicate_shreds_broadcast_leader() { + run_duplicate_shreds_broadcast_leader(true); +} +#[test] +#[serial] +#[ignore] +#[allow(unused_attributes)] +fn test_duplicate_shreds_broadcast_leader_ancestor_hashes() { + run_duplicate_shreds_broadcast_leader(false); +} + +fn run_duplicate_shreds_broadcast_leader(vote_on_duplicate: bool) { + solana_logger::setup_with_default(RUST_LOG_FILTER); // Create 4 nodes: // 1) Bad leader sending different versions of shreds to both of the other nodes // 2) 1 node who's voting behavior in gossip @@ -3795,11 +3810,13 @@ fn test_duplicate_shreds_broadcast_leader() { // for the partition. assert!(partition_node_stake < our_node_stake && partition_node_stake < good_node_stake); + let (duplicate_slot_sender, duplicate_slot_receiver) = unbounded(); + // 1) Set up the cluster let (mut cluster, validator_keys) = test_faulty_node( BroadcastStageType::BroadcastDuplicates(BroadcastDuplicatesConfig { partition: ClusterPartition::Stake(partition_node_stake), - duplicate_slot_sender: None, + duplicate_slot_sender: Some(duplicate_slot_sender), }), node_stakes, None, @@ -3841,27 +3858,23 @@ fn test_duplicate_shreds_broadcast_leader() { { let node_keypair = node_keypair.insecure_clone(); let vote_keypair = vote_keypair.insecure_clone(); - let mut max_vote_slot = 0; let mut gossip_vote_index = 0; + let mut duplicate_slots = vec![]; move |latest_vote_slot, leader_vote_tx, parsed_vote, cluster_info| { info!("received vote for {}", latest_vote_slot); // Add to EpochSlots. Mark all slots frozen between slot..=max_vote_slot. 
- if latest_vote_slot > max_vote_slot { - let new_epoch_slots: Vec = - (max_vote_slot + 1..latest_vote_slot + 1).collect(); - info!( - "Simulating epoch slots from our node: {:?}", - new_epoch_slots - ); - cluster_info.push_epoch_slots(&new_epoch_slots); - max_vote_slot = latest_vote_slot; - } + let new_epoch_slots: Vec = (0..latest_vote_slot + 1).collect(); + info!( + "Simulating epoch slots from our node: {:?}", + new_epoch_slots + ); + cluster_info.push_epoch_slots(&new_epoch_slots); - // Only vote on even slots. Note this may violate lockouts if the - // validator started voting on a different fork before we could exit - // it above. + for slot in duplicate_slot_receiver.try_iter() { + duplicate_slots.push(slot); + } let vote_hash = parsed_vote.hash(); - if latest_vote_slot % 2 == 0 { + if vote_on_duplicate || !duplicate_slots.contains(&latest_vote_slot) { info!( "Simulating vote from our node on slot {}, hash {}", latest_vote_slot, vote_hash @@ -3899,6 +3912,9 @@ fn test_duplicate_shreds_broadcast_leader() { } }, voter_thread_sleep_ms as u64, + cluster.validators.len().saturating_sub(1), + 5000, // Refresh if 5 seconds of inactivity + 5, // Refresh the past 5 votes ); // 4) Check that the cluster is making progress diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 2941c94ae81cb3..25196462e5bd94 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -772,6 +772,10 @@ pub mod cost_model_requested_write_lock_cost { solana_sdk::declare_id!("wLckV1a64ngtcKPRGU4S4grVTestXjmNjxBjaKZrAcn"); } +pub mod enable_gossip_duplicate_proof_ingestion { + solana_sdk::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -960,6 +964,7 @@ lazy_static! { (enable_zk_proof_from_account::id(), "Enable zk token proof program to read proof from accounts instead of instruction data #34750"), (curve25519_restrict_msm_length::id(), "restrict curve25519 multiscalar multiplication vector lengths #34763"), (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), + (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/vote/src/vote_transaction.rs b/vote/src/vote_transaction.rs index 7c52801f25dc56..fed2d730a0a177 100644 --- a/vote/src/vote_transaction.rs +++ b/vote/src/vote_transaction.rs @@ -6,7 +6,7 @@ use { solana_vote_program::vote_state::{Vote, VoteStateUpdate}, }; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum VoteTransaction { Vote(Vote), VoteStateUpdate(VoteStateUpdate), From 7138f8767e595067eb657c10395051cb7cfc85ec Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 26 Jan 2024 09:13:09 -0800 Subject: [PATCH 069/401] [TieredStorage] Avoid AccountHash copy in AccountMetaOptionalFields (#34969) #### Problem Using non-reference type of AccountHash in AccountMetaOptionalFields causes an unnecessary copy as mentioned in #34948. #### Summary of Changes Uses &AccountHash in AccountMetaOptionalFields to avoid copying. #### Test Plan Existing unit tests. 
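The shape of the fix, sketched with stand-in types (`[u8; 32]` stands in for the real `AccountHash` wrapper): holding `Option<&AccountHash>` borrows the caller's 32-byte hash instead of copying it into the optional fields.

```rust
struct AccountHashSketch([u8; 32]);

struct OptionalFieldsSketch<'a> {
    /// Borrowed, so constructing the optional fields never copies the
    /// 32-byte hash; it is read through the reference when written out.
    account_hash: Option<&'a AccountHashSketch>,
}

fn build(hash: &AccountHashSketch) -> OptionalFieldsSketch<'_> {
    OptionalFieldsSketch { account_hash: Some(hash) } // no 32-byte copy
}
```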
Fixes #34948 --- accounts-db/src/tiered_storage/byte_block.rs | 7 ++++--- accounts-db/src/tiered_storage/hot.rs | 18 ++++++++++-------- accounts-db/src/tiered_storage/meta.rs | 17 ++++++++++------- 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 869036251d9b21..1cd80add0c2307 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -96,7 +96,7 @@ impl ByteBlockWriter { size += self.write_pod(&rent_epoch)?; } if let Some(hash) = opt_fields.account_hash { - size += self.write_pod(&hash)?; + size += self.write_pod(hash)?; } debug_assert_eq!(size, opt_fields.size()); @@ -352,11 +352,12 @@ mod tests { let mut writer = ByteBlockWriter::new(format); let mut opt_fields_vec = vec![]; let mut some_count = 0; + let acc_hash = AccountHash(Hash::new_unique()); // prepare a vector of optional fields that contains all combinations // of Some and None. for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { + for account_hash in [None, Some(&acc_hash)] { some_count += rent_epoch.iter().count() + account_hash.iter().count(); opt_fields_vec.push(AccountMetaOptionalFields { @@ -397,7 +398,7 @@ mod tests { } if let Some(expected_hash) = opt_fields.account_hash { let hash = read_pod::(&decoded_buffer, offset).unwrap(); - assert_eq!(hash, &expected_hash); + assert_eq!(hash, expected_hash); verified_count += 1; offset += std::mem::size_of::(); } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 54f62294b6a559..4ef3dca1de4578 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -469,7 +469,7 @@ fn write_optional_fields( size += file.write_pod(&rent_epoch)?; } if let Some(hash) = opt_fields.account_hash { - size += file.write_pod(&hash)?; + size += file.write_pod(hash)?; } debug_assert_eq!(size, opt_fields.size()); @@ -500,7 +500,7 @@ impl HotStorageWriter { account_data: &[u8], executable: bool, rent_epoch: Option, - account_hash: Option, + account_hash: Option<&AccountHash>, ) -> TieredStorageResult { let optional_fields = AccountMetaOptionalFields { rent_epoch, @@ -567,9 +567,9 @@ impl HotStorageWriter { acc.owner(), acc.data(), acc.executable(), - // only persist rent_epoch for those non-rent-exempt accounts + // only persist rent_epoch for those rent-paying accounts (acc.rent_epoch() != RENT_EXEMPT_RENT_EPOCH).then_some(acc.rent_epoch()), - Some(*account_hash), + Some(account_hash), ) }) .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None, None)); @@ -722,10 +722,11 @@ pub mod tests { const TEST_PADDING: u8 = 5; const TEST_OWNER_OFFSET: OwnerOffset = OwnerOffset(0x1fef_1234); const TEST_RENT_EPOCH: Epoch = 7; + let acc_hash = AccountHash(Hash::new_unique()); let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), - account_hash: Some(AccountHash(Hash::new_unique())), + account_hash: Some(&acc_hash), }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -745,6 +746,7 @@ pub mod tests { fn test_hot_account_meta_full() { let account_data = [11u8; 83]; let padding = [0u8; 5]; + let acc_hash = AccountHash(Hash::new_unique()); const TEST_LAMPORT: u64 = 2314232137; const OWNER_OFFSET: u32 = 0x1fef_1234; @@ -752,7 +754,7 @@ pub mod tests { let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), - account_hash: 
Some(AccountHash(Hash::new_unique())), + account_hash: Some(&acc_hash), }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -789,7 +791,7 @@ pub mod tests { assert_eq!(account_data, meta.account_data(account_block)); assert_eq!(meta.rent_epoch(account_block), optional_fields.rent_epoch); assert_eq!( - *(meta.account_hash(account_block).unwrap()), + (meta.account_hash(account_block).unwrap()), optional_fields.account_hash.unwrap() ); } @@ -1339,7 +1341,7 @@ pub mod tests { acc.owner(), acc.data(), acc.executable(), - // only persist rent_epoch for those non-rent-exempt accounts + // only persist rent_epoch for those rent-paying accounts Some(*account_hash), ) }) diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 947011b79651d3..4e2bb0d95041ca 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -102,14 +102,14 @@ impl AccountMetaFlags { /// Note that the storage representation of the optional fields might be /// different from its in-memory representation. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct AccountMetaOptionalFields { +pub struct AccountMetaOptionalFields<'a> { /// the epoch at which its associated account will next owe rent pub rent_epoch: Option, /// the hash of its associated account - pub account_hash: Option, + pub account_hash: Option<&'a AccountHash>, } -impl AccountMetaOptionalFields { +impl<'a> AccountMetaOptionalFields<'a> { /// The size of the optional fields in bytes (excluding the boolean flags). pub fn size(&self) -> usize { self.rent_epoch.map_or(0, |_| std::mem::size_of::()) @@ -210,9 +210,10 @@ pub mod tests { #[test] fn test_optional_fields_update_flags() { let test_epoch = 5432312; + let acc_hash = AccountHash(Hash::new_unique()); for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { + for account_hash in [None, Some(&acc_hash)] { update_and_verify_flags(&AccountMetaOptionalFields { rent_epoch, account_hash, @@ -224,9 +225,10 @@ pub mod tests { #[test] fn test_optional_fields_size() { let test_epoch = 5432312; + let acc_hash = AccountHash(Hash::new_unique()); for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { + for account_hash in [None, Some(&acc_hash)] { let opt_fields = AccountMetaOptionalFields { rent_epoch, account_hash, @@ -249,16 +251,17 @@ pub mod tests { #[test] fn test_optional_fields_offset() { let test_epoch = 5432312; + let acc_hash = AccountHash(Hash::new_unique()); for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { + for account_hash in [None, Some(&acc_hash)] { let rent_epoch_offset = 0; let account_hash_offset = rent_epoch_offset + rent_epoch.as_ref().map(std::mem::size_of_val).unwrap_or(0); let derived_size = account_hash_offset + account_hash .as_ref() - .map(std::mem::size_of_val) + .map(|acc_hash| std::mem::size_of_val(*acc_hash)) .unwrap_or(0); let opt_fields = AccountMetaOptionalFields { rent_epoch, From 3380c1404474c4805639a9dad502232f6efd66ca Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 26 Jan 2024 09:40:05 -0800 Subject: [PATCH 070/401] [TieredStorage] Correct the HotStorage API for account_matches_owners (#34967) #### Problem In HotStorageReader, the account_matches_owners takes &[&Pubkey] as the address candidates. 
However, it should be &[Pubkey] as defined in the accounts_file API. #### Summary of Changes Correct HotStorageReader::account_matches_owners() to take &[Pubkey] instead. #### Test Plan Existing unit-tests --- accounts-db/src/tiered_storage/hot.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 4ef3dca1de4578..730ace5aa310ed 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -362,7 +362,7 @@ impl HotStorageReader { pub fn account_matches_owners( &self, account_offset: HotAccountOffset, - owners: &[&Pubkey], + owners: &[Pubkey], ) -> Result { let account_meta = self .get_account_meta_from_offset(account_offset) @@ -377,7 +377,7 @@ impl HotStorageReader { owners .iter() - .position(|candidate| &account_owner == candidate) + .position(|candidate| account_owner == candidate) .ok_or(MatchAccountOwnerError::NoMatch) } } @@ -1081,7 +1081,7 @@ pub mod tests { let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); // First, verify whether we can find the expected owners. - let mut owner_candidates: Vec<_> = owner_addresses.iter().collect(); + let mut owner_candidates = owner_addresses.clone(); owner_candidates.shuffle(&mut rng); for (account_offset, account_meta) in account_offsets.iter().zip(hot_account_metas.iter()) { @@ -1090,16 +1090,15 @@ pub mod tests { .unwrap(); assert_eq!( owner_candidates[index], - &owner_addresses[account_meta.owner_offset().0 as usize] + owner_addresses[account_meta.owner_offset().0 as usize] ); } // Second, verify the MatchAccountOwnerError::NoMatch case const NUM_UNMATCHED_OWNERS: usize = 20; - let unmatched_owners: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + let unmatched_candidates: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) .take(NUM_UNMATCHED_OWNERS) .collect(); - let unmatched_candidates: Vec<_> = unmatched_owners.iter().collect(); for account_offset in account_offsets.iter() { assert_eq!( @@ -1119,7 +1118,7 @@ pub mod tests { .unwrap(); assert_eq!( owner_candidates[index], - &owner_addresses[account_meta.owner_offset().0 as usize] + owner_addresses[account_meta.owner_offset().0 as usize] ); } } From 083890928f3558b616850f863e46253c77db9b40 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Fri, 26 Jan 2024 12:28:44 -0600 Subject: [PATCH 071/401] refactor unused parameter (#34970) --- sdk/src/fee.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index bd3af75e70da18..de77ac11436595 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -92,13 +92,12 @@ impl FeeStructure { pub fn calculate_fee( &self, message: &SanitizedMessage, - lamports_per_signature: u64, + _unused: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, ) -> u64 { self.calculate_fee_details( message, - lamports_per_signature, budget_limits, include_loaded_account_data_size_in_fee, ) @@ -110,7 +109,6 @@ impl FeeStructure { pub fn calculate_fee_details( &self, message: &SanitizedMessage, - _lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, ) -> FeeDetails { From c656ca68b8a72ae97502fd5ba1849682985e3b26 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 26 Jan 2024 15:25:23 -0500 Subject: [PATCH 072/401] Stops pushing accounts hashes to gossip in AccountsHashVerifier (#34971) --- core/src/accounts_hash_verifier.rs | 124 
++--------------------------- 1 file changed, 6 insertions(+), 118 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index d34bf9837887da..d2ed64b1a4dd03 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -1,7 +1,4 @@ -// Service to verify accounts hashes with other known validator nodes. -// -// Each interval, publish the snapshot hash which is the full accounts state -// hash on gossip. +//! Service to calculate accounts hashes use { crossbeam_channel::{Receiver, Sender}, @@ -13,14 +10,13 @@ use { }, sorted_storages::SortedStorages, }, - solana_gossip::cluster_info::{ClusterInfo, MAX_ACCOUNTS_HASHES}, + solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_runtime::{ serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_config::SnapshotConfig, snapshot_package::{ - self, retain_max_n_elements, AccountsPackage, AccountsPackageKind, SnapshotKind, - SnapshotPackage, + self, AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage, }, snapshot_utils, }, @@ -50,8 +46,8 @@ impl AccountsHashVerifier { accounts_package_receiver: Receiver, snapshot_package_sender: Option>, exit: Arc, - cluster_info: Arc, - accounts_hash_fault_injector: Option, + _cluster_info: Arc, + _accounts_hash_fault_injector: Option, snapshot_config: SnapshotConfig, ) -> Self { // If there are no accounts packages to process, limit how often we re-check @@ -60,7 +56,6 @@ impl AccountsHashVerifier { .name("solAcctHashVer".to_string()) .spawn(move || { info!("AccountsHashVerifier has started"); - let mut hashes = vec![]; // To support fastboot, we must ensure the storages used in the latest POST snapshot are // not recycled nor removed early. Hold an Arc of their AppendVecs to prevent them from // expiring. 
@@ -95,11 +90,8 @@ impl AccountsHashVerifier { let slot = accounts_package.slot; let (_, handling_time_us) = measure_us!(Self::process_accounts_package( accounts_package, - &cluster_info, snapshot_package_sender.as_ref(), - &mut hashes, &snapshot_config, - accounts_hash_fault_injector, &exit, )); @@ -256,11 +248,8 @@ impl AccountsHashVerifier { #[allow(clippy::too_many_arguments)] fn process_accounts_package( accounts_package: AccountsPackage, - cluster_info: &ClusterInfo, snapshot_package_sender: Option<&Sender>, - hashes: &mut Vec<(Slot, Hash)>, snapshot_config: &SnapshotConfig, - accounts_hash_fault_injector: Option, exit: &AtomicBool, ) { let accounts_hash = @@ -268,14 +257,6 @@ impl AccountsHashVerifier { Self::save_epoch_accounts_hash(&accounts_package, accounts_hash); - Self::push_accounts_hashes_to_cluster( - &accounts_package, - cluster_info, - hashes, - accounts_hash, - accounts_hash_fault_injector, - ); - Self::submit_for_packaging( accounts_package, snapshot_package_sender, @@ -535,23 +516,6 @@ impl AccountsHashVerifier { } } - fn push_accounts_hashes_to_cluster( - accounts_package: &AccountsPackage, - cluster_info: &ClusterInfo, - hashes: &mut Vec<(Slot, Hash)>, - accounts_hash: AccountsHashKind, - accounts_hash_fault_injector: Option, - ) { - let hash = accounts_hash_fault_injector - .and_then(|f| f(accounts_hash.as_hash(), accounts_package.slot)) - .or(Some(*accounts_hash.as_hash())); - hashes.push((accounts_package.slot, hash.unwrap())); - - retain_max_n_elements(hashes, MAX_ACCOUNTS_HASHES); - - cluster_info.push_accounts_hashes(hashes.clone()); - } - fn submit_for_packaging( accounts_package: AccountsPackage, snapshot_package_sender: Option<&Sender>, @@ -590,83 +554,7 @@ impl AccountsHashVerifier { #[cfg(test)] mod tests { - use { - super::*, - rand::seq::SliceRandom, - solana_gossip::contact_info::ContactInfo, - solana_runtime::{ - snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_package::SnapshotKind, - }, - solana_sdk::{ - signature::{Keypair, Signer}, - timing::timestamp, - }, - solana_streamer::socket::SocketAddrSpace, - std::str::FromStr, - }; - - fn new_test_cluster_info() -> ClusterInfo { - let keypair = Arc::new(Keypair::new()); - let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp()); - ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified) - } - - #[test] - fn test_max_hashes() { - solana_logger::setup(); - let cluster_info = new_test_cluster_info(); - let cluster_info = Arc::new(cluster_info); - let exit = AtomicBool::new(false); - - let mut hashes = vec![]; - let full_snapshot_archive_interval_slots = 100; - let snapshot_config = SnapshotConfig { - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots: DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - ..SnapshotConfig::default() - }; - let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap(); - for i in 0..MAX_ACCOUNTS_HASHES + 1 { - let slot = full_snapshot_archive_interval_slots + i as u64; - let accounts_package = AccountsPackage { - slot, - block_height: slot, - ..AccountsPackage::default_for_tests() - }; - - AccountsHashVerifier::process_accounts_package( - accounts_package, - &cluster_info, - None, - &mut hashes, - &snapshot_config, - None, - &exit, - ); - - // sleep for 1ms to create a newer timestamp for gossip entry - // otherwise the timestamp won't be newer. 
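With the gossip push gone, the loop above reduces to: receive a package, compute its hash, and hand it to the snapshot packager. A rough stand-alone sketch of that flow, using std channels and stub package types in place of the real crossbeam channels and AccountsPackage/SnapshotPackage:

use std::sync::mpsc::{channel, Sender};

// Stub types; the real ones live in solana_runtime::snapshot_package.
struct AccountsPackage { slot: u64 }
struct SnapshotPackage { slot: u64 }

// Post-patch shape: no cluster_info, no fault injector, no hashes Vec.
fn process_accounts_package(pkg: AccountsPackage, sender: Option<&Sender<SnapshotPackage>>) {
    // (accounts-hash calculation and epoch-accounts-hash saving elided)
    if let Some(sender) = sender {
        let _ = sender.send(SnapshotPackage { slot: pkg.slot });
    }
}

fn main() {
    let (snap_tx, snap_rx) = channel();
    process_accounts_package(AccountsPackage { slot: 42 }, Some(&snap_tx));
    assert_eq!(snap_rx.recv().unwrap().slot, 42);
}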
- std::thread::sleep(Duration::from_millis(1)); - } - cluster_info.flush_push_queue(); - let cluster_hashes = cluster_info - .get_accounts_hash_for_node(&cluster_info.id(), |c| c.clone()) - .unwrap(); - info!("{:?}", cluster_hashes); - assert_eq!(hashes.len(), MAX_ACCOUNTS_HASHES); - assert_eq!(cluster_hashes.len(), MAX_ACCOUNTS_HASHES); - assert_eq!( - cluster_hashes[0], - (full_snapshot_archive_interval_slots + 1, expected_hash) - ); - assert_eq!( - cluster_hashes[MAX_ACCOUNTS_HASHES - 1], - ( - full_snapshot_archive_interval_slots + MAX_ACCOUNTS_HASHES as u64, - expected_hash - ) - ); - } + use {super::*, rand::seq::SliceRandom, solana_runtime::snapshot_package::SnapshotKind}; fn new(package_kind: AccountsPackageKind, slot: Slot) -> AccountsPackage { AccountsPackage { From 8a0c91d84258a991f842b0ed2a5d896a80b8dae9 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 26 Jan 2024 15:24:54 -0600 Subject: [PATCH 073/401] Skip creating reward partition account for `--partitioned-epoch-rewards-force-enable-single-slot` (#34946) * skip creating reward partition account when we are testing agains mainnet * Update runtime/src/bank.rs Co-authored-by: Brooks * Update runtime/src/bank.rs Co-authored-by: Brooks * refactor bool logic for partition reward single slot enable * Update runtime/src/bank.rs Co-authored-by: Tyera * move force_partition_rewards check into data account create fn * share code --------- Co-authored-by: HaoranYi Co-authored-by: Brooks Co-authored-by: Tyera --- runtime/src/bank.rs | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 03971724438dc9..bfab5a7c2c89f7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1533,11 +1533,7 @@ impl Bank { // After saving a snapshot of stakes, apply stake rewards and commission let (_, update_rewards_with_thread_pool_time) = measure!( { - if self.is_partitioned_rewards_feature_enabled() - || self - .partitioned_epoch_rewards_config() - .test_enable_partitioned_rewards - { + if self.is_partitioned_rewards_code_enabled() { self.begin_partitioned_rewards( reward_calc_tracer, &thread_pool, @@ -1595,6 +1591,13 @@ impl Bank { } } + fn force_partition_rewards_in_first_block_of_epoch(&self) -> bool { + self.partitioned_epoch_rewards_config() + .test_enable_partitioned_rewards + && self.get_reward_calculation_num_blocks() == 0 + && self.partitioned_rewards_stake_account_stores_per_block() == u64::MAX + } + /// Begin the process of calculating and distributing rewards. /// This process can take multiple slots. 
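The two helpers introduced in this patch compose into a simple pair of predicates. A self-contained sketch with a stand-in config struct replacing the bank and partitioned_epoch_rewards_config() lookups:

// Local stand-ins for the real bank/config accessors.
struct RewardsConfig {
    feature_enabled: bool,                // is_partitioned_rewards_feature_enabled()
    test_enable_partitioned_rewards: bool,
    reward_calculation_num_blocks: u64,   // get_reward_calculation_num_blocks()
    stake_account_stores_per_block: u64,
}

impl RewardsConfig {
    // Partitioned-rewards code runs when the feature is live OR the test
    // override is set -- the condition the patch folds into one helper.
    fn is_partitioned_rewards_code_enabled(&self) -> bool {
        self.feature_enabled || self.test_enable_partitioned_rewards
    }

    // The "force single slot" test mode that must skip creating the
    // epoch-rewards partition data account.
    fn force_partition_rewards_in_first_block_of_epoch(&self) -> bool {
        self.test_enable_partitioned_rewards
            && self.reward_calculation_num_blocks == 0
            && self.stake_account_stores_per_block == u64::MAX
    }
}

fn main() {
    let cfg = RewardsConfig {
        feature_enabled: false,
        test_enable_partitioned_rewards: true,
        reward_calculation_num_blocks: 0,
        stake_account_stores_per_block: u64::MAX,
    };
    assert!(cfg.is_partitioned_rewards_code_enabled());
    // In this mode the partition data account store is skipped.
    assert!(cfg.force_partition_rewards_in_first_block_of_epoch());
}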
fn begin_partitioned_rewards( @@ -3622,7 +3625,18 @@ impl Bank { &solana_sdk::sysvar::id(), ) .unwrap(); - self.store_account_and_update_capitalization(&address, &new_account); + + info!( + "create epoch rewards partition data account {} {address} \ + {epoch_rewards_partition_data:?}", + self.slot + ); + + // Skip storing data account when we are testing partitioned + // rewards but feature is not yet active + if !self.force_partition_rewards_in_first_block_of_epoch() { + self.store_account_and_update_capitalization(&address, &new_account); + } } fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { @@ -6993,12 +7007,8 @@ impl Bank { fn hash_internal_state(&self) -> Hash { let slot = self.slot(); let ignore = (!self.is_partitioned_rewards_feature_enabled() - && (self - .partitioned_epoch_rewards_config() - .test_enable_partitioned_rewards - && self.get_reward_calculation_num_blocks() == 0 - && self.partitioned_rewards_stake_account_stores_per_block() == u64::MAX)) - .then_some(sysvar::epoch_rewards::id()); + && self.force_partition_rewards_in_first_block_of_epoch()) + .then_some(sysvar::epoch_rewards::id()); let accounts_delta_hash = self .rc .accounts From e38848e519e9cf0816a53b64046b58b2a8b2e119 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 26 Jan 2024 15:41:09 -0600 Subject: [PATCH 074/401] Remove unused get_append_vec_id function (#34949) remove unused get_append_vec_id fn Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b1103cb17248fe..17617dc8755ed1 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9786,12 +9786,6 @@ pub mod tests { } impl AccountsDb { - pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option { - let ancestors = vec![(slot, 1)].into_iter().collect(); - let result = self.accounts_index.get(pubkey, Some(&ancestors), None); - result.map(|(list, index)| list.slot_list()[index].1.store_id()) - } - fn scan_snapshot_stores( &self, storage: &SortedStorages, From 0d117d420c8936577fb7d6d3c8dfb80cc1ce655e Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 26 Jan 2024 13:46:44 -0800 Subject: [PATCH 075/401] Remove BlockhashQueue dependency from SVM related code (#34974) --- accounts-db/src/transaction_results.rs | 2 +- core/src/banking_stage/consumer.rs | 30 ++++++++-------- .../scheduler_controller.rs | 4 +-- .../unprocessed_transaction_storage.rs | 2 +- runtime/src/bank.rs | 36 +++++++++---------- runtime/src/bank/tests.rs | 16 ++------- runtime/src/svm/account_loader.rs | 26 ++++---------- 7 files changed, 45 insertions(+), 71 deletions(-) diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index bcfe185856ace4..bc0a330f507399 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -17,7 +17,7 @@ use { }, }; -pub type TransactionCheckResult = (transaction::Result<()>, Option); +pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); pub struct TransactionResults { pub fee_collection_results: Vec>, diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index d5dccca98a0fae..ad42da3bafbb77 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -403,7 +403,9 @@ impl Consumer { let pre_results = vec![Ok(()); txs.len()]; let check_results = bank.check_transactions(txs, &pre_results, 
MAX_PROCESSING_AGE, &mut error_counters); - let check_results = check_results.into_iter().map(|(result, _nonce)| result); + let check_results = check_results + .into_iter() + .map(|(result, _nonce, _lamports)| result); let mut output = self.process_and_record_transactions_with_pre_results( bank, txs, @@ -787,7 +789,7 @@ impl Consumer { valid_txs .iter() .enumerate() - .filter_map(|(index, (x, _h))| if x.is_ok() { Some(index) } else { None }) + .filter_map(|(index, (x, _h, _lamports))| if x.is_ok() { Some(index) } else { None }) .collect_vec() } } @@ -2488,24 +2490,24 @@ mod tests { fn test_bank_filter_valid_transaction_indexes() { assert_eq!( Consumer::filter_valid_transaction_indexes(&[ - (Err(TransactionError::BlockhashNotFound), None), - (Err(TransactionError::BlockhashNotFound), None), - (Ok(()), None), - (Err(TransactionError::BlockhashNotFound), None), - (Ok(()), None), - (Ok(()), None), + (Err(TransactionError::BlockhashNotFound), None, None), + (Err(TransactionError::BlockhashNotFound), None, None), + (Ok(()), None, None), + (Err(TransactionError::BlockhashNotFound), None, None), + (Ok(()), None, None), + (Ok(()), None, None), ]), [2, 4, 5] ); assert_eq!( Consumer::filter_valid_transaction_indexes(&[ - (Ok(()), None), - (Err(TransactionError::BlockhashNotFound), None), - (Err(TransactionError::BlockhashNotFound), None), - (Ok(()), None), - (Ok(()), None), - (Ok(()), None), + (Ok(()), None, None), + (Err(TransactionError::BlockhashNotFound), None, None), + (Err(TransactionError::BlockhashNotFound), None, None), + (Ok(()), None, None), + (Ok(()), None, None), + (Ok(()), None, None), ]), [0, 3, 4, 5] ); diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 225ff6a53e18c5..c336f56f8949c3 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -168,7 +168,7 @@ impl SchedulerController { let fee_check_results: Vec<_> = check_results .into_iter() .zip(transactions) - .map(|((result, _nonce), tx)| { + .map(|((result, _nonce, _lamports), tx)| { result?; // if there's already error do nothing Consumer::check_fee_payer_unlocked(bank, tx.message(), &mut error_counters) }) @@ -226,7 +226,7 @@ impl SchedulerController { &mut error_counters, ); - for ((result, _nonce), id) in check_results.into_iter().zip(chunk.iter()) { + for ((result, _nonce, _lamports), id) in check_results.into_iter().zip(chunk.iter()) { if result.is_err() { saturating_add_assign!(self.count_metrics.num_dropped_on_age_and_status, 1); self.container.remove_by_id(&id.id); diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index f8d99c77900c51..257bf1b141975b 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -776,7 +776,7 @@ impl ThreadLocalUnprocessedPackets { .iter() .enumerate() .filter_map( - |(tx_index, (result, _))| if result.is_ok() { Some(tx_index) } else { None }, + |(tx_index, (result, _, _))| if result.is_ok() { Some(tx_index) } else { None }, ) .collect_vec() } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index bfab5a7c2c89f7..548e762c9da5f7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4460,7 +4460,7 @@ impl Bank { &hash_queue, error_counters, ), - Err(e) => (Err(e.clone()), None), + Err(e) => (Err(e.clone()), None, 
None), }) .collect() } @@ -4475,14 +4475,20 @@ impl Bank { ) -> TransactionCheckResult { let recent_blockhash = tx.message().recent_blockhash(); if hash_queue.is_hash_valid_for_age(recent_blockhash, max_age) { - (Ok(()), None) + ( + Ok(()), + None, + hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()), + ) } else if let Some((address, account)) = self.check_transaction_for_nonce(tx, next_durable_nonce) { - (Ok(()), Some(NoncePartial::new(address, account))) + let nonce = NoncePartial::new(address, account); + let lamports_per_signature = nonce.lamports_per_signature(); + (Ok(()), Some(nonce), lamports_per_signature) } else { error_counters.blockhash_not_found += 1; - (Err(TransactionError::BlockhashNotFound), None) + (Err(TransactionError::BlockhashNotFound), None, None) } } @@ -4508,16 +4514,16 @@ impl Bank { sanitized_txs .iter() .zip(lock_results) - .map(|(sanitized_tx, (lock_result, nonce))| { + .map(|(sanitized_tx, (lock_result, nonce, lamports))| { let sanitized_tx = sanitized_tx.borrow(); if lock_result.is_ok() && self.is_transaction_already_processed(sanitized_tx, &rcache) { error_counters.already_processed += 1; - return (Err(TransactionError::AlreadyProcessed), None); + return (Err(TransactionError::AlreadyProcessed), None, None); } - (lock_result, nonce) + (lock_result, nonce, lamports) }) .collect() } @@ -5075,19 +5081,11 @@ impl Bank { txs: &[SanitizedTransaction], lock_results: &mut [TransactionCheckResult], program_owners: &'a [Pubkey], - hash_queue: &BlockhashQueue, ) -> HashMap { let mut result: HashMap = HashMap::new(); lock_results.iter_mut().zip(txs).for_each(|etx| { - if let ((Ok(()), nonce), tx) = etx { - if nonce - .as_ref() - .map(|nonce| nonce.lamports_per_signature()) - .unwrap_or_else(|| { - hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()) - }) - .is_some() - { + if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { + if lamports_per_signature.is_some() { tx.message() .account_keys() .iter() @@ -5113,7 +5111,7 @@ impl Bank { // If the transaction's nonce account was not valid, and blockhash is not found, // the transaction will fail to process. Let's not load any programs from the // transaction, and update the status of the transaction. 
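The third tuple element is the core of this patch: the fee rate is resolved once, while the blockhash age is checked, instead of being re-derived from the BlockhashQueue by each consumer. A minimal sketch of the new shape, with the solana error and nonce types reduced to simple stand-ins:

type NoncePartial = ();
type TransactionCheckResult = (Result<(), String>, Option<NoncePartial>, Option<u64>);

fn check_age(blockhash_is_valid: bool, queue_rate: u64) -> TransactionCheckResult {
    if blockhash_is_valid {
        // Valid recent blockhash: carry the queue's lamports-per-signature along.
        (Ok(()), None, Some(queue_rate))
    } else {
        // No valid blockhash (and, in the real code, no usable nonce): no rate.
        (Err("BlockhashNotFound".to_string()), None, None)
    }
}

fn main() {
    let (result, _nonce, rate) = check_age(true, 5_000);
    assert!(result.is_ok());
    assert_eq!(rate, Some(5_000));
    let (result, _nonce, rate) = check_age(false, 5_000);
    assert!(result.is_err());
    assert_eq!(rate, None);
}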
- *etx.0 = (Err(TransactionError::BlockhashNotFound), None); + *etx.0 = (Err(TransactionError::BlockhashNotFound), None, None); } } }); @@ -5340,7 +5338,6 @@ impl Bank { sanitized_txs, check_results, PROGRAM_OWNERS, - &self.blockhash_queue.read().unwrap(), ); let native_loader = native_loader::id(); for builtin_program in self.builtin_programs.iter() { @@ -5357,7 +5354,6 @@ impl Bank { &self.ancestors, sanitized_txs, check_results, - &self.blockhash_queue.read().unwrap(), error_counters, &self.rent_collector, &self.feature_set, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 763b8c7db42df7..337556246f2806 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -10989,8 +10989,7 @@ fn test_rent_state_list_len() { &bank.accounts().accounts_db, &bank.ancestors, &[sanitized_tx.clone()], - &[(Ok(()), None)], - &bank.blockhash_queue.read().unwrap(), + &[(Ok(()), None, Some(0))], &mut error_counters, &bank.rent_collector, &bank.feature_set, @@ -13723,8 +13722,6 @@ fn test_filter_executable_program_accounts() { &AccountSharedData::new(40, 1, &program2_pubkey), ); - let mut hash_queue = BlockhashQueue::new(100); - let tx1 = Transaction::new_with_compiled_instructions( &[&keypair1], &[non_program_pubkey1], @@ -13732,7 +13729,6 @@ fn test_filter_executable_program_accounts() { vec![account1_pubkey, account2_pubkey, account3_pubkey], vec![CompiledInstruction::new(1, &(), vec![0])], ); - hash_queue.register_hash(&tx1.message().recent_blockhash, 0); let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); let tx2 = Transaction::new_with_compiled_instructions( @@ -13742,7 +13738,6 @@ fn test_filter_executable_program_accounts() { vec![account4_pubkey, account3_pubkey, account2_pubkey], vec![CompiledInstruction::new(1, &(), vec![0])], ); - hash_queue.register_hash(&tx2.message().recent_blockhash, 0); let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -13750,9 +13745,8 @@ fn test_filter_executable_program_accounts() { let programs = bank.filter_executable_program_accounts( &ancestors, &[sanitized_tx1, sanitized_tx2], - &mut [(Ok(()), None), (Ok(()), None)], + &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], owners, - &hash_queue, ); // The result should contain only account3_pubkey, and account4_pubkey as the program accounts @@ -13822,8 +13816,6 @@ fn test_filter_executable_program_accounts_invalid_blockhash() { &AccountSharedData::new(40, 1, &program2_pubkey), ); - let mut hash_queue = BlockhashQueue::new(100); - let tx1 = Transaction::new_with_compiled_instructions( &[&keypair1], &[non_program_pubkey1], @@ -13831,7 +13823,6 @@ fn test_filter_executable_program_accounts_invalid_blockhash() { vec![account1_pubkey, account2_pubkey, account3_pubkey], vec![CompiledInstruction::new(1, &(), vec![0])], ); - hash_queue.register_hash(&tx1.message().recent_blockhash, 0); let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); let tx2 = Transaction::new_with_compiled_instructions( @@ -13846,13 +13837,12 @@ fn test_filter_executable_program_accounts_invalid_blockhash() { let ancestors = vec![(0, 0)].into_iter().collect(); let owners = &[program1_pubkey, program2_pubkey]; - let mut lock_results = vec![(Ok(()), None), (Ok(()), None)]; + let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; let programs = bank.filter_executable_program_accounts( &ancestors, &[sanitized_tx1, sanitized_tx2], &mut lock_results, owners, - &hash_queue, ); 
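Downstream, the precomputed rate feeds straight into fee calculation (see the svm/account_loader.rs hunk just below). A sketch of that plumbing only: the real per-signature and compute-budget math in FeeStructure is elided, fee_for is a hypothetical helper, and the zero-fee fallback for a missing rate is an assumption for illustration, not taken from the hunk.

// Hypothetical helper showing how Option<u64> from the check result is consumed.
fn fee_for(lamports_per_signature: Option<u64>, num_signatures: u64) -> u64 {
    lamports_per_signature
        .map(|rate| rate.saturating_mul(num_signatures))
        .unwrap_or_default()
}

fn main() {
    assert_eq!(fee_for(Some(5_000), 2), 10_000);
    // No resolved rate (the age/nonce check already failed): nothing charged here.
    assert_eq!(fee_for(None, 2), 0);
}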
// The result should contain only account3_pubkey as the program accounts diff --git a/runtime/src/svm/account_loader.rs b/runtime/src/svm/account_loader.rs index 8fa432db1556dc..beedace9ede1ac 100644 --- a/runtime/src/svm/account_loader.rs +++ b/runtime/src/svm/account_loader.rs @@ -7,8 +7,7 @@ use { accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, accounts_db::AccountsDb, ancestors::Ancestors, - blockhash_queue::BlockhashQueue, - nonce_info::{NonceFull, NonceInfo}, + nonce_info::NonceFull, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, transaction_error_metrics::TransactionErrorMetrics, @@ -45,7 +44,6 @@ pub(crate) fn load_accounts( ancestors: &Ancestors, txs: &[SanitizedTransaction], lock_results: &[TransactionCheckResult], - hash_queue: &BlockhashQueue, error_counters: &mut TransactionErrorMetrics, rent_collector: &RentCollector, feature_set: &FeatureSet, @@ -59,17 +57,11 @@ pub(crate) fn load_accounts( txs.iter() .zip(lock_results) .map(|etx| match etx { - (tx, (Ok(()), nonce)) => { - let lamports_per_signature = nonce - .as_ref() - .map(|nonce| nonce.lamports_per_signature()) - .unwrap_or_else(|| { - hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()) - }); + (tx, (Ok(()), nonce, lamports_per_signature)) => { let fee = if let Some(lamports_per_signature) = lamports_per_signature { fee_structure.calculate_fee( tx.message(), - lamports_per_signature, + *lamports_per_signature, &process_compute_budget_instructions( tx.message().program_instructions_iter(), ) @@ -118,7 +110,7 @@ pub(crate) fn load_accounts( (Ok(loaded_transaction), nonce) } - (_, (Err(e), _nonce)) => (Err(e.clone()), None), + (_, (Err(e), _nonce, _lamports_per_signature)) => (Err(e.clone()), None), }) .collect() } @@ -525,8 +517,6 @@ mod tests { feature_set: &FeatureSet, fee_structure: &FeeStructure, ) -> Vec { - let mut hash_queue = BlockhashQueue::new(100); - hash_queue.register_hash(&tx.message().recent_blockhash, lamports_per_signature); let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); for ka in ka.iter() { @@ -539,8 +529,7 @@ mod tests { &accounts.accounts_db, &ancestors, &[sanitized_tx], - &[(Ok(()), None)], - &hash_queue, + &[(Ok(()), None, Some(lamports_per_signature))], error_counters, rent_collector, feature_set, @@ -1008,8 +997,6 @@ mod tests { ) -> Vec { let tx = SanitizedTransaction::from_transaction_for_tests(tx); let rent_collector = RentCollector::default(); - let mut hash_queue = BlockhashQueue::new(100); - hash_queue.register_hash(tx.message().recent_blockhash(), 10); let ancestors = vec![(0, 0)].into_iter().collect(); let mut error_counters = TransactionErrorMetrics::default(); @@ -1017,8 +1004,7 @@ mod tests { &accounts.accounts_db, &ancestors, &[tx], - &[(Ok(()), None)], - &hash_queue, + &[(Ok(()), None, Some(10))], &mut error_counters, &rent_collector, &FeatureSet::all_enabled(), From e1260a9604bb9db018eb214a4279e4f6e6a4dd8b Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 26 Jan 2024 16:52:05 -0500 Subject: [PATCH 076/401] Removes unused parameters from AccountsHashVerifier::new() (#34976) --- core/src/accounts_hash_verifier.rs | 3 --- core/src/validator.rs | 2 -- core/tests/epoch_accounts_hash.rs | 2 -- core/tests/snapshots.rs | 2 -- ledger-tool/src/ledger_utils.rs | 14 ++------------ 5 files changed, 2 insertions(+), 21 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index d2ed64b1a4dd03..cc3da5f85d8804 100644 --- 
a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -10,7 +10,6 @@ use { }, sorted_storages::SortedStorages, }, - solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_runtime::{ serde_snapshot::BankIncrementalSnapshotPersistence, @@ -46,8 +45,6 @@ impl AccountsHashVerifier { accounts_package_receiver: Receiver, snapshot_package_sender: Option>, exit: Arc, - _cluster_info: Arc, - _accounts_hash_fault_injector: Option, snapshot_config: SnapshotConfig, ) -> Self { // If there are no accounts packages to process, limit how often we re-check diff --git a/core/src/validator.rs b/core/src/validator.rs index f624dae1e08021..d19ba2b5216e0d 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -778,8 +778,6 @@ impl Validator { accounts_package_receiver, snapshot_package_sender, exit.clone(), - cluster_info.clone(), - config.accounts_hash_fault_injector, config.snapshot_config.clone(), ); diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 6a62ccb5a98674..106539034a2a81 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -197,8 +197,6 @@ impl BackgroundServices { accounts_package_receiver, Some(snapshot_package_sender), exit.clone(), - cluster_info, - None, snapshot_config.clone(), ); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 83af4558dfc19f..a44c63fec66da9 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -1044,8 +1044,6 @@ fn test_snapshots_with_background_services( accounts_package_receiver, Some(snapshot_package_sender), exit.clone(), - cluster_info, - None, snapshot_test_config.snapshot_config.clone(), ); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 82797146d3a408..bcf87e826ec72e 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -12,7 +12,6 @@ use { solana_geyser_plugin_manager::geyser_plugin_service::{ GeyserPluginService, GeyserPluginServiceError, }, - solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::{ bank_forks_utils::{self, BankForksUtilsError}, blockstore::{Blockstore, BlockstoreError}, @@ -41,10 +40,9 @@ use { }, }, solana_sdk::{ - clock::Slot, genesis_config::GenesisConfig, pubkey::Pubkey, signature::Signer, - signer::keypair::Keypair, timing::timestamp, transaction::VersionedTransaction, + clock::Slot, genesis_config::GenesisConfig, pubkey::Pubkey, + transaction::VersionedTransaction, }, - solana_streamer::socket::SocketAddrSpace, solana_unified_scheduler_pool::DefaultSchedulerPool, std::{ path::{Path, PathBuf}, @@ -315,20 +313,12 @@ pub fn load_and_process_ledger( } } - let node_id = Arc::new(Keypair::new()); - let cluster_info = Arc::new(ClusterInfo::new( - ContactInfo::new_localhost(&node_id.pubkey(), timestamp()), - Arc::clone(&node_id), - SocketAddrSpace::Unspecified, - )); let (accounts_package_sender, accounts_package_receiver) = crossbeam_channel::unbounded(); let accounts_hash_verifier = AccountsHashVerifier::new( accounts_package_sender.clone(), accounts_package_receiver, None, exit.clone(), - cluster_info, - None, SnapshotConfig::new_load_only(), ); let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded(); From 59c2f3560992acb5e47058dcb58ce1b3d8aeb98f Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 26 Jan 2024 18:34:47 -0500 Subject: [PATCH 077/401] Removes unused retain_max_n_elements() (#34978) --- runtime/src/snapshot_package.rs | 11 ----------- 
1 file changed, 11 deletions(-) diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index f5623c550a24bf..99af3ebbe6ee2a 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -333,14 +333,3 @@ impl SnapshotKind { matches!(self, SnapshotKind::IncrementalSnapshot(_)) } } - -/// Helper function to retain only max n of elements to the right of a vector, -/// viz. remove v.len() - n elements from the left of the vector. -#[inline(always)] -pub fn retain_max_n_elements(v: &mut Vec, n: usize) { - if v.len() > n { - let to_truncate = v.len() - n; - v.rotate_left(to_truncate); - v.truncate(n); - } -} From 02062a6b6a5bab7f0a041ff691ab21d31c9b6828 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 26 Jan 2024 19:21:23 -0500 Subject: [PATCH 078/401] Removes unused AccountsHashFaultInjector (#34977) --- core/src/accounts_hash_verifier.rs | 2 -- core/src/validator.rs | 4 +--- local-cluster/src/validator_configs.rs | 1 - 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index cc3da5f85d8804..43a3911e402bc4 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -33,8 +33,6 @@ use { }, }; -pub type AccountsHashFaultInjector = fn(&Hash, Slot) -> Option; - pub struct AccountsHashVerifier { t_accounts_hash_verifier: JoinHandle<()>, } diff --git a/core/src/validator.rs b/core/src/validator.rs index d19ba2b5216e0d..2b6a807ac7c110 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -3,7 +3,7 @@ pub use solana_perf::report_target_features; use { crate::{ - accounts_hash_verifier::{AccountsHashFaultInjector, AccountsHashVerifier}, + accounts_hash_verifier::AccountsHashVerifier, admin_rpc_post_init::AdminRpcRequestMetadataPostInit, banking_trace::{self, BankingTracer}, cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService}, @@ -223,7 +223,6 @@ pub struct ValidatorConfig { pub repair_validators: Option>, // None = repair from all pub repair_whitelist: Arc>>, // Empty = repair with all pub gossip_validators: Option>, // None = gossip with all - pub accounts_hash_fault_injector: Option, pub accounts_hash_interval_slots: u64, pub max_genesis_archive_unpacked_size: u64, pub wal_recovery_mode: Option, @@ -294,7 +293,6 @@ impl Default for ValidatorConfig { repair_validators: None, repair_whitelist: Arc::new(RwLock::new(HashSet::default())), gossip_validators: None, - accounts_hash_fault_injector: None, accounts_hash_interval_slots: std::u64::MAX, max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, wal_recovery_mode: None, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 3479422c2f5147..21606164cc27e4 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -31,7 +31,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { repair_whitelist: config.repair_whitelist.clone(), gossip_validators: config.gossip_validators.clone(), accounts_hash_interval_slots: config.accounts_hash_interval_slots, - accounts_hash_fault_injector: config.accounts_hash_fault_injector, max_genesis_archive_unpacked_size: config.max_genesis_archive_unpacked_size, wal_recovery_mode: config.wal_recovery_mode.clone(), run_verification: config.run_verification, From d4fdcd940a023f2e70d4cc24c4e788074fb2ad03 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sat, 27 Jan 2024 15:03:16 +0000 Subject: [PATCH 079/401] adds feature 
to enable chained Merkle shreds (#34916) During a cluster upgrade when only half of the cluster can ingest the new shred variant, sending shreds of the new variant can cause nodes to diverge. The commit adds a feature to enable chained Merkle shreds explicitly. --- core/src/shred_fetch_stage.rs | 33 +++++++++++++++++++++++++++++---- ledger/src/shred.rs | 18 ++++++++++++++++++ sdk/src/feature_set.rs | 5 +++++ 3 files changed, 52 insertions(+), 4 deletions(-) diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index fd72b8b8eebb3b..481e5333b14198 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -104,8 +104,22 @@ impl ShredFetchStage { // Limit shreds to 2 epochs away. let max_slot = last_slot + 2 * slots_per_epoch; - let should_drop_legacy_shreds = - |shred_slot| should_drop_legacy_shreds(shred_slot, &feature_set, &epoch_schedule); + let should_drop_legacy_shreds = |shred_slot| { + check_feature_activation( + &feature_set::drop_legacy_shreds::id(), + shred_slot, + &feature_set, + &epoch_schedule, + ) + }; + let enable_chained_merkle_shreds = |shred_slot| { + check_feature_activation( + &feature_set::enable_chained_merkle_shreds::id(), + shred_slot, + &feature_set, + &epoch_schedule, + ) + }; let turbine_disabled = turbine_disabled.load(Ordering::Relaxed); for packet in packet_batch.iter_mut().filter(|p| !p.meta().discard()) { if turbine_disabled @@ -115,6 +129,7 @@ impl ShredFetchStage { max_slot, shred_version, should_drop_legacy_shreds, + enable_chained_merkle_shreds, &mut stats, ) { @@ -394,13 +409,15 @@ pub(crate) fn receive_repair_quic_packets( } } +// Returns true if the feature is effective for the shred slot. #[must_use] -fn should_drop_legacy_shreds( +fn check_feature_activation( + feature: &Pubkey, shred_slot: Slot, feature_set: &FeatureSet, epoch_schedule: &EpochSchedule, ) -> bool { - match feature_set.activated_slot(&feature_set::drop_legacy_shreds::id()) { + match feature_set.activated_slot(feature) { None => false, Some(feature_slot) => { let feature_epoch = epoch_schedule.get_epoch(feature_slot); @@ -451,6 +468,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); let coding = solana_ledger::shred::Shredder::generate_coding_shreds( @@ -465,6 +483,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); } @@ -487,6 +506,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); assert_eq!(stats.index_overrun, 1); @@ -509,6 +529,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); assert_eq!(stats.slot_out_of_range, 1); @@ -519,6 +540,7 @@ mod tests { max_slot, 345, // shred_version |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); assert_eq!(stats.shred_version_mismatch, 1); @@ -530,6 +552,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); @@ -552,6 +575,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, )); @@ -564,6 +588,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats, 
)); } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index bed8965073429a..54c27e237da980 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -916,6 +916,7 @@ pub fn should_discard_shred( max_slot: Slot, shred_version: u16, should_drop_legacy_shreds: impl Fn(Slot) -> bool, + enable_chained_merkle_shreds: impl Fn(Slot) -> bool, stats: &mut ShredFetchStats, ) -> bool { debug_assert!(root < max_slot); @@ -999,6 +1000,9 @@ pub fn should_discard_shred( stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1); } ShredVariant::MerkleCode(_, /*chained:*/ true) => { + if !enable_chained_merkle_shreds(slot) { + return true; + } stats.num_shreds_merkle_code_chained = stats.num_shreds_merkle_code_chained.saturating_add(1); } @@ -1006,6 +1010,9 @@ pub fn should_discard_shred( stats.num_shreds_merkle_data = stats.num_shreds_merkle_data.saturating_add(1); } ShredVariant::MerkleData(_, /*chained:*/ true) => { + if !enable_chained_merkle_shreds(slot) { + return true; + } stats.num_shreds_merkle_data_chained = stats.num_shreds_merkle_data_chained.saturating_add(1); } @@ -1209,6 +1216,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(stats, ShredFetchStats::default()); @@ -1220,6 +1228,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(stats.index_overrun, 1); @@ -1231,6 +1240,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(stats.index_overrun, 2); @@ -1242,6 +1252,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(stats.index_overrun, 3); @@ -1253,6 +1264,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(stats.index_overrun, 4); @@ -1264,6 +1276,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(stats.bad_parent_offset, 1); @@ -1285,6 +1298,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); @@ -1305,6 +1319,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(1, stats.index_out_of_bounds); @@ -1326,6 +1341,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); packet.buffer_mut()[OFFSET_OF_SHRED_VARIANT] = u8::MAX; @@ -1336,6 +1352,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(1, stats.bad_shred_type); @@ -1348,6 +1365,7 @@ mod tests { max_slot, shred_version, |_| false, // should_drop_legacy_shreds + |_| true, // enable_chained_merkle_shreds &mut stats )); assert_eq!(1, stats.bad_shred_type); diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 25196462e5bd94..2201ed5c400247 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -776,6 +776,10 @@ pub mod enable_gossip_duplicate_proof_ingestion { solana_sdk::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); } 
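The gating rule used by both feature checks above is worth seeing in isolation: a feature activated at some slot only takes effect for shreds from a strictly later epoch, so the whole cluster flips over on an epoch boundary. A self-contained mirror that assumes fixed-length epochs (the real code goes through EpochSchedule, which also handles warmup):

fn check_feature_activation(
    activated_slot: Option<u64>,
    shred_slot: u64,
    slots_per_epoch: u64,
) -> bool {
    match activated_slot {
        // Feature not activated: never effective.
        None => false,
        Some(feature_slot) => {
            let feature_epoch = feature_slot / slots_per_epoch;
            let shred_epoch = shred_slot / slots_per_epoch;
            // Effective only for shreds from a later epoch than the activation.
            feature_epoch < shred_epoch
        }
    }
}

fn main() {
    let slots_per_epoch = 432_000;
    assert!(!check_feature_activation(None, 1_000_000, slots_per_epoch));
    // Activated in epoch 0: not effective within epoch 0...
    assert!(!check_feature_activation(Some(10), 20, slots_per_epoch));
    // ...but effective from epoch 1 onward.
    assert!(check_feature_activation(Some(10), slots_per_epoch + 1, slots_per_epoch));
}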
+pub mod enable_chained_merkle_shreds { + solana_sdk::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -965,6 +969,7 @@ lazy_static! { (curve25519_restrict_msm_length::id(), "restrict curve25519 multiscalar multiplication vector lengths #34763"), (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"), + (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 79bbe4381a70eba923e23c4caa45953ee3618c52 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sat, 27 Jan 2024 15:04:31 +0000 Subject: [PATCH 080/401] adds chained_merkle_root to shredder arguments (#34952) Working towards chaining Merkle root of erasure batches, the commit adds chained_merkle_root to shredder arguments. --- core/benches/shredder.rs | 9 ++- core/src/window_service.rs | 1 + gossip/src/duplicate_shred.rs | 2 + ledger/src/blockstore.rs | 41 +++++++---- ledger/src/shredder.rs | 69 +++++++++++++------ ledger/src/sigverify_shreds.rs | 4 +- ledger/tests/shred.rs | 2 + local-cluster/tests/local_cluster.rs | 1 + turbine/benches/retransmit_stage.rs | 3 + turbine/src/broadcast_stage.rs | 3 + .../broadcast_duplicates_run.rs | 3 + .../broadcast_fake_shreds_run.rs | 2 + .../fail_entry_verification_broadcast_run.rs | 3 + .../broadcast_stage/standard_broadcast_run.rs | 2 + 14 files changed, 108 insertions(+), 37 deletions(-) diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index 93093b1920fb5d..3c1c3e204c269c 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -4,7 +4,7 @@ extern crate test; use { - rand::seq::SliceRandom, + rand::{seq::SliceRandom, Rng}, raptorq::{Decoder, Encoder}, solana_entry::entry::{create_ticks, Entry}, solana_ledger::shred::{ @@ -50,6 +50,7 @@ fn make_shreds(num_shreds: usize) -> Vec { &Keypair::new(), &entries, true, // is_last_in_slot + None, // chained_merkle_root 0, // next_shred_index 0, // next_code_index false, // merkle_variant @@ -80,12 +81,14 @@ fn bench_shredder_ticks(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(LEGACY_SHRED_DATA_CAPACITY)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let reed_solomon_cache = ReedSolomonCache::default(); + let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); bencher.iter(|| { let shredder = Shredder::new(1, 0, 0, 0).unwrap(); shredder.entries_to_shreds( &kp, &entries, true, + chained_merkle_root, 0, 0, true, // merkle_variant @@ -107,6 +110,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) { Some(shred_size), ); let entries = make_large_unchained_entries(txs_per_entry, num_entries); + let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); let reed_solomon_cache = ReedSolomonCache::default(); // 1Mb bencher.iter(|| { @@ -115,6 +119,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) { &kp, &entries, true, + chained_merkle_root, 0, 0, true, // merkle_variant @@ -133,10 +138,12 @@ fn bench_deshredder(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); + let chained_merkle_root = 
Some(Hash::new_from_array(rand::thread_rng().gen())); let (data_shreds, _) = shredder.entries_to_shreds( &kp, &entries, true, + chained_merkle_root, 0, 0, true, // merkle_variant diff --git a/core/src/window_service.rs b/core/src/window_service.rs index aa801b7ebd37f2..504776db1e1a25 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -582,6 +582,7 @@ mod test { keypair, entries, true, // is_last_in_slot + None, // chained_merkle_root 0, // next_shred_index 0, // next_code_index true, // merkle_variant diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 85f4f4fa0cf149..84c50ea602e8c8 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -409,6 +409,8 @@ pub(crate) mod tests { keypair, &entries, is_last_in_slot, + // chained_merkle_root + Some(Hash::new_from_array(rng.gen())), next_shred_index, next_code_index, // next_code_index merkle_variant, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 5b1dc475b9cf04..45c1cbf49bdb2c 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -29,6 +29,7 @@ use { crossbeam_channel::{bounded, Receiver, Sender, TrySendError}, dashmap::DashSet, log::*, + rand::Rng, rayon::{ iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, ThreadPool, @@ -1956,6 +1957,7 @@ impl Blockstore { let mut all_shreds = vec![]; let mut slot_entries = vec![]; let reed_solomon_cache = ReedSolomonCache::default(); + let mut chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); // Find all the entries for start_slot for entry in entries.into_iter() { if remaining_ticks_in_slot == 0 { @@ -1973,7 +1975,8 @@ impl Blockstore { let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds( keypair, ¤t_entries, - true, // is_last_in_slot + true, // is_last_in_slot + chained_merkle_root, start_index, // next_shred_index start_index, // next_code_index true, // merkle_variant @@ -1982,6 +1985,7 @@ impl Blockstore { ); all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); + chained_merkle_root = Some(coding_shreds.last().unwrap().merkle_root().unwrap()); shredder = Shredder::new( current_slot, parent_slot, @@ -2002,6 +2006,7 @@ impl Blockstore { keypair, &slot_entries, is_full_slot, + chained_merkle_root, 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -4285,6 +4290,8 @@ pub fn create_new_ledger( &Keypair::new(), &entries, true, // is_last_in_slot + // chained_merkle_root + Some(Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -4546,6 +4553,8 @@ pub fn entries_to_test_shreds( &Keypair::new(), entries, is_full_slot, + // chained_merkle_root + Some(Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index, 0, // next_code_index merkle_variant, @@ -4806,6 +4815,7 @@ pub mod tests { InnerInstruction, InnerInstructions, Reward, Rewards, TransactionTokenBalance, }, std::{cmp::Ordering, thread::Builder, time::Duration}, + test_case::test_case, }; // used for tests only @@ -7434,7 +7444,7 @@ pub mod tests { #[test] fn test_insert_multiple_is_last() { solana_logger::setup(); - let (shreds, _) = make_slot_entries(0, 0, 20, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(0, 0, 19, /*merkle_variant:*/ true); let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -7448,6 +7458,7 @@ pub mod 
tests { assert!(slot_meta.is_full()); let (shreds, _) = make_slot_entries(0, 0, 22, /*merkle_variant:*/ true); + assert!(shreds.len() > num_shreds as usize); blockstore.insert_shreds(shreds, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); @@ -9863,7 +9874,9 @@ pub mod tests { let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &leader_keypair, &entries, - true, // is_last_in_slot + true, // is_last_in_slot + // chained_merkle_root + Some(Hash::new_from_array(rand::thread_rng().gen())), fec_set_index, // next_shred_index fec_set_index, // next_code_index true, // merkle_variant @@ -9916,18 +9929,21 @@ pub mod tests { assert_eq!(num_coding_in_index, num_coding); } - #[test] - fn test_duplicate_slot() { + #[test_case(false)] + #[test_case(true)] + fn test_duplicate_slot(chained: bool) { let slot = 0; let entries1 = make_slot_entries_with_transactions(1); let entries2 = make_slot_entries_with_transactions(1); let leader_keypair = Arc::new(Keypair::new()); let reed_solomon_cache = ReedSolomonCache::default(); let shredder = Shredder::new(slot, 0, 0, 0).unwrap(); + let chained_merkle_root = chained.then(|| Hash::new_from_array(rand::thread_rng().gen())); let (shreds, _) = shredder.entries_to_shreds( &leader_keypair, &entries1, true, // is_last_in_slot + chained_merkle_root, 0, // next_shred_index 0, // next_code_index, true, // merkle_variant @@ -9938,6 +9954,7 @@ pub mod tests { &leader_keypair, &entries2, true, // is_last_in_slot + chained_merkle_root, 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -10323,7 +10340,11 @@ pub mod tests { let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1; let (mut original_shreds, original_entries) = make_slot_entries(0, 0, num_unique_entries, /*merkle_variant:*/ true); - + let mut duplicate_shreds = original_shreds.clone(); + // Mutate signature so that payloads are not the same as the originals. 
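The threading pattern for the new argument is the interesting part of this patch: each erasure batch is shredded with the previous batch's Merkle root, exactly as the blockstore loop above does with coding_shreds.last(). A toy illustration with a stub "shredder" that invents one root per batch; Hash is just a byte array here, not the solana_sdk type:

type Hash = [u8; 32];

struct Batch { merkle_root: Hash }

// Stub standing in for Shredder::entries_to_shreds and its new parameter.
fn entries_to_shreds_stub(batch_index: u8, _chained_merkle_root: Option<Hash>) -> Batch {
    Batch { merkle_root: [batch_index; 32] }
}

fn main() {
    // Seeded with an arbitrary root, as the test helpers above do with rng.gen().
    let mut chained_merkle_root: Option<Hash> = Some([0xaa; 32]);
    for batch_index in 0..3u8 {
        let batch = entries_to_shreds_stub(batch_index, chained_merkle_root);
        // Each batch chains off the Merkle root of the batch before it.
        chained_merkle_root = Some(batch.merkle_root);
    }
    assert_eq!(chained_merkle_root, Some([2; 32]));
}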
+ for shred in &mut duplicate_shreds { + shred.sign(&Keypair::new()); + } // Discard first shred, so that the slot is not full assert!(original_shreds.len() > 1); let last_index = original_shreds.last().unwrap().index() as u64; @@ -10345,14 +10366,6 @@ pub mod tests { assert!(!blockstore.is_full(0)); } - let duplicate_shreds = entries_to_test_shreds( - &original_entries, - 0, // slot - 0, // parent_slot - true, // is_full_slot - 0, // version - true, // merkle_variant - ); let num_shreds = duplicate_shreds.len() as u64; blockstore .insert_shreds(duplicate_shreds, None, false) diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 07a0fe0ae5b41b..ba127ef009c7a5 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -13,7 +13,7 @@ use { solana_entry::entry::Entry, solana_measure::measure::Measure, solana_rayon_threadlimit::get_thread_count, - solana_sdk::{clock::Slot, signature::Keypair}, + solana_sdk::{clock::Slot, hash::Hash, signature::Keypair}, std::{ borrow::Borrow, fmt::Debug, @@ -69,11 +69,13 @@ impl Shredder { } } + #[allow(clippy::too_many_arguments)] pub fn entries_to_shreds( &self, keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, + chained_merkle_root: Option, next_shred_index: u32, next_code_index: u32, merkle_variant: bool, @@ -93,7 +95,7 @@ impl Shredder { self.version, self.reference_tick, is_last_in_slot, - None, // chained_merkle_root + chained_merkle_root, next_shred_index, next_code_index, reed_solomon_cache, @@ -500,6 +502,7 @@ mod tests { system_transaction, }, std::{collections::HashSet, convert::TryInto, iter::repeat_with, sync::Arc}, + test_case::test_case, }; fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) { @@ -510,7 +513,7 @@ mod tests { assert_eq!(verify, shred.verify(pk)); } - fn run_test_data_shredder(slot: Slot) { + fn run_test_data_shredder(slot: Slot, chained: bool) { let keypair = Arc::new(Keypair::new()); // Test that parent cannot be > current slot @@ -548,6 +551,8 @@ mod tests { &keypair, &entries, is_last_in_slot, + // chained_merkle_root + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), start_index, // next_shred_index start_index, // next_code_index true, // merkle_variant @@ -602,13 +607,15 @@ mod tests { assert_eq!(entries, deshred_entries); } - #[test] - fn test_data_shredder() { - run_test_data_shredder(0x1234_5678_9abc_def0); + #[test_case(false)] + #[test_case(true)] + fn test_data_shredder(chained: bool) { + run_test_data_shredder(0x1234_5678_9abc_def0, chained); } - #[test] - fn test_deserialize_shred_payload() { + #[test_case(false)] + #[test_case(true)] + fn test_deserialize_shred_payload(chained: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; let parent_slot = 0; @@ -627,6 +634,8 @@ mod tests { &keypair, &entries, true, // is_last_in_slot + // chained_merkle_root + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -639,8 +648,9 @@ mod tests { assert_eq!(deserialized_shred, *data_shreds.last().unwrap()); } - #[test] - fn test_shred_reference_tick() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_reference_tick(chained: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; let parent_slot = 0; @@ -659,6 +669,8 @@ mod tests { &keypair, &entries, true, // is_last_in_slot + // chained_merkle_root, + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // 
merkle_variant @@ -676,8 +688,9 @@ mod tests { assert_eq!(deserialized_shred.reference_tick(), 5); } - #[test] - fn test_shred_reference_tick_overflow() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_reference_tick_overflow(chained: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; let parent_slot = 0; @@ -696,6 +709,8 @@ mod tests { &keypair, &entries, true, // is_last_in_slot + // chained_merkle_root + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -722,7 +737,7 @@ mod tests { ); } - fn run_test_data_and_code_shredder(slot: Slot) { + fn run_test_data_and_code_shredder(slot: Slot, chained: bool) { let keypair = Arc::new(Keypair::new()); let shredder = Shredder::new(slot, slot - 5, 0, 0).unwrap(); // Create enough entries to make > 1 shred @@ -742,6 +757,8 @@ mod tests { &keypair, &entries, true, // is_last_in_slot + // chained_merkle_root + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -766,9 +783,10 @@ mod tests { } } - #[test] - fn test_data_and_code_shredder() { - run_test_data_and_code_shredder(0x1234_5678_9abc_def0); + #[test_case(false)] + #[test_case(true)] + fn test_data_and_code_shredder(chained: bool) { + run_test_data_and_code_shredder(0x1234_5678_9abc_def0, chained); } fn run_test_recovery_and_reassembly(slot: Slot, is_last_in_slot: bool) { @@ -799,6 +817,7 @@ mod tests { &keypair, &entries, is_last_in_slot, + None, // chained_merkle_root 0, // next_shred_index 0, // next_code_index false, // merkle_variant @@ -936,6 +955,7 @@ mod tests { &keypair, &entries, true, // is_last_in_slot + None, // chained_merkle_root 25, // next_shred_index, 25, // next_code_index false, // merkle_variant @@ -1032,6 +1052,7 @@ mod tests { &keypair, &[entry], is_last_in_slot, + None, // chained_merkle_root next_shred_index, next_shred_index, // next_code_index false, // merkle_variant @@ -1073,8 +1094,9 @@ mod tests { } } - #[test] - fn test_shred_version() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_version(chained: bool) { let keypair = Arc::new(Keypair::new()); let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); @@ -1094,6 +1116,8 @@ mod tests { &keypair, &entries, true, // is_last_in_slot + // chained_merkle_root + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // merkle_variant @@ -1106,8 +1130,9 @@ mod tests { .any(|s| s.version() != version)); } - #[test] - fn test_shred_fec_set_index() { + #[test_case(false)] + #[test_case(true)] + fn test_shred_fec_set_index(chained: bool) { let keypair = Arc::new(Keypair::new()); let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); @@ -1127,7 +1152,9 @@ mod tests { let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &keypair, &entries, - true, // is_last_in_slot + true, // is_last_in_slot + // chained_merkle_root + chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), start_index, // next_shred_index start_index, // next_code_index true, // merkle_variant diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index d52af07bf2cf46..f6d060d686757d 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -739,7 +739,9 @@ mod tests { .entries_to_shreds( keypair, &make_entries(rng, num_entries), - rng.gen(), // 
is_last_in_slot + rng.gen(), // is_last_in_slot + // chained_merkle_root + rng.gen::().then(|| Hash::new_from_array(rng.gen())), rng.gen_range(0..2671), // next_shred_index rng.gen_range(0..2781), // next_code_index rng.gen(), // merkle_variant, diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index 78cdb28d0b39ae..3c2a6771b635cd 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -53,6 +53,7 @@ fn test_multi_fec_block_coding() { &keypair, &entries, true, // is_last_in_slot + None, // chained_merkle_root 0, // next_shred_index 0, // next_code_index false, // merkle_variant @@ -226,6 +227,7 @@ fn setup_different_sized_fec_blocks( &keypair, &entries, is_last, + None, // chained_merkle_root next_shred_index, next_code_index, false, // merkle_variant diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 752160e5ada970..02953c632a80c3 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -5610,6 +5610,7 @@ fn test_invalid_forks_persisted_on_restart() { &majority_keypair, &entries, true, // is_full_slot + None, // chained_merkle_root 0, // next_shred_index, 0, // next_code_index false, // merkle_variant diff --git a/turbine/benches/retransmit_stage.rs b/turbine/benches/retransmit_stage.rs index bfd68239feedab..c5490d5670e6c6 100644 --- a/turbine/benches/retransmit_stage.rs +++ b/turbine/benches/retransmit_stage.rs @@ -6,6 +6,7 @@ extern crate test; use { crossbeam_channel::unbounded, log::*, + rand::Rng, solana_entry::entry::Entry, solana_gossip::{ cluster_info::{ClusterInfo, Node}, @@ -105,6 +106,8 @@ fn bench_retransmitter(bencher: &mut Bencher) { &keypair, &entries, true, // is_last_in_slot + // chained_merkle_root + Some(Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index 0, // next_code_index true, // merkle_variant diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index 98566dfa24bc48..d799c0d9b62005 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -503,6 +503,7 @@ pub mod test { use { super::*, crossbeam_channel::unbounded, + rand::Rng, solana_entry::entry::create_ticks, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -544,6 +545,8 @@ pub mod test { &Keypair::new(), &entries, true, // is_last_in_slot + // chained_merkle_root + Some(Hash::new_from_array(rand::thread_rng().gen())), 0, // next_shred_index, 0, // next_code_index true, // merkle_variant diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index bae5945aea0e13..8bee47068ac499 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -173,6 +173,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), + None, // chained_merkle_root self.next_shred_index, self.next_code_index, false, // merkle_variant @@ -190,6 +191,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &[original_last_entry], true, + None, // chained_merkle_root self.next_shred_index, self.next_code_index, false, // merkle_variant @@ -203,6 +205,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &duplicate_extra_last_entries, true, + None, // chained_merkle_root self.next_shred_index, self.next_code_index, false, // merkle_variant diff --git 
a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs index 1464d46493d730..20d141dee01a73 100644 --- a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -60,6 +60,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height(), + None, // chained_merkle_root next_shred_index, self.next_code_index, true, // merkle_variant @@ -81,6 +82,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &fake_entries, last_tick_height == bank.max_tick_height(), + None, // chained_merkle_root next_shred_index, self.next_code_index, true, // merkle_variant diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 1dda981e693218..b98972690c78a8 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -92,6 +92,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), + None, // chained_merkle_root self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -108,6 +109,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[good_last_entry], true, + None, // chained_merkle_root self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -121,6 +123,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[bad_last_entry], false, + None, // chained_merkle_root self.next_shred_index, self.next_code_index, true, // merkle_variant diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 82bd7f940c508d..e2b8871b4bc3c2 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -85,6 +85,7 @@ impl StandardBroadcastRun { keypair, &[], // entries true, // is_last_in_slot, + None, // chained_merkle_root state.next_shred_index, state.next_code_index, true, // merkle_variant @@ -143,6 +144,7 @@ impl StandardBroadcastRun { keypair, entries, is_slot_end, + None, // chained_merkle_root next_shred_index, next_code_index, true, // merkle_variant From 2455dc1a69f2fd6e3a9b22623c01e70a9183a639 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sat, 27 Jan 2024 11:10:09 -0800 Subject: [PATCH 081/401] SVM: Move `TransactionAccountStateInfo` to svm and decouple from `bank` (#34981) --- runtime/src/bank.rs | 22 ++++++++++------ runtime/src/bank/tests.rs | 8 ++++-- runtime/src/svm/mod.rs | 1 + .../transaction_account_state_info.rs | 25 ++++++++----------- 4 files changed, 33 insertions(+), 23 deletions(-) rename runtime/src/{bank => svm}/transaction_account_state_info.rs (75%) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 548e762c9da5f7..e7b17913e3aa57 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -59,7 +59,10 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, - svm::account_loader::load_accounts, + svm::{ + account_loader::load_accounts, + transaction_account_state_info::TransactionAccountStateInfo, + }, transaction_batch::TransactionBatch, }, byteorder::{ByteOrder, LittleEndian}, @@ -230,7 +233,6 @@ mod serde_snapshot; mod 
sysvar_cache; #[cfg(test)] pub(crate) mod tests; -mod transaction_account_state_info; pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0; @@ -4862,8 +4864,11 @@ impl Bank { #[cfg(debug_assertions)] transaction_context.set_signature(tx.signature()); - let pre_account_state_info = - self.get_transaction_account_state_info(&transaction_context, tx.message()); + let pre_account_state_info = TransactionAccountStateInfo::new( + &self.rent_collector.rent, + &transaction_context, + tx.message(), + ); let log_collector = if enable_log_recording { match log_messages_bytes_limit { @@ -4908,9 +4913,12 @@ impl Bank { let mut status = process_result .and_then(|info| { - let post_account_state_info = - self.get_transaction_account_state_info(&transaction_context, tx.message()); - self.verify_transaction_account_state_changes( + let post_account_state_info = TransactionAccountStateInfo::new( + &self.rent_collector.rent, + &transaction_context, + tx.message(), + ); + TransactionAccountStateInfo::verify_changes( &pre_account_state_info, &post_account_state_info, &transaction_context, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 337556246f2806..e19eaa9aca96ad 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11014,8 +11014,12 @@ fn test_rent_state_list_len() { ); assert_eq!( - bank.get_transaction_account_state_info(&transaction_context, sanitized_tx.message()) - .len(), + TransactionAccountStateInfo::new( + &bank.rent_collector.rent, + &transaction_context, + sanitized_tx.message() + ) + .len(), num_accounts, ); } diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs index a863d370802e0e..d026b8f3abb26e 100644 --- a/runtime/src/svm/mod.rs +++ b/runtime/src/svm/mod.rs @@ -1,2 +1,3 @@ pub mod account_loader; pub mod account_rent_state; +pub mod transaction_account_state_info; diff --git a/runtime/src/bank/transaction_account_state_info.rs b/runtime/src/svm/transaction_account_state_info.rs similarity index 75% rename from runtime/src/bank/transaction_account_state_info.rs rename to runtime/src/svm/transaction_account_state_info.rs index 259cd5142cf3ec..48a6a63994e341 100644 --- a/runtime/src/bank/transaction_account_state_info.rs +++ b/runtime/src/svm/transaction_account_state_info.rs @@ -1,9 +1,10 @@ use { - crate::{bank::Bank, svm::account_rent_state::RentState}, + crate::svm::account_rent_state::RentState, solana_sdk::{ account::ReadableAccount, message::SanitizedMessage, native_loader, + rent::Rent, transaction::Result, transaction_context::{IndexOfAccount, TransactionContext}, }, @@ -13,12 +14,12 @@ pub(crate) struct TransactionAccountStateInfo { rent_state: Option, // None: readonly account } -impl Bank { - pub(crate) fn get_transaction_account_state_info( - &self, +impl TransactionAccountStateInfo { + pub(crate) fn new( + rent: &Rent, transaction_context: &TransactionContext, message: &SanitizedMessage, - ) -> Vec { + ) -> Vec { (0..message.account_keys().len()) .map(|i| { let rent_state = if message.is_writable(i) { @@ -31,10 +32,7 @@ impl Bank { // balances; however they will never be loaded as writable debug_assert!(!native_loader::check_id(account.owner())); - Some(RentState::from_account( - &account, - &self.rent_collector().rent, - )) + Some(RentState::from_account(&account, rent)) } else { None }; @@ -46,15 +44,14 @@ impl Bank { } else { None }; - TransactionAccountStateInfo { rent_state } + Self { rent_state } }) .collect() } - pub(crate) fn verify_transaction_account_state_changes( - &self, - pre_state_infos: 
&[TransactionAccountStateInfo], - post_state_infos: &[TransactionAccountStateInfo], + pub(crate) fn verify_changes( + pre_state_infos: &[Self], + post_state_infos: &[Self], transaction_context: &TransactionContext, ) -> Result<()> { for (i, (pre_state_info, post_state_info)) in From c99427eb9e12f61757ddd0d1da6d6870a8a628fe Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Sat, 27 Jan 2024 11:10:45 -0800 Subject: [PATCH 082/401] solana-program: only decode prior_voters if needed (#34972) --- sdk/program/src/serialize_utils/cursor.rs | 15 +++++ sdk/program/src/vote/state/mod.rs | 20 ++++-- .../src/vote/state/vote_state_deserialize.rs | 63 +++++++++++-------- 3 files changed, 69 insertions(+), 29 deletions(-) diff --git a/sdk/program/src/serialize_utils/cursor.rs b/sdk/program/src/serialize_utils/cursor.rs index 0066737382ab29..9d33d1e484b53a 100644 --- a/sdk/program/src/serialize_utils/cursor.rs +++ b/sdk/program/src/serialize_utils/cursor.rs @@ -61,6 +61,15 @@ pub(crate) fn read_pubkey<T: AsRef<[u8]>>( Ok(Pubkey::from(buf)) } +pub(crate) fn read_bool<T: AsRef<[u8]>>(cursor: &mut Cursor<T>) -> Result<bool, InstructionError> { + let byte = read_u8(cursor)?; + match byte { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(InstructionError::InvalidAccountData), + } +} + #[cfg(test)] mod test { use {super::*, rand::Rng, std::fmt::Debug}; @@ -115,6 +124,12 @@ mod test { } } + #[test] + fn test_read_bool() { + test_read(read_bool, false); + test_read(read_bool, true); + } + fn test_read<T: PartialEq + Debug>( reader: fn(&mut Cursor<Vec<u8>>) -> Result<T, InstructionError>, test_value: T, diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 9eddce4d948cab..8cfcd0ef19d9e8 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -18,7 +18,7 @@ use { sysvar::clock::Clock, vote::{authorized_voters::AuthorizedVoters, error::VoteError}, }, - bincode::{serialize_into, ErrorKind}, + bincode::{serialize_into, serialized_size, ErrorKind}, serde_derive::{Deserialize, Serialize}, std::{collections::VecDeque, fmt::Debug, io::Cursor}, }; @@ -399,6 +399,12 @@ impl VoteState { input: &[u8], vote_state: &mut VoteState, ) -> Result<(), InstructionError> { + let minimum_size = + serialized_size(vote_state).map_err(|_| InstructionError::InvalidAccountData)?; + if (input.len() as u64) < minimum_size { + return Err(InstructionError::InvalidAccountData); + } + let mut cursor = Cursor::new(input); let variant = read_u32(&mut cursor)?; @@ -410,7 +416,13 @@ impl VoteState { // Current.
the only difference from V1_14_11 is the addition of a slot-latency to each vote 2 => deserialize_vote_state_into(&mut cursor, vote_state, true), _ => Err(InstructionError::InvalidAccountData), + }?; + + if cursor.position() > input.len() as u64 { + return Err(InstructionError::InvalidAccountData); } + + Ok(()) } pub fn serialize( @@ -886,7 +898,7 @@ mod tests { // variant // provide 4x the minimum struct size in bytes to ensure we typically touch every field - let struct_bytes_x4 = std::mem::size_of::<VoteState>() * 4; + let struct_bytes_x4 = std::mem::size_of::<VoteStateVersions>() * 4; for _ in 0..1000 { let raw_data: Vec<u8> = (0..struct_bytes_x4).map(|_| rand::random::<u8>()).collect(); let mut unstructured = Unstructured::new(&raw_data); @@ -911,7 +923,7 @@ mod tests { assert_eq!(e, InstructionError::InvalidAccountData); // variant - let serialized_len_x4 = bincode::serialized_size(&test_vote_state).unwrap() * 4; + let serialized_len_x4 = serialized_size(&test_vote_state).unwrap() * 4; let mut rng = rand::thread_rng(); for _ in 0..1000 { let raw_data_length = rng.gen_range(1..serialized_len_x4); @@ -1262,7 +1274,7 @@ fn test_vote_state_size_of() { let vote_state = VoteState::get_max_sized_vote_state(); let vote_state = VoteStateVersions::new_current(vote_state); - let size = bincode::serialized_size(&vote_state).unwrap(); + let size = serialized_size(&vote_state).unwrap(); assert_eq!(VoteState::size_of() as u64, size); } diff --git a/sdk/program/src/vote/state/vote_state_deserialize.rs b/sdk/program/src/vote/state/vote_state_deserialize.rs index b93f1c7442d10f..b457395ccbd38a 100644 --- a/sdk/program/src/vote/state/vote_state_deserialize.rs +++ b/sdk/program/src/vote/state/vote_state_deserialize.rs @@ -5,6 +5,7 @@ use { serialize_utils::cursor::*, vote::state::{BlockTimestamp, LandedVote, Lockout, VoteState, MAX_ITEMS}, }, + bincode::serialized_size, std::io::Cursor, }; @@ -66,33 +67,45 @@ fn read_prior_voters_into<T: AsRef<[u8]>>( cursor: &mut Cursor<T>, vote_state: &mut VoteState, ) -> Result<(), InstructionError> { - let mut encountered_null_voter = false; - for i in 0..MAX_ITEMS { - let prior_voter = read_pubkey(cursor)?; - let from_epoch = read_u64(cursor)?; - let until_epoch = read_u64(cursor)?; - let item = (prior_voter, from_epoch, until_epoch); - - if item == (Pubkey::default(), 0, 0) { - encountered_null_voter = true; - } else if encountered_null_voter { - // `prior_voters` should never be sparse - return Err(InstructionError::InvalidAccountData); - } else { - vote_state.prior_voters.buf[i] = item; + // record our position at the start of the struct + let prior_voters_position = cursor.position(); + + // `serialized_size()` must be used over `mem::size_of()` because of alignment + let is_empty_position = serialized_size(&vote_state.prior_voters) + .ok() + .and_then(|v| v.checked_add(prior_voters_position)) + .and_then(|v| v.checked_sub(1)) + .ok_or(InstructionError::InvalidAccountData)?; + + // move to the end, to check if we need to parse the data + cursor.set_position(is_empty_position); + + // if empty, we already read past the end of this struct and need to do no further work + // otherwise we go back to the start and proceed to decode the data + let is_empty = read_bool(cursor)?; + if !is_empty { + cursor.set_position(prior_voters_position); + + let mut encountered_null_voter = false; + for i in 0..MAX_ITEMS { + let prior_voter = read_pubkey(cursor)?; + let from_epoch = read_u64(cursor)?; + let until_epoch = read_u64(cursor)?; + let item = (prior_voter, from_epoch, until_epoch); + + if item ==
(Pubkey::default(), 0, 0) { + encountered_null_voter = true; + } else if encountered_null_voter { + // `prior_voters` should never be sparse + return Err(InstructionError::InvalidAccountData); + } else { + vote_state.prior_voters.buf[i] = item; + } } - } - - let idx = read_u64(cursor)? as usize; - vote_state.prior_voters.idx = idx; - let is_empty_byte = read_u8(cursor)?; - let is_empty = match is_empty_byte { - 0 => false, - 1 => true, - _ => return Err(InstructionError::InvalidAccountData), - }; - vote_state.prior_voters.is_empty = is_empty; + vote_state.prior_voters.idx = read_u64(cursor)? as usize; + vote_state.prior_voters.is_empty = read_bool(cursor)?; + } Ok(()) } From 52d3c021ef5149962920d6f88ff7ce40b4ee08ac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Jan 2024 13:02:29 +0800 Subject: [PATCH 083/401] build(deps): bump chrono from 0.4.31 to 0.4.32 (#34898) * build(deps): bump chrono from 0.4.31 to 0.4.32 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.31 to 0.4.32. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.31...v0.4.32) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c98ae60cf9cf14..08f507688d4ea1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1070,9 +1070,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1080,7 +1080,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.48.0", + "windows-targets 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2aa92016e9a22f..cf65165e331347 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -170,7 +170,7 @@ bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" cc = "1.0.83" -chrono = { version = "0.4.31", default-features = false } +chrono = { version = "0.4.32", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" console = "0.15.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c1d80f5a3b1ad2..cba86e74c22a29 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -946,9 +946,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" dependencies = [ "android-tzdata", "iana-time-zone", @@ -956,7 +956,7 @@ dependencies = [ "num-traits", "serde", 
"wasm-bindgen", - "windows-targets 0.48.0", + "windows-targets 0.52.0", ] [[package]] From b9815da6ccdeed44477ff7517b9d83db55e7a4e8 Mon Sep 17 00:00:00 2001 From: Brooks Date: Sun, 28 Jan 2024 17:32:09 -0500 Subject: [PATCH 084/401] Fixes typo (#34990) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 17617dc8755ed1..4d37dcba060705 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7156,7 +7156,7 @@ impl AccountsDb { // update each atomic stat value once. // There are approximately 173 items in the cache files list, // so should be very fast to iterate and compute. - // (173 cache files == 432,000 slots / 2,5000 slots-per-cache-file) + // (173 cache files == 432,000 slots / 2,500 slots-per-cache-file) let mut hits = 0; let mut misses = 0; for cache_file in &cache_files { From 8fde8d26c718edb028a1c77de6471a6e52a7bf03 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Sun, 28 Jan 2024 16:17:46 -0800 Subject: [PATCH 085/401] don't sign X.509 certs (#34896) This get rid of 3rd party components rcgen in the path of private key access to make the code more secure. --- Cargo.lock | 69 ---------- Cargo.toml | 2 - client/src/connection_cache.rs | 10 +- connection-cache/Cargo.toml | 1 - connection-cache/src/connection_cache.rs | 3 - core/Cargo.toml | 1 - core/src/repair/quic_endpoint.rs | 14 +- core/src/tpu.rs | 12 +- core/src/validator.rs | 8 -- programs/sbf/Cargo.lock | 69 ---------- quic-client/Cargo.toml | 1 - quic-client/src/lib.rs | 37 ++---- quic-client/src/nonblocking/quic_client.rs | 6 +- quic-client/tests/quic_client.rs | 27 ++-- streamer/Cargo.toml | 2 - streamer/src/nonblocking/quic.rs | 17 +-- streamer/src/quic.rs | 21 +-- streamer/src/tls_certificates.rs | 145 +++++++++++++-------- turbine/Cargo.toml | 1 - turbine/src/quic_endpoint.rs | 14 +- 20 files changed, 128 insertions(+), 332 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08f507688d4ea1..fc5fcebdd70b53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -580,12 +580,6 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" -[[package]] -name = "base64ct" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874f8444adcb4952a8bc51305c8be95c8ec8237bb0d2e78d2e039f771f8828a0" - [[package]] name = "bincode" version = "1.3.3" @@ -1273,12 +1267,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - [[package]] name = "const_format" version = "0.2.32" @@ -1581,15 +1569,6 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid", -] - [[package]] name = "der-parser" version = "8.1.0" @@ -3900,17 +3879,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" 
-version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der", - "spki", - "zeroize", -] - [[package]] name = "pkg-config" version = "0.3.22" @@ -4419,18 +4387,6 @@ dependencies = [ name = "rbpf-cli" version = "1.18.0" -[[package]] -name = "rcgen" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" -dependencies = [ - "pem", - "ring 0.16.20", - "time", - "yasna", -] - [[package]] name = "rdrand" version = "0.4.0" @@ -5895,7 +5851,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rcgen", "solana-logger", "solana-measure", "solana-metrics", @@ -5934,7 +5889,6 @@ dependencies = [ "rand_chacha 0.3.1", "raptorq", "rayon", - "rcgen", "rolling-file", "rustc_version 0.4.0", "rustls", @@ -6833,7 +6787,6 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rcgen", "rustls", "solana-connection-cache", "solana-logger", @@ -7325,11 +7278,9 @@ dependencies = [ "nix 0.26.4", "pem", "percentage", - "pkcs8", "quinn", "quinn-proto", "rand 0.8.5", - "rcgen", "rustls", "solana-logger", "solana-metrics", @@ -7519,7 +7470,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rcgen", "rustls", "solana-entry", "solana-gossip", @@ -7851,16 +7801,6 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "spl-associated-token-account" version = "2.3.0" @@ -9389,15 +9329,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "yasna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346d34a236c9d3e5f3b9b74563f238f955bbd05fa0b8b4efa53c130c43982f4c" -dependencies = [ - "time", -] - [[package]] name = "zerocopy" version = "0.7.31" diff --git a/Cargo.toml b/Cargo.toml index cf65165e331347..e1177f23f60c80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -263,7 +263,6 @@ pbkdf2 = { version = "0.11.0", default-features = false } pem = "1.1.1" percentage = "0.1.0" pickledb = { version = "0.5.1", default-features = false } -pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" prio-graph = "0.2.1" @@ -282,7 +281,6 @@ rand = "0.8.5" rand_chacha = "0.3.1" raptorq = "1.8.0" rayon = "1.8.1" -rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" regex = "1.10.3" reqwest = { version = "0.11.23", default-features = false } diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 216687aecf916c..b53b66b155e719 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -91,9 +91,7 @@ impl ConnectionCache { config.update_client_endpoint(client_endpoint); } if let Some(cert_info) = cert_info { - config - .update_client_certificate(cert_info.0, cert_info.1) - .unwrap(); + config.update_client_certificate(cert_info.0, cert_info.1); } if let Some(stake_info) = stake_info { config.set_staked_nodes(stake_info.0, stake_info.1); @@ -241,19 +239,18 @@ mod tests { }, }; - fn server_args() -> (UdpSocket, Arc, Keypair, IpAddr) { + fn server_args() -> (UdpSocket, Arc, Keypair) { ( UdpSocket::bind("127.0.0.1:0").unwrap(), 
Arc::new(AtomicBool::new(false)), Keypair::new(), - "127.0.0.1".parse().unwrap(), ) } #[test] fn test_connection_with_specified_client_endpoint() { // Start a response receiver: - let (response_recv_socket, response_recv_exit, keypair2, response_recv_ip) = server_args(); + let (response_recv_socket, response_recv_exit, keypair2) = server_args(); let (sender2, _receiver2) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); @@ -266,7 +263,6 @@ mod tests { "quic_streamer_test", response_recv_socket, &keypair2, - response_recv_ip, sender2, response_recv_exit.clone(), 1, diff --git a/connection-cache/Cargo.toml b/connection-cache/Cargo.toml index acf52f05f9ba5d..28577bac0c066e 100644 --- a/connection-cache/Cargo.toml +++ b/connection-cache/Cargo.toml @@ -19,7 +19,6 @@ indicatif = { workspace = true, optional = true } log = { workspace = true } rand = { workspace = true } rayon = { workspace = true } -rcgen = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-sdk = { workspace = true } diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index eed6991abf1b5a..5e44b98d4c911d 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -412,9 +412,6 @@ pub enum ConnectionPoolError { #[derive(Error, Debug)] pub enum ClientError { - #[error("Certificate error: {0}")] - CertificateError(#[from] rcgen::RcgenError), - #[error("IO error: {0:?}")] IoError(#[from] std::io::Error), } diff --git a/core/Cargo.toml b/core/Cargo.toml index bc1bd4549f6751..fa6c7cd2052aea 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -36,7 +36,6 @@ quinn = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rayon = { workspace = true } -rcgen = { workspace = true } rolling-file = { workspace = true } rustls = { workspace = true } serde = { workspace = true } diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 89f9de78491101..5b0fd9c1490cc6 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -9,20 +9,17 @@ use { EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, ServerConfig, TokioRuntime, TransportConfig, VarInt, WriteError, }, - rcgen::RcgenError, rustls::{Certificate, PrivateKey}, serde_bytes::ByteBuf, solana_quic_client::nonblocking::quic_client::SkipServerVerification, solana_runtime::bank_forks::BankForks, solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Keypair}, - solana_streamer::{ - quic::SkipClientVerification, tls_certificates::new_self_signed_tls_certificate, - }, + solana_streamer::{quic::SkipClientVerification, tls_certificates::new_dummy_x509_certificate}, std::{ cmp::Reverse, collections::{hash_map::Entry, HashMap}, io::{Cursor, Error as IoError}, - net::{IpAddr, SocketAddr, UdpSocket}, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, @@ -88,8 +85,6 @@ pub struct RemoteRequest { #[derive(Error, Debug)] #[allow(clippy::enum_variant_names)] pub(crate) enum Error { - #[error(transparent)] - CertificateError(#[from] RcgenError), #[error("Channel Send Error")] ChannelSendError, #[error(transparent)] @@ -123,11 +118,10 @@ pub(crate) fn new_quic_endpoint( runtime: &tokio::runtime::Handle, keypair: &Keypair, socket: UdpSocket, - address: IpAddr, remote_request_sender: Sender, bank_forks: Arc>, ) -> Result<(Endpoint, AsyncSender, AsyncTryJoinHandle), 
Error> { - let (cert, key) = new_self_signed_tls_certificate(keypair, address)?; + let (cert, key) = new_dummy_x509_certificate(keypair); let server_config = new_server_config(cert.clone(), key.clone())?; let client_config = new_client_config(cert, key)?; let mut endpoint = { @@ -809,7 +803,6 @@ async fn report_metrics_task(name: &'static str, stats: Arc) { fn record_error(err: &Error, stats: &RepairQuicStats) { match err { - Error::CertificateError(_) => (), Error::ChannelSendError => (), Error::ConnectError(ConnectError::EndpointStopping) => { add_metric!(stats.connect_error_other) @@ -1065,7 +1058,6 @@ mod tests { runtime.handle(), keypair, socket, - IpAddr::V4(Ipv4Addr::LOCALHOST), remote_request_sender, bank_forks.clone(), ) diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 0456a33a8d91f4..548b299148d935 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -19,7 +19,7 @@ use { }, bytes::Bytes, crossbeam_channel::{unbounded, Receiver}, - solana_client::connection_cache::{ConnectionCache, Protocol}, + solana_client::connection_cache::ConnectionCache, solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ blockstore::Blockstore, blockstore_processor::TransactionStatusSender, @@ -156,11 +156,6 @@ impl Tpu { "quic_streamer_tpu", transactions_quic_sockets, keypair, - cluster_info - .my_contact_info() - .tpu(Protocol::QUIC) - .expect("Operator must spin up node with valid (QUIC) TPU address") - .ip(), packet_sender, exit.clone(), MAX_QUIC_CONNECTIONS_PER_PEER, @@ -180,11 +175,6 @@ impl Tpu { "quic_streamer_tpu_forwards", transactions_forwards_quic_sockets, keypair, - cluster_info - .my_contact_info() - .tpu_forwards(Protocol::QUIC) - .expect("Operator must spin up node with valid (QUIC) TPU-forwards address") - .ip(), forwarded_packet_sender, exit.clone(), MAX_QUIC_CONNECTIONS_PER_PEER, diff --git a/core/src/validator.rs b/core/src/validator.rs index 2b6a807ac7c110..c8a3af3d02583a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1193,10 +1193,6 @@ impl Validator { .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), &identity_keypair, node.sockets.tvu_quic, - node.info - .tvu(Protocol::QUIC) - .map_err(|err| format!("Invalid QUIC TVU address: {err:?}"))? - .ip(), turbine_quic_endpoint_sender, bank_forks.clone(), ) @@ -1226,10 +1222,6 @@ impl Validator { .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), &identity_keypair, node.sockets.serve_repair_quic, - node.info - .serve_repair(Protocol::QUIC) - .map_err(|err| format!("Invalid QUIC serve-repair address: {err:?}"))? 
- .ip(), repair_quic_endpoint_sender, bank_forks.clone(), ) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cba86e74c22a29..d441666b28835f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -546,12 +546,6 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" -[[package]] -name = "base64ct" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71acf5509fc522cce1b100ac0121c635129bfd4d91cdf036bcc9b9935f97ccf5" - [[package]] name = "bincode" version = "1.3.3" @@ -1074,12 +1068,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - [[package]] name = "constant_time_eq" version = "0.3.0" @@ -1284,15 +1272,6 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid", -] - [[package]] name = "der-parser" version = "8.1.0" @@ -3497,17 +3476,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der", - "spki", - "zeroize", -] - [[package]] name = "pkg-config" version = "0.3.17" @@ -3908,18 +3876,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rcgen" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" -dependencies = [ - "pem", - "ring 0.16.20", - "time", - "yasna", -] - [[package]] name = "redox_syscall" version = "0.1.56" @@ -4953,7 +4909,6 @@ dependencies = [ "log", "rand 0.8.5", "rayon", - "rcgen", "solana-measure", "solana-metrics", "solana-sdk", @@ -4987,7 +4942,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rcgen", "rolling-file", "rustc_version", "rustls", @@ -5564,7 +5518,6 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rcgen", "rustls", "solana-connection-cache", "solana-measure", @@ -6369,11 +6322,9 @@ dependencies = [ "nix", "pem", "percentage", - "pkcs8", "quinn", "quinn-proto", "rand 0.8.5", - "rcgen", "rustls", "solana-metrics", "solana-perf", @@ -6498,7 +6449,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rcgen", "rustls", "solana-entry", "solana-gossip", @@ -6756,16 +6706,6 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "spl-associated-token-account" version = "2.3.0" @@ -8198,15 +8138,6 @@ dependencies = [ "libc", ] -[[package]] 
-name = "yasna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346d34a236c9d3e5f3b9b74563f238f955bbd05fa0b8b4efa53c130c43982f4c" -dependencies = [ - "time", -] - [[package]] name = "zerocopy" version = "0.7.31" diff --git a/quic-client/Cargo.toml b/quic-client/Cargo.toml index 8f6fcb070c86b4..811b5b8a80a961 100644 --- a/quic-client/Cargo.toml +++ b/quic-client/Cargo.toml @@ -18,7 +18,6 @@ lazy_static = { workspace = true } log = { workspace = true } quinn = { workspace = true } quinn-proto = { workspace = true } -rcgen = { workspace = true } rustls = { workspace = true, features = ["dangerous_configuration"] } solana-connection-cache = { workspace = true } solana-measure = { workspace = true } diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs index 6bd9726cbfb3ea..86ddd154fc8b13 100644 --- a/quic-client/src/lib.rs +++ b/quic-client/src/lib.rs @@ -15,7 +15,6 @@ use { quic_client::QuicClientConnection as BlockingQuicClientConnection, }, quinn::Endpoint, - rcgen::RcgenError, solana_connection_cache::{ connection_cache::{ BaseClientConnection, ClientError, ConnectionCache, ConnectionManager, ConnectionPool, @@ -30,21 +29,14 @@ use { solana_streamer::{ nonblocking::quic::{compute_max_allowed_uni_streams, ConnectionPeerType}, streamer::StakedNodes, - tls_certificates::new_self_signed_tls_certificate, + tls_certificates::new_dummy_x509_certificate, }, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{IpAddr, SocketAddr}, sync::{Arc, RwLock}, }, - thiserror::Error, }; -#[derive(Error, Debug)] -pub enum QuicClientError { - #[error("Certificate error: {0}")] - CertificateError(#[from] RcgenError), -} - pub struct QuicPool { connections: Vec>, endpoint: Arc, @@ -93,7 +85,6 @@ pub struct QuicConfig { // The optional specified endpoint for the quic based client connections // If not specified, the connection cache will create as needed. 
client_endpoint: Option, - addr: IpAddr, } impl Clone for QuicConfig { @@ -104,15 +95,13 @@ impl Clone for QuicConfig { maybe_staked_nodes: self.maybe_staked_nodes.clone(), maybe_client_pubkey: self.maybe_client_pubkey, client_endpoint: self.client_endpoint.clone(), - addr: self.addr, } } } impl NewConnectionConfig for QuicConfig { fn new() -> Result { - let addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let (cert, priv_key) = new_self_signed_tls_certificate(&Keypair::new(), addr)?; + let (cert, priv_key) = new_dummy_x509_certificate(&Keypair::new()); Ok(Self { client_certificate: RwLock::new(Arc::new(QuicClientCertificate { certificate: cert, @@ -121,7 +110,6 @@ impl NewConnectionConfig for QuicConfig { maybe_staked_nodes: None, maybe_client_pubkey: None, client_endpoint: None, - addr, }) } } @@ -150,13 +138,8 @@ impl QuicConfig { compute_max_allowed_uni_streams(client_type, total_stake) } - pub fn update_client_certificate( - &mut self, - keypair: &Keypair, - ipaddr: IpAddr, - ) -> Result<(), RcgenError> { - let (cert, priv_key) = new_self_signed_tls_certificate(keypair, ipaddr)?; - self.addr = ipaddr; + pub fn update_client_certificate(&mut self, keypair: &Keypair, _ipaddr: IpAddr) { + let (cert, priv_key) = new_dummy_x509_certificate(keypair); let mut cert_guard = self.client_certificate.write().unwrap(); @@ -164,11 +147,10 @@ impl QuicConfig { certificate: cert, key: priv_key, }); - Ok(()) } - pub fn update_keypair(&self, keypair: &Keypair) -> Result<(), RcgenError> { - let (cert, priv_key) = new_self_signed_tls_certificate(keypair, self.addr)?; + pub fn update_keypair(&self, keypair: &Keypair) { + let (cert, priv_key) = new_dummy_x509_certificate(keypair); let mut cert_guard = self.client_certificate.write().unwrap(); @@ -176,7 +158,6 @@ impl QuicConfig { certificate: cert, key: priv_key, }); - Ok(()) } pub fn set_staked_nodes( @@ -243,7 +224,7 @@ impl ConnectionManager for QuicConnectionManager { } fn update_key(&self, key: &Keypair) -> Result<(), Box> { - self.connection_config.update_keypair(key)?; + self.connection_config.update_keypair(key); Ok(()) } } @@ -264,7 +245,7 @@ pub fn new_quic_connection_cache( connection_pool_size: usize, ) -> Result { let mut config = QuicConfig::new()?; - config.update_client_certificate(keypair, ipaddr)?; + config.update_client_certificate(keypair, ipaddr); config.set_staked_nodes(staked_nodes, &keypair.pubkey()); let connection_manager = QuicConnectionManager::new_with_connection_config(config); ConnectionCache::new(name, connection_manager, connection_pool_size) diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index e2c861df48fa57..5817a676f3b760 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -27,7 +27,7 @@ use { transport::Result as TransportResult, }, solana_streamer::{ - nonblocking::quic::ALPN_TPU_PROTOCOL_ID, tls_certificates::new_self_signed_tls_certificate, + nonblocking::quic::ALPN_TPU_PROTOCOL_ID, tls_certificates::new_dummy_x509_certificate, }, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -148,9 +148,7 @@ impl QuicLazyInitializedEndpoint { impl Default for QuicLazyInitializedEndpoint { fn default() -> Self { - let (cert, priv_key) = - new_self_signed_tls_certificate(&Keypair::new(), IpAddr::V4(Ipv4Addr::UNSPECIFIED)) - .expect("Failed to create QUIC client certificate"); + let (cert, priv_key) = new_dummy_x509_certificate(&Keypair::new()); Self::new( Arc::new(QuicClientCertificate { certificate: cert, diff --git 
a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index 9f18acd5c75772..658ee6a57d672d 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -11,10 +11,10 @@ mod tests { solana_sdk::{net::DEFAULT_TPU_COALESCE, packet::PACKET_DATA_SIZE, signature::Keypair}, solana_streamer::{ nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, quic::SpawnServerResult, - streamer::StakedNodes, tls_certificates::new_self_signed_tls_certificate, + streamer::StakedNodes, tls_certificates::new_dummy_x509_certificate, }, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -49,12 +49,11 @@ mod tests { assert!(total_packets > 0); } - fn server_args() -> (UdpSocket, Arc, Keypair, IpAddr) { + fn server_args() -> (UdpSocket, Arc, Keypair) { ( UdpSocket::bind("127.0.0.1:0").unwrap(), Arc::new(AtomicBool::new(false)), Keypair::new(), - "127.0.0.1".parse().unwrap(), ) } @@ -67,7 +66,7 @@ mod tests { solana_logger::setup(); let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (s, exit, keypair, ip) = server_args(); + let (s, exit, keypair) = server_args(); let SpawnServerResult { endpoint: _, thread: t, @@ -76,7 +75,6 @@ mod tests { "quic_streamer_test", s.try_clone().unwrap(), &keypair, - ip, sender, exit.clone(), 1, @@ -151,12 +149,11 @@ mod tests { solana_logger::setup(); let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (s, exit, keypair, ip) = server_args(); + let (s, exit, keypair) = server_args(); let (_, _, t) = solana_streamer::nonblocking::quic::spawn_server( "quic_streamer_test", s.try_clone().unwrap(), &keypair, - ip, sender, exit.clone(), 1, @@ -209,7 +206,7 @@ mod tests { // Request Receiver let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (request_recv_socket, request_recv_exit, keypair, request_recv_ip) = server_args(); + let (request_recv_socket, request_recv_exit, keypair) = server_args(); let SpawnServerResult { endpoint: request_recv_endpoint, thread: request_recv_thread, @@ -218,7 +215,6 @@ mod tests { "quic_streamer_test", request_recv_socket.try_clone().unwrap(), &keypair, - request_recv_ip, sender, request_recv_exit.clone(), 1, @@ -232,7 +228,7 @@ mod tests { drop(request_recv_endpoint); // Response Receiver: - let (response_recv_socket, response_recv_exit, keypair2, response_recv_ip) = server_args(); + let (response_recv_socket, response_recv_exit, keypair2) = server_args(); let (sender2, receiver2) = unbounded(); let addr = response_recv_socket.local_addr().unwrap().ip(); @@ -246,7 +242,6 @@ mod tests { "quic_streamer_test", response_recv_socket, &keypair2, - response_recv_ip, sender2, response_recv_exit.clone(), 1, @@ -264,9 +259,7 @@ mod tests { let tpu_addr = SocketAddr::new(addr, port); let connection_cache_stats = Arc::new(ConnectionCacheStats::default()); - let (cert, priv_key) = - new_self_signed_tls_certificate(&Keypair::new(), IpAddr::V4(Ipv4Addr::UNSPECIFIED)) - .expect("Failed to initialize QUIC client certificates"); + let (cert, priv_key) = new_dummy_x509_certificate(&Keypair::new()); let client_certificate = Arc::new(QuicClientCertificate { certificate: cert, key: priv_key, @@ -286,9 +279,7 @@ mod tests { info!("Received requests!"); // Response sender - let (cert, priv_key) = - new_self_signed_tls_certificate(&Keypair::new(), IpAddr::V4(Ipv4Addr::LOCALHOST)) - 
.expect("Failed to initialize QUIC client certificates"); + let (cert, priv_key) = new_dummy_x509_certificate(&Keypair::new()); let client_certificate2 = Arc::new(QuicClientCertificate { certificate: cert, diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 21ae96d11fd9a4..8e1eb12dff1d42 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -22,11 +22,9 @@ log = { workspace = true } nix = { workspace = true } pem = { workspace = true } percentage = { workspace = true } -pkcs8 = { workspace = true, features = ["alloc"] } quinn = { workspace = true } quinn-proto = { workspace = true } rand = { workspace = true } -rcgen = { workspace = true } rustls = { workspace = true, features = ["dangerous_configuration"] } solana-metrics = { workspace = true } solana-perf = { workspace = true } diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index bd0c352397eb52..f6f2357c7702e4 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -100,7 +100,6 @@ pub fn spawn_server( name: &'static str, sock: UdpSocket, keypair: &Keypair, - gossip_host: IpAddr, packet_sender: Sender, exit: Arc, max_connections_per_peer: usize, @@ -111,7 +110,7 @@ pub fn spawn_server( coalesce: Duration, ) -> Result<(Endpoint, Arc, JoinHandle<()>), QuicServerError> { info!("Start {name} quic server on {sock:?}"); - let (config, _cert) = configure_server(keypair, gossip_host)?; + let (config, _cert) = configure_server(keypair)?; let endpoint = Endpoint::new( EndpointConfig::default(), @@ -1145,7 +1144,7 @@ pub mod test { crate::{ nonblocking::quic::compute_max_allowed_uni_streams, quic::{MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, - tls_certificates::new_self_signed_tls_certificate, + tls_certificates::new_dummy_x509_certificate, }, assert_matches::assert_matches, async_channel::unbounded as async_unbounded, @@ -1157,7 +1156,7 @@ pub mod test { signature::Keypair, signer::Signer, }, - std::{collections::HashMap, net::Ipv4Addr}, + std::collections::HashMap, tokio::time::sleep, }; @@ -1184,9 +1183,7 @@ pub mod test { } pub fn get_client_config(keypair: &Keypair) -> ClientConfig { - let ipaddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let (cert, key) = new_self_signed_tls_certificate(keypair, ipaddr) - .expect("Failed to generate client certificate"); + let (cert, key) = new_dummy_x509_certificate(keypair); let mut crypto = rustls::ClientConfig::builder() .with_safe_defaults() @@ -1222,14 +1219,12 @@ pub mod test { let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); - let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(option_staked_nodes.unwrap_or_default())); let (_, stats, t) = spawn_server( "quic_streamer_test", s, &keypair, - ip, sender, exit.clone(), max_connections_per_peer, @@ -1658,14 +1653,12 @@ pub mod test { let exit = Arc::new(AtomicBool::new(false)); let (sender, _) = unbounded(); let keypair = Keypair::new(); - let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let (_, _, t) = spawn_server( "quic_streamer_test", s, &keypair, - ip, sender, exit.clone(), 1, @@ -1689,14 +1682,12 @@ pub mod test { let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); - let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = 
Arc::new(RwLock::new(StakedNodes::default())); let (_, stats, t) = spawn_server( "quic_streamer_test", s, &keypair, - ip, sender, exit.clone(), 2, diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 617341fd5dbde8..69a75532b8ca68 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -1,7 +1,7 @@ use { crate::{ nonblocking::quic::ALPN_TPU_PROTOCOL_ID, streamer::StakedNodes, - tls_certificates::new_self_signed_tls_certificate, + tls_certificates::new_dummy_x509_certificate, }, crossbeam_channel::Sender, pem::Pem, @@ -14,7 +14,7 @@ use { signature::Keypair, }, std::{ - net::{IpAddr, UdpSocket}, + net::UdpSocket, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, RwLock, @@ -61,9 +61,8 @@ impl rustls::server::ClientCertVerifier for SkipClientVerification { #[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527 pub(crate) fn configure_server( identity_keypair: &Keypair, - gossip_host: IpAddr, ) -> Result<(ServerConfig, String), QuicServerError> { - let (cert, priv_key) = new_self_signed_tls_certificate(identity_keypair, gossip_host)?; + let (cert, priv_key) = new_dummy_x509_certificate(identity_keypair); let cert_chain_pem_parts = vec![Pem { tag: "CERTIFICATE".to_string(), contents: cert.0.clone(), @@ -113,20 +112,17 @@ fn rt() -> Runtime { pub enum QuicServerError { #[error("Endpoint creation failed: {0}")] EndpointFailed(std::io::Error), - #[error("Certificate error: {0}")] - CertificateError(#[from] rcgen::RcgenError), #[error("TLS error: {0}")] TlsError(#[from] rustls::Error), } pub struct EndpointKeyUpdater { endpoint: Endpoint, - gossip_host: IpAddr, } impl NotifyKeyUpdate for EndpointKeyUpdater { fn update_key(&self, key: &Keypair) -> Result<(), Box> { - let (config, _) = configure_server(key, self.gossip_host)?; + let (config, _) = configure_server(key)?; self.endpoint.set_server_config(Some(config)); Ok(()) } @@ -438,7 +434,6 @@ pub fn spawn_server( name: &'static str, sock: UdpSocket, keypair: &Keypair, - gossip_host: IpAddr, packet_sender: Sender, exit: Arc, max_connections_per_peer: usize, @@ -455,7 +450,6 @@ pub fn spawn_server( name, sock, keypair, - gossip_host, packet_sender, exit, max_connections_per_peer, @@ -476,7 +470,6 @@ pub fn spawn_server( .unwrap(); let updater = EndpointKeyUpdater { endpoint: endpoint.clone(), - gossip_host, }; Ok(SpawnServerResult { endpoint, @@ -505,7 +498,6 @@ mod test { let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); - let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let SpawnServerResult { @@ -516,7 +508,6 @@ mod test { "quic_streamer_test", s, &keypair, - ip, sender, exit.clone(), 1, @@ -565,7 +556,6 @@ mod test { let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); - let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let SpawnServerResult { @@ -576,7 +566,6 @@ mod test { "quic_streamer_test", s, &keypair, - ip, sender, exit.clone(), 2, @@ -612,7 +601,6 @@ mod test { let exit = Arc::new(AtomicBool::new(false)); let (sender, _) = unbounded(); let keypair = Keypair::new(); - let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let SpawnServerResult { @@ 
-623,7 +611,6 @@ mod test { "quic_streamer_test", s, &keypair, - ip, sender, exit.clone(), 1, diff --git a/streamer/src/tls_certificates.rs b/streamer/src/tls_certificates.rs index d4aaefcb2ba4f5..866f6155abe3f6 100644 --- a/streamer/src/tls_certificates.rs +++ b/streamer/src/tls_certificates.rs @@ -1,58 +1,99 @@ use { - pkcs8::{der::Document, AlgorithmIdentifier, ObjectIdentifier}, - rcgen::{CertificateParams, DistinguishedName, DnType, RcgenError, SanType}, - solana_sdk::{pubkey::Pubkey, signature::Keypair}, - std::net::IpAddr, + solana_sdk::{pubkey::Pubkey, signature::Keypair, signer::Signer}, x509_parser::{prelude::*, public_key::PublicKey}, }; -pub fn new_self_signed_tls_certificate( - keypair: &Keypair, - san: IpAddr, -) -> Result<(rustls::Certificate, rustls::PrivateKey), RcgenError> { - // TODO(terorie): Is it safe to sign the TLS cert with the identity private key? - - // Unfortunately, rcgen does not accept a "raw" Ed25519 key. +pub fn new_dummy_x509_certificate(keypair: &Keypair) -> (rustls::Certificate, rustls::PrivateKey) { + // Unfortunately, rustls does not accept a "raw" Ed25519 key. // We have to convert it to DER and pass it to the library. // Convert private key into PKCS#8 v1 object. // RFC 8410, Section 7: Private Key Format - // https://datatracker.ietf.org/doc/html/rfc8410#section- - - // from https://datatracker.ietf.org/doc/html/rfc8410#section-3 - const ED25519_IDENTIFIER: [u32; 4] = [1, 3, 101, 112]; - let mut private_key = Vec::::with_capacity(34); - private_key.extend_from_slice(&[0x04, 0x20]); // ASN.1 OCTET STRING - private_key.extend_from_slice(keypair.secret().as_bytes()); - let key_pkcs8 = pkcs8::PrivateKeyInfo { - algorithm: AlgorithmIdentifier { - oid: ObjectIdentifier::from_arcs(&ED25519_IDENTIFIER).expect("Failed to convert OID"), - parameters: None, - }, - private_key: &private_key, - public_key: None, - }; - let key_pkcs8_der = key_pkcs8 - .to_der() - .expect("Failed to convert keypair to DER") - .to_der(); + // https://www.rfc-editor.org/rfc/rfc8410#section-7 + // + // The hardcoded prefix decodes to the following ASN.1 structure: + // + // PrivateKeyInfo SEQUENCE (3 elem) + // version Version INTEGER 0 + // privateKeyAlgorithm AlgorithmIdentifier SEQUENCE (1 elem) + // algorithm OBJECT IDENTIFIER 1.3.101.112 curveEd25519 (EdDSA 25519 signature algorithm) + // privateKey PrivateKey OCTET STRING (34 byte) + const PKCS8_PREFIX: [u8; 16] = [ + 0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x04, 0x22, 0x04, + 0x20, + ]; + let mut key_pkcs8_der = Vec::::with_capacity(PKCS8_PREFIX.len() + 32); + key_pkcs8_der.extend_from_slice(&PKCS8_PREFIX); + key_pkcs8_der.extend_from_slice(keypair.secret().as_bytes()); - let rcgen_keypair = rcgen::KeyPair::from_der(&key_pkcs8_der)?; + // Create a dummy certificate. Only the SubjectPublicKeyInfo field + // is relevant to the peer-to-peer protocols. The signature of the + // X.509 certificate is deliberately invalid. (Peer authenticity is + // checked in the TLS 1.3 CertificateVerify) + // See https://www.itu.int/rec/T-REC-X.509-201910-I/en for detailed definitions. 
- let mut cert_params = CertificateParams::default(); - cert_params.subject_alt_names = vec![SanType::IpAddress(san)]; - cert_params.alg = &rcgen::PKCS_ED25519; - cert_params.key_pair = Some(rcgen_keypair); - cert_params.distinguished_name = DistinguishedName::new(); - cert_params - .distinguished_name - .push(DnType::CommonName, "Solana node"); + let mut cert_der = Vec::::with_capacity(0xf4); + // Certificate SEQUENCE (3 elem) + // tbsCertificate TBSCertificate SEQUENCE (8 elem) + // version [0] (1 elem) + // INTEGER 2 + // serialNumber CertificateSerialNumber INTEGER (62 bit) + // signature AlgorithmIdentifier SEQUENCE (1 elem) + // algorithm OBJECT IDENTIFIER 1.3.101.112 curveEd25519 (EdDSA 25519 signature algorithm) + // issuer Name SEQUENCE (1 elem) + // RelativeDistinguishedName SET (1 elem) + // AttributeTypeAndValue SEQUENCE (2 elem) + // type AttributeType OBJECT IDENTIFIER 2.5.4.3 commonName (X.520 DN component) + // value AttributeValue [?] UTF8String Solana + // validity Validity SEQUENCE (2 elem) + // notBefore Time UTCTime 1970-01-01 00:00:00 UTC + // notAfter Time GeneralizedTime 4096-01-01 00:00:00 UTC + // subject Name SEQUENCE (0 elem) + // subjectPublicKeyInfo SubjectPublicKeyInfo SEQUENCE (2 elem) + // algorithm AlgorithmIdentifier SEQUENCE (1 elem) + // algorithm OBJECT IDENTIFIER 1.3.101.112 curveEd25519 (EdDSA 25519 signature algorithm) + // subjectPublicKey BIT STRING (256 bit) + cert_der.extend_from_slice(&[ + 0x30, 0x81, 0xf6, 0x30, 0x81, 0xa9, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x08, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x30, 0x16, + 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x53, 0x6f, 0x6c, 0x61, + 0x6e, 0x61, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x30, 0x20, 0x17, 0x0d, 0x37, 0x30, 0x30, 0x31, + 0x30, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x5a, 0x18, 0x0f, 0x34, 0x30, 0x39, 0x36, + 0x30, 0x31, 0x30, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x5a, 0x30, 0x00, 0x30, 0x2a, + 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00, + ]); + cert_der.extend_from_slice(&keypair.pubkey().to_bytes()); + // extensions [3] (1 elem) + // Extensions SEQUENCE (2 elem) + // Extension SEQUENCE (3 elem) + // extnID OBJECT IDENTIFIER 2.5.29.17 subjectAltName (X.509 extension) + // critical BOOLEAN true + // extnValue OCTET STRING (13 byte) encapsulating + // SEQUENCE (1 elem) + // [2] (9 byte) localhost + // Extension SEQUENCE (3 elem) + // extnID OBJECT IDENTIFIER 2.5.29.19 basicConstraints (X.509 extension) + // critical BOOLEAN true + // extnValue OCTET STRING (2 byte) encapsulating + // SEQUENCE (0 elem) + // signatureAlgorithm AlgorithmIdentifier SEQUENCE (1 elem) + // algorithm OBJECT IDENTIFIER 1.3.101.112 curveEd25519 (EdDSA 25519 signature algorithm) + // signature BIT STRING (512 bit) + cert_der.extend_from_slice(&[ + 0xa3, 0x29, 0x30, 0x27, 0x30, 0x17, 0x06, 0x03, 0x55, 0x1d, 0x11, 0x01, 0x01, 0xff, 0x04, + 0x0d, 0x30, 0x0b, 0x82, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff, 0x04, 0x02, 0x30, 0x00, 0x30, 0x05, + 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x41, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + ]); - let cert = rcgen::Certificate::from_params(cert_params)?; - let cert_der = cert.serialize_der().unwrap(); - let priv_key = cert.serialize_private_key_der(); - let priv_key = rustls::PrivateKey(priv_key); - Ok((rustls::Certificate(cert_der), priv_key)) + ( + rustls::Certificate(cert_der), + rustls::PrivateKey(key_pkcs8_der), + ) } pub fn get_pubkey_from_tls_certificate(der_cert: &rustls::Certificate) -> Option { @@ -65,22 +106,16 @@ pub fn get_pubkey_from_tls_certificate(der_cert: &rustls::Certificate) -> Option #[cfg(test)] mod tests { - use {super::*, solana_sdk::signer::Signer, std::net::Ipv4Addr}; + use {super::*, solana_sdk::signer::Signer}; #[test] fn test_generate_tls_certificate() { let keypair = Keypair::new(); - - if let Ok((cert, _)) = - new_self_signed_tls_certificate(&keypair, IpAddr::V4(Ipv4Addr::LOCALHOST)) - { - if let Some(pubkey) = get_pubkey_from_tls_certificate(&cert) { - assert_eq!(pubkey, keypair.pubkey()); - } else { - panic!("Failed to get certificate pubkey"); - } + let (cert, _) = new_dummy_x509_certificate(&keypair); + if let Some(pubkey) = get_pubkey_from_tls_certificate(&cert) { + assert_eq!(pubkey, keypair.pubkey()); } else { - panic!("Failed to generate certificates"); + panic!("Failed to get certificate pubkey"); } } } diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index e205c10bf6608f..bedd870952af99 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -21,7 +21,6 @@ quinn = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rayon = { workspace = true } -rcgen = { workspace = true } rustls = { workspace = true } solana-entry = { workspace = true } solana-gossip = { workspace = true } diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index a947f212296fb7..c137e1c0a6ef60 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -8,19 +8,16 @@ use { EndpointConfig, IdleTimeout, SendDatagramError, ServerConfig, TokioRuntime, TransportConfig, VarInt, }, - rcgen::RcgenError, rustls::{Certificate, PrivateKey}, solana_quic_client::nonblocking::quic_client::SkipServerVerification, solana_runtime::bank_forks::BankForks, solana_sdk::{pubkey::Pubkey, signature::Keypair}, - solana_streamer::{ - quic::SkipClientVerification, tls_certificates::new_self_signed_tls_certificate, - }, + solana_streamer::{quic::SkipClientVerification, tls_certificates::new_dummy_x509_certificate}, std::{ cmp::Reverse, collections::{hash_map::Entry, HashMap}, io::Error as IoError, - net::{IpAddr, SocketAddr, UdpSocket}, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, @@ -67,8 +64,6 @@ pub type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; #[derive(Error, Debug)] pub enum Error { - #[error(transparent)] - CertificateError(#[from] RcgenError), #[error("Channel Send Error")] ChannelSendError, #[error(transparent)] @@ -96,7 +91,6 @@ pub fn new_quic_endpoint( runtime: &tokio::runtime::Handle, keypair: &Keypair, socket: UdpSocket, - address: IpAddr, sender: Sender<(Pubkey, SocketAddr, Bytes)>, bank_forks: Arc>, ) -> Result< @@ -107,7 +101,7 @@ pub fn new_quic_endpoint( ), Error, > { - let (cert, key) = new_self_signed_tls_certificate(keypair, address)?; + let (cert, key) = new_dummy_x509_certificate(keypair); let server_config = new_server_config(cert.clone(), key.clone())?; let client_config = new_client_config(cert, key)?; let mut endpoint = { @@ -650,7 +644,6 @@ async fn report_metrics_task(name: &'static str, stats: Arc) { 
fn record_error(err: &Error, stats: &TurbineQuicStats) { match err { - Error::CertificateError(_) => (), Error::ChannelSendError => (), Error::ConnectError(ConnectError::EndpointStopping) => { add_metric!(stats.connect_error_other) @@ -838,7 +831,6 @@ mod tests { runtime.handle(), keypair, socket, - IpAddr::V4(Ipv4Addr::LOCALHOST), sender, bank_forks.clone(), ) From b1f8a89da54cb11ecf14a053b9ea62ada1a3f1ea Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 29 Jan 2024 09:00:40 -0600 Subject: [PATCH 086/401] Fix bank new_from_field for epoch reward status (#34992) * fix bank new_from_field for epoch reward status * fix bank serde test assert for epoch reward status --------- Co-authored-by: HaoranYi --- runtime/src/bank.rs | 2 +- runtime/src/bank/serde_snapshot.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e7b17913e3aa57..da2bc0fce84b87 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1853,7 +1853,7 @@ impl Bank { fields.epoch, ))), check_program_modification_slot: false, - epoch_reward_status: EpochRewardStatus::default(), + epoch_reward_status: fields.epoch_reward_status, }; bank.finish_init( genesis_config, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index ca6c6ee6adebd8..df51d31e568cee 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -411,7 +411,7 @@ mod tests { ); // assert epoch_reward_status is the same as the set epoch reward status - let epoch_reward_status = bank + let epoch_reward_status = dbank .get_epoch_reward_status_to_serialize() .unwrap_or(&EpochRewardStatus::Inactive); if let Some(rewards) = epoch_reward_status_active { @@ -504,7 +504,7 @@ mod tests { ); // assert epoch_reward_status is the same as the set epoch reward status - let epoch_reward_status = bank + let epoch_reward_status = dbank .get_epoch_reward_status_to_serialize() .unwrap_or(&EpochRewardStatus::Inactive); if let Some(rewards) = epoch_reward_status_active { @@ -593,7 +593,7 @@ mod tests { assert_eq!(0, dbank.fee_rate_governor.lamports_per_signature); // epoch_reward status should default to `Inactive` - let epoch_reward_status = bank + let epoch_reward_status = dbank .get_epoch_reward_status_to_serialize() .unwrap_or(&EpochRewardStatus::Inactive); assert_matches!(epoch_reward_status, EpochRewardStatus::Inactive); From 16a2f1bd3dc5a0786e9ea50e821462f010e7366a Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 29 Jan 2024 11:05:47 -0800 Subject: [PATCH 087/401] [TS] Add get_account() and account_matches_owners() to TieredStorageReader (#34968) #### Problem TieredStorageReader is a wrapper enum that works for both Hot and Cold storage readers, but its get_account() and account_matches_owners() APIs are missing. #### Summary of Changes Add get_account() and account_matches_owners() to TieredStorageReader. #### Test Plan hot.rs offers similar coverage for HotStorageReader. --- accounts-db/src/tiered_storage/hot.rs | 2 +- accounts-db/src/tiered_storage/readable.rs | 37 ++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 730ace5aa310ed..311da9916785f6 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -326,7 +326,7 @@ impl HotStorageReader { } /// Returns the offset to the account given the specified index.
-    fn get_account_offset(
+    pub(super) fn get_account_offset(
         &self,
         index_offset: IndexOffset,
     ) -> TieredStorageResult<HotAccountOffset> {
diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs
index aff29a79fb03ab..647c78d5ca91c1 100644
--- a/accounts-db/src/tiered_storage/readable.rs
+++ b/accounts-db/src/tiered_storage/readable.rs
@@ -1,9 +1,12 @@
 use {
     crate::{
+        account_storage::meta::StoredAccountMeta,
+        accounts_file::MatchAccountOwnerError,
         accounts_hash::AccountHash,
         tiered_storage::{
             footer::{AccountMetaFormat, TieredStorageFooter},
             hot::HotStorageReader,
+            index::IndexOffset,
             meta::TieredAccountMeta,
             TieredStorageResult,
         },
@@ -111,4 +114,38 @@ impl TieredStorageReader {
             Self::Hot(hot) => hot.num_accounts(),
         }
     }
+
+    /// Returns the account located at the specified index offset.
+    pub fn get_account(
+        &self,
+        index_offset: u32,
+    ) -> TieredStorageResult<Option<(StoredAccountMeta<'_>, usize)>> {
+        match self {
+            Self::Hot(hot) => hot.get_account(IndexOffset(index_offset)),
+        }
+    }
+
+    /// Returns Ok(index_of_matching_owner) if the account owner at
+    /// `account_offset` is one of the pubkeys in `owners`.
+    ///
+    /// Returns Err(MatchAccountOwnerError::NoMatch) if the account has 0
+    /// lamports or the owner is not one of the pubkeys in `owners`.
+    ///
+    /// Returns Err(MatchAccountOwnerError::UnableToLoad) if there is any internal
+    /// error that prevents the data from loading, including an `account_offset`
+    /// that causes a data overrun.
+    pub fn account_matches_owners(
+        &self,
+        index_offset: u32,
+        owners: &[Pubkey],
+    ) -> Result<usize, MatchAccountOwnerError> {
+        match self {
+            Self::Hot(hot) => {
+                let account_offset = hot
+                    .get_account_offset(IndexOffset(index_offset))
+                    .map_err(|_| MatchAccountOwnerError::UnableToLoad)?;
+                hot.account_matches_owners(account_offset, owners)
+            }
+        }
+    }
 }

From c8cdd0087f85c709cd445ee0b0e74364764b4048 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 29 Jan 2024 17:19:55 -0500
Subject: [PATCH 088/401] Removes pushing and pulling account hashes in gossip
 (#34979)

---
 gossip/src/cluster_info.rs | 38 +++-----------------------------------
 gossip/src/crds_value.rs   |  9 +--------
 2 files changed, 4 insertions(+), 43 deletions(-)

diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index 23038b0407d30e..7cddbdb5a963b1 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -32,8 +32,8 @@ use {
             CrdsFilter, CrdsTimeouts, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
         },
         crds_value::{
-            self, AccountsHashes, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot,
-            NodeInstance, SnapshotHashes, Version, Vote, MAX_WALLCLOCK,
+            self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, NodeInstance,
+            SnapshotHashes, Version, Vote, MAX_WALLCLOCK,
         },
         duplicate_shred::DuplicateShred,
         epoch_slots::EpochSlots,
@@ -259,14 +259,6 @@ struct PullData {
     filter: CrdsFilter,
 }

-pub fn make_accounts_hashes_message(
-    keypair: &Keypair,
-    accounts_hashes: Vec<(Slot, Hash)>,
-) -> Option<CrdsValue> {
-    let message = CrdsData::AccountsHashes(AccountsHashes::new(keypair.pubkey(), accounts_hashes));
-    Some(CrdsValue::new_signed(message, keypair))
-}
-
 pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>;

 // TODO These messages should go through the gpu pipeline for spam filtering
@@ -392,7 +384,6 @@ fn retain_staked(values: &mut Vec<CrdsValue>, stakes: &HashMap<Pubkey, u64>) {
             // the various dashboards.
             CrdsData::Version(_) => true,
             CrdsData::NodeInstance(_) => true,
-            // getHealth fails if account hashes are not propagated.
CrdsData::AccountsHashes(_) => true, CrdsData::LowestSlot(_, _) | CrdsData::LegacyVersion(_) @@ -1021,19 +1012,6 @@ impl ClusterInfo { .push(message); } - pub fn push_accounts_hashes(&self, accounts_hashes: Vec<(Slot, Hash)>) { - if accounts_hashes.len() > MAX_ACCOUNTS_HASHES { - warn!( - "accounts hashes too large, ignored: {}", - accounts_hashes.len(), - ); - return; - } - - let message = CrdsData::AccountsHashes(AccountsHashes::new(self.id(), accounts_hashes)); - self.push_message(CrdsValue::new_signed(message, &self.keypair())); - } - pub fn push_snapshot_hashes( &self, full: (Slot, Hash), @@ -1221,16 +1199,6 @@ impl ClusterInfo { Ok(()) } - pub fn get_accounts_hash_for_node(&self, pubkey: &Pubkey, map: F) -> Option - where - F: FnOnce(&Vec<(Slot, Hash)>) -> Y, - { - self.time_gossip_read_lock("get_accounts_hash", &self.stats.get_accounts_hash) - .get::<&CrdsValue>(&CrdsValueLabel::AccountsHashes(*pubkey)) - .map(|x| &x.accounts_hash().unwrap().hashes) - .map(map) - } - pub fn get_snapshot_hashes_for_node(&self, pubkey: &Pubkey) -> Option { self.gossip .crds @@ -3185,7 +3153,7 @@ mod tests { super::*, crate::{ crds_gossip_pull::tests::MIN_NUM_BLOOM_FILTERS, - crds_value::{CrdsValue, CrdsValueLabel, Vote as CrdsVote}, + crds_value::{AccountsHashes, CrdsValue, CrdsValueLabel, Vote as CrdsVote}, duplicate_shred::{self, tests::new_rand_shred, MAX_DUPLICATE_SHREDS}, }, itertools::izip, diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index ad6422fc2e5188..6dcbbde2d14373 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -87,7 +87,7 @@ pub enum CrdsData { Vote(VoteIndex, Vote), LowestSlot(/*DEPRECATED:*/ u8, LowestSlot), LegacySnapshotHashes(LegacySnapshotHashes), // Deprecated - AccountsHashes(AccountsHashes), + AccountsHashes(AccountsHashes), // Deprecated EpochSlots(EpochSlotsIndex, EpochSlots), LegacyVersion(LegacyVersion), Version(Version), @@ -663,13 +663,6 @@ impl CrdsValue { } } - pub(crate) fn accounts_hash(&self) -> Option<&AccountsHashes> { - match &self.data { - CrdsData::AccountsHashes(slots) => Some(slots), - _ => None, - } - } - pub(crate) fn epoch_slots(&self) -> Option<&EpochSlots> { match &self.data { CrdsData::EpochSlots(_, slots) => Some(slots), From 9ad6198d2db6395eb8914fc38d9ef2f85163d271 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jan 2024 16:28:59 +0800 Subject: [PATCH 089/401] build(deps): bump serde from 1.0.195 to 1.0.196 (#34996) * build(deps): bump serde from 1.0.195 to 1.0.196 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.195 to 1.0.196. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.195...v1.0.196) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc5fcebdd70b53..5a2115c61cea7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4841,9 +4841,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -4859,9 +4859,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index e1177f23f60c80..f28cdf4949f2e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -292,7 +292,7 @@ rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.21" seqlock = "0.2.0" -serde = "1.0.195" +serde = "1.0.196" serde_bytes = "0.11.14" serde_derive = "1.0.103" serde_json = "1.0.111" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d441666b28835f..d5942d7b60163c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4279,9 +4279,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -4297,9 +4297,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", From 0f376254af69952e5090d085503dfa2c0d392749 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jan 2024 16:29:24 +0800 Subject: [PATCH 090/401] build(deps): bump chrono from 0.4.32 to 0.4.33 (#34997) * build(deps): bump chrono from 0.4.32 to 0.4.33 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.32 to 0.4.33. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.32...v0.4.33) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a2115c61cea7f..489f2f0a41b07f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1064,9 +1064,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.32" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" dependencies = [ "android-tzdata", "iana-time-zone", diff --git a/Cargo.toml b/Cargo.toml index f28cdf4949f2e4..0e55640443c733 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -170,7 +170,7 @@ bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" cc = "1.0.83" -chrono = { version = "0.4.32", default-features = false } +chrono = { version = "0.4.33", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" console = "0.15.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d5942d7b60163c..65776fb60c48c2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -940,9 +940,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.32" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" dependencies = [ "android-tzdata", "iana-time-zone", From 6666660d4c4716c2a8c476a87485d93fd0f0347b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jan 2024 16:29:48 +0800 Subject: [PATCH 091/401] build(deps): bump hidapi from 2.5.0 to 2.5.1 (#34998) Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 2.5.0 to 2.5.1. - [Commits](https://github.com/ruabmbua/hidapi-rs/commits/v2.5.1) --- updated-dependencies: - dependency-name: hidapi dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 489f2f0a41b07f..93c712b80eacd7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2407,9 +2407,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hidapi" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b125253e27c9fd67beac20665348f4bfc5b488b5c8a1020610eeb7e6d205cde" +checksum = "830eccace7c861211d0ad04288e5dad690d6711b0db152084da58882ee7a840a" dependencies = [ "cc", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 0e55640443c733..eabe122cc72a68 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -212,7 +212,7 @@ gethostname = "0.2.3" getrandom = "0.2.10" goauth = "0.13.1" hex = "0.4.3" -hidapi = { version = "2.5.0", default-features = false } +hidapi = { version = "2.5.1", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.11" From d5ab2fa1df07203387a13e98ad94fa4bb5aee053 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jan 2024 16:30:10 +0800 Subject: [PATCH 092/401] build(deps): bump serde_json from 1.0.111 to 1.0.113 (#35000) * build(deps): bump serde_json from 1.0.111 to 1.0.113 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.111 to 1.0.113. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.111...v1.0.113) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93c712b80eacd7..567eb3ccf992c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4870,9 +4870,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index eabe122cc72a68..095f844475fe32 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -295,7 +295,7 @@ seqlock = "0.2.0" serde = "1.0.196" serde_bytes = "0.11.14" serde_derive = "1.0.103" -serde_json = "1.0.111" +serde_json = "1.0.113" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.30" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 65776fb60c48c2..d0a8a630e415e7 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4308,9 +4308,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", From c6c23405d6d869e9d178b04d8d125e54bdd32b1d Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 30 Jan 2024 08:38:24 -0800 Subject: [PATCH 093/401] Remove SVM dependency on Bank::should_collect_rent() (#35011) --- runtime/src/bank.rs | 1 - runtime/src/bank/tests.rs | 1 - runtime/src/svm/account_loader.rs | 22 ++++++++++------------ 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index da2bc0fce84b87..b5aa6db4913dcc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5370,7 +5370,6 @@ impl Bank { self.get_reward_interval(), &program_accounts_map, &programs_loaded_for_tx_batch.borrow(), - self.should_collect_rent(), ); load_time.stop(); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index e19eaa9aca96ad..324fac49d277e9 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -10998,7 +10998,6 @@ fn test_rent_state_list_len() { RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), - true, ); let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| { diff --git a/runtime/src/svm/account_loader.rs b/runtime/src/svm/account_loader.rs index beedace9ede1ac..31ce63654670e5 100644 --- a/runtime/src/svm/account_loader.rs +++ b/runtime/src/svm/account_loader.rs @@ -22,7 +22,7 @@ use { create_executable_meta, is_builtin, is_executable, Account, AccountSharedData, ReadableAccount, WritableAccount, }, - feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, message::SanitizedMessage, native_loader, @@ -52,7 +52,6 @@ pub(crate) fn load_accounts( in_reward_interval: RewardInterval, program_accounts: 
&HashMap, loaded_programs: &LoadedProgramsForTxBatch, - should_collect_rent: bool, ) -> Vec { txs.iter() .zip(lock_results) @@ -87,7 +86,6 @@ pub(crate) fn load_accounts( in_reward_interval, program_accounts, loaded_programs, - should_collect_rent, ) { Ok(loaded_transaction) => loaded_transaction, Err(e) => return (Err(e), None), @@ -128,7 +126,6 @@ fn load_transaction_accounts( reward_interval: RewardInterval, program_accounts: &HashMap, loaded_programs: &LoadedProgramsForTxBatch, - should_collect_rent: bool, ) -> Result { let in_reward_interval = reward_interval == RewardInterval::InsideInterval; @@ -190,7 +187,9 @@ fn load_transaction_accounts( .load_with_fixed_root(ancestors, key) .map(|(mut account, _)| { if message.is_writable(i) { - if should_collect_rent { + if !feature_set + .is_active(&feature_set::disable_rent_fees_collection::id()) + { let rent_due = rent_collector .collect_from_existing_account( key, @@ -514,7 +513,7 @@ mod tests { lamports_per_signature: u64, rent_collector: &RentCollector, error_counters: &mut TransactionErrorMetrics, - feature_set: &FeatureSet, + feature_set: &mut FeatureSet, fee_structure: &FeeStructure, ) -> Vec { let accounts_db = AccountsDb::new_single_for_tests(); @@ -524,6 +523,7 @@ mod tests { } let ancestors = vec![(0, 0)].into_iter().collect(); + feature_set.deactivate(&feature_set::disable_rent_fees_collection::id()); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); load_accounts( &accounts.accounts_db, @@ -538,7 +538,6 @@ mod tests { RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), - true, ) } @@ -565,7 +564,7 @@ mod tests { lamports_per_signature, &RentCollector::default(), error_counters, - &all_features_except(exclude_features), + &mut all_features_except(exclude_features), &FeeStructure { lamports_per_signature, ..FeeStructure::default() @@ -768,7 +767,7 @@ mod tests { lamports_per_signature, &rent_collector, &mut error_counters, - &all_features_except(None), + &mut all_features_except(None), &FeeStructure::default(), ); assert_eq!(loaded_accounts.len(), 1); @@ -784,7 +783,7 @@ mod tests { lamports_per_signature, &rent_collector, &mut error_counters, - &FeatureSet::all_enabled(), + &mut FeatureSet::all_enabled(), &FeeStructure::default(), ); assert_eq!(loaded_accounts.len(), 1); @@ -801,7 +800,7 @@ mod tests { lamports_per_signature, &rent_collector, &mut error_counters, - &FeatureSet::all_enabled(), + &mut FeatureSet::all_enabled(), &FeeStructure::default(), ); assert_eq!(loaded_accounts.len(), 1); @@ -1013,7 +1012,6 @@ mod tests { RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), - true, ) } From 15423928c1d82a635ed02db9b1ade1459bff60a5 Mon Sep 17 00:00:00 2001 From: Tao Zhu Date: Tue, 30 Jan 2024 21:57:44 +0000 Subject: [PATCH 094/401] Revert "refactor unused parameter (#34970)" This reverts commit 083890928f3558b616850f863e46253c77db9b40. 
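Read on its own, the reinstated parameter looks dead. A minimal standalone
sketch of the restored shape (simplified free functions, not the real
`FeeStructure` API) shows the intent: the public entry point re-exposes
`lamports_per_signature` and forwards it to the detail computation, which
deliberately ignores it until the congestion-multiplier revert later in this
series starts reading it again.

    // Hypothetical, simplified sketch; `base_fee` stands in for the combined
    // signature, write-lock, and compute components.
    fn calculate_fee(lamports_per_signature: u64, base_fee: u64) -> u64 {
        // Forward the value even though nothing reads it yet; keeping it in
        // the signature preserves the pre-refactor interface for callers.
        calculate_fee_details(lamports_per_signature, base_fee)
    }

    fn calculate_fee_details(_lamports_per_signature: u64, base_fee: u64) -> u64 {
        // The leading underscore silences the unused-argument lint until the
        // value is consumed again.
        base_fee
    }

    fn main() {
        assert_eq!(calculate_fee(5_000, 100), 100);
    }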
--- sdk/src/fee.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index de77ac11436595..bd3af75e70da18 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -92,12 +92,13 @@ impl FeeStructure { pub fn calculate_fee( &self, message: &SanitizedMessage, - _unused: u64, + lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, ) -> u64 { self.calculate_fee_details( message, + lamports_per_signature, budget_limits, include_loaded_account_data_size_in_fee, ) @@ -109,6 +110,7 @@ impl FeeStructure { pub fn calculate_fee_details( &self, message: &SanitizedMessage, + _lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, ) -> FeeDetails { From df2ee120e9277acaf533b9da1d0bfbce51b20e20 Mon Sep 17 00:00:00 2001 From: Tao Zhu Date: Tue, 30 Jan 2024 21:58:42 +0000 Subject: [PATCH 095/401] Revert "separate priority fee and transaction fee from fee calculation (#34757)" This reverts commit 5ecc47ec5a8867ca04b8b616c72749329e6a3aed. --- sdk/src/fee.rs | 47 ++++++++--------------------------------------- 1 file changed, 8 insertions(+), 39 deletions(-) diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index bd3af75e70da18..2fb045aba5d73e 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -31,19 +31,6 @@ pub struct FeeStructure { pub compute_fee_bins: Vec, } -/// Return type of calculate_fee(...) -#[derive(Debug, Default, Clone, Eq, PartialEq)] -pub struct FeeDetails { - transaction_fee: u64, - prioritization_fee: u64, -} - -impl FeeDetails { - pub fn total_fee(&self) -> u64 { - self.transaction_fee.saturating_add(self.prioritization_fee) - } -} - pub const ACCOUNT_DATA_COST_PAGE_SIZE: u64 = 32_u64.saturating_mul(1024); impl FeeStructure { @@ -88,32 +75,15 @@ impl FeeStructure { .saturating_mul(heap_cost) } + /// Calculate fee for `SanitizedMessage` #[cfg(not(target_os = "solana"))] pub fn calculate_fee( - &self, - message: &SanitizedMessage, - lamports_per_signature: u64, - budget_limits: &FeeBudgetLimits, - include_loaded_account_data_size_in_fee: bool, - ) -> u64 { - self.calculate_fee_details( - message, - lamports_per_signature, - budget_limits, - include_loaded_account_data_size_in_fee, - ) - .total_fee() - } - - /// Calculate fee details for `SanitizedMessage` - #[cfg(not(target_os = "solana"))] - pub fn calculate_fee_details( &self, message: &SanitizedMessage, _lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, - ) -> FeeDetails { + ) -> u64 { let signature_fee = message .num_signatures() .saturating_mul(self.lamports_per_signature); @@ -145,13 +115,12 @@ impl FeeStructure { .unwrap_or_default() }); - FeeDetails { - transaction_fee: (signature_fee - .saturating_add(write_lock_fee) - .saturating_add(compute_fee) as f64) - .round() as u64, - prioritization_fee: budget_limits.prioritization_fee, - } + (budget_limits + .prioritization_fee + .saturating_add(signature_fee) + .saturating_add(write_lock_fee) + .saturating_add(compute_fee) as f64) + .round() as u64 } } From 0dcac3fe7c4832fc6f358f010ddba8a116da70ab Mon Sep 17 00:00:00 2001 From: Tao Zhu Date: Tue, 30 Jan 2024 21:59:34 +0000 Subject: [PATCH 096/401] Revert "Remove congestion multiplier from calculate fee (#34865)" This reverts commit 73d3973c7c26848c50522396962daf520be4536e. 
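The formula this revert restores, pulled out into a standalone, runnable
sketch (`FeeBudgetLimits` and the free-function shape here are simplified
stand-ins, not the actual sdk types): the `lamports_per_signature` argument
only gates the multiplier, while the per-signature charge still comes from
the fee structure's own rate.

    // Sketch under the assumptions above, not the actual sdk API.
    struct FeeBudgetLimits {
        prioritization_fee: u64,
    }

    fn calculate_fee(
        num_signatures: u64,
        structure_lamports_per_signature: u64, // rate owned by the fee structure
        lamports_per_signature: u64,           // network value; zero only in tests
        budget_limits: &FeeBudgetLimits,
        write_lock_fee: u64,
        compute_fee: u64,
    ) -> u64 {
        // 0.0 zeroes fees in the zero-fee test configuration; 1.0 is a no-op.
        let congestion_multiplier = if lamports_per_signature == 0 { 0.0 } else { 1.0 };
        let signature_fee = num_signatures.saturating_mul(structure_lamports_per_signature);
        ((budget_limits
            .prioritization_fee
            .saturating_add(signature_fee)
            .saturating_add(write_lock_fee)
            .saturating_add(compute_fee) as f64)
            * congestion_multiplier)
            .round() as u64
    }

    fn main() {
        let limits = FeeBudgetLimits { prioritization_fee: 100 };
        // One signature at 5_000 lamports plus the prioritization fee:
        assert_eq!(calculate_fee(1, 5_000, 5_000, &limits, 0, 0), 5_100);
        // A zero lamports_per_signature (test configuration) zeroes the fee:
        assert_eq!(calculate_fee(1, 5_000, 0, &limits, 0, 0), 0);
    }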
--- runtime/src/bank.rs | 11 ----------- runtime/src/bank/tests.rs | 1 + sdk/src/fee.rs | 12 ++++++++++-- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b5aa6db4913dcc..6f5cd9a07f607b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6719,17 +6719,6 @@ impl Bank { &self.runtime_config.compute_budget.unwrap_or_default(), false, /* debugging_features */ )); - - // genesis_config loaded by accounts_db::open_genesis_config() from ledger - // has it's lamports_per_signature set to zero; bank sets its value correctly - // after the first block with a transaction in it. This is a hack to mimic - // the process. - let derived_fee_rate_governor = - FeeRateGovernor::new_derived(&genesis_config.fee_rate_governor, 0); - // new bank's fee_structure.lamports_per_signature should be inline with - // what's configured in GenesisConfig - self.fee_structure.lamports_per_signature = - derived_fee_rate_governor.lamports_per_signature; } pub fn set_inflation(&self, inflation: Inflation) { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 324fac49d277e9..ad28005fccfe7e 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -3335,6 +3335,7 @@ fn test_bank_parent_account_spend() { let key2 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); + println!("==== amount {}", amount); let tx = system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash()); diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index 2fb045aba5d73e..f3377b5254f0a6 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -80,10 +80,17 @@ impl FeeStructure { pub fn calculate_fee( &self, message: &SanitizedMessage, - _lamports_per_signature: u64, + lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, ) -> u64 { + // Fee based on compute units and signatures + let congestion_multiplier = if lamports_per_signature == 0 { + 0.0 // test only + } else { + 1.0 // multiplier that has no effect + }; + let signature_fee = message .num_signatures() .saturating_mul(self.lamports_per_signature); @@ -115,11 +122,12 @@ impl FeeStructure { .unwrap_or_default() }); - (budget_limits + ((budget_limits .prioritization_fee .saturating_add(signature_fee) .saturating_add(write_lock_fee) .saturating_add(compute_fee) as f64) + * congestion_multiplier) .round() as u64 } } From b3ea62fba31767fae75238a714081ab924cc6fbc Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 30 Jan 2024 22:50:37 -0400 Subject: [PATCH 097/401] ledger-tool: Change --snapshot-archive-path to --snapshots (#35019) This change makes solana-ledger-tool match solana-validator CLI; the old flag --snapshot-archive-path is retained as an argument alias and can still be used for the sake of backwards compatibility. 
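Because clap v2 aliases are easy to miss in review, here is a self-contained
sketch of the mechanism (assuming the clap 2.x API this workspace pins; the
app name and value are made up): both spellings parse into the same
`snapshots` key, so existing scripts keep working unchanged.

    use clap::{App, Arg};

    fn main() {
        let matches = App::new("demo")
            .arg(
                Arg::with_name("snapshots")
                    .long("snapshots")
                    .alias("snapshot-archive-path") // the old flag keeps working
                    .value_name("DIR")
                    .takes_value(true),
            )
            .get_matches_from(vec!["demo", "--snapshot-archive-path", "/snapshots"]);
        // The deprecated spelling resolves under the new key:
        assert_eq!(matches.value_of("snapshots"), Some("/snapshots"));
    }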
--- ledger-tool/src/main.rs | 9 +++++---- ledger-tool/src/program.rs | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 50bc3a40263743..d4a5a3eb18ea69 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -819,12 +819,13 @@ fn main() { ), ) .arg( - Arg::with_name("snapshot_archive_path") - .long("snapshot-archive-path") + Arg::with_name("snapshots") + .long("snapshots") + .alias("snapshot-archive-path") .value_name("DIR") .takes_value(true) .global(true) - .help("Use DIR for snapshot location"), + .help("Use DIR for snapshot location [default: --ledger value]"), ) .arg( Arg::with_name("incremental_snapshot_archive_path") @@ -1420,7 +1421,7 @@ fn main() { info!("{} {}", crate_name!(), solana_version::version!()); let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger_path", String)); - let snapshot_archive_path = value_t!(matches, "snapshot_archive_path", String) + let snapshot_archive_path = value_t!(matches, "snapshots", String) .ok() .map(PathBuf::from); let incremental_snapshot_archive_path = diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 732c2e8fe3aaee..b56affd4c905c2 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -71,7 +71,7 @@ fn load_accounts(path: &Path) -> Result { fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc { let process_options = parse_process_options(ledger_path, arg_matches); - let snapshot_archive_path = value_t!(arg_matches, "snapshot_archive_path", String) + let snapshot_archive_path = value_t!(arg_matches, "snapshots", String) .ok() .map(PathBuf::from); let incremental_snapshot_archive_path = From 056930483531424f2ef584be8aeaa1cee56b0db7 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 31 Jan 2024 10:52:59 -0500 Subject: [PATCH 098/401] Replaces bare Arc::default() in CacheHashData::new() (#35017) --- accounts-db/src/cache_hash_data.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index e9675b9fd22798..a0e8507c50520e 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -214,7 +214,7 @@ impl CacheHashData { cache_dir, pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())), deletion_policy, - stats: Arc::default(), + stats: Arc::new(CacheHashDataStats::default()), }; result.get_cache_files(); From daa2449ad4b4dc0cc83a4be1968653815e2e4a8c Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 1 Feb 2024 09:35:34 -0500 Subject: [PATCH 099/401] Removes RwLock on AccountsDb::shrink_paths (#35027) --- accounts-db/src/accounts_db.rs | 27 +++++++---------- core/src/validator.rs | 22 ++++++-------- ledger-tool/src/ledger_utils.rs | 1 - ledger/src/bank_forks_utils.rs | 9 ------ ledger/src/blockstore_processor.rs | 1 - local-cluster/src/validator_configs.rs | 1 - local-cluster/tests/local_cluster.rs | 1 - runtime/src/bank.rs | 4 --- validator/src/main.rs | 41 +++++++++++++------------- 9 files changed, 39 insertions(+), 68 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4d37dcba060705..2089c508ea8b4e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -494,6 +494,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING), base_working_path: None, accounts_hash_cache_path: None, + shrink_paths: None, 
write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -506,6 +507,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), base_working_path: None, accounts_hash_cache_path: None, + shrink_paths: None, write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -547,6 +549,7 @@ pub struct AccountsDbConfig { /// Base directory for various necessary files pub base_working_path: Option, pub accounts_hash_cache_path: Option, + pub shrink_paths: Option>, pub write_cache_limit_bytes: Option, /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) @@ -1396,7 +1399,7 @@ pub struct AccountsDb { accounts_hash_cache_path: PathBuf, - pub shrink_paths: RwLock>>, + shrink_paths: Vec, /// Directory of paths this accounts_db needs to hold/remove #[allow(dead_code)] @@ -2433,7 +2436,7 @@ impl AccountsDb { base_working_path, base_working_temp_dir, accounts_hash_cache_path, - shrink_paths: RwLock::new(None), + shrink_paths: Vec::default(), temp_paths: None, file_size: DEFAULT_FILE_SIZE, thread_pool: rayon::ThreadPoolBuilder::new() @@ -2570,6 +2573,10 @@ impl AccountsDb { new.paths = paths; new.temp_paths = Some(temp_dirs); }; + new.shrink_paths = accounts_db_config + .as_ref() + .and_then(|config| config.shrink_paths.clone()) + .unwrap_or_else(|| new.paths.clone()); new.start_background_hasher(); { @@ -2580,15 +2587,6 @@ impl AccountsDb { new } - pub fn set_shrink_paths(&self, paths: Vec) { - assert!(!paths.is_empty()); - let mut shrink_paths = self.shrink_paths.write().unwrap(); - for path in &paths { - std::fs::create_dir_all(path).expect("Create directory failed."); - } - *shrink_paths = Some(paths); - } - pub fn file_size(&self) -> u64 { self.file_size } @@ -4153,12 +4151,7 @@ impl AccountsDb { let shrunken_store = self .try_recycle_store(slot, aligned_total, aligned_total + 1024) .unwrap_or_else(|| { - let maybe_shrink_paths = self.shrink_paths.read().unwrap(); - let (shrink_paths, from) = maybe_shrink_paths - .as_ref() - .map(|paths| (paths, "shrink-w-path")) - .unwrap_or_else(|| (&self.paths, "shrink")); - self.create_store(slot, aligned_total, from, shrink_paths) + self.create_store(slot, aligned_total, "shrink", self.shrink_paths.as_slice()) }); self.storage.shrinking_in_progress(slot, shrunken_store) } diff --git a/core/src/validator.rs b/core/src/validator.rs index c8a3af3d02583a..7301552499b119 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -205,7 +205,6 @@ pub struct ValidatorConfig { pub voting_disabled: bool, pub account_paths: Vec, pub account_snapshot_paths: Vec, - pub account_shrink_paths: Option>, pub rpc_config: JsonRpcConfig, /// Specifies which plugins to start up with pub on_start_geyser_plugin_config_files: Option>, @@ -277,7 +276,6 @@ impl Default for ValidatorConfig { max_ledger_shreds: None, account_paths: Vec::new(), account_snapshot_paths: Vec::new(), - account_shrink_paths: None, rpc_config: JsonRpcConfig::default(), on_start_geyser_plugin_config_files: None, rpc_addrs: None, @@ -1838,7 +1836,6 @@ fn load_blockstore( &genesis_config, &blockstore, config.account_paths.clone(), - config.account_shrink_paths.clone(), Some(&config.snapshot_config), &process_options, transaction_history_services @@ -1865,11 +1862,6 @@ fn load_blockstore( let mut bank_forks = bank_forks.write().unwrap(); 
bank_forks.set_snapshot_config(Some(config.snapshot_config.clone())); bank_forks.set_accounts_hash_interval_slots(config.accounts_hash_interval_slots); - if let Some(ref shrink_paths) = config.account_shrink_paths { - bank_forks - .working_bank() - .set_shrink_paths(shrink_paths.clone()); - } } Ok(( @@ -2448,12 +2440,16 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo } fn cleanup_accounts_paths(config: &ValidatorConfig) { - for accounts_path in &config.account_paths { - move_and_async_delete_path_contents(accounts_path); + for account_path in &config.account_paths { + move_and_async_delete_path_contents(account_path); } - if let Some(ref shrink_paths) = config.account_shrink_paths { - for accounts_path in shrink_paths { - move_and_async_delete_path_contents(accounts_path); + if let Some(shrink_paths) = config + .accounts_db_config + .as_ref() + .and_then(|config| config.shrink_paths.as_ref()) + { + for shrink_path in shrink_paths { + move_and_async_delete_path_contents(shrink_path); } } } diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index bcf87e826ec72e..ba6ac1ebe15cac 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -274,7 +274,6 @@ pub fn load_and_process_ledger( genesis_config, blockstore.as_ref(), account_paths, - None, snapshot_config.as_ref(), &process_options, None, diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 993f6d2c2f7645..48c03e1e6cc8e5 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -80,7 +80,6 @@ pub fn load( genesis_config: &GenesisConfig, blockstore: &Blockstore, account_paths: Vec, - shrink_paths: Option>, snapshot_config: Option<&SnapshotConfig>, process_options: ProcessOptions, transaction_status_sender: Option<&TransactionStatusSender>, @@ -93,7 +92,6 @@ pub fn load( genesis_config, blockstore, account_paths, - shrink_paths, snapshot_config, &process_options, cache_block_meta_sender, @@ -121,7 +119,6 @@ pub fn load_bank_forks( genesis_config: &GenesisConfig, blockstore: &Blockstore, account_paths: Vec, - shrink_paths: Option>, snapshot_config: Option<&SnapshotConfig>, process_options: &ProcessOptions, cache_block_meta_sender: Option<&CacheBlockMetaSender>, @@ -181,7 +178,6 @@ pub fn load_bank_forks( incremental_snapshot_archive_info, genesis_config, account_paths, - shrink_paths, snapshot_config, process_options, accounts_update_notifier, @@ -231,7 +227,6 @@ fn bank_forks_from_snapshot( incremental_snapshot_archive_info: Option, genesis_config: &GenesisConfig, account_paths: Vec, - shrink_paths: Option>, snapshot_config: &SnapshotConfig, process_options: &ProcessOptions, accounts_update_notifier: Option, @@ -345,10 +340,6 @@ fn bank_forks_from_snapshot( bank }; - if let Some(shrink_paths) = shrink_paths { - bank.set_shrink_paths(shrink_paths); - } - let full_snapshot_hash = FullSnapshotHash(( full_snapshot_archive_info.slot(), *full_snapshot_archive_info.hash(), diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 4fa5fa6f3aa808..2ee80b879eaaab 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -745,7 +745,6 @@ pub fn test_process_blockstore( blockstore, Vec::new(), None, - None, opts, None, None, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 21606164cc27e4..537dd6495f32e1 100644 --- a/local-cluster/src/validator_configs.rs +++ 
b/local-cluster/src/validator_configs.rs @@ -13,7 +13,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { voting_disabled: config.voting_disabled, account_paths: config.account_paths.clone(), account_snapshot_paths: config.account_snapshot_paths.clone(), - account_shrink_paths: config.account_shrink_paths.clone(), rpc_config: config.rpc_config.clone(), on_start_geyser_plugin_config_files: config.on_start_geyser_plugin_config_files.clone(), rpc_addrs: config.rpc_addrs, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 02953c632a80c3..f9640066345745 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2211,7 +2211,6 @@ fn create_snapshot_to_hard_fork( .unwrap() .0, ], - None, Some(&snapshot_config), process_options, None, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6f5cd9a07f607b..e97d47e6e61cb9 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4436,10 +4436,6 @@ impl Bank { self.rc.accounts.accounts_db.remove_unrooted_slots(slots) } - pub fn set_shrink_paths(&self, paths: Vec) { - self.rc.accounts.accounts_db.set_shrink_paths(paths); - } - fn check_age( &self, sanitized_txs: &[impl core::borrow::Borrow], diff --git a/validator/src/main.rs b/validator/src/main.rs index c0ea702da973fd..94b663cacdf1dd 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1205,10 +1205,30 @@ pub fn main() { .ok() .map(|mb| mb * MB); + let account_shrink_paths: Option> = + values_t!(matches, "account_shrink_path", String) + .map(|shrink_paths| shrink_paths.into_iter().map(PathBuf::from).collect()) + .ok(); + let account_shrink_paths = account_shrink_paths.as_ref().map(|paths| { + create_and_canonicalize_directories(paths).unwrap_or_else(|err| { + eprintln!("Unable to access account shrink path: {err}"); + exit(1); + }) + }); + let (account_shrink_run_paths, account_shrink_snapshot_paths) = account_shrink_paths + .map(|paths| { + create_all_accounts_run_and_snapshot_dirs(&paths).unwrap_or_else(|err| { + eprintln!("Error: {err}"); + exit(1); + }) + }) + .unzip(); + let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_path.clone()), accounts_hash_cache_path: Some(accounts_hash_cache_path), + shrink_paths: account_shrink_run_paths, write_cache_limit_bytes: value_t!(matches, "accounts_db_cache_limit_mb", u64) .ok() .map(|mb| mb * MB as u64), @@ -1452,35 +1472,14 @@ pub fn main() { exit(1); }); - let account_shrink_paths: Option> = - values_t!(matches, "account_shrink_path", String) - .map(|shrink_paths| shrink_paths.into_iter().map(PathBuf::from).collect()) - .ok(); - let account_shrink_paths = account_shrink_paths.as_ref().map(|paths| { - create_and_canonicalize_directories(paths).unwrap_or_else(|err| { - eprintln!("Unable to access account shrink path: {err}"); - exit(1); - }) - }); - let (account_run_paths, account_snapshot_paths) = create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap_or_else(|err| { eprintln!("Error: {err}"); exit(1); }); - let (account_shrink_run_paths, account_shrink_snapshot_paths) = account_shrink_paths - .map(|paths| { - create_all_accounts_run_and_snapshot_dirs(&paths).unwrap_or_else(|err| { - eprintln!("Error: {err}"); - exit(1); - }) - }) - .unzip(); - // From now on, use run/ paths in the same way as the previous account_paths. 
validator_config.account_paths = account_run_paths; - validator_config.account_shrink_paths = account_shrink_run_paths; // These snapshot paths are only used for initial clean up, add in shrink paths if they exist. validator_config.account_snapshot_paths = From 35f900b03bb4c376f9424c66f45db80e8c94feac Mon Sep 17 00:00:00 2001 From: galactus <96341601+godmodegalactus@users.noreply.github.com> Date: Thu, 1 Feb 2024 22:06:45 +0100 Subject: [PATCH 100/401] Metrics prioritization fees (#34653) * Adding metrics for prioritization fees min/max per thread * Adding scheduled transaction prioritization fees to the metrics * Changes after andrews comments * fixing Taos comments * Adding metrics to the new scheduler * Fixing getting of min max for TransactionStateContainer * Fix clippy CI Issue * Changes after andrews comments about min/max for new scheduler * Creating a new structure to store prio fee metrics * Reporting with prio fee stats banking_stage_scheduler_counts * merging prioritization stats into SchedulerCountMetrics * Minor changes after andrews review --- core/src/banking_stage.rs | 5 +- core/src/banking_stage/consume_worker.rs | 46 +++++- core/src/banking_stage/consumer.rs | 30 ++++ core/src/banking_stage/leader_slot_metrics.rs | 131 +++++++++++++++--- .../scheduler_controller.rs | 52 ++++++- .../transaction_state_container.rs | 11 ++ .../unprocessed_packet_batches.rs | 8 ++ .../unprocessed_transaction_storage.rs | 26 ++++ 8 files changed, 287 insertions(+), 22 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 158614b32d7963..652f2569f8fd43 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -659,7 +659,10 @@ impl BankingStage { } let (decision, make_decision_time) = measure!(decision_maker.make_consume_or_forward_decision()); - let metrics_action = slot_metrics_tracker.check_leader_slot_boundary(decision.bank_start()); + let metrics_action = slot_metrics_tracker.check_leader_slot_boundary( + decision.bank_start(), + Some(unprocessed_transaction_storage), + ); slot_metrics_tracker.increment_make_decision_us(make_decision_time.as_us()); match decision { diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index d3a53aa42e91b8..7744a399e565bc 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -212,6 +212,8 @@ impl ConsumeWorkerMetrics { retryable_transaction_indexes, execute_and_commit_timings, error_counters, + min_prioritization_fees, + max_prioritization_fees, .. 
}: &ExecuteAndCommitTransactionsOutput,
    ) {
@@ -227,7 +229,12 @@ impl ConsumeWorkerMetrics {
         self.count_metrics
             .retryable_transaction_count
             .fetch_add(retryable_transaction_indexes.len(), Ordering::Relaxed);
-
+        self.count_metrics
+            .min_prioritization_fees
+            .fetch_min(*min_prioritization_fees, Ordering::Relaxed);
+        self.count_metrics
+            .max_prioritization_fees
+            .fetch_max(*max_prioritization_fees, Ordering::Relaxed);
         self.update_on_execute_and_commit_timings(execute_and_commit_timings);
         self.update_on_error_counters(error_counters);
     }
@@ -368,7 +383,6 @@ impl ConsumeWorkerMetrics {
     }
 }

-#[derive(Default)]
 struct ConsumeWorkerCountMetrics {
     transactions_attempted_execution_count: AtomicUsize,
     executed_transactions_count: AtomicUsize,
     executed_with_successful_result_count: AtomicUsize,
     retryable_transaction_count: AtomicUsize,
     retryable_expired_bank_count: AtomicUsize,
     cost_model_throttled_transactions_count: AtomicUsize,
+    min_prioritization_fees: AtomicU64,
+    max_prioritization_fees: AtomicU64,
+}
+
+impl Default for ConsumeWorkerCountMetrics {
+    fn default() -> Self {
+        Self {
+            transactions_attempted_execution_count: AtomicUsize::default(),
+            executed_transactions_count: AtomicUsize::default(),
+            executed_with_successful_result_count: AtomicUsize::default(),
+            retryable_transaction_count: AtomicUsize::default(),
+            retryable_expired_bank_count: AtomicUsize::default(),
+            cost_model_throttled_transactions_count: AtomicUsize::default(),
+            min_prioritization_fees: AtomicU64::new(u64::MAX),
+            max_prioritization_fees: AtomicU64::default(),
+        }
+    }
 }

 impl ConsumeWorkerCountMetrics {
@@ -416,6 +447,17 @@ impl ConsumeWorkerCountMetrics {
                     .swap(0, Ordering::Relaxed),
                 i64
             ),
+            (
+                "min_prioritization_fees",
+                self.min_prioritization_fees
+                    .swap(u64::MAX, Ordering::Relaxed),
+                i64
+            ),
+            (
+                "max_prioritization_fees",
+                self.max_prioritization_fees.swap(0, Ordering::Relaxed),
+                i64
+            ),
         );
     }
 }
diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs
index ad42da3bafbb77..406243f21bc561 100644
--- a/core/src/banking_stage/consumer.rs
+++ b/core/src/banking_stage/consumer.rs
@@ -26,6 +26,7 @@ use {
         bank::{Bank, LoadAndExecuteTransactionsOutput},
         svm::account_loader::validate_fee_payer,
         transaction_batch::TransactionBatch,
+        transaction_priority_details::GetTransactionPriorityDetails,
     },
     solana_sdk::{
         clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE},
@@ -69,6 +70,8 @@ pub struct ExecuteAndCommitTransactionsOutput {
     pub commit_transactions_result: Result<Vec<CommitTransactionDetails>, PohRecorderError>,
     pub(crate) execute_and_commit_timings: LeaderExecuteAndCommitTimings,
     pub(crate) error_counters: TransactionErrorMetrics,
+    pub(crate) min_prioritization_fees: u64,
+    pub(crate) max_prioritization_fees: u64,
 }

 pub struct Consumer {
@@ -291,6 +294,8 @@ impl Consumer {
         let mut total_execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
         let mut total_error_counters = TransactionErrorMetrics::default();
         let mut reached_max_poh_height = false;
+        let mut overall_min_prioritization_fees: u64 = u64::MAX;
+        let mut overall_max_prioritization_fees: u64 = 0;
         while chunk_start != transactions.len() {
             let chunk_end = std::cmp::min(
                 transactions.len(),
@@ -321,6 +326,8 @@ impl Consumer {
commit_transactions_result: new_commit_transactions_result,
                 execute_and_commit_timings: new_execute_and_commit_timings,
                 error_counters: new_error_counters,
+                min_prioritization_fees,
+                max_prioritization_fees,
                 ..
             } = execute_and_commit_transactions_output;
@@ -330,6 +337,10 @@ impl Consumer {
                 total_transactions_attempted_execution_count,
                 new_transactions_attempted_execution_count
             );
+            overall_min_prioritization_fees =
+                std::cmp::min(overall_min_prioritization_fees, min_prioritization_fees);
+            overall_max_prioritization_fees =
+                std::cmp::max(overall_max_prioritization_fees, max_prioritization_fees);

             trace!(
                 "process_transactions result: {:?}",
@@ -390,6 +401,8 @@ impl Consumer {
             cost_model_us: total_cost_model_us,
             execute_and_commit_timings: total_execute_and_commit_timings,
             error_counters: total_error_counters,
+            min_prioritization_fees: overall_min_prioritization_fees,
+            max_prioritization_fees: overall_max_prioritization_fees,
         }
     }
@@ -567,7 +580,19 @@ impl Consumer {
         });
         execute_and_commit_timings.collect_balances_us = collect_balances_us;

+        let min_max = batch
+            .sanitized_transactions()
+            .iter()
+            .filter_map(|transaction| {
+                let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set
+                transaction
+                    .get_transaction_priority_details(round_compute_unit_price_enabled)
+                    .map(|details| details.priority)
+            })
+            .minmax();
+        let (min_prioritization_fees, max_prioritization_fees) =
+            min_max.into_option().unwrap_or_default();
+
         let (load_and_execute_transactions_output, load_execute_us) = measure_us!(bank
             .load_and_execute_transactions(
                 batch,
@@ -648,6 +674,8 @@ impl Consumer {
                 commit_transactions_result: Err(recorder_err),
                 execute_and_commit_timings,
                 error_counters,
+                min_prioritization_fees,
+                max_prioritization_fees,
             };
         }
@@ -703,6 +731,8 @@ impl Consumer {
             commit_transactions_result: Ok(commit_transaction_statuses),
             execute_and_commit_timings,
             error_counters,
+            min_prioritization_fees,
+            max_prioritization_fees,
         }
     }
diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs
index 449ff7801991fa..1e250c5b69a17b 100644
--- a/core/src/banking_stage/leader_slot_metrics.rs
+++ b/core/src/banking_stage/leader_slot_metrics.rs
@@ -1,7 +1,9 @@
 use {
     super::{
         leader_slot_timing_metrics::{LeaderExecuteAndCommitTimings, LeaderSlotTimingMetrics},
-        unprocessed_transaction_storage::InsertPacketBatchSummary,
+        unprocessed_transaction_storage::{
+            InsertPacketBatchSummary, UnprocessedTransactionStorage,
+        },
     },
     solana_accounts_db::transaction_error_metrics::*,
     solana_poh::poh_recorder::BankStart,
@@ -52,6 +54,53 @@ pub(crate) struct ProcessTransactionsSummary {

     // Breakdown of all the transaction errors from transactions passed for execution
     pub error_counters: TransactionErrorMetrics,
+
+    pub min_prioritization_fees: u64,
+    pub max_prioritization_fees: u64,
+}
+
+// Metrics describing prioritization fee information for each transaction storage before processing transactions
+#[derive(Debug, Default)]
+struct LeaderPrioritizationFeesMetrics {
+    // minimum prioritization fees in the MinMaxHeap
+    min_prioritization_fees_per_cu: u64,
+    // maximum prioritization fees in the MinMaxHeap
+    max_prioritization_fees_per_cu: u64,
+}
+
+impl LeaderPrioritizationFeesMetrics {
+    fn new(unprocessed_transaction_storage: Option<&UnprocessedTransactionStorage>) -> Self {
+        if let Some(unprocessed_transaction_storage) = unprocessed_transaction_storage {
+            Self {
+                min_prioritization_fees_per_cu: unprocessed_transaction_storage
.get_min_priority() + .unwrap_or_default(), + max_prioritization_fees_per_cu: unprocessed_transaction_storage + .get_max_priority() + .unwrap_or_default(), + } + } else { + Self::default() + } + } + + fn report(&self, id: u32, slot: Slot) { + datapoint_info!( + "banking_stage-leader_prioritization_fees_info", + ("id", id, i64), + ("slot", slot, i64), + ( + "min_prioritization_fees_per_cu", + self.min_prioritization_fees_per_cu, + i64 + ), + ( + "max_prioritization_fees_per_cu", + self.max_prioritization_fees_per_cu, + i64 + ) + ); + } } // Metrics describing packets ingested/processed in various parts of BankingStage during this @@ -138,6 +187,11 @@ struct LeaderSlotPacketCountMetrics { // total number of forwardable batches that were attempted for forwarding. A forwardable batch // is defined in `ForwardPacketBatchesByAccounts` in `forward_packet_batches_by_accounts.rs` forwardable_batches_count: u64, + + // min prioritization fees for scheduled transactions + min_prioritization_fees: u64, + // max prioritization fees for scheduled transactions + max_prioritization_fees: u64, } impl LeaderSlotPacketCountMetrics { @@ -255,6 +309,16 @@ impl LeaderSlotPacketCountMetrics { self.end_of_slot_unprocessed_buffer_len as i64, i64 ), + ( + "min_prioritization_fees", + self.min_prioritization_fees as i64, + i64 + ), + ( + "max_prioritization_fees", + self.max_prioritization_fees as i64, + i64 + ), ); } } @@ -277,12 +341,19 @@ pub(crate) struct LeaderSlotMetrics { timing_metrics: LeaderSlotTimingMetrics, + prioritization_fees_metric: LeaderPrioritizationFeesMetrics, + // Used by tests to check if the `self.report()` method was called is_reported: bool, } impl LeaderSlotMetrics { - pub(crate) fn new(id: u32, slot: Slot, bank_creation_time: &Instant) -> Self { + pub(crate) fn new( + id: u32, + slot: Slot, + bank_creation_time: &Instant, + unprocessed_transaction_storage: Option<&UnprocessedTransactionStorage>, + ) -> Self { Self { id, slot, @@ -290,6 +361,9 @@ impl LeaderSlotMetrics { transaction_error_metrics: TransactionErrorMetrics::new(), vote_packet_count_metrics: VotePacketCountMetrics::new(), timing_metrics: LeaderSlotTimingMetrics::new(bank_creation_time), + prioritization_fees_metric: LeaderPrioritizationFeesMetrics::new( + unprocessed_transaction_storage, + ), is_reported: false, } } @@ -301,6 +375,7 @@ impl LeaderSlotMetrics { self.transaction_error_metrics.report(self.id, self.slot); self.packet_count_metrics.report(self.id, self.slot); self.vote_packet_count_metrics.report(self.id, self.slot); + self.prioritization_fees_metric.report(self.id, self.slot); } /// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None @@ -372,6 +447,7 @@ impl LeaderSlotMetricsTracker { pub(crate) fn check_leader_slot_boundary( &mut self, bank_start: Option<&BankStart>, + unprocessed_transaction_storage: Option<&UnprocessedTransactionStorage>, ) -> MetricsTrackerAction { match (self.leader_slot_metrics.as_mut(), bank_start) { (None, None) => MetricsTrackerAction::Noop, @@ -387,6 +463,7 @@ impl LeaderSlotMetricsTracker { self.id, bank_start.working_bank.slot(), &bank_start.bank_creation_time, + unprocessed_transaction_storage, ))) } @@ -398,6 +475,7 @@ impl LeaderSlotMetricsTracker { self.id, bank_start.working_bank.slot(), &bank_start.bank_creation_time, + unprocessed_transaction_storage, ))) } else { MetricsTrackerAction::Noop @@ -449,6 +527,8 @@ impl LeaderSlotMetricsTracker { cost_model_us, ref execute_and_commit_timings, error_counters, + min_prioritization_fees, + 
max_prioritization_fees,
             ..
         } = process_transactions_summary;
@@ -525,6 +605,23 @@ impl LeaderSlotMetricsTracker {
                 *cost_model_us
             );

+            leader_slot_metrics
+                .packet_count_metrics
+                .min_prioritization_fees = std::cmp::min(
+                leader_slot_metrics
+                    .packet_count_metrics
+                    .min_prioritization_fees,
+                *min_prioritization_fees,
+            );
+            leader_slot_metrics
+                .packet_count_metrics
+                .max_prioritization_fees = std::cmp::max(
+                leader_slot_metrics
+                    .packet_count_metrics
+                    .max_prioritization_fees,
+                *max_prioritization_fees,
+            );
+
             leader_slot_metrics
                 .timing_metrics
                 .execute_and_commit_timings
@@ -896,7 +993,7 @@ mod tests {
             ..
         } = setup_test_slot_boundary_banks();
         // Test that with no bank being tracked, and no new bank being tracked, nothing is reported
-        let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None);
+        let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None);
         assert_eq!(
             mem::discriminant(&MetricsTrackerAction::Noop),
             mem::discriminant(&action)
@@ -916,8 +1013,8 @@
         // Test case where the thread has not detected a leader bank, and now sees a leader bank.
         // Metrics should not be reported because leader slot has not ended
         assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
-        let action =
-            leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_poh_recorder_bank));
+        let action = leader_slot_metrics_tracker
+            .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None);
         assert_eq!(
             mem::discriminant(&MetricsTrackerAction::NewTracker(None)),
             mem::discriminant(&action)
@@ -941,12 +1038,12 @@
         {
             // Setup first_bank
             let action = leader_slot_metrics_tracker
-                .check_leader_slot_boundary(Some(&first_poh_recorder_bank));
+                .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None);
             assert!(leader_slot_metrics_tracker.apply_action(action).is_none());
         }
         {
             // Assert reporting if slot has ended
-            let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None);
+            let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None);
             assert_eq!(
                 mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker),
                 mem::discriminant(&action)
@@ -959,7 +1056,7 @@
         }
         {
             // Assert no-op if still no new bank
-            let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None);
+            let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None);
             assert_eq!(
                 mem::discriminant(&MetricsTrackerAction::Noop),
                 mem::discriminant(&action)
@@ -981,13 +1078,13 @@
         {
             // Setup with first_bank
             let action = leader_slot_metrics_tracker
-                .check_leader_slot_boundary(Some(&first_poh_recorder_bank));
+                .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None);
             assert!(leader_slot_metrics_tracker.apply_action(action).is_none());
         }
         {
             // Assert nop-op if same bank
             let action = leader_slot_metrics_tracker
-                .check_leader_slot_boundary(Some(&first_poh_recorder_bank));
+                .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None);
             assert_eq!(
                 mem::discriminant(&MetricsTrackerAction::Noop),
                 mem::discriminant(&action)
@@ -996,7 +1093,7 @@
         }
         {
             // Assert reporting if slot has ended
-            let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None);
+            let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None);
             assert_eq!(
                 mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker),
                 mem::discriminant(&action)
@@ -1025,13 +1122,13 @@
         {
             // Setup with first_bank
             let action = leader_slot_metrics_tracker
-
.check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if slot has ended - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker), mem::discriminant(&action) ); assert!(leader_slot_metrics_tracker.apply_action(action).is_some()); assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none()); } { // Assert no-op if still no new bank - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::Noop), mem::discriminant(&action) ); } } #[test] fn test_check_leader_slot_boundary_same_bank() { let TestSlotBoundaryComponents { mut leader_slot_metrics_tracker, first_poh_recorder_bank, .. } = setup_test_slot_boundary_banks(); { // Setup with first_bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert no-op if same bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert_eq!( mem::discriminant(&MetricsTrackerAction::Noop), mem::discriminant(&action) ); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if slot has ended - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker), mem::discriminant(&action) ); assert!(leader_slot_metrics_tracker.apply_action(action).is_some()); assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none()); } } #[test] fn test_check_leader_slot_boundary_banks_end_and_start() { let TestSlotBoundaryComponents { mut leader_slot_metrics_tracker, first_poh_recorder_bank, next_poh_recorder_bank, .. } = setup_test_slot_boundary_banks(); { // Setup with first_bank let action = leader_slot_metrics_tracker -
num_dropped_on_capacity: usize, + /// Min prioritization fees in the transaction container + min_prioritization_fees: u64, + /// Max prioritization fees in the transaction container + max_prioritization_fees: u64, } impl SchedulerCountMetrics { @@ -468,7 +474,17 @@ impl SchedulerCountMetrics { self.num_dropped_on_age_and_status, i64 ), - ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64) + ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64), + ( + "min_prioritization_fees", + self.get_min_prioritization_fees(), + i64 + ), + ( + "max_prioritization_fees", + self.get_max_prioritization_fees(), + i64 + ) ); } @@ -504,6 +520,38 @@ impl SchedulerCountMetrics { self.num_dropped_on_clear = 0; self.num_dropped_on_age_and_status = 0; self.num_dropped_on_capacity = 0; + self.min_prioritization_fees = u64::MAX; + self.max_prioritization_fees = 0; + } + + pub fn update_prioritization_stats(&mut self, min_max_fees: MinMaxResult<u64>) { + // update min/max prioritization fees + match min_max_fees { + itertools::MinMaxResult::NoElements => { + // do nothing + } + itertools::MinMaxResult::OneElement(e) => { + self.min_prioritization_fees = e; + self.max_prioritization_fees = e; + } + itertools::MinMaxResult::MinMax(min, max) => { + self.min_prioritization_fees = min; + self.max_prioritization_fees = max; + } + } + } + + pub fn get_min_prioritization_fees(&self) -> u64 { + // to avoid getting u64::MAX recorded by metrics in edge cases + if self.min_prioritization_fees != u64::MAX { + self.min_prioritization_fees + } else { + 0 + } + } + + pub fn get_max_prioritization_fees(&self) -> u64 { + self.max_prioritization_fees } } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index d7d79cb21b7c32..f0688dee67bb5f 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -4,6 +4,7 @@ use { transaction_state::{SanitizedTransactionTTL, TransactionState}, }, crate::banking_stage::scheduler_messages::TransactionId, + itertools::MinMaxResult, min_max_heap::MinMaxHeap, solana_cost_model::transaction_cost::TransactionCost, solana_runtime::transaction_priority_details::TransactionPriorityDetails, @@ -149,6 +150,16 @@ impl TransactionStateContainer { .remove(id) .expect("transaction must exist"); } + + pub(crate) fn get_min_max_prioritization_fees(&self) -> MinMaxResult<u64> { + match self.priority_queue.peek_min() { + Some(min) => match self.priority_queue.peek_max() { + Some(max) => MinMaxResult::MinMax(min.priority, max.priority), + None => MinMaxResult::OneElement(min.priority), + }, + None => MinMaxResult::NoElements, + } + } } #[cfg(test)] diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index ff323ef25f18ee..9341fd4a54ec61 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -193,6 +193,14 @@ impl UnprocessedPacketBatches { self.packet_priority_queue.is_empty() } + pub fn get_min_priority(&self) -> Option<u64> { + self.packet_priority_queue.peek_min().map(|x| x.priority()) + } + + pub fn get_max_priority(&self) -> Option<u64> { + self.packet_priority_queue.peek_max().map(|x| x.priority()) + } + fn push_internal(&mut self, deserialized_packet: DeserializedPacket) { // Push into the priority queue self.packet_priority_queue diff
--git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 257bf1b141975b..7e6f882ed5e32d 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -282,6 +282,24 @@ impl UnprocessedTransactionStorage { } } + pub fn get_min_priority(&self) -> Option<u64> { + match self { + Self::VoteStorage(_) => None, + Self::LocalTransactionStorage(transaction_storage) => { + transaction_storage.get_min_priority() + } + } + } + + pub fn get_max_priority(&self) -> Option<u64> { + match self { + Self::VoteStorage(_) => None, + Self::LocalTransactionStorage(transaction_storage) => { + transaction_storage.get_max_priority() + } + } + } + /// Returns the maximum number of packets a receive should accept pub fn max_receive_size(&self) -> usize { match self { @@ -529,6 +547,14 @@ impl ThreadLocalUnprocessedPackets { self.unprocessed_packet_batches.len() } + pub fn get_min_priority(&self) -> Option<u64> { + self.unprocessed_packet_batches.get_min_priority() + } + + pub fn get_max_priority(&self) -> Option<u64> { + self.unprocessed_packet_batches.get_max_priority() + } + fn max_receive_size(&self) -> usize { self.unprocessed_packet_batches.capacity() - self.unprocessed_packet_batches.len() } From be9f17f053a99afbbab491b42e37150c85843430 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Thu, 1 Feb 2024 13:33:42 -0800 Subject: [PATCH 101/401] [TieredStorage] Have HotStorageWriter::write_account() return Vec<StoredAccountInfo> (#34929) #### Problem To allow hot-storage to use HotStorageWriter::write_account() to implement AccountsFile::append_accounts(), it is required to provide a Vec<StoredAccountInfo> to allow AccountsDB to properly prepare the entry for each account. #### Summary of Changes This PR enables HotStorageWriter::write_account() to return Vec<StoredAccountInfo>. #### Test Plan Extend existing tests for HotStorageWriter to verify the correctness of the returned Vec<StoredAccountInfo>.
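For illustration only (not part of this patch), a minimal sketch of how a caller such as an `AccountsFile::append_accounts()` implementation might consume the returned `Vec<StoredAccountInfo>`. The writer setup mirrors the tests in the diff below; `path` and `storable_accounts` are assumed to be prepared by the caller:

```rust
// Hedged sketch: write a batch of accounts, then inspect where each one
// landed. `HotStorageWriter::new` and `write_accounts` come from this
// patch; everything around them is hypothetical scaffolding.
let writer = HotStorageWriter::new(&path).unwrap();
let stored_infos = writer.write_accounts(&storable_accounts, /* skip */ 0).unwrap();
for (i, info) in stored_infos.iter().enumerate() {
    // `offset` carries IndexOffset semantics (an index, not a byte offset),
    // and `size` counts only the bytes this account directly contributes.
    println!("account {i}: index offset {}, stored size {}", info.offset, info.size);
}
```

Because `offset` is an index into the index block, it can be fed straight back into `get_account(IndexOffset(offset as u32))`, which is exactly what the extended test below does.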
--- accounts-db/src/tiered_storage/hot.rs | 101 ++++++++++++++++++-------- 1 file changed, 69 insertions(+), 32 deletions(-) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 311da9916785f6..c6e3efdbbb78c6 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -2,7 +2,7 @@ use { crate::{ - account_storage::meta::StoredAccountMeta, + account_storage::meta::{StoredAccountInfo, StoredAccountMeta}, accounts_file::MatchAccountOwnerError, accounts_hash::AccountHash, rent_collector::RENT_EXEMPT_RENT_EPOCH, @@ -543,7 +543,7 @@ impl HotStorageWriter { &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, skip: usize, - ) -> TieredStorageResult<()> { + ) -> TieredStorageResult<Vec<StoredAccountInfo>> { let mut footer = new_hot_footer(); let mut index = vec![]; let mut owners_table = OwnersTable::default(); @@ -551,6 +551,8 @@ impl HotStorageWriter { // writing accounts blocks let len = accounts.accounts.len(); + let total_input_accounts = len - skip; + let mut stored_infos = Vec::with_capacity(total_input_accounts); for i in skip..len { let (account, address, account_hash, _write_version) = accounts.get(i); let index_entry = AccountIndexWriterEntry { @@ -574,7 +576,7 @@ impl HotStorageWriter { }) .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None, None)); let owner_offset = owners_table.insert(owner); - cursor += self.write_account( + let stored_size = self.write_account( lamports, owner_offset, data, @@ -582,9 +584,25 @@ impl HotStorageWriter { rent_epoch, account_hash, )?; + cursor += stored_size; + + stored_infos.push(StoredAccountInfo { + // Here we pass the IndexOffset as the get_account() API + // takes IndexOffset. Given the account address is also + // maintained outside the TieredStorage, a potential optimization + // is to store AccountOffset instead, which can further save + // one jump from the index block to the accounts block. + offset: index.len(), + // Here we only include the stored size that the account directly + // contributes (i.e., account entry + index entry, which include the + // account meta, data, optional fields, its address, and AccountOffset). + // Storage size from those shared blocks like footer and owners block + // is not included. + size: stored_size + footer.index_block_format.entry_size::<HotAccountOffset>(), + }); index.push(index_entry); } - footer.account_entry_count = (len - skip) as u32; + footer.account_entry_count = total_input_accounts as u32; // writing index block // expect the offset of each block aligned.
@@ -611,7 +629,7 @@ impl HotStorageWriter { footer.write_footer_block(&self.storage)?; - Ok(()) + Ok(stored_infos) } } @@ -1280,6 +1298,37 @@ pub mod tests { (stored_meta, AccountSharedData::from(account)) } + fn verify_account( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + address: &Pubkey, + account_hash: &AccountHash, + ) { + let (lamports, owner, data, executable, account_hash) = account + .map(|acc| { + ( + acc.lamports(), + acc.owner(), + acc.data(), + acc.executable(), + // only persist rent_epoch for those rent-paying accounts + Some(*account_hash), + ) + }) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); + + assert_eq!(stored_meta.lamports(), lamports); + assert_eq!(stored_meta.data().len(), data.len()); + assert_eq!(stored_meta.data(), data); + assert_eq!(stored_meta.executable(), executable); + assert_eq!(stored_meta.owner(), owner); + assert_eq!(stored_meta.pubkey(), address); + assert_eq!( + *stored_meta.hash(), + account_hash.unwrap_or(AccountHash(Hash::default())) + ); + } + #[test] fn test_write_account_and_index_blocks() { let account_data_sizes = &[ @@ -1316,11 +1365,10 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path().join("test_write_account_and_index_blocks"); - - { + let stored_infos = { let writer = HotStorageWriter::new(&path).unwrap(); - writer.write_accounts(&storable_accounts, 0).unwrap(); - } + writer.write_accounts(&storable_accounts, 0).unwrap() + }; let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1333,29 +1381,7 @@ pub mod tests { .unwrap(); let (account, address, account_hash, _write_version) = storable_accounts.get(i); - let (lamports, owner, data, executable, account_hash) = account - .map(|acc| { - ( - acc.lamports(), - acc.owner(), - acc.data(), - acc.executable(), - // only persist rent_epoch for those rent-paying accounts - Some(*account_hash), - ) - }) - .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); - - assert_eq!(stored_meta.lamports(), lamports); - assert_eq!(stored_meta.data().len(), data.len()); - assert_eq!(stored_meta.data(), data); - assert_eq!(stored_meta.executable(), executable); - assert_eq!(stored_meta.owner(), owner); - assert_eq!(stored_meta.pubkey(), address); - assert_eq!( - *stored_meta.hash(), - account_hash.unwrap_or(AccountHash(Hash::default())) - ); + verify_account(&stored_meta, account, address, account_hash); assert_eq!(i + 1, next); } @@ -1365,5 +1391,16 @@ pub mod tests { hot_storage.get_account(IndexOffset(num_accounts as u32)), Ok(None) ); + + for stored_info in stored_infos { + let (stored_meta, _) = hot_storage + .get_account(IndexOffset(stored_info.offset as u32)) + .unwrap() + .unwrap(); + + let (account, address, account_hash, _write_version) = + storable_accounts.get(stored_info.offset); + verify_account(&stored_meta, account, address, account_hash); + } } } From 3646291dc60112d2f67d009de44ca3f77e71c3e1 Mon Sep 17 00:00:00 2001 From: Jon C Date: Fri, 2 Feb 2024 00:00:31 +0100 Subject: [PATCH 102/401] sdk: Do not derive AbiEnum on InstructionError for Solana builds (#35038) --- sdk/program/src/instruction.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index db26af5ad04fde..572fa42c69796d 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -28,9 +28,8 @@ use { /// an error be consistent across software versions. 
For example, it is /// dangerous to include error strings from 3rd party crates because they could /// change at any time and changes to them are difficult to detect. -#[derive( - Serialize, Deserialize, Debug, Error, PartialEq, Eq, Clone, AbiExample, AbiEnumVisitor, -)] +#[cfg_attr(not(target_os = "solana"), derive(AbiExample, AbiEnumVisitor))] +#[derive(Serialize, Deserialize, Debug, Error, PartialEq, Eq, Clone)] pub enum InstructionError { /// Deprecated! Use CustomError instead! /// The program instruction returned an error From 5dd9609aeae4d302298a23005291675feca88a25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 2 Feb 2024 13:35:36 +0100 Subject: [PATCH 103/401] Fix - LoadedPrograms statistics (#35026) Fixes hits, misses. Adds reloads, lost_insertions. Removes prunes_expired. --- program-runtime/src/loaded_programs.rs | 129 ++++++++++++++----------- runtime/src/bank.rs | 4 +- 2 files changed, 77 insertions(+), 56 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index a92da7bd001bbe..1e92944ca8c75a 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -150,22 +150,24 @@ pub struct LoadedProgram { /// Global cache statistics for [LoadedPrograms]. #[derive(Debug, Default)] pub struct Stats { - /// a program was requested + /// a program was already in the cache pub hits: AtomicU64, - /// a program was polled during cooperative loading + /// a program was not found and loaded instead pub misses: AtomicU64, /// a compiled executable was unloaded pub evictions: HashMap<Pubkey, u64>, + /// an unloaded program was loaded again (opposite of eviction) + pub reloads: AtomicU64, - /// a program was loaded + /// a program was loaded or un/re/deployed pub insertions: AtomicU64, - /// a program was reloaded or redeployed + /// a program was loaded but can not be extracted on its own fork anymore + pub lost_insertions: AtomicU64, + /// a program which was already in the cache was reloaded by mistake pub replacements: AtomicU64, /// a program was only used once before being unloaded pub one_hit_wonders: AtomicU64, /// a program became unreachable in the fork graph because of rerooting pub prunes_orphan: AtomicU64, - /// a program got pruned because its expiration slot passed - pub prunes_expired: AtomicU64, /// a program got pruned because it was not recompiled for the next epoch pub prunes_environment: AtomicU64, /// the [SecondLevel] was empty because all slot versions got pruned pub empty_entries: AtomicU64, } impl Stats { pub fn submit(&self, slot: Slot) { let hits = self.hits.load(Ordering::Relaxed); let misses = self.misses.load(Ordering::Relaxed); + let evictions: u64 = self.evictions.values().sum(); + let reloads = self.reloads.load(Ordering::Relaxed); let insertions = self.insertions.load(Ordering::Relaxed); + let lost_insertions = self.lost_insertions.load(Ordering::Relaxed); let replacements = self.replacements.load(Ordering::Relaxed); let one_hit_wonders = self.one_hit_wonders.load(Ordering::Relaxed); - let evictions: u64 = self.evictions.values().sum(); let prunes_orphan = self.prunes_orphan.load(Ordering::Relaxed); - let prunes_expired = self.prunes_expired.load(Ordering::Relaxed); let prunes_environment = self.prunes_environment.load(Ordering::Relaxed); let empty_entries = self.empty_entries.load(Ordering::Relaxed); datapoint_info!( "loaded-programs-cache-stats", ("slot", slot, i64), ("hits", hits, i64), ("misses", misses, i64), ("evictions", evictions, i64), + ("reloads", reloads, i64),
("insertions", insertions, i64), + ("lost_insertions", lost_insertions, i64), ("replacements", replacements, i64), ("one_hit_wonders", one_hit_wonders, i64), ("prunes_orphan", prunes_orphan, i64), - ("prunes_expired", prunes_expired, i64), ("prunes_environment", prunes_environment, i64), ("empty_entries", empty_entries, i64), ); debug!( - "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Expired: {}, Prunes-Environment: {}, Empty: {}", - hits, misses, evictions, insertions, replacements, one_hit_wonders, prunes_orphan, prunes_expired, prunes_environment, empty_entries + "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Reloads: {}, Insertions: {} Lost-Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Environment: {}, Empty: {}", + hits, misses, evictions, reloads, insertions, lost_insertions, replacements, one_hit_wonders, prunes_orphan, prunes_environment, empty_entries ); if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() { let mut evictions = self.evictions.iter().collect::>(); @@ -716,9 +720,7 @@ impl LoadedPrograms { let index = slot_versions .iter() .position(|at| at.effective_slot >= entry.effective_slot); - if let Some((existing, entry_index)) = - index.and_then(|index| slot_versions.get(index).map(|value| (value, index))) - { + if let Some(existing) = index.and_then(|index| slot_versions.get_mut(index)) { if existing.deployment_slot == entry.deployment_slot && existing.effective_slot == entry.effective_slot { @@ -733,17 +735,19 @@ impl LoadedPrograms { existing.ix_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); - slot_versions.remove(entry_index); + self.stats.reloads.fetch_add(1, Ordering::Relaxed); } else if existing.is_tombstone() != entry.is_tombstone() { // Either the old entry is tombstone and the new one is not. // (Let's give the new entry a chance). // Or, the old entry is not a tombstone and the new one is a tombstone. // (Remove the old entry, as the tombstone makes it obsolete). 
- slot_versions.remove(entry_index); + self.stats.insertions.fetch_add(1, Ordering::Relaxed); } else { self.stats.replacements.fetch_add(1, Ordering::Relaxed); return (true, existing.clone()); } + *existing = entry.clone(); + return (false, entry); } } self.stats.insertions.fetch_add(1, Ordering::Relaxed); @@ -833,7 +837,6 @@ impl LoadedPrograms { // Remove expired if let Some(expiration) = entry.maybe_expiration_slot { if expiration <= new_root_slot { - self.stats.prunes_expired.fetch_add(1, Ordering::Relaxed); return false; } } @@ -906,6 +909,7 @@ impl LoadedPrograms { &mut self, search_for: &mut Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))>, loaded_programs_for_tx_batch: &mut LoadedProgramsForTxBatch, + is_first_round: bool, ) -> Option<(Pubkey, u64)> { debug_assert!(self.fork_graph.is_some()); let locked_fork_graph = self.fork_graph.as_ref().unwrap().read().unwrap(); @@ -913,15 +917,14 @@ impl LoadedPrograms { search_for.retain(|(key, (match_criteria, usage_count))| { if let Some(second_level) = self.entries.get_mut(key) { for entry in second_level.slot_versions.iter().rev() { - let is_ancestor = matches!( - locked_fork_graph - .relationship(entry.deployment_slot, loaded_programs_for_tx_batch.slot), - BlockRelation::Ancestor - ); - if entry.deployment_slot <= self.latest_root_slot - || entry.deployment_slot == loaded_programs_for_tx_batch.slot - || is_ancestor + || matches!( + locked_fork_graph.relationship( + entry.deployment_slot, + loaded_programs_for_tx_batch.slot + ), + BlockRelation::Equal | BlockRelation::Ancestor + ) { let entry_to_return = if loaded_programs_for_tx_batch.slot >= entry.effective_slot @@ -980,13 +983,15 @@ impl LoadedPrograms { true }); drop(locked_fork_graph); - self.stats - .misses - .fetch_add(search_for.len() as u64, Ordering::Relaxed); - self.stats.hits.fetch_add( - loaded_programs_for_tx_batch.entries.len() as u64, - Ordering::Relaxed, - ); + if is_first_round { + self.stats + .misses + .fetch_add(search_for.len() as u64, Ordering::Relaxed); + self.stats.hits.fetch_add( + loaded_programs_for_tx_batch.entries.len() as u64, + Ordering::Relaxed, + ); + } cooperative_loading_task } @@ -1003,6 +1008,20 @@ impl LoadedPrograms { Some((slot, std::thread::current().id())) ); second_level.cooperative_loading_lock = None; + // Check that it will be visible to our own fork once inserted + if loaded_program.deployment_slot > self.latest_root_slot + && !matches!( + self.fork_graph + .as_ref() + .unwrap() + .read() + .unwrap() + .relationship(loaded_program.deployment_slot, slot), + BlockRelation::Equal | BlockRelation::Ancestor + ) + { + self.stats.lost_insertions.fetch_add(1, Ordering::Relaxed); + } self.assign_program(key, loaded_program); self.loading_task_waiter.notify(); } @@ -2080,7 +2099,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), ]; let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 20, 22)); assert!(match_slot(&extracted, &program4, 0, 22)); @@ -2096,7 +2115,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 15)); assert!(match_slot(&extracted, &program2, 11, 15)); @@ -2119,7 +2138,7 @@ mod tests { (program4, 
(LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(18, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 18)); assert!(match_slot(&extracted, &program2, 11, 18)); @@ -2137,7 +2156,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); @@ -2155,7 +2174,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(11, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 11)); // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone. @@ -2189,7 +2208,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 19)); assert!(match_slot(&extracted, &program2, 11, 19)); @@ -2207,7 +2226,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 21)); assert!(match_slot(&extracted, &program2, 11, 21)); @@ -2245,7 +2264,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); // Since the fork was pruned, we should not find the entry deployed at slot 20. 
assert!(match_slot(&extracted, &program1, 0, 21)); @@ -2262,7 +2281,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); @@ -2294,7 +2313,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); @@ -2349,7 +2368,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 12)); assert!(match_slot(&extracted, &program2, 11, 12)); @@ -2369,7 +2388,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program2, 11, 12)); @@ -2439,7 +2458,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 19)); assert!(match_slot(&extracted, &program2, 11, 19)); @@ -2453,7 +2472,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); @@ -2467,7 +2486,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 20, 22)); @@ -2532,7 +2551,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); // Program1 deployed at slot 11 should not be expired yet assert!(match_slot(&extracted, &program1, 11, 12)); @@ -2548,7 +2567,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program2, 11, 15)); @@ -2614,7 +2633,7 @@ mod tests { let mut missing = vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, 
&mut extracted); + cache.extract(&mut missing, &mut extracted, true); // The cache should have the program deployed at slot 0 assert_eq!( @@ -2658,7 +2677,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); @@ -2668,7 +2687,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 5, 6)); assert!(match_missing(&missing, &program2, false)); @@ -2682,7 +2701,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); @@ -2692,7 +2711,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 6)); assert!(match_missing(&missing, &program2, false)); @@ -2706,7 +2725,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_missing(&missing, &program2, false)); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e97d47e6e61cb9..94e7bf979ff5f0 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5032,7 +5032,8 @@ impl Bank { // Lock the global cache. let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); // Initialize our local cache. 
- if loaded_programs_for_txs.is_none() { + let is_first_round = loaded_programs_for_txs.is_none(); + if is_first_round { loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( self.slot, loaded_programs_cache @@ -5052,6 +5053,7 @@ impl Bank { let program_to_load = loaded_programs_cache.extract( &mut missing_programs, loaded_programs_for_txs.as_mut().unwrap(), + is_first_round, ); let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); (program_to_load, task_waiter.cookie(), task_waiter) From f62293918d3462417ff94e4439c427c257d5c20c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 2 Feb 2024 09:21:26 -0500 Subject: [PATCH 104/401] Moves the async deleter code to accounts-db (#35040) --- accounts-db/src/utils.rs | 81 +++++++++++++++++++++++++++++++++ core/src/validator.rs | 7 ++- ledger-tool/src/ledger_utils.rs | 7 ++- runtime/src/snapshot_utils.rs | 80 +------------------------------- 4 files changed, 89 insertions(+), 86 deletions(-) diff --git a/accounts-db/src/utils.rs b/accounts-db/src/utils.rs index 7a38d23b04f68a..6ac1674a30e8da 100644 --- a/accounts-db/src/utils.rs +++ b/accounts-db/src/utils.rs @@ -1,8 +1,13 @@ use { + lazy_static::lazy_static, log::*, + solana_measure::measure, std::{ + collections::HashSet, fs, path::{Path, PathBuf}, + sync::Mutex, + thread, }, }; @@ -54,6 +59,82 @@ pub fn create_accounts_run_and_snapshot_dirs( Ok((run_path, snapshot_path)) } +/// Moves and asynchronously deletes the contents of a directory to avoid blocking on it. +/// The directory is re-created after the move, and should now be empty. +pub fn move_and_async_delete_path_contents(path: impl AsRef<Path>) { + move_and_async_delete_path(&path); + // The following could fail if the rename failed. + // If that happens, the directory should be left as is. + // So we ignore errors here. + _ = std::fs::create_dir(path); +} + +/// Delete directories/files asynchronously to avoid blocking on it. +/// First, in sync context, check if the original path exists, if it +/// does, rename the original path to *_to_be_deleted. +/// If there's an in-progress deleting thread for this path, return. +/// Then spawn a thread to delete the renamed path. +pub fn move_and_async_delete_path(path: impl AsRef<Path>) { + lazy_static! { + static ref IN_PROGRESS_DELETES: Mutex<HashSet<PathBuf>> = Mutex::new(HashSet::new()); + }; + + // Grab the mutex so no new async delete threads can be spawned for this path. + let mut lock = IN_PROGRESS_DELETES.lock().unwrap(); + + // If the path does not exist, there's nothing to delete. + if !path.as_ref().exists() { + return; + } + + // If the original path (`pathbuf` here) is already being deleted, + // then the path should not be moved and deleted again. + if lock.contains(path.as_ref()) { + return; + } + + let mut path_delete = path.as_ref().to_path_buf(); + path_delete.set_file_name(format!( + "{}{}", + path_delete.file_name().unwrap().to_str().unwrap(), + "_to_be_deleted" + )); + if let Err(err) = fs::rename(&path, &path_delete) { + warn!( + "Cannot async delete, retrying in sync mode: failed to rename '{}' to '{}': {err}", + path.as_ref().display(), + path_delete.display(), + ); + // Although the delete here is synchronous, we want to prevent another thread + // from moving & deleting this directory via `move_and_async_delete_path`.
+ lock.insert(path.as_ref().to_path_buf()); + drop(lock); // unlock before doing sync delete + + delete_contents_of_path(&path); + IN_PROGRESS_DELETES.lock().unwrap().remove(path.as_ref()); + return; + } + + lock.insert(path_delete.clone()); + drop(lock); + thread::Builder::new() + .name("solDeletePath".to_string()) + .spawn(move || { + trace!("background deleting {}...", path_delete.display()); + let (result, measure_delete) = measure!(fs::remove_dir_all(&path_delete)); + if let Err(err) = result { + panic!("Failed to async delete '{}': {err}", path_delete.display()); + } + trace!( + "background deleting {}... Done, and{measure_delete}", + path_delete.display() + ); + + IN_PROGRESS_DELETES.lock().unwrap().remove(&path_delete); + }) + .expect("spawn background delete thread"); +} + /// Delete the files and subdirectories in a directory. /// This is useful if the process does not have permission /// to delete the top level directory it might be able to diff --git a/core/src/validator.rs b/core/src/validator.rs index 7301552499b119..3adaa699beaa51 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -35,6 +35,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, solana_entry::poh::compute_hash_time_ns, @@ -100,9 +101,7 @@ use { snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, - snapshot_utils::{ - self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents, - }, + snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, }, solana_sdk::{ clock::Slot, @@ -623,7 +622,7 @@ impl Validator { ]; for old_accounts_hash_cache_dir in old_accounts_hash_cache_dirs { if old_accounts_hash_cache_dir.exists() { - snapshot_utils::move_and_async_delete_path(old_accounts_hash_cache_dir); + move_and_async_delete_path(old_accounts_hash_cache_dir); } } diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index ba6ac1ebe15cac..2663a205fb5f37 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -4,7 +4,8 @@ use { crossbeam_channel::unbounded, log::*, solana_accounts_db::{ - hardened_unpack::open_genesis_config, utils::create_all_accounts_run_and_snapshot_dirs, + hardened_unpack::open_genesis_config, + utils::{create_all_accounts_run_and_snapshot_dirs, move_and_async_delete_path_contents}, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, validator::BlockVerificationMethod, @@ -35,9 +36,7 @@ use { prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, - snapshot_utils::{ - self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents, - }, + snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, }, solana_sdk::{ clock::Slot, genesis_config::GenesisConfig, pubkey::Pubkey, diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index ff0afc1e779b0a..1bd9c4d254958d 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -23,7 +23,7 @@ use { append_vec::AppendVec, hardened_unpack::{self, ParallelSelector, UnpackError}, shared_buffer_reader::{SharedBuffer, SharedBufferReader}, - utils::{delete_contents_of_path, 
ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, + utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, }, solana_measure::{measure, measure::Measure}, solana_sdk::{clock::Slot, hash::Hash}, @@ -36,7 +36,7 @@ use { path::{Path, PathBuf}, process::ExitStatus, str::FromStr, - sync::{Arc, Mutex}, + sync::Arc, thread::{Builder, JoinHandle}, }, tar::{self, Archive}, @@ -529,82 +529,6 @@ pub enum GetSnapshotAccountsHardLinkDirError { }, } -/// Moves and asynchronously deletes the contents of a directory to avoid blocking on it. -/// The directory is re-created after the move, and should now be empty. -pub fn move_and_async_delete_path_contents(path: impl AsRef<Path>) { - move_and_async_delete_path(&path); - // The following could fail if the rename failed. - // If that happens, the directory should be left as is. - // So we ignore errors here. - _ = std::fs::create_dir(path); -} - -/// Delete directories/files asynchronously to avoid blocking on it. -/// First, in sync context, check if the original path exists, if it -/// does, rename the original path to *_to_be_deleted. -/// If there's an in-progress deleting thread for this path, return. -/// Then spawn a thread to delete the renamed path. -pub fn move_and_async_delete_path(path: impl AsRef<Path>) { - lazy_static! { - static ref IN_PROGRESS_DELETES: Mutex<HashSet<PathBuf>> = Mutex::new(HashSet::new()); - }; - - // Grab the mutex so no new async delete threads can be spawned for this path. - let mut lock = IN_PROGRESS_DELETES.lock().unwrap(); - - // If the path does not exist, there's nothing to delete. - if !path.as_ref().exists() { - return; - } - - // If the original path (`pathbuf` here) is already being deleted, - // then the path should not be moved and deleted again. - if lock.contains(path.as_ref()) { - return; - } - - let mut path_delete = path.as_ref().to_path_buf(); - path_delete.set_file_name(format!( - "{}{}", - path_delete.file_name().unwrap().to_str().unwrap(), - "_to_be_deleted" - )); - if let Err(err) = fs::rename(&path, &path_delete) { - warn!( - "Cannot async delete, retrying in sync mode: failed to rename '{}' to '{}': {err}", - path.as_ref().display(), - path_delete.display(), - ); - // Although the delete here is synchronous, we want to prevent another thread - // from moving & deleting this directory via `move_and_async_delete_path`. - lock.insert(path.as_ref().to_path_buf()); - drop(lock); // unlock before doing sync delete - - delete_contents_of_path(&path); - IN_PROGRESS_DELETES.lock().unwrap().remove(path.as_ref()); - return; - } - - lock.insert(path_delete.clone()); - drop(lock); - Builder::new() - .name("solDeletePath".to_string()) - .spawn(move || { - trace!("background deleting {}...", path_delete.display()); - let (result, measure_delete) = measure!(fs::remove_dir_all(&path_delete)); - if let Err(err) = result { - panic!("Failed to async delete '{}': {err}", path_delete.display()); - } - trace!( - "background deleting {}... Done, and{measure_delete}", - path_delete.display() - ); - - IN_PROGRESS_DELETES.lock().unwrap().remove(&path_delete); - }) - .expect("spawn background delete thread"); -} - /// The account snapshot directories under <account_path>/snapshot/<slot> contain account files hardlinked /// from <account_path>/run taken at snapshot time. They are referenced by the symlinks from the /// bank snapshot dir snapshot/<slot>/accounts_hardlinks/.
We observed that sometimes the bank snapshot dir From 9c595bca5491d72babf34fe0b08fe58c4523f894 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Feb 2024 23:45:10 +0800 Subject: [PATCH 105/401] build(deps): bump libc from 0.2.152 to 0.2.153 (#35032) * build(deps): bump libc from 0.2.152 to 0.2.153 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.152 to 0.2.153. - [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.152...0.2.153) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 567eb3ccf992c7..69ad4b3411c709 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2916,9 +2916,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" diff --git a/Cargo.toml b/Cargo.toml index 095f844475fe32..52e342089375f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -237,7 +237,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" -libc = "0.2.152" +libc = "0.2.153" libloading = "0.7.4" libsecp256k1 = "0.6.0" light-poseidon = "0.2.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d0a8a630e415e7..016c30266ba038 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2518,9 +2518,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" From 97d994ee6f6520a65bcd9ff7641498462c6b8d99 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 2 Feb 2024 09:53:13 -0800 Subject: [PATCH 106/401] [TieredStorage] Use IndexOffset in TieredStorageMeta and get_account() (#35046) #### Problem TieredStorageMeta and the TieredStorageReader::get_account API use u32 to represent IndexOffset. However, within the TieredStorage scope, IndexOffset should be used; the u32 representation of the offset is only needed when working with the AccountsFile API. #### Summary of Changes Have TieredStorageMeta and TieredStorageReader use IndexOffset. #### Test Plan Existing unit-tests.
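To make the boundary concrete, here is a hypothetical call site (only `IndexOffset`, `get_account()`, and the `.0` accessor come from this code; `reader` and the rest are assumptions): the u32 appears solely at the AccountsFile edge and is wrapped into `IndexOffset` immediately:

```rust
// Hedged sketch: AccountsFile-facing code holds a plain u32 offset...
let raw_offset: u32 = 0;
// ...while everything inside TieredStorage now speaks IndexOffset.
if let Some((account, next)) = reader.get_account(IndexOffset(raw_offset)).unwrap() {
    // Unwrap the newtype only when handing the offset back across the
    // AccountsFile boundary.
    let next_raw: u32 = next.0;
    println!("read {}, next index offset {next_raw}", account.pubkey());
}
```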
--- accounts-db/src/account_storage/meta.rs | 2 +- accounts-db/src/tiered_storage/hot.rs | 10 +++++----- accounts-db/src/tiered_storage/readable.rs | 14 +++++++------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 1442b4845bf604..69c24d7be75f7d 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -142,7 +142,7 @@ impl<'storage> StoredAccountMeta<'storage> { pub fn offset(&self) -> usize { match self { Self::AppendVec(av) => av.offset(), - Self::Hot(hot) => hot.index(), + Self::Hot(hot) => hot.index().0 as usize, } } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index c6e3efdbbb78c6..805b50f2fa3668 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -435,7 +435,7 @@ impl HotStorageReader { pub fn get_account( &self, index_offset: IndexOffset, - ) -> TieredStorageResult<Option<(StoredAccountMeta<'_>, usize)>> { + ) -> TieredStorageResult<Option<(StoredAccountMeta<'_>, IndexOffset)>> { if index_offset.0 >= self.footer.account_entry_count { return Ok(None); } @@ -452,10 +452,10 @@ impl HotStorageReader { meta, address, owner, - index: index_offset.0 as usize, + index: index_offset, account_block, }), - index_offset.0.saturating_add(1) as usize, + IndexOffset(index_offset.0.saturating_add(1)), ))) } } @@ -1244,7 +1244,7 @@ pub mod tests { ); assert_eq!(*stored_meta.pubkey(), addresses[i]); - assert_eq!(i + 1, next); + assert_eq!(i + 1, next.0 as usize); } // Make sure it returns None on NUM_ACCOUNTS to allow termination on // while loop in actual accounts-db read case. @@ -1383,7 +1383,7 @@ pub mod tests { let (account, address, account_hash, _write_version) = storable_accounts.get(i); verify_account(&stored_meta, account, address, account_hash); - assert_eq!(i + 1, next); + assert_eq!(i + 1, next.0 as usize); } // Make sure it returns None on NUM_ACCOUNTS to allow termination on // while loop in actual accounts-db read case. diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 647c78d5ca91c1..b6d841b65f4dd8 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -25,7 +25,7 @@ pub struct TieredReadableAccount<'accounts_file, M: TieredAccountMeta> { /// The address of the account owner pub owner: &'accounts_file Pubkey, /// The index for accessing the account inside its belonging AccountsFile - pub index: usize, + pub index: IndexOffset, /// The account block that contains this account. Note that this account /// block may be shared with other accounts. pub account_block: &'accounts_file [u8], @@ -43,7 +43,7 @@ impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, } /// Returns the index to this account in its AccountsFile. - pub fn index(&self) -> usize { + pub fn index(&self) -> IndexOffset { self.index } @@ -118,10 +118,10 @@ impl TieredStorageReader { /// Returns the account located at the specified index offset. pub fn get_account( &self, - index_offset: u32, - ) -> TieredStorageResult<Option<(StoredAccountMeta<'_>, usize)>> { + index_offset: IndexOffset, + ) -> TieredStorageResult<Option<(StoredAccountMeta<'_>, IndexOffset)>> { match self { - Self::Hot(hot) => hot.get_account(IndexOffset(index_offset)), + Self::Hot(hot) => hot.get_account(index_offset), } } @@ -136,13 +136,13 @@ impl TieredStorageReader { /// causes a data overrun.
pub fn account_matches_owners( &self, - index_offset: u32, + index_offset: IndexOffset, owners: &[Pubkey], ) -> Result<usize, MatchAccountOwnerError> { match self { Self::Hot(hot) => { let account_offset = hot - .get_account_offset(IndexOffset(index_offset)) + .get_account_offset(index_offset) .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; hot.account_matches_owners(account_offset, owners) } From 4b528e890c98bc23abf5bd9ff0940b97c41b6106 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 2 Feb 2024 13:00:54 -0500 Subject: [PATCH 107/401] Ensures STORE_META_OVERHEAD remains accurate (#35053) --- accounts-db/src/append_vec.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 782abee7f2a9ff..353cb58e606e96 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -44,6 +44,14 @@ pub mod test_utils; /// we need to add data len and align it to get the actual stored size pub const STORE_META_OVERHEAD: usize = 136; +// Ensure the STORE_META_OVERHEAD constant remains accurate +const _: () = assert!( + STORE_META_OVERHEAD + == mem::size_of::<StoredMeta>() + + mem::size_of::<AccountMeta>() + + mem::size_of::<AccountHash>() +); + /// Returns the size this item will take to store plus possible alignment padding bytes before the next entry. /// fixed-size portion of per-account data written /// plus 'data_len', aligned to next boundary From bf95f65ce1501644719947ba07317fab94206a4f Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 2 Feb 2024 10:10:43 -0800 Subject: [PATCH 108/401] Fix: decayed_counter can overflow if shifted more than 63 (#35054) --- program-runtime/src/loaded_programs.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 1e92944ca8c75a..e8e3b9ee325c2c 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -464,7 +464,8 @@ impl LoadedProgram { pub fn decayed_usage_counter(&self, now: Slot) -> u64 { let last_access = self.latest_access_slot.load(Ordering::Relaxed); - let decaying_for = now.saturating_sub(last_access); + // Shifting the u64 value for more than 63 will cause an overflow. + let decaying_for = std::cmp::min(63, now.saturating_sub(last_access)); self.tx_usage_counter.load(Ordering::Relaxed) >> decaying_for } } @@ -1359,6 +1360,10 @@ mod tests { assert_eq!(program.decayed_usage_counter(19), 16); assert_eq!(program.decayed_usage_counter(20), 8); assert_eq!(program.decayed_usage_counter(21), 4); + + // Decay for 63 or more slots + assert_eq!(program.decayed_usage_counter(18 + 63), 0); + assert_eq!(program.decayed_usage_counter(100), 0); } #[test] From dd30175e55eb4902585843b04f395ec4c426ebd1 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:18:52 -0800 Subject: [PATCH 109/401] [TieredStorage] TieredStorageReader:: and HotStorageReader:: accounts() (#35031) #### Problem HotStorageReader and TieredStorageReader haven't implemented accounts(), which is required by AccountsFile. #### Summary of Changes This PR implements accounts() for both HotStorageReader and TieredStorageReader. #### Test Plan Extend the existing test to cover accounts().
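As a usage sketch (the reader construction mirrors the extended test below; everything else here is illustrative rather than part of the patch):

```rust
// Hedged sketch: scan every account in a hot-storage file starting at
// index 0 with the new accounts() helper, instead of hand-rolling a
// get_account() loop. `path` is assumed to point at an existing file.
let hot_storage = HotStorageReader::new_from_path(&path).unwrap();
for stored_meta in hot_storage.accounts(IndexOffset(0)).unwrap() {
    println!("{}: {} lamports", stored_meta.pubkey(), stored_meta.lamports());
}
```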
--- accounts-db/src/tiered_storage/hot.rs | 34 ++++++++++++++++++++++ accounts-db/src/tiered_storage/readable.rs | 11 +++++++ 2 files changed, 45 insertions(+) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 805b50f2fa3668..54091313cb9de7 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -458,6 +458,24 @@ impl HotStorageReader { IndexOffset(index_offset.0.saturating_add(1)), ))) } + + /// Return a vector of account metadata for each account, starting from + /// `index_offset` + pub fn accounts( + &self, + mut index_offset: IndexOffset, + ) -> TieredStorageResult<Vec<StoredAccountMeta>> { + let mut accounts = Vec::with_capacity( + self.footer + .account_entry_count + .saturating_sub(index_offset.0) as usize, + ); + while let Some((account, next)) = self.get_account(index_offset)? { + accounts.push(account); + index_offset = next; + } + Ok(accounts) + } } fn write_optional_fields( @@ -1402,5 +1420,21 @@ pub mod tests { storable_accounts.get(stored_info.offset); verify_account(&stored_meta, account, address, account_hash); } + + // verify get_accounts + let accounts = hot_storage.accounts(IndexOffset(0)).unwrap(); + + // first, we verify everything + for (i, stored_meta) in accounts.iter().enumerate() { + let (account, address, account_hash, _write_version) = storable_accounts.get(i); + verify_account(stored_meta, account, address, account_hash); + } + + // second, we verify various initial positions + let total_stored_accounts = accounts.len(); + for i in 0..total_stored_accounts { + let partial_accounts = hot_storage.accounts(IndexOffset(i as u32)).unwrap(); + assert_eq!(&partial_accounts, &accounts[i..]); + } } } diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index b6d841b65f4dd8..12c4a8224d48ea 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -148,4 +148,15 @@ impl TieredStorageReader { } } } + + /// Return a vector of account metadata for each account, starting from + /// `index_offset` + pub fn accounts( + &self, + index_offset: IndexOffset, + ) -> TieredStorageResult<Vec<StoredAccountMeta>> { + match self { + Self::Hot(hot) => hot.accounts(index_offset), + } + } } From 919b3067335ed8f15327945f1b4817ce1a0cc83e Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Fri, 2 Feb 2024 17:55:11 -0500 Subject: [PATCH 110/401] [docs] updated page metadata and minor fixes (#35059) feat: updated metadata --- docs/build-cli-usage.sh | 2 +- docs/publish-docs.sh | 2 +- docs/sidebars.js | 4 --- docs/src/architecture.md | 7 +++-- docs/src/cli/.usage.md.header | 1 + docs/src/cli/examples/choose-a-cluster.md | 4 ++- docs/src/cli/examples/delegate-stake.md | 14 ++++++---- docs/src/cli/examples/deploy-a-program.md | 28 +++++++++---------- docs/src/cli/examples/durable-nonce.md | 4 ++- docs/src/cli/examples/offline-signing.md | 4 ++- .../src/cli/examples/sign-offchain-message.md | 4 ++- docs/src/cli/examples/test-validator.md | 1 + docs/src/cli/examples/transfer-tokens.md | 4 ++- docs/src/cli/index.md | 7 +++-- docs/src/cli/install.md | 1 + docs/src/cli/intro.md | 7 +++-- docs/src/cli/wallets/file-system.md | 4 ++- docs/src/cli/wallets/hardware/index.md | 1 + docs/src/cli/wallets/hardware/ledger.md | 1 + docs/src/cli/wallets/index.md | 3 +- docs/src/cli/wallets/paper.md | 4 ++- docs/src/clusters/available.md | 4 ++- docs/src/clusters/index.md | 5 ++-- docs/src/clusters/metrics.md | 4 ++-
docs/src/consensus/commitments.md | 4 ++- docs/src/consensus/leader-rotation.md | 4 ++- docs/src/index.mdx | 2 +- docs/src/operations/_category_.json | 6 +--- docs/src/operations/best-practices/general.md | 3 +- .../operations/best-practices/monitoring.md | 3 +- .../src/operations/best-practices/security.md | 3 +- docs/src/operations/guides/restart-cluster.md | 8 +++++- .../operations/guides/validator-failover.md | 5 +++- docs/src/operations/guides/validator-info.md | 5 +++- .../operations/guides/validator-monitor.md | 5 +++- docs/src/operations/guides/validator-stake.md | 5 +++- docs/src/operations/guides/validator-start.md | 5 +++- .../guides/validator-troubleshoot.md | 5 +++- docs/src/operations/guides/vote-accounts.md | 5 +++- docs/src/operations/prerequisites.md | 3 +- docs/src/operations/requirements.md | 5 ++-- docs/src/operations/validator-or-rpc-node.md | 3 +- docs/src/runtime/programs.md | 4 ++- docs/src/runtime/sysvars.md | 4 ++- docs/src/runtime/zk-token-proof.md | 4 ++- docs/src/validator/anatomy.md | 3 +- docs/src/validator/blockstore.md | 4 ++- docs/src/validator/geyser.md | 4 ++- docs/src/validator/gossip.md | 4 ++- docs/src/validator/runtime.md | 4 ++- docs/src/validator/tpu.md | 5 ++-- docs/src/validator/tvu.md | 5 ++-- 52 files changed, 158 insertions(+), 82 deletions(-) diff --git a/docs/build-cli-usage.sh b/docs/build-cli-usage.sh index 0917cb4737af9f..8e6090474f10fc 100755 --- a/docs/build-cli-usage.sh +++ b/docs/build-cli-usage.sh @@ -58,6 +58,6 @@ in_subcommands=0 while read -r subcommand rest; do [[ $subcommand == "SUBCOMMANDS:" ]] && in_subcommands=1 && continue if ((in_subcommands)); then - section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')" "####" >> "$out" + section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')" "###" >> "$out" fi done <<<"$usage">>"$out" diff --git a/docs/publish-docs.sh b/docs/publish-docs.sh index 06c34134db7462..0cbedcf882001d 100755 --- a/docs/publish-docs.sh +++ b/docs/publish-docs.sh @@ -62,7 +62,7 @@ cat > "$CONFIG_FILE" <<EOF ``` -### Extend a program +## Extend a program If a program has already been deployed, and a redeployment goes beyond the `max_len` of the account, it's possible to extend the program to fit the larger redeployment: ``` solana program extend <PROGRAM_ID> <ADDITIONAL_BYTES> ``` -### Resuming a failed deploy +## Resuming a failed deploy If program deployment fails, there will be a hanging intermediate buffer account that contains a non-zero balance. In order to recoup that balance you may resume ... Then issue a new `deploy` command and specify the buffer: ``` solana program deploy --buffer <BUFFER_SIGNER_FILEPATH> <PROGRAM_FILEPATH> ``` -### Closing program and buffer accounts, and reclaiming their lamports +## Closing program and buffer accounts, and reclaiming their lamports Both program and buffer accounts can be closed and their lamport balances transferred to a recipient's account. ... To show all buffer accounts regardless of the authority ``` solana program show --buffers --all ``` -### Set a program's upgrade authority +## Set a program's upgrade authority The program's upgrade authority must be present to deploy a program. If no authority is specified during program deployment, the default keypair is used as ... they do not have access to. The `--skip-new-upgrade-authority-signer-check` option relaxes the signer check. This can be useful for situations where the new upgrade authority is an offline signer or a multisig. -### Immutable programs +## Immutable programs A program can be marked immutable, which prevents all further redeployments, by specifying the `--final` flag during deployment: ``` solana program deploy <PROGRAM_FILEPATH> --final ``` Or anytime after: ``` solana program set-upgrade-authority <PROGRAM_ADDRESS> --final ``` -### Dumping a program to a file +## Dumping a program to a file The deployed program may be dumped back to a local file: ``` solana program dump <ACCOUNT_ADDRESS> dump.so ``` ... $ truncate -r dump.so extended.so $ sha256sum extended.so dump.so ``` -### Using an intermediary Buffer account +## Using an intermediary Buffer account Instead of deploying directly to the program account, the program can be written to an intermediary buffer account. Intermediary accounts can be useful for ... account are refunded to a spill account. Buffers also support `show` and `dump` just like programs do. -### Upgrading program using offline signer as authority +## Upgrading program using offline signer as authority Some security models require separating the signing process from the transaction broadcast, such that the signing keys can be completely disconnected from any network, also known as [offline signing](offline-signing.md). diff --git a/docs/src/cli/examples/durable-nonce.md b/docs/src/cli/examples/durable-nonce.md index 7f0199b8d44fc5..11c90c3936b348 100644 --- a/docs/src/cli/examples/durable-nonce.md +++ b/docs/src/cli/examples/durable-nonce.md @@ -1,5 +1,7 @@ --- -title: Durable Transaction Nonces +title: Durable Transaction Nonces in the Solana CLI +pagination_label: "Solana CLI: Durable Transaction Nonces" +sidebar_label: Durable Transaction Nonces --- Durable transaction nonces are a mechanism for getting around the typical short diff --git a/docs/src/cli/examples/offline-signing.md b/docs/src/cli/examples/offline-signing.md index 8b9312853a9a11..28b54561732878 100644 --- a/docs/src/cli/examples/offline-signing.md +++ b/docs/src/cli/examples/offline-signing.md @@ -1,5 +1,7 @@ --- -title: Offline Transaction Signing +title: Offline Transaction Signing with the Solana CLI +pagination_label: "Solana CLI: Offline Transaction Signing" +sidebar_label: Offline Transaction Signing --- Some security models require keeping signing keys, and thus the signing diff --git a/docs/src/cli/examples/sign-offchain-message.md b/docs/src/cli/examples/sign-offchain-message.md index ae14119f7b91b9..578ba511eee0d6 100644 --- a/docs/src/cli/examples/sign-offchain-message.md +++ b/docs/src/cli/examples/sign-offchain-message.md @@ -1,5 +1,7 @@ --- -title: Off-Chain Message Signing +title: Off-Chain Message Signing with the Solana CLI +pagination_label: "Solana CLI: Off-Chain Message Signing" +sidebar_label: Off-Chain Message Signing --- Off-chain message signing is a method of signing non-transaction messages with diff --git a/docs/src/cli/examples/test-validator.md b/docs/src/cli/examples/test-validator.md index 70f050c77f5663..4641e36b55125e 100644 --- a/docs/src/cli/examples/test-validator.md +++ b/docs/src/cli/examples/test-validator.md @@ -1,5 +1,6 @@ --- title: Solana Test Validator +pagination_label: "Solana CLI: Test Validator" sidebar_label: Test Validator --- diff --git a/docs/src/cli/examples/transfer-tokens.md b/docs/src/cli/examples/transfer-tokens.md index 89374ebf43864a..28c933be6665de 100644 --- a/docs/src/cli/examples/transfer-tokens.md +++ b/docs/src/cli/examples/transfer-tokens.md @@ -1,5 +1,7 @@ --- -title: Send and Receive Tokens
This can be useful for situations where the new upgrade authority is an offline signer or a multisig. -### Immutable programs +## Immutable programs A program can be marked immutable, which prevents all further redeployments, by specifying the `--final` flag during deployment: @@ -260,7 +260,7 @@ Or anytime after: solana program set-upgrade-authority --final ``` -### Dumping a program to a file +## Dumping a program to a file The deployed program may be dumped back to a local file: @@ -283,7 +283,7 @@ $ truncate -r dump.so extended.so $ sha256sum extended.so dump.so ``` -### Using an intermediary Buffer account +## Using an intermediary Buffer account Instead of deploying directly to the program account, the program can be written to an intermediary buffer account. Intermediary accounts can be useful for @@ -328,7 +328,7 @@ account are refunded to a spill account. Buffers also support `show` and `dump` just like programs do. -### Upgrading program using offline signer as authority +## Upgrading program using offline signer as authority Some security models require separating the signing process from the transaction broadcast, such that the signing keys can be completely disconnected from any network, also known as [offline signing](offline-signing.md). diff --git a/docs/src/cli/examples/durable-nonce.md b/docs/src/cli/examples/durable-nonce.md index 7f0199b8d44fc5..11c90c3936b348 100644 --- a/docs/src/cli/examples/durable-nonce.md +++ b/docs/src/cli/examples/durable-nonce.md @@ -1,5 +1,7 @@ --- -title: Durable Transaction Nonces +title: Durable Transaction Nonces in the Solana CLI +pagination_label: "Solana CLI: Durable Transaction Nonces" +sidebar_label: Durable Transaction Nonces --- Durable transaction nonces are a mechanism for getting around the typical short diff --git a/docs/src/cli/examples/offline-signing.md b/docs/src/cli/examples/offline-signing.md index 8b9312853a9a11..28b54561732878 100644 --- a/docs/src/cli/examples/offline-signing.md +++ b/docs/src/cli/examples/offline-signing.md @@ -1,5 +1,7 @@ --- -title: Offline Transaction Signing +title: Offline Transaction Signing with the Solana CLI +pagination_label: "Solana CLI: Offline Transaction Signing" +sidebar_label: Offline Transaction Signing --- Some security models require keeping signing keys, and thus the signing diff --git a/docs/src/cli/examples/sign-offchain-message.md b/docs/src/cli/examples/sign-offchain-message.md index ae14119f7b91b9..578ba511eee0d6 100644 --- a/docs/src/cli/examples/sign-offchain-message.md +++ b/docs/src/cli/examples/sign-offchain-message.md @@ -1,5 +1,7 @@ --- -title: Off-Chain Message Signing +title: Off-Chain Message Signing with the Solana CLI +pagination_label: "Solana CLI: Off-Chain Message Signing" +sidebar_label: Off-Chain Message Signing --- Off-chain message signing is a method of signing non-transaction messages with diff --git a/docs/src/cli/examples/test-validator.md b/docs/src/cli/examples/test-validator.md index 70f050c77f5663..4641e36b55125e 100644 --- a/docs/src/cli/examples/test-validator.md +++ b/docs/src/cli/examples/test-validator.md @@ -1,5 +1,6 @@ --- title: Solana Test Validator +pagination_label: "Solana CLI: Test Validator" sidebar_label: Test Validator --- diff --git a/docs/src/cli/examples/transfer-tokens.md b/docs/src/cli/examples/transfer-tokens.md index 89374ebf43864a..28c933be6665de 100644 --- a/docs/src/cli/examples/transfer-tokens.md +++ b/docs/src/cli/examples/transfer-tokens.md @@ -1,5 +1,7 @@ --- -title: Send and Receive Tokens +title: Send and Receive Tokens 
with the Solana CLI +pagination_label: "Solana CLI: Send and Receive Tokens" +sidebar_label: Send and Receive Tokens --- This page describes how to receive and send SOL tokens using the command line diff --git a/docs/src/cli/index.md b/docs/src/cli/index.md index cdf1ed10f83d76..77574419618354 100644 --- a/docs/src/cli/index.md +++ b/docs/src/cli/index.md @@ -1,7 +1,8 @@ --- title: Solana CLI Tool Suite -sidebar_label: Overview sidebar_position: 0 +sidebar_label: Overview +pagination_label: Solana CLI Tool Suite --- In this section, we will describe how to use the Solana command-line tools to @@ -19,6 +20,6 @@ secure access to your Solana accounts. To get started using the Solana Command Line (CLI) tools: - [Install the Solana CLI Tool Suite](./install.md) -- [Choose a Cluster](./examples/choose-a-cluster.md) -- [Create a Wallet](./wallets/index.md) - [Introduction to our CLI conventions](./intro.md) +- [Create a Wallet using the CLI](./wallets/index.md) +- [Choose a Cluster to connect to using the CLI](./examples/choose-a-cluster.md) diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index 7773631dda59d3..3667c733e3f4d4 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -1,5 +1,6 @@ --- title: Install the Solana CLI +pagination_label: Install the Solana CLI sidebar_label: Installation sidebar_position: 1 --- diff --git a/docs/src/cli/intro.md b/docs/src/cli/intro.md index 1701450173e39a..436776ee718e14 100644 --- a/docs/src/cli/intro.md +++ b/docs/src/cli/intro.md @@ -1,5 +1,6 @@ --- title: Introduction to the Solana CLI +pagination_label: Introduction to the Solana CLI sidebar_label: Introduction sidebar_position: 2 --- @@ -45,7 +46,7 @@ solana-keygen pubkey Below, we show how to resolve what you should put in `` depending on your wallet type. -#### Paper Wallet +## Paper Wallet In a paper wallet, the keypair is securely derived from the seed words and optional passphrase you entered when the wallet was created. To use a paper @@ -59,7 +60,7 @@ To display the wallet address of a Paper Wallet: solana-keygen pubkey prompt:// ``` -#### File System Wallet +## File System Wallet With a file system wallet, the keypair is stored in a file on your computer. Replace `` with the complete file path to the keypair file. 
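As a hedged aside on the file-system wallet described above: the keypair file written by `solana-keygen` is a JSON array of 64 bytes, the 32-byte secret key followed by the 32-byte public key. Below is a minimal standalone sketch of reading such a file; the path is hypothetical and the hand-rolled JSON parsing is only for illustration, since real tooling would use the SDK's keypair readers.

```rust
use std::fs;

/// Read a file-system wallet keypair file, assumed to be a JSON array of
/// 64 bytes: 32-byte secret key followed by 32-byte public key.
fn read_keypair_bytes(path: &str) -> Result<Vec<u8>, String> {
    let text = fs::read_to_string(path).map_err(|e| e.to_string())?;
    // Parse "[12, 34, ...]" by hand to keep this sketch dependency-free.
    let inner = text.trim().trim_start_matches('[').trim_end_matches(']');
    let bytes: Result<Vec<u8>, _> = inner
        .split(',')
        .map(|s| s.trim().parse::<u8>())
        .collect();
    let bytes = bytes.map_err(|e| e.to_string())?;
    if bytes.len() != 64 {
        return Err(format!("expected 64 bytes, got {}", bytes.len()));
    }
    Ok(bytes)
}

fn main() {
    // Hypothetical path, matching the example used in the docs above.
    match read_keypair_bytes("/home/solana/my_wallet.json") {
        Ok(bytes) => println!("public key bytes: {:?}", &bytes[32..]),
        Err(e) => eprintln!("could not read keypair: {e}"),
    }
}
```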
@@ -71,7 +72,7 @@ For example, if the file system keypair file location is solana-keygen pubkey /home/solana/my_wallet.json ``` -#### Hardware Wallet +## Hardware Wallet If you chose a hardware wallet, use your [keypair URL](./wallets/hardware/index.md#specify-a-hardware-wallet-key), diff --git a/docs/src/cli/wallets/file-system.md b/docs/src/cli/wallets/file-system.md index 0041c51876b490..dd21203c4e1451 100644 --- a/docs/src/cli/wallets/file-system.md +++ b/docs/src/cli/wallets/file-system.md @@ -1,5 +1,7 @@ --- -title: File System Wallets +title: File System Wallets using the CLI +pagination_label: File System Wallets using the CLI +sidebar_label: File System Wallets sidebar_position: 2 --- diff --git a/docs/src/cli/wallets/hardware/index.md b/docs/src/cli/wallets/hardware/index.md index 9c8642cf34c6a1..30f53f86d3a3d8 100644 --- a/docs/src/cli/wallets/hardware/index.md +++ b/docs/src/cli/wallets/hardware/index.md @@ -1,5 +1,6 @@ --- title: Using Hardware Wallets in the Solana CLI +pagination_label: "Using Hardware Wallets in the Solana CLI" sidebar_label: Using in the Solana CLI sidebar_position: 0 --- diff --git a/docs/src/cli/wallets/hardware/ledger.md b/docs/src/cli/wallets/hardware/ledger.md index e0060aba803eb2..e5a45c63df07d8 100644 --- a/docs/src/cli/wallets/hardware/ledger.md +++ b/docs/src/cli/wallets/hardware/ledger.md @@ -1,5 +1,6 @@ --- title: Using Ledger Nano Hardware Wallets in the Solana CLI +pagination_label: "Hardware Wallets in the Solana CLI: Ledger Nano" sidebar_label: Ledger Nano --- diff --git a/docs/src/cli/wallets/index.md b/docs/src/cli/wallets/index.md index fcd907629c8d85..9643ef61ec13eb 100644 --- a/docs/src/cli/wallets/index.md +++ b/docs/src/cli/wallets/index.md @@ -1,5 +1,6 @@ --- -title: Command Line Wallets +title: Solana Wallets with the CLI +pagination_label: Command Line Wallets sidebar_label: Overview sidebar_position: 0 --- diff --git a/docs/src/cli/wallets/paper.md b/docs/src/cli/wallets/paper.md index 85c76779b852ed..4e3c3c39ac8732 100644 --- a/docs/src/cli/wallets/paper.md +++ b/docs/src/cli/wallets/paper.md @@ -1,5 +1,7 @@ --- -title: Paper Wallets +title: Paper Wallets using the Solana CLI +pagination_label: Paper Wallets using the CLI +sidebar_label: Paper Wallets sidebar_position: 1 --- diff --git a/docs/src/clusters/available.md b/docs/src/clusters/available.md index 7abfb06880e858..dfbca41672b499 100644 --- a/docs/src/clusters/available.md +++ b/docs/src/clusters/available.md @@ -1,5 +1,7 @@ --- -title: Solana Clusters +title: Available Solana Clusters +sidebar_label: Solana Clusters +pagination_label: Available Solana Clusters --- Solana maintains several different clusters with different purposes. diff --git a/docs/src/clusters/index.md b/docs/src/clusters/index.md index 8ac1dee11d6e68..e2d25c603b4388 100644 --- a/docs/src/clusters/index.md +++ b/docs/src/clusters/index.md @@ -1,7 +1,8 @@ --- -title: A Solana Cluster -sidebar_label: Overview +title: Overview of a Solana Cluster sidebar_position: 0 +sidebar_label: Overview +pagination_label: Overview of a Solana Cluster --- A Solana cluster is a set of validators working together to serve client transactions and maintain the integrity of the ledger. Many clusters may coexist. When two clusters share a common genesis block, they attempt to converge. Otherwise, they simply ignore the existence of the other. Transactions sent to the wrong one are quietly rejected. 
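One way to picture the quiet rejection mentioned in the cluster overview above, as a standalone sketch with stand-in types rather than validator code: a transaction embeds a recent blockhash, and a cluster that never produced that blockhash does not recognize it and simply drops the transaction.

```rust
use std::collections::HashSet;

/// A cluster only accepts a transaction whose recent blockhash appears in
/// its own recent-blockhash history; u64 stands in for a real hash here.
fn is_acceptable(recent_blockhashes: &HashSet<u64>, tx_blockhash: u64) -> bool {
    recent_blockhashes.contains(&tx_blockhash)
}

fn main() {
    let cluster_a: HashSet<u64> = [11, 12, 13].into_iter().collect();
    let cluster_b: HashSet<u64> = [91, 92, 93].into_iter().collect();
    let tx_for_cluster_a = 12; // blockhash fetched from cluster A
    assert!(is_acceptable(&cluster_a, tx_for_cluster_a));
    // The same transaction sent to cluster B is quietly rejected.
    assert!(!is_acceptable(&cluster_b, tx_for_cluster_a));
    println!("cross-cluster transaction quietly rejected");
}
```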
In this section, we'll discuss how a cluster is created, how nodes join the cluster, how they share the ledger, how they ensure the ledger is replicated, and how they cope with buggy and malicious nodes. diff --git a/docs/src/clusters/metrics.md b/docs/src/clusters/metrics.md index 575c46a26e0019..c162ed50117ca4 100644 --- a/docs/src/clusters/metrics.md +++ b/docs/src/clusters/metrics.md @@ -1,5 +1,7 @@ --- -title: Performance Metrics +title: Solana Cluster Performance Metrics +sidebar_label: Performance Metrics +pagination_label: Cluster Performance Metrics --- Solana cluster performance is measured as average number of transactions per second that the network can sustain \(TPS\). And, how long it takes for a transaction to be confirmed by super majority of the cluster \(Confirmation Time\). diff --git a/docs/src/consensus/commitments.md b/docs/src/consensus/commitments.md index 0bfb55e9237a34..404f41dc7f5f71 100644 --- a/docs/src/consensus/commitments.md +++ b/docs/src/consensus/commitments.md @@ -1,5 +1,7 @@ --- -title: Commitment Status +title: Solana Commitment Status +sidebar_label: Commitment Status +pagination_label: Consensus Commitment Status description: "Processed, confirmed, and finalized. Learn the differences between the different commitment statuses on the Solana blockchain." diff --git a/docs/src/consensus/leader-rotation.md b/docs/src/consensus/leader-rotation.md index a52cbb7eafc465..c65d91c7306176 100644 --- a/docs/src/consensus/leader-rotation.md +++ b/docs/src/consensus/leader-rotation.md @@ -1,5 +1,7 @@ --- -title: Leader Rotation +title: Solana Leader Rotation +sidebar_label: Leader Rotation +pagination_label: Leader Rotation --- At any given moment, a cluster expects only one validator to produce ledger entries. By having only one leader at a time, all validators are able to replay identical copies of the ledger. The drawback of only one leader at a time, however, is that a malicious leader is capable of censoring votes and transactions. Since censoring cannot be distinguished from the network dropping packets, the cluster cannot simply elect a single node to hold the leader role indefinitely. Instead, the cluster minimizes the influence of a malicious leader by rotating which node takes the lead. diff --git a/docs/src/index.mdx b/docs/src/index.mdx index 422404b0a7379b..eff65e951be627 100644 --- a/docs/src/index.mdx +++ b/docs/src/index.mdx @@ -3,10 +3,10 @@ slug: / id: home title: Home sidebar_label: Home +pagination_label: Solana Validator Documentation Home description: "Solana is a high performance network that is utilized for a range of use cases, \ including finance, NFTs, payments, and gaming." 
-# displayed_sidebar: introductionSidebar --- # Solana Validator Documentation diff --git a/docs/src/operations/_category_.json b/docs/src/operations/_category_.json index a32cdd91fe0f18..289f63ff9507ea 100644 --- a/docs/src/operations/_category_.json +++ b/docs/src/operations/_category_.json @@ -2,9 +2,5 @@ "position": 4, "label": "Operating a Validator", "collapsible": true, - "collapsed": true, - "link": { - "type": "doc", - "id": "operations/index" - } + "collapsed": true } diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md index 3bd0f906f729a7..29ef42c81b7f5f 100644 --- a/docs/src/operations/best-practices/general.md +++ b/docs/src/operations/best-practices/general.md @@ -1,6 +1,7 @@ --- -title: Validator Operations Best Practices +title: Solana Validator Operations Best Practices sidebar_label: General Operations +pagination_label: "Best Practices: Validator Operations" --- After you have successfully setup and started a diff --git a/docs/src/operations/best-practices/monitoring.md b/docs/src/operations/best-practices/monitoring.md index b866a88b869531..6d04fc38487be7 100644 --- a/docs/src/operations/best-practices/monitoring.md +++ b/docs/src/operations/best-practices/monitoring.md @@ -1,6 +1,7 @@ --- -title: Validator Monitoring Best Practices +title: Solana Validator Monitoring Best Practices sidebar_label: Monitoring +pagination_label: "Best Practices: Validator Monitoring" --- It is essential that you have monitoring in place on your validator. In the event that your validator is delinquent (behind the rest of the network) you want to respond immediately to fix the issue. One very useful tool to monitor your validator is [`solana-watchtower`](#solana-watchtower). diff --git a/docs/src/operations/best-practices/security.md b/docs/src/operations/best-practices/security.md index d53491c115ae9f..fab46b665ad7fa 100644 --- a/docs/src/operations/best-practices/security.md +++ b/docs/src/operations/best-practices/security.md @@ -1,6 +1,7 @@ --- -title: Validator Security Best Practices +title: Solana Validator Security Best Practices sidebar_label: Security +pagination_label: "Best Practices: Validator Security" --- Being a system administrator for an Ubuntu computer requires technical knowledge of the system and best security practices. The following list should help you get started and is considered the bare minimum for keeping your system safe. diff --git a/docs/src/operations/guides/restart-cluster.md b/docs/src/operations/guides/restart-cluster.md index 4039f69a6b468f..85d4731d604c65 100644 --- a/docs/src/operations/guides/restart-cluster.md +++ b/docs/src/operations/guides/restart-cluster.md @@ -1,4 +1,10 @@ -## Restarting a cluster +--- +title: "Restarting a Solana Cluster" +# really high number to ensure it is listed last in the sidebar +sidebar_position: 999 +sidebar_label: Restart a Cluster +pagination_label: "Validator Guides: Restart a Cluster" +--- ### Step 1. 
Identify the latest optimistically confirmed slot for the cluster diff --git a/docs/src/operations/guides/validator-failover.md b/docs/src/operations/guides/validator-failover.md index 34968b73640933..168a1a4312cec0 100644 --- a/docs/src/operations/guides/validator-failover.md +++ b/docs/src/operations/guides/validator-failover.md @@ -1,5 +1,8 @@ --- -title: Failover Setup +title: "Validator Guide: Setup Node Failover" +sidebar_position: 9 +sidebar_label: Node Failover +pagination_label: "Validator Guides: Node Failover" --- A simple two machine instance failover method is described here, which allows you to: diff --git a/docs/src/operations/guides/validator-info.md b/docs/src/operations/guides/validator-info.md index 5b232ba02d0b32..56b74f732c4232 100644 --- a/docs/src/operations/guides/validator-info.md +++ b/docs/src/operations/guides/validator-info.md @@ -1,5 +1,8 @@ --- -title: Publishing Validator Info +title: "Validator Guide: Publishing Validator Info" +sidebar_position: 1 +sidebar_label: Publishing Validator Info +pagination_label: "Validator Guides: Publishing Validator Info" --- You can publish your validator information to the chain to be publicly visible to other users. diff --git a/docs/src/operations/guides/validator-monitor.md b/docs/src/operations/guides/validator-monitor.md index ef187271917302..5e314c52e82b20 100644 --- a/docs/src/operations/guides/validator-monitor.md +++ b/docs/src/operations/guides/validator-monitor.md @@ -1,5 +1,8 @@ --- -title: Monitoring a Validator +title: "Validator Guide: Monitoring a Validator" +sidebar_position: 2 +sidebar_label: Monitoring a Validator +pagination_label: "Validator Guides: Monitoring a Validator" --- ## Check Gossip diff --git a/docs/src/operations/guides/validator-stake.md b/docs/src/operations/guides/validator-stake.md index 85da5c3380316a..da43c3071d4fb7 100644 --- a/docs/src/operations/guides/validator-stake.md +++ b/docs/src/operations/guides/validator-stake.md @@ -1,5 +1,8 @@ --- -title: Staking +title: "Validator Guide: Staking" +sidebar_position: 3 +sidebar_label: Staking +pagination_label: "Validator Guides: Staking" --- **By default your validator will have no stake.** This means it will be diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md index 69cef1315c05b8..378783798b3ce8 100644 --- a/docs/src/operations/guides/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -1,5 +1,8 @@ --- -title: Starting a Validator +title: "Validator Guide: Starting a Validator" +sidebar_position: 0 +sidebar_label: Starting a Validator +pagination_label: "Validator Guides: Starting a Validator" --- ## Configure Solana CLI diff --git a/docs/src/operations/guides/validator-troubleshoot.md b/docs/src/operations/guides/validator-troubleshoot.md index abf8d8f442c33a..17ae09cb4d6510 100644 --- a/docs/src/operations/guides/validator-troubleshoot.md +++ b/docs/src/operations/guides/validator-troubleshoot.md @@ -1,5 +1,8 @@ --- -title: Troubleshooting +title: "Validator Guide: Troubleshooting" +sidebar_position: 4 +sidebar_label: Troubleshooting +pagination_label: "Validator Guides: Troubleshooting" --- There is a `#validator-support` Discord channel available to reach other diff --git a/docs/src/operations/guides/vote-accounts.md b/docs/src/operations/guides/vote-accounts.md index c86b66cb85bba4..b962b1a1dffa43 100644 --- a/docs/src/operations/guides/vote-accounts.md +++ b/docs/src/operations/guides/vote-accounts.md @@ -1,5 +1,8 @@ --- -title: Vote Account Management 
+title: "Validator Guide: Vote Account Management" +sidebar_position: 5 +sidebar_label: Vote Account Management +pagination_label: "Validator Guides: Vote Account Management" --- This page describes how to set up an on-chain _vote account_. Creating a vote diff --git a/docs/src/operations/prerequisites.md b/docs/src/operations/prerequisites.md index c44c15fc205300..fb37d9ec4de3ff 100644 --- a/docs/src/operations/prerequisites.md +++ b/docs/src/operations/prerequisites.md @@ -1,7 +1,8 @@ --- title: Solana Validator Prerequisites -sidebar_label: Prerequisites sidebar_position: 2 +sidebar_label: Prerequisites +pagination_label: Prerequisites to run a Validator --- Operating a Solana validator is an interesting and rewarding task. Generally speaking, it requires someone with a technical background but also involves community engagement and marketing. diff --git a/docs/src/operations/requirements.md b/docs/src/operations/requirements.md index 8c9e8d62cb5a08..2c9cf576e1fcf9 100644 --- a/docs/src/operations/requirements.md +++ b/docs/src/operations/requirements.md @@ -1,7 +1,8 @@ --- -title: Validator Requirements -sidebar_label: Requirements +title: Solana Validator Requirements sidebar_position: 3 +sidebar_label: Requirements +pagination_label: Requirements to Operate a Validator --- ## Minimum SOL requirements diff --git a/docs/src/operations/validator-or-rpc-node.md b/docs/src/operations/validator-or-rpc-node.md index c07c5201f100f9..ca4ded555972c2 100644 --- a/docs/src/operations/validator-or-rpc-node.md +++ b/docs/src/operations/validator-or-rpc-node.md @@ -1,7 +1,8 @@ --- title: Consensus Validator or RPC Node? -sidebar_label: Validator vs RPC Node sidebar_position: 1 +sidebar_label: Validator vs RPC Node +pagination_label: Consensus Validator vs RPC Node --- Operators who run a [consensus validator](../what-is-a-validator.md) have much diff --git a/docs/src/runtime/programs.md b/docs/src/runtime/programs.md index ae6b0127a2e490..018169ee1c68f9 100644 --- a/docs/src/runtime/programs.md +++ b/docs/src/runtime/programs.md @@ -1,5 +1,7 @@ --- -title: "Native Programs" +title: "Native Programs in the Solana Runtime" +pagination_label: Runtime Native Programs +sidebar_label: Native Programs --- Solana contains a small handful of native programs, which are required to run diff --git a/docs/src/runtime/sysvars.md b/docs/src/runtime/sysvars.md index 99d271f0a3c056..36c00747bfaa03 100644 --- a/docs/src/runtime/sysvars.md +++ b/docs/src/runtime/sysvars.md @@ -1,5 +1,7 @@ --- -title: Sysvar Cluster Data +title: Solana Sysvar Cluster Data +pagination_label: Runtime Sysvar Cluster Data +sidebar_label: Sysvar Cluster Data --- Solana exposes a variety of cluster state data to programs via diff --git a/docs/src/runtime/zk-token-proof.md b/docs/src/runtime/zk-token-proof.md index 4127409eeb00fa..46fab4c7112f9a 100644 --- a/docs/src/runtime/zk-token-proof.md +++ b/docs/src/runtime/zk-token-proof.md @@ -1,5 +1,7 @@ --- -title: ZK Token Proof Program +title: Solana ZK Token Proof Program +pagination_label: Native ZK Token Proof Program +sidebar_label: ZK Token Proof Program --- The native Solana ZK Token proof program verifies a number of zero-knowledge diff --git a/docs/src/validator/anatomy.md b/docs/src/validator/anatomy.md index 5a61eeff7ef11c..465b08d3fa904d 100644 --- a/docs/src/validator/anatomy.md +++ b/docs/src/validator/anatomy.md @@ -1,7 +1,8 @@ --- title: Anatomy of a Validator -sidebar_label: Anatomy sidebar_position: 1 +sidebar_label: Anatomy +pagination_label: Anatomy of a Validator --- 
![Validator block diagrams](/img/validator.svg) diff --git a/docs/src/validator/blockstore.md b/docs/src/validator/blockstore.md index e49e576bf26353..71d41d8a39275d 100644 --- a/docs/src/validator/blockstore.md +++ b/docs/src/validator/blockstore.md @@ -1,6 +1,8 @@ --- -title: Blockstore +title: Blockstore in a Solana Validator sidebar_position: 3 +sidebar_label: Blockstore +pagination_label: Validator Blockstore --- After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../consensus/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. diff --git a/docs/src/validator/geyser.md b/docs/src/validator/geyser.md index 3ea07473a61f88..a8a29d10dd022a 100644 --- a/docs/src/validator/geyser.md +++ b/docs/src/validator/geyser.md @@ -1,5 +1,7 @@ --- -title: Geyser Plugins +title: Solana Validator Geyser Plugins +sidebar_label: Geyser Plugins +pagination_label: Validator Geyser Plugins --- ## Overview diff --git a/docs/src/validator/gossip.md b/docs/src/validator/gossip.md index 3c637f5c707357..f0a2e43f511414 100644 --- a/docs/src/validator/gossip.md +++ b/docs/src/validator/gossip.md @@ -1,6 +1,8 @@ --- -title: Gossip Service +title: Gossip Service in a Solana Validator sidebar_position: 5 +sidebar_label: Gossip Service +pagination_label: Validator Gossip Service --- The Gossip Service acts as a gateway to nodes in the diff --git a/docs/src/validator/runtime.md b/docs/src/validator/runtime.md index 2bf8a52563f88b..a9afba3c056b31 100644 --- a/docs/src/validator/runtime.md +++ b/docs/src/validator/runtime.md @@ -1,6 +1,8 @@ --- -title: Runtime +title: Solana Runtime on a Solana Validator sidebar_position: 6 +sidebar_label: Runtime +pagination_label: Validator Runtime --- The runtime is a concurrent transaction processor. Transactions specify their data dependencies upfront and dynamic memory allocation is explicit. By separating program code from the state it operates on, the runtime is able to choreograph concurrent access. Transactions accessing only read-only accounts are executed in parallel whereas transactions accessing writable accounts are serialized. The runtime interacts with the program through an entrypoint with a well-defined interface. The data stored in an account is an opaque type, an array of bytes. The program has full control over its contents. 
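To make the parallel-versus-serialized rule in the runtime description above concrete, here is a minimal standalone sketch of read/write account locking. All names are illustrative; this is not the validator's actual scheduler.

```rust
use std::collections::{HashMap, HashSet};

/// Tracks which accounts are currently locked by in-flight transactions.
#[derive(Default)]
struct AccountLocks {
    readers: HashMap<String, usize>, // account -> number of read locks
    writers: HashSet<String>,        // accounts locked for writing
}

impl AccountLocks {
    /// A transaction may run now only if none of its writable accounts are
    /// locked at all, and none of its readonly accounts are write-locked.
    fn try_lock(&mut self, readonly: &[&str], writable: &[&str]) -> bool {
        let conflict = writable
            .iter()
            .any(|a| self.writers.contains(*a) || self.readers.contains_key(*a))
            || readonly.iter().any(|a| self.writers.contains(*a));
        if conflict {
            return false; // must be serialized behind the conflicting tx
        }
        for a in readonly {
            *self.readers.entry((*a).to_string()).or_insert(0) += 1;
        }
        for a in writable {
            self.writers.insert((*a).to_string());
        }
        true
    }
}

fn main() {
    let mut locks = AccountLocks::default();
    // Two transactions that only read the same account run in parallel.
    assert!(locks.try_lock(&["sysvar_clock"], &["alice"]));
    assert!(locks.try_lock(&["sysvar_clock"], &["bob"]));
    // A transaction writing an already write-locked account must wait.
    assert!(!locks.try_lock(&[], &["alice"]));
    println!("lock scheduling sketch ok");
}
```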
diff --git a/docs/src/validator/tpu.md b/docs/src/validator/tpu.md index 0082902078d76e..7585911bf20179 100644 --- a/docs/src/validator/tpu.md +++ b/docs/src/validator/tpu.md @@ -1,7 +1,8 @@ --- -title: Transaction Processing Unit -sidebar_label: TPU +title: Transaction Processing Unit in a Solana Validator sidebar_position: 2 +sidebar_label: TPU +pagination_label: Validator's Transaction Processing Unit (TPU) --- TPU (Transaction Processing Unit) is the logic of the validator diff --git a/docs/src/validator/tvu.md b/docs/src/validator/tvu.md index e3ac7776f0d128..362b6bae165975 100644 --- a/docs/src/validator/tvu.md +++ b/docs/src/validator/tvu.md @@ -1,7 +1,8 @@ --- -title: Transaction Validation Unit -sidebar_label: TVU +title: Transaction Validation Unit in a Solana Validator sidebar_position: 3 +sidebar_label: TVU +pagination_label: Validator's Transaction Validation Unit (TVU) --- TVU (Transaction Validation Unit) is the logic of the validator From 9935c2b5e7eac7d78f1fa64d3dd4aef466320d06 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Sat, 3 Feb 2024 23:02:09 -0800 Subject: [PATCH 111/401] [AppendVec] Use proper Vec initial size in append_accounts() (#35047) #### Problem append_accounts() only appends (len - skip) accounts. However, AppendVec::append_accounts() reserves `len` instead of `(len - skip)` for its vectors. #### Summary of Changes Use (len - skip) as the initial size of the Vectors. --- accounts-db/src/append_vec.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 353cb58e606e96..578371c45a376c 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -586,7 +586,11 @@ impl AppendVec { let mut offset = self.len(); let len = accounts.accounts.len(); - let mut offsets = Vec::with_capacity(len); + // Here we have `len - skip` accounts. The +1 extra capacity + // is for storing the aligned offset of the last entry, which is used + // to compute the StoredAccountInfo of the last entry. + let offsets_len = len - skip + 1; + let mut offsets = Vec::with_capacity(offsets_len); for i in skip..len { let (account, pubkey, hash, write_version_obsolete) = accounts.get(i); let account_meta = account @@ -629,10 +633,11 @@ impl AppendVec { if offsets.is_empty() { None } else { + let mut rv = Vec::with_capacity(offsets.len()); + // The last entry in these offsets needs to be the u64 aligned offset, because that's // where the *next* entry will begin to be stored.
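As a side note on the `append_accounts` hunk above: reserving `len - skip + 1` slots leaves room for one trailing sentinel offset, so that iterating offset pairs yields each entry's start together with the next entry's start. A standalone sketch of that sentinel pattern, with stand-in names and sizes rather than the real `AppendVec` types:

```rust
/// Round `n` up to the next multiple of 8, mimicking a u64 alignment macro.
fn align8(n: usize) -> usize {
    (n + 7) & !7
}

/// Stand-in for StoredAccountInfo: where an entry starts and how much space
/// it occupies (including alignment padding).
#[derive(Debug)]
struct StoredInfo {
    offset: usize,
    size: usize,
}

fn main() {
    let sizes = [100usize, 57, 300]; // the `len - skip` appended entry sizes
    let mut offset = 0usize;

    // One slot per appended entry, plus one for the trailing sentinel.
    let mut offsets = Vec::with_capacity(sizes.len() + 1);
    for size in sizes {
        offsets.push(offset);
        offset = align8(offset + size); // next entry starts aligned
    }
    // The sentinel: where the *next* entry would begin. It lets the loop
    // below compute the size of the last real entry.
    offsets.push(align8(offset));

    let mut infos = Vec::with_capacity(offsets.len() - 1);
    for pair in offsets.windows(2) {
        infos.push(StoredInfo { offset: pair[0], size: pair[1] - pair[0] });
    }
    assert_eq!(infos.len(), sizes.len());
    println!("{infos:?}");
}
```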
offsets.push(u64_align!(offset)); - let mut rv = Vec::with_capacity(len); for offsets in offsets.windows(2) { rv.push(StoredAccountInfo { offset: offsets[0], From cb0f13ef0774356de506dd892659497df44643e6 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sun, 4 Feb 2024 08:07:39 -0800 Subject: [PATCH 112/401] SVM: Split transaction processing code into its own struct (#35044) * Split transaction processing code into its own struct * define and implement callback trait --- runtime/src/bank.rs | 424 ++++++++++++++++------- runtime/src/bank/address_lookup_table.rs | 1 + runtime/src/bank/sysvar_cache.rs | 22 +- runtime/src/bank/tests.rs | 18 +- runtime/src/bank_forks.rs | 5 +- runtime/src/svm/account_loader.rs | 132 ++++--- 6 files changed, 397 insertions(+), 205 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 94e7bf979ff5f0..0f10ce51e16564 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -86,6 +86,7 @@ use { AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, }, + accounts_file::MatchAccountOwnerError, accounts_hash::{ AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, }, @@ -576,15 +577,14 @@ impl PartialEq for Bank { freeze_started: _, vote_only_bank: _, cost_tracker: _, - sysvar_cache: _, accounts_data_size_initial: _, accounts_data_size_delta_on_chain: _, accounts_data_size_delta_off_chain: _, fee_structure: _, incremental_snapshot_persistence: _, loaded_programs_cache: _, - check_program_modification_slot: _, epoch_reward_status: _, + transaction_processor: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. @@ -824,8 +824,6 @@ pub struct Bank { cost_tracker: RwLock, - sysvar_cache: RwLock, - /// The initial accounts data size at the start of this Bank, before processing any transactions/etc accounts_data_size_initial: u64, /// The change to accounts data size in this Bank, due on-chain events (i.e. 
transactions) @@ -844,9 +842,9 @@ pub struct Bank { pub loaded_programs_cache: Arc>>, - pub check_program_modification_slot: bool, - epoch_reward_status: EpochRewardStatus, + + transaction_processor: TransactionBatchProcessor, } struct VoteWithStakeDelegations { @@ -1026,7 +1024,6 @@ impl Bank { freeze_started: AtomicBool::default(), vote_only_bank: false, cost_tracker: RwLock::::default(), - sysvar_cache: RwLock::::default(), accounts_data_size_initial: 0, accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), @@ -1035,10 +1032,12 @@ impl Bank { Slot::default(), Epoch::default(), ))), - check_program_modification_slot: false, epoch_reward_status: EpochRewardStatus::default(), + transaction_processor: TransactionBatchProcessor::default(), }; + bank.transaction_processor = TransactionBatchProcessor::new(&bank); + let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; bank.accounts_data_size_initial = accounts_data_size_initial; @@ -1339,16 +1338,17 @@ impl Bank { )), freeze_started: AtomicBool::new(false), cost_tracker: RwLock::new(CostTracker::default()), - sysvar_cache: RwLock::new(SysvarCache::default()), accounts_data_size_initial, accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: parent.fee_structure.clone(), loaded_programs_cache: parent.loaded_programs_cache.clone(), - check_program_modification_slot: false, epoch_reward_status: parent.epoch_reward_status.clone(), + transaction_processor: TransactionBatchProcessor::default(), }; + new.transaction_processor = TransactionBatchProcessor::new(&new); + let (_, ancestors_time_us) = measure_us!({ let mut ancestors = Vec::with_capacity(1 + new.parents().len()); ancestors.push(new.slot()); @@ -1843,7 +1843,6 @@ impl Bank { freeze_started: AtomicBool::new(fields.hash != Hash::default()), vote_only_bank: false, cost_tracker: RwLock::new(CostTracker::default()), - sysvar_cache: RwLock::new(SysvarCache::default()), accounts_data_size_initial, accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), @@ -1852,9 +1851,12 @@ impl Bank { fields.slot, fields.epoch, ))), - check_program_modification_slot: false, epoch_reward_status: fields.epoch_reward_status, + transaction_processor: TransactionBatchProcessor::default(), }; + + bank.transaction_processor = TransactionBatchProcessor::new(&bank); + bank.finish_init( genesis_config, additional_builtins, @@ -3340,7 +3342,7 @@ impl Bank { let pre_lamport = curr_stake_account.lamports(); let post_lamport = post_stake_account.lamports(); assert_eq!(pre_lamport + u64::try_from(reward_amount).unwrap(), post_lamport, - "stake account balance has changed since the reward calculation! account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}"); + "stake account balance has changed since the reward calculation! 
account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}"); } } } @@ -4588,18 +4590,24 @@ impl Bank { } balances } +} - fn program_modification_slot(&self, pubkey: &Pubkey) -> Result { - let program = self - .get_account_with_fixed_root(pubkey) +impl TransactionBatchProcessor { + fn program_modification_slot( + &self, + callbacks: &CB, + pubkey: &Pubkey, + ) -> Result { + let program = callbacks + .get_account_shared_data(pubkey) .ok_or(TransactionError::ProgramAccountNotFound)?; if bpf_loader_upgradeable::check_id(program.owner()) { if let Ok(UpgradeableLoaderState::Program { programdata_address, }) = program.state() { - let programdata = self - .get_account_with_fixed_root(&programdata_address) + let programdata = callbacks + .get_account_shared_data(&programdata_address) .ok_or(TransactionError::ProgramAccountNotFound)?; if let Ok(UpgradeableLoaderState::ProgramData { slot, @@ -4619,12 +4627,13 @@ impl Bank { } } - fn load_program_accounts( + fn load_program_accounts( &self, + callbacks: &CB, pubkey: &Pubkey, environments: &ProgramRuntimeEnvironments, ) -> ProgramAccountLoadResult { - let program_account = match self.get_account_with_fixed_root(pubkey) { + let program_account = match callbacks.get_account_shared_data(pubkey) { None => return ProgramAccountLoadResult::AccountNotFound, Some(account) => account, }; @@ -4653,7 +4662,8 @@ impl Bank { programdata_address, }) = program_account.state() { - let programdata_account = match self.get_account_with_fixed_root(&programdata_address) { + let programdata_account = match callbacks.get_account_shared_data(&programdata_address) + { None => return ProgramAccountLoadResult::AccountNotFound, Some(account) => account, }; @@ -4710,8 +4720,9 @@ impl Bank { } } - pub fn load_program( + pub fn load_program( &self, + callbacks: &CB, pubkey: &Pubkey, reload: bool, recompile: Option>, @@ -4728,100 +4739,107 @@ impl Bank { ..LoadProgramMetrics::default() }; - let mut loaded_program = match self.load_program_accounts(pubkey, environments) { - ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::Closed, - )), + let mut loaded_program = + match self.load_program_accounts(callbacks, pubkey, environments) { + ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( + self.slot, + LoadedProgramType::Closed, + )), - ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), - - ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { - Self::load_program_from_bytes( - &mut load_program_metrics, - program_account.data(), - program_account.owner(), - program_account.data().len(), - 0, - environments.program_runtime_v1.clone(), - reload, - ) - .map_err(|_| (0, environments.program_runtime_v1.clone())) - } + ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), - ProgramAccountLoadResult::ProgramOfLoaderV3( - program_account, - programdata_account, - slot, - ) => programdata_account - .data() - .get(UpgradeableLoaderState::size_of_programdata_metadata()..) 
- .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|programdata| { + ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { Self::load_program_from_bytes( &mut load_program_metrics, - programdata, + program_account.data(), program_account.owner(), - program_account - .data() - .len() - .saturating_add(programdata_account.data().len()), - slot, + program_account.data().len(), + 0, environments.program_runtime_v1.clone(), reload, ) - }) - .map_err(|_| (slot, environments.program_runtime_v1.clone())), + .map_err(|_| (0, environments.program_runtime_v1.clone())) + } - ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => program_account - .data() - .get(LoaderV4State::program_data_offset()..) - .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|elf_bytes| { - Self::load_program_from_bytes( - &mut load_program_metrics, - elf_bytes, - &loader_v4::id(), - program_account.data().len(), - slot, - environments.program_runtime_v2.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v2.clone())), - } - .unwrap_or_else(|(slot, env)| { - LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) - }); + ProgramAccountLoadResult::ProgramOfLoaderV3( + program_account, + programdata_account, + slot, + ) => programdata_account + .data() + .get(UpgradeableLoaderState::size_of_programdata_metadata()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|programdata| { + Self::load_program_from_bytes( + &mut load_program_metrics, + programdata, + program_account.owner(), + program_account + .data() + .len() + .saturating_add(programdata_account.data().len()), + slot, + environments.program_runtime_v1.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v1.clone())), + + ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { + program_account + .data() + .get(LoaderV4State::program_data_offset()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|elf_bytes| { + Self::load_program_from_bytes( + &mut load_program_metrics, + elf_bytes, + &loader_v4::id(), + program_account.data().len(), + slot, + environments.program_runtime_v2.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v2.clone())) + } + } + .unwrap_or_else(|(slot, env)| { + LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) + }); let mut timings = ExecuteDetailsTimings::default(); load_program_metrics.submit_datapoint(&mut timings); if let Some(recompile) = recompile { - loaded_program.effective_slot = loaded_program.effective_slot.max( - self.epoch_schedule() - .get_first_slot_in_epoch(effective_epoch), - ); + loaded_program.effective_slot = loaded_program + .effective_slot + .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); loaded_program.tx_usage_counter = AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); loaded_program.ix_usage_counter = AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); } - loaded_program.update_access_slot(self.slot()); + loaded_program.update_access_slot(self.slot); Arc::new(loaded_program) } +} +impl Bank { pub fn clear_program_cache(&self) { self.loaded_programs_cache .write() .unwrap() .unload_all_programs(); } +} +impl TransactionBatchProcessor { /// Execute a transaction using the provided loaded accounts and update /// the executors cache if the transaction was successful. 
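For readers following the loader handling above: `program_modification_slot` resolves a loader-v3 program through its programdata account to find the slot of its last deployment. A toy standalone sketch of that two-hop lookup, with string keys and a simplified enum standing in for `UpgradeableLoaderState` (the real code deserializes account data and returns `TransactionError`):

```rust
use std::collections::HashMap;

/// Simplified stand-ins for the upgradeable-loader account states.
enum LoaderState {
    Program { programdata_address: &'static str },
    ProgramData { slot: u64 },
}

/// A loader-v3 program account points at a programdata account, and the
/// programdata account records the slot of the last deployment.
fn modification_slot(
    accounts: &HashMap<&str, LoaderState>,
    program: &str,
) -> Result<u64, &'static str> {
    match accounts.get(program) {
        Some(LoaderState::Program { programdata_address }) => {
            match accounts.get(programdata_address) {
                Some(LoaderState::ProgramData { slot }) => Ok(*slot),
                _ => Err("invalid programdata account"),
            }
        }
        _ => Err("program account not found"),
    }
}

fn main() {
    let mut accounts = HashMap::new();
    accounts.insert(
        "my_program",
        LoaderState::Program { programdata_address: "my_programdata" },
    );
    accounts.insert("my_programdata", LoaderState::ProgramData { slot: 42 });
    assert_eq!(modification_slot(&accounts, "my_program"), Ok(42));
    println!("deployment slot: {:?}", modification_slot(&accounts, "my_program"));
}
```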
#[allow(clippy::too_many_arguments)] - fn execute_loaded_transaction( + fn execute_loaded_transaction( &self, + callback: &CB, tx: &SanitizedTransaction, loaded_transaction: &mut LoadedTransaction, compute_budget: ComputeBudget, @@ -4853,7 +4871,7 @@ impl Bank { let mut transaction_context = TransactionContext::new( transaction_accounts, - self.rent_collector.rent.clone(), + callback.get_rent_collector().rent.clone(), compute_budget.max_invoke_stack_height, compute_budget.max_instruction_trace_length, ); @@ -4861,7 +4879,7 @@ impl Bank { transaction_context.set_signature(tx.signature()); let pre_account_state_info = TransactionAccountStateInfo::new( - &self.rent_collector.rent, + &callback.get_rent_collector().rent, &transaction_context, tx.message(), ); @@ -4877,7 +4895,8 @@ impl Bank { None }; - let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); + let (blockhash, lamports_per_signature) = + callback.get_last_blockhash_and_lamports_per_signature(); let mut executed_units = 0u64; let mut programs_modified_by_tx = LoadedProgramsForTxBatch::new( @@ -4892,7 +4911,7 @@ impl Bank { log_collector.clone(), programs_loaded_for_tx_batch, &mut programs_modified_by_tx, - self.feature_set.clone(), + callback.get_feature_set(), compute_budget, timings, &self.sysvar_cache.read().unwrap(), @@ -4910,7 +4929,7 @@ impl Bank { let mut status = process_result .and_then(|info| { let post_account_state_info = TransactionAccountStateInfo::new( - &self.rent_collector.rent, + &callback.get_rent_collector().rent, &transaction_context, tx.message(), ); @@ -4995,8 +5014,9 @@ impl Bank { } } - fn replenish_program_cache( + fn replenish_program_cache( &self, + callback: &CB, program_accounts_map: &HashMap, ) -> LoadedProgramsForTxBatch { let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = @@ -5007,7 +5027,7 @@ impl Bank { ( *pubkey, ( - self.program_modification_slot(pubkey) + self.program_modification_slot(callback, pubkey) .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) }), @@ -5043,11 +5063,7 @@ impl Bank { } // Submit our last completed loading task. if let Some((key, program)) = program_to_store.take() { - loaded_programs_cache.finish_cooperative_loading_task( - self.slot(), - key, - program, - ); + loaded_programs_cache.finish_cooperative_loading_task(self.slot, key, program); } // Figure out which program needs to be loaded next. let program_to_load = loaded_programs_cache.extract( @@ -5062,7 +5078,7 @@ impl Bank { if let Some((key, count)) = program_to_load { // Load, verify and compile one program. - let program = self.load_program(&key, false, None); + let program = self.load_program(callback, &key, false, None); program.tx_usage_counter.store(count, Ordering::Relaxed); program_to_store = Some((key, program)); } else if missing_programs.is_empty() { @@ -5081,9 +5097,9 @@ impl Bank { /// Returns a hash map of executable program accounts (program accounts that are not writable /// in the given transactions), and their owners, for the transactions with a valid /// blockhash or nonce. 
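The broader design of this patch is that `TransactionBatchProcessor` asks its caller for state through a callback trait instead of reaching into `Bank` directly. A pared-down standalone analogue of that decoupling, with illustrative names rather than the real trait:

```rust
use std::collections::HashMap;

/// Minimal analogue of the callback trait: the processor asks its caller
/// for account state instead of owning the account store itself.
trait AccountSource {
    fn get_account(&self, key: &str) -> Option<Vec<u8>>;
}

/// The processor is generic over any AccountSource.
struct Processor;

impl Processor {
    fn load<S: AccountSource>(&self, source: &S, key: &str) -> Result<Vec<u8>, String> {
        source
            .get_account(key)
            .ok_or_else(|| format!("account not found: {key}"))
    }
}

/// Test double: a HashMap-backed account store.
struct MapSource(HashMap<String, Vec<u8>>);

impl AccountSource for MapSource {
    fn get_account(&self, key: &str) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }
}

fn main() {
    let mut m = HashMap::new();
    m.insert("fee_payer".to_string(), vec![1, 2, 3]);
    let source = MapSource(m);
    let processor = Processor;
    assert_eq!(processor.load(&source, "fee_payer"), Ok(vec![1, 2, 3]));
    assert!(processor.load(&source, "missing").is_err());
    println!("callback decoupling sketch ok");
}
```

The payoff, as the tests later in this patch show with `TestCallbacks`, is that the same processing code can be driven by a full bank in production or by a small in-memory store in tests.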
- fn filter_executable_program_accounts<'a>( + fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( &self, - ancestors: &Ancestors, + callbacks: &CB, txs: &[SanitizedTransaction], lock_results: &mut [TransactionCheckResult], program_owners: &'a [Pubkey], @@ -5101,11 +5117,8 @@ impl Bank { saturating_add_assign!(*count, 1); } Entry::Vacant(entry) => { - if let Ok(index) = self - .rc - .accounts - .accounts_db - .account_matches_owners(ancestors, key, program_owners) + if let Ok(index) = + callbacks.account_matches_owners(key, program_owners) { program_owners .get(index) @@ -5123,7 +5136,9 @@ impl Bank { }); result } +} +impl Bank { #[allow(clippy::type_complexity)] pub fn load_and_execute_transactions( &self, @@ -5187,17 +5202,21 @@ impl Bank { debug!("check: {}us", check_time.as_us()); timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_time.as_us()); - let sanitized_output = self.load_and_execute_sanitized_transactions( - sanitized_txs, - &mut check_results, - &mut error_counters, - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, - timings, - account_overrides, - log_messages_bytes_limit, - ); + let sanitized_output = self + .transaction_processor + .load_and_execute_sanitized_transactions( + self, + sanitized_txs, + &mut check_results, + &mut error_counters, + enable_cpi_recording, + enable_log_recording, + enable_return_data_recording, + timings, + account_overrides, + self.builtin_programs.iter(), + log_messages_bytes_limit, + ); let mut signature_count = 0; @@ -5325,10 +5344,13 @@ impl Bank { error_counters, } } +} +impl TransactionBatchProcessor { #[allow(clippy::too_many_arguments)] - fn load_and_execute_sanitized_transactions( + fn load_and_execute_sanitized_transactions<'a, CB: TransactionProcessingCallback>( &self, + callbacks: &CB, sanitized_txs: &[SanitizedTransaction], check_results: &mut [TransactionCheckResult], error_counters: &mut TransactionErrorMetrics, @@ -5337,35 +5359,32 @@ impl Bank { enable_return_data_recording: bool, timings: &mut ExecuteTimings, account_overrides: Option<&AccountOverrides>, + builtin_programs: impl Iterator, log_messages_bytes_limit: Option, ) -> LoadAndExecuteSanitizedTransactionsOutput { let mut program_accounts_map = self.filter_executable_program_accounts( - &self.ancestors, + callbacks, sanitized_txs, check_results, PROGRAM_OWNERS, ); let native_loader = native_loader::id(); - for builtin_program in self.builtin_programs.iter() { + for builtin_program in builtin_programs { program_accounts_map.insert(*builtin_program, (&native_loader, 0)); } let programs_loaded_for_tx_batch = Rc::new(RefCell::new( - self.replenish_program_cache(&program_accounts_map), + self.replenish_program_cache(callbacks, &program_accounts_map), )); let mut load_time = Measure::start("accounts_load"); let mut loaded_transactions = load_accounts( - &self.rc.accounts.accounts_db, - &self.ancestors, + callbacks, sanitized_txs, check_results, error_counters, - &self.rent_collector, - &self.feature_set, &self.fee_structure, account_overrides, - self.get_reward_interval(), &program_accounts_map, &programs_loaded_for_tx_batch.borrow(), ); @@ -5402,6 +5421,7 @@ impl Bank { }; let result = self.execute_loaded_transaction( + callbacks, tx, loaded_transaction, compute_budget, @@ -5442,7 +5462,7 @@ impl Bank { .unwrap() .evict_using_2s_random_selection( Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), - self.slot(), + self.slot, ); debug!( @@ -5460,7 +5480,9 @@ impl Bank { execution_results, } } +} +impl 
Bank { /// Load the accounts data size, in bytes pub fn load_accounts_data_size(&self) -> u64 { self.accounts_data_size_initial @@ -8190,7 +8212,7 @@ impl Bank { pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool { if slot < &self.slot { - if let Ok(sysvar_cache) = self.sysvar_cache.read() { + if let Ok(sysvar_cache) = self.transaction_processor.sysvar_cache.read() { if let Ok(slot_hashes) = sysvar_cache.get_slot_hashes() { return slot_hashes.get(slot).is_some(); } @@ -8198,6 +8220,156 @@ impl Bank { } false } + + pub fn check_program_modification_slot(&mut self) { + self.transaction_processor.check_program_modification_slot = true; + } + + pub fn load_program( + &self, + pubkey: &Pubkey, + reload: bool, + recompile: Option>, + ) -> Arc { + self.transaction_processor + .load_program(self, pubkey, reload, recompile) + } +} + +pub trait TransactionProcessingCallback { + fn account_matches_owners( + &self, + account: &Pubkey, + owners: &[Pubkey], + ) -> std::result::Result; + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64); + + fn get_rent_collector(&self) -> &RentCollector; + + fn get_feature_set(&self) -> Arc; + + fn check_account_access( + &self, + _tx: &SanitizedTransaction, + _account_index: usize, + _account: &AccountSharedData, + _error_counters: &mut TransactionErrorMetrics, + ) -> Result<()> { + Ok(()) + } +} + +impl TransactionProcessingCallback for Bank { + fn account_matches_owners( + &self, + account: &Pubkey, + owners: &[Pubkey], + ) -> std::result::Result { + self.rc + .accounts + .accounts_db + .account_matches_owners(&self.ancestors, account, owners) + } + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + self.rc + .accounts + .accounts_db + .load_with_fixed_root(&self.ancestors, pubkey) + .map(|(acc, _)| acc) + } + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { + self.last_blockhash_and_lamports_per_signature() + } + + fn get_rent_collector(&self) -> &RentCollector { + &self.rent_collector + } + + fn get_feature_set(&self) -> Arc { + self.feature_set.clone() + } + + fn check_account_access( + &self, + tx: &SanitizedTransaction, + account_index: usize, + account: &AccountSharedData, + error_counters: &mut TransactionErrorMetrics, + ) -> Result<()> { + if self.get_reward_interval() == RewardInterval::InsideInterval + && tx.message().is_writable(account_index) + && solana_stake_program::check_id(account.owner()) + { + error_counters.program_execution_temporarily_restricted += 1; + Err(TransactionError::ProgramExecutionTemporarilyRestricted { + account_index: account_index as u8, + }) + } else { + Ok(()) + } + } +} + +#[derive(AbiExample, Debug)] +struct TransactionBatchProcessor { + /// Bank slot (i.e. 
block) + slot: Slot, + + /// Bank epoch + epoch: Epoch, + + /// initialized from genesis + epoch_schedule: EpochSchedule, + + /// Transaction fee structure + fee_structure: FeeStructure, + + pub check_program_modification_slot: bool, + + /// Optional config parameters that can override runtime behavior + runtime_config: Arc, + + sysvar_cache: RwLock, + + pub loaded_programs_cache: Arc>>, +} + +impl Default for TransactionBatchProcessor { + fn default() -> Self { + Self { + slot: Slot::default(), + epoch: Epoch::default(), + epoch_schedule: EpochSchedule::default(), + fee_structure: FeeStructure::default(), + check_program_modification_slot: false, + runtime_config: Arc::::default(), + sysvar_cache: RwLock::::default(), + loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + Slot::default(), + Epoch::default(), + ))), + } + } +} + +impl TransactionBatchProcessor { + fn new(bank: &Bank) -> Self { + Self { + slot: bank.slot(), + epoch: bank.epoch(), + epoch_schedule: bank.epoch_schedule.clone(), + fee_structure: bank.fee_structure.clone(), + check_program_modification_slot: false, + runtime_config: bank.runtime_config.clone(), + sysvar_cache: RwLock::::default(), + loaded_programs_cache: bank.loaded_programs_cache.clone(), + } + } } #[cfg(feature = "dev-context-only-utils")] diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 07c82acf6da8b1..483ec7cea00ea1 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -16,6 +16,7 @@ impl AddressLoader for &Bank { address_table_lookups: &[MessageAddressTableLookup], ) -> Result { let slot_hashes = self + .transaction_processor .sysvar_cache .read() .unwrap() diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index d6131695fb7d5f..91a22907d6e888 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -5,7 +5,7 @@ use { impl Bank { pub(crate) fn fill_missing_sysvar_cache_entries(&self) { - let mut sysvar_cache = self.sysvar_cache.write().unwrap(); + let mut sysvar_cache = self.transaction_processor.sysvar_cache.write().unwrap(); sysvar_cache.fill_missing_entries(|pubkey, callback| { if let Some(account) = self.get_account_with_fixed_root(pubkey) { callback(account.data()); @@ -14,12 +14,16 @@ impl Bank { } pub(crate) fn reset_sysvar_cache(&self) { - let mut sysvar_cache = self.sysvar_cache.write().unwrap(); + let mut sysvar_cache = self.transaction_processor.sysvar_cache.write().unwrap(); sysvar_cache.reset(); } pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache { - self.sysvar_cache.read().unwrap().clone() + self.transaction_processor + .sysvar_cache + .read() + .unwrap() + .clone() } } @@ -40,7 +44,7 @@ mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(100_000); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank0_sysvar_cache = bank0.sysvar_cache.read().unwrap(); + let bank0_sysvar_cache = bank0.transaction_processor.sysvar_cache.read().unwrap(); let bank0_cached_clock = bank0_sysvar_cache.get_clock(); let bank0_cached_epoch_schedule = bank0_sysvar_cache.get_epoch_schedule(); let bank0_cached_fees = bank0_sysvar_cache.get_fees(); @@ -60,7 +64,7 @@ mod tests { bank1_slot, )); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = 
bank1_sysvar_cache.get_epoch_schedule(); let bank1_cached_fees = bank1_sysvar_cache.get_fees(); @@ -81,7 +85,7 @@ mod tests { let bank2_slot = bank1.slot() + 1; let bank2 = Bank::new_from_parent(bank1.clone(), &Pubkey::default(), bank2_slot); - let bank2_sysvar_cache = bank2.sysvar_cache.read().unwrap(); + let bank2_sysvar_cache = bank2.transaction_processor.sysvar_cache.read().unwrap(); let bank2_cached_clock = bank2_sysvar_cache.get_clock(); let bank2_cached_epoch_schedule = bank2_sysvar_cache.get_epoch_schedule(); let bank2_cached_fees = bank2_sysvar_cache.get_fees(); @@ -112,7 +116,7 @@ mod tests { let bank1_slot = bank0.slot() + 1; let mut bank1 = Bank::new_from_parent(bank0, &Pubkey::default(), bank1_slot); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = bank1_sysvar_cache.get_epoch_schedule(); let bank1_cached_fees = bank1_sysvar_cache.get_fees(); @@ -130,7 +134,7 @@ mod tests { drop(bank1_sysvar_cache); bank1.reset_sysvar_cache(); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); assert!(bank1_sysvar_cache.get_clock().is_err()); assert!(bank1_sysvar_cache.get_epoch_schedule().is_err()); assert!(bank1_sysvar_cache.get_fees().is_err()); @@ -155,7 +159,7 @@ mod tests { bank1.fill_missing_sysvar_cache_entries(); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); assert_eq!(bank1_sysvar_cache.get_clock(), bank1_cached_clock); assert_eq!( bank1_sysvar_cache.get_epoch_schedule(), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index ad28005fccfe7e..c8378108928763 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -10987,16 +10987,12 @@ fn test_rent_state_list_len() { let sanitized_tx = SanitizedTransaction::try_from_legacy_transaction(tx).unwrap(); let mut error_counters = TransactionErrorMetrics::default(); let loaded_txs = load_accounts( - &bank.accounts().accounts_db, - &bank.ancestors, + &bank, &[sanitized_tx.clone()], &[(Ok(()), None, Some(0))], &mut error_counters, - &bank.rent_collector, - &bank.feature_set, &FeeStructure::default(), None, - RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), ); @@ -13744,10 +13740,10 @@ fn test_filter_executable_program_accounts() { ); let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - let ancestors = vec![(0, 0)].into_iter().collect(); let owners = &[program1_pubkey, program2_pubkey]; - let programs = bank.filter_executable_program_accounts( - &ancestors, + let transaction_processor = TransactionBatchProcessor::new(&bank); + let programs = transaction_processor.filter_executable_program_accounts( + &bank, &[sanitized_tx1, sanitized_tx2], &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], owners, @@ -13839,11 +13835,11 @@ fn test_filter_executable_program_accounts_invalid_blockhash() { // Let's not register blockhash from tx2. 
This should cause the tx2 to fail let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - let ancestors = vec![(0, 0)].into_iter().collect(); let owners = &[program1_pubkey, program2_pubkey]; let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; - let programs = bank.filter_executable_program_accounts( - &ancestors, + let transaction_processor = TransactionBatchProcessor::new(&bank); + let programs = transaction_processor.filter_executable_program_accounts( + &bank, &[sanitized_tx1, sanitized_tx2], &mut lock_results, owners, diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index d481bf1b43bda8..668062c8d31cce 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -221,8 +221,9 @@ impl BankForks { } pub fn insert(&mut self, mut bank: Bank) -> BankWithScheduler { - bank.check_program_modification_slot = - self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup; + if self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup { + bank.check_program_modification_slot(); + } let bank = Arc::new(bank); let bank = if let Some(scheduler_pool) = &self.scheduler_pool { diff --git a/runtime/src/svm/account_loader.rs b/runtime/src/svm/account_loader.rs index 31ce63654670e5..e496f94b8061ac 100644 --- a/runtime/src/svm/account_loader.rs +++ b/runtime/src/svm/account_loader.rs @@ -1,12 +1,10 @@ use { - crate::{bank::RewardInterval, svm::account_rent_state::RentState}, + crate::{bank::TransactionProcessingCallback, svm::account_rent_state::RentState}, itertools::Itertools, log::warn, solana_accounts_db::{ account_overrides::AccountOverrides, accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, - accounts_db::AccountsDb, - ancestors::Ancestors, nonce_info::NonceFull, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, @@ -22,7 +20,7 @@ use { create_executable_meta, is_builtin, is_executable, Account, AccountSharedData, ReadableAccount, WritableAccount, }, - feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation}, fee::FeeStructure, message::SanitizedMessage, native_loader, @@ -38,21 +36,17 @@ use { std::{collections::HashMap, num::NonZeroUsize}, }; -#[allow(clippy::too_many_arguments)] -pub(crate) fn load_accounts( - accounts_db: &AccountsDb, - ancestors: &Ancestors, +pub(crate) fn load_accounts( + callbacks: &CB, txs: &[SanitizedTransaction], lock_results: &[TransactionCheckResult], error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, fee_structure: &FeeStructure, account_overrides: Option<&AccountOverrides>, - in_reward_interval: RewardInterval, program_accounts: &HashMap, loaded_programs: &LoadedProgramsForTxBatch, ) -> Vec { + let feature_set = callbacks.get_feature_set(); txs.iter() .zip(lock_results) .map(|etx| match etx { @@ -75,15 +69,11 @@ pub(crate) fn load_accounts( // load transactions let loaded_transaction = match load_transaction_accounts( - accounts_db, - ancestors, + callbacks, tx, fee, error_counters, - rent_collector, - feature_set, account_overrides, - in_reward_interval, program_accounts, loaded_programs, ) { @@ -113,27 +103,22 @@ pub(crate) fn load_accounts( .collect() } -#[allow(clippy::too_many_arguments)] -fn load_transaction_accounts( - accounts_db: &AccountsDb, - ancestors: &Ancestors, +fn load_transaction_accounts( + callbacks: &CB, tx: &SanitizedTransaction, fee: u64, 
error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, account_overrides: Option<&AccountOverrides>, - reward_interval: RewardInterval, program_accounts: &HashMap, loaded_programs: &LoadedProgramsForTxBatch, ) -> Result { - let in_reward_interval = reward_interval == RewardInterval::InsideInterval; - // NOTE: this check will never fail because `tx` is sanitized if tx.signatures().is_empty() && fee != 0 { return Err(TransactionError::MissingSignatureForFee); } + let feature_set = callbacks.get_feature_set(); + // There is no way to predict what program will execute without an error // If a fee can pay for execution then the program will be scheduled let mut validated_fee_payer = false; @@ -143,6 +128,7 @@ fn load_transaction_accounts( let mut accounts_found = Vec::with_capacity(account_keys.len()); let mut account_deps = Vec::with_capacity(account_keys.len()); let mut rent_debits = RentDebits::default(); + let rent_collector = callbacks.get_rent_collector(); let set_exempt_rent_epoch_max = feature_set.is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); @@ -183,9 +169,9 @@ fn load_transaction_accounts( account_shared_data_from_program(key, program_accounts) .map(|program_account| (program.account_size, program_account, 0))? } else { - accounts_db - .load_with_fixed_root(ancestors, key) - .map(|(mut account, _)| { + callbacks + .get_account_shared_data(key) + .map(|mut account| { if message.is_writable(i) { if !feature_set .is_active(&feature_set::disable_rent_fees_collection::id()) @@ -253,15 +239,7 @@ fn load_transaction_accounts( validated_fee_payer = true; } - if in_reward_interval - && message.is_writable(i) - && solana_stake_program::check_id(account.owner()) - { - error_counters.program_execution_temporarily_restricted += 1; - return Err(TransactionError::ProgramExecutionTemporarilyRestricted { - account_index: i as u8, - }); - } + callbacks.check_account_access(tx, i, &account, error_counters)?; tx_rent += rent; rent_debits.insert(key, rent, account.lamports()); @@ -306,7 +284,7 @@ fn load_transaction_accounts( return Err(TransactionError::ProgramAccountNotFound); } - if !(is_builtin(program_account) || is_executable(program_account, feature_set)) { + if !(is_builtin(program_account) || is_executable(program_account, &feature_set)) { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } @@ -324,12 +302,10 @@ fn load_transaction_accounts( builtins_start_index.saturating_add(owner_index) } else { let owner_index = accounts.len(); - if let Some((owner_account, _)) = - accounts_db.load_with_fixed_root(ancestors, owner_id) - { + if let Some(owner_account) = callbacks.get_account_shared_data(owner_id) { if !native_loader::check_id(owner_account.owner()) || !(is_builtin(&owner_account) - || is_executable(&owner_account, feature_set)) + || is_executable(&owner_account, &feature_set)) { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); @@ -484,7 +460,10 @@ mod tests { use { super::*, nonce::state::Versions as NonceVersions, - solana_accounts_db::{accounts::Accounts, rent_collector::RentCollector}, + solana_accounts_db::{ + accounts::Accounts, accounts_db::AccountsDb, accounts_file::MatchAccountOwnerError, + ancestors::Ancestors, rent_collector::RentCollector, + }, solana_program_runtime::{ compute_budget_processor, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, @@ -494,6 +473,7 @@ mod 
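[Note how the inline reward-interval stake check above is replaced by a single `callbacks.check_account_access(...)` call: the trait ships a permissive default implementation, the production callback can override it with policy, and test doubles simply inherit the default. A toy sketch of that hook pattern, with invented types and with the reward-interval policy reconstructed from the code being deleted here:]

```rust
type Pubkey = [u8; 32];

struct Account {
    owner: Pubkey,
    writable: bool,
}

trait ProcessingCallback {
    // Default hook: no extra restrictions.
    fn check_account_access(&self, _account: &Account) -> Result<(), String> {
        Ok(())
    }
}

struct ProductionBank {
    restricted_owner: Pubkey,
    in_reward_interval: bool,
}

impl ProcessingCallback for ProductionBank {
    // Production override: temporarily reject writes to one owner's accounts.
    fn check_account_access(&self, account: &Account) -> Result<(), String> {
        if self.in_reward_interval && account.writable && account.owner == self.restricted_owner {
            return Err("ProgramExecutionTemporarilyRestricted".to_string());
        }
        Ok(())
    }
}

struct TestCallbacks; // Inherits the permissive default.
impl ProcessingCallback for TestCallbacks {}

fn main() {
    let stake_like_owner = [1u8; 32];
    let acct = Account { owner: stake_like_owner, writable: true };
    let bank = ProductionBank { restricted_owner: stake_like_owner, in_reward_interval: true };
    assert!(bank.check_account_access(&acct).is_err());
    assert!(TestCallbacks.check_account_access(&acct).is_ok());
}
```

[This keeps bank-specific policy out of the shared account loader, so the SVM code stays reusable while Bank retains its restriction.]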
tests { bpf_loader_upgradeable, compute_budget::ComputeBudgetInstruction, epoch_schedule::EpochSchedule, + feature_set::FeatureSet, hash::Hash, instruction::CompiledInstruction, message::{Message, SanitizedMessage}, @@ -507,6 +487,41 @@ mod tests { std::{convert::TryFrom, sync::Arc}, }; + struct TestCallbacks { + accounts: Accounts, + ancestors: Ancestors, + rent_collector: RentCollector, + feature_set: Arc, + } + + impl TransactionProcessingCallback for TestCallbacks { + fn account_matches_owners( + &self, + _account: &Pubkey, + _owners: &[Pubkey], + ) -> std::result::Result { + Err(MatchAccountOwnerError::UnableToLoad) + } + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + self.accounts + .load_without_fixed_root(&self.ancestors, pubkey) + .map(|(acc, _slot)| acc) + } + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { + (Hash::new_unique(), 0) + } + + fn get_rent_collector(&self) -> &RentCollector { + &self.rent_collector + } + + fn get_feature_set(&self) -> Arc { + self.feature_set.clone() + } + } + fn load_accounts_with_fee_and_rent( tx: Transaction, ka: &[TransactionAccount], @@ -525,17 +540,19 @@ mod tests { let ancestors = vec![(0, 0)].into_iter().collect(); feature_set.deactivate(&feature_set::disable_rent_fees_collection::id()); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); + let callbacks = TestCallbacks { + accounts, + ancestors, + rent_collector: rent_collector.clone(), + feature_set: Arc::new(feature_set.clone()), + }; load_accounts( - &accounts.accounts_db, - &ancestors, + &callbacks, &[sanitized_tx], &[(Ok(()), None, Some(lamports_per_signature))], error_counters, - rent_collector, - feature_set, fee_structure, None, - RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), ) @@ -990,26 +1007,27 @@ mod tests { } fn load_accounts_no_store( - accounts: &Accounts, + accounts: Accounts, tx: Transaction, account_overrides: Option<&AccountOverrides>, ) -> Vec { let tx = SanitizedTransaction::from_transaction_for_tests(tx); - let rent_collector = RentCollector::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let mut error_counters = TransactionErrorMetrics::default(); + let callbacks = TestCallbacks { + accounts, + ancestors, + rent_collector: RentCollector::default(), + feature_set: Arc::new(FeatureSet::all_enabled()), + }; load_accounts( - &accounts.accounts_db, - &ancestors, + &callbacks, &[tx], &[(Ok(()), None, Some(10))], &mut error_counters, - &rent_collector, - &FeatureSet::all_enabled(), &FeeStructure::default(), account_overrides, - RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), ) @@ -1032,7 +1050,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(&accounts, tx, None); + let loaded_accounts = load_accounts_no_store(accounts, tx, None); assert_eq!(loaded_accounts.len(), 1); assert!(loaded_accounts[0].0.is_err()); } @@ -1060,7 +1078,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(&accounts, tx, Some(&account_overrides)); + let loaded_accounts = load_accounts_no_store(accounts, tx, Some(&account_overrides)); assert_eq!(loaded_accounts.len(), 1); let loaded_transaction = loaded_accounts[0].0.as_ref().unwrap(); assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey()); From a16f982169eb197fad0eb8c58c307fb069f69d8f Mon Sep 17 00:00:00 2001 From: Brooks Date: Sun, 4 Feb 2024 11:56:12 -0500 Subject: [PATCH 113/401] Removes redundant check on 
STORE_META_OVERHEAD (#35069) --- accounts-db/src/append_vec.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 578371c45a376c..bf91ca0d111523 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -729,13 +729,6 @@ pub mod tests { } } - static_assertions::const_assert_eq!( - STORE_META_OVERHEAD, - std::mem::size_of::() - + std::mem::size_of::() - + std::mem::size_of::() - ); - // Hash is [u8; 32], which has no alignment static_assertions::assert_eq_align!(u64, StoredMeta, AccountMeta); From 116119cfd2637f9cb92065cf1cefab0d165cf3dd Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 5 Feb 2024 08:18:17 -0800 Subject: [PATCH 114/401] SVM: Move transaction processing code out of `bank.rs` (#35075) SVM: Move transaction processing code out of bank.rs --- runtime/src/bank.rs | 824 +--------------------- runtime/src/bank/tests.rs | 16 +- runtime/src/svm/account_loader.rs | 4 +- runtime/src/svm/mod.rs | 1 + runtime/src/svm/transaction_processor.rs | 831 +++++++++++++++++++++++ 5 files changed, 857 insertions(+), 819 deletions(-) create mode 100644 runtime/src/svm/transaction_processor.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0f10ce51e16564..92689644db1ac7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -59,9 +59,8 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, - svm::{ - account_loader::load_accounts, - transaction_account_state_info::TransactionAccountStateInfo, + svm::transaction_processor::{ + TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, }, transaction_batch::TransactionBatch, }, @@ -69,7 +68,6 @@ use { dashmap::{DashMap, DashSet}, itertools::izip, log::*, - percentage::Percentage, rayon::{ iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, slice::ParallelSlice, @@ -78,10 +76,7 @@ use { serde::Serialize, solana_accounts_db::{ account_overrides::AccountOverrides, - accounts::{ - AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, - TransactionLoadResult, - }, + accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot, TransactionLoadResult}, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, @@ -105,7 +100,6 @@ use { storable_accounts::StorableAccounts, transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ - inner_instructions_list_from_instruction_trace, DurableNonceFee, TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, }, @@ -116,27 +110,17 @@ use { solana_measure::{measure, measure::Measure, measure_us}, solana_perf::perf_libs, solana_program_runtime::{ - compute_budget::ComputeBudget, compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, - loaded_programs::{ - LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, - ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, - }, - log_collector::LogCollector, - message_processor::MessageProcessor, - sysvar_cache::SysvarCache, - timings::{ExecuteDetailsTimings, ExecuteTimingType, ExecuteTimings}, + loaded_programs::{LoadedProgram, LoadedProgramType, LoadedPrograms}, + timings::{ExecuteTimingType, ExecuteTimings}, }, solana_sdk::{ 
account::{ create_account_shared_data_with_fields as create_account, create_executable_meta, from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, - WritableAccount, PROGRAM_OWNERS, + WritableAccount, }, - account_utils::StateMut, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, @@ -160,8 +144,6 @@ use { incinerator, inflation::Inflation, inner_instruction::InnerInstructions, - instruction::InstructionError, - loader_v4::{self, LoaderV4State, LoaderV4Status}, message::{AccountKeys, SanitizedMessage}, native_loader, native_token::LAMPORTS_PER_SOL, @@ -183,9 +165,7 @@ use { self, MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, - transaction_context::{ - ExecutionRecord, TransactionAccount, TransactionContext, TransactionReturnData, - }, + transaction_context::{TransactionAccount, TransactionReturnData}, }, solana_stake_program::stake_state::{ self, InflationPointCalculationEvent, PointValue, StakeStateV2, @@ -195,18 +175,16 @@ use { solana_vote_program::vote_state::VoteState, std::{ borrow::Cow, - cell::RefCell, - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{HashMap, HashSet}, convert::TryFrom, fmt, mem, ops::{AddAssign, RangeInclusive}, path::PathBuf, - rc::Rc, slice, sync::{ atomic::{ AtomicBool, AtomicI64, AtomicU64, AtomicUsize, - Ordering::{self, AcqRel, Acquire, Relaxed}, + Ordering::{AcqRel, Acquire, Relaxed}, }, Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, @@ -315,14 +293,6 @@ impl BankRc { } } -enum ProgramAccountLoadResult { - AccountNotFound, - InvalidAccountData(ProgramRuntimeEnvironment), - ProgramOfLoaderV1orV2(AccountSharedData), - ProgramOfLoaderV3(AccountSharedData, AccountSharedData, Slot), - ProgramOfLoaderV4(AccountSharedData, Slot), -} - pub struct LoadAndExecuteTransactionsOutput { pub loaded_transactions: Vec, // Vector of results indicating whether a transaction was executed or could not @@ -340,13 +310,6 @@ pub struct LoadAndExecuteTransactionsOutput { pub error_counters: TransactionErrorMetrics, } -pub struct LoadAndExecuteSanitizedTransactionsOutput { - pub loaded_transactions: Vec, - // Vector of results indicating whether a transaction was executed or could not - // be executed. Note executed transactions can still have failed! 
- pub execution_results: Vec, -} - pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -371,9 +334,6 @@ impl TransactionBalancesSet { } pub type TransactionBalances = Vec>; -/// A list of log messages emitted during a transaction -pub type TransactionLogMessages = Vec; - #[derive(Serialize, Deserialize, AbiExample, AbiEnumVisitor, Debug, PartialEq, Eq)] pub enum TransactionLogCollectorFilter { All, @@ -773,7 +733,7 @@ pub struct Bank { rent_collector: RentCollector, /// initialized from genesis - epoch_schedule: EpochSchedule, + pub(crate) epoch_schedule: EpochSchedule, /// inflation specs inflation: Arc>, @@ -792,7 +752,7 @@ pub struct Bank { builtin_programs: HashSet, /// Optional config parameters that can override runtime behavior - runtime_config: Arc, + pub(crate) runtime_config: Arc, /// Protocol-level rewards that were distributed by this bank pub rewards: RwLock>, @@ -4590,555 +4550,14 @@ impl Bank { } balances } -} - -impl TransactionBatchProcessor { - fn program_modification_slot( - &self, - callbacks: &CB, - pubkey: &Pubkey, - ) -> Result { - let program = callbacks - .get_account_shared_data(pubkey) - .ok_or(TransactionError::ProgramAccountNotFound)?; - if bpf_loader_upgradeable::check_id(program.owner()) { - if let Ok(UpgradeableLoaderState::Program { - programdata_address, - }) = program.state() - { - let programdata = callbacks - .get_account_shared_data(&programdata_address) - .ok_or(TransactionError::ProgramAccountNotFound)?; - if let Ok(UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: _, - }) = programdata.state() - { - return Ok(slot); - } - } - Err(TransactionError::ProgramAccountNotFound) - } else if loader_v4::check_id(program.owner()) { - let state = solana_loader_v4_program::get_state(program.data()) - .map_err(|_| TransactionError::ProgramAccountNotFound)?; - Ok(state.slot) - } else { - Ok(0) - } - } - - fn load_program_accounts( - &self, - callbacks: &CB, - pubkey: &Pubkey, - environments: &ProgramRuntimeEnvironments, - ) -> ProgramAccountLoadResult { - let program_account = match callbacks.get_account_shared_data(pubkey) { - None => return ProgramAccountLoadResult::AccountNotFound, - Some(account) => account, - }; - - debug_assert!(solana_bpf_loader_program::check_loader_id( - program_account.owner() - )); - - if loader_v4::check_id(program_account.owner()) { - return solana_loader_v4_program::get_state(program_account.data()) - .ok() - .and_then(|state| { - (!matches!(state.status, LoaderV4Status::Retracted)).then_some(state.slot) - }) - .map(|slot| ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot)) - .unwrap_or(ProgramAccountLoadResult::InvalidAccountData( - environments.program_runtime_v2.clone(), - )); - } - - if !bpf_loader_upgradeable::check_id(program_account.owner()) { - return ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account); - } - - if let Ok(UpgradeableLoaderState::Program { - programdata_address, - }) = program_account.state() - { - let programdata_account = match callbacks.get_account_shared_data(&programdata_address) - { - None => return ProgramAccountLoadResult::AccountNotFound, - Some(account) => account, - }; - - if let Ok(UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: _, - }) = programdata_account.state() - { - return ProgramAccountLoadResult::ProgramOfLoaderV3( - program_account, - programdata_account, - slot, - ); - } - } - ProgramAccountLoadResult::InvalidAccountData(environments.program_runtime_v1.clone()) 
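[The `load_program_accounts` dispatch removed here (and re-added verbatim in `transaction_processor.rs` further down) classifies a program account by its loader: loader-v4 carries its deployment slot in its own state and is invalid if retracted, pre-upgradeable loaders hold the ELF directly, and the upgradeable loader stores the slot in a companion programdata account. A simplified runnable model of that dispatch, with enum payloads standing in for the real bincode-encoded loader state (the real code also distinguishes a missing programdata account from malformed state):]

```rust
type Slot = u64;

enum Loader {
    V1orV2,
    V3Upgradeable { programdata_slot: Option<Slot> },
    V4 { retracted: bool, slot: Slot },
}

enum ProgramAccountLoadResult {
    AccountNotFound,
    InvalidAccountData,
    ProgramOfLoaderV1orV2,
    ProgramOfLoaderV3(Slot),
    ProgramOfLoaderV4(Slot),
}

fn classify(account: Option<Loader>) -> ProgramAccountLoadResult {
    let Some(loader) = account else {
        return ProgramAccountLoadResult::AccountNotFound;
    };
    match loader {
        // Loader v4 keeps its deployment slot in the program account itself;
        // a retracted program is treated as invalid.
        Loader::V4 { retracted: false, slot } => ProgramAccountLoadResult::ProgramOfLoaderV4(slot),
        Loader::V4 { retracted: true, .. } => ProgramAccountLoadResult::InvalidAccountData,
        // Pre-upgradeable loaders: the program account holds the ELF directly.
        Loader::V1orV2 => ProgramAccountLoadResult::ProgramOfLoaderV1orV2,
        // Upgradeable loader: the slot lives in the companion programdata account.
        Loader::V3Upgradeable { programdata_slot: Some(slot) } => {
            ProgramAccountLoadResult::ProgramOfLoaderV3(slot)
        }
        Loader::V3Upgradeable { programdata_slot: None } => {
            ProgramAccountLoadResult::InvalidAccountData
        }
    }
}

fn main() {
    assert!(matches!(classify(None), ProgramAccountLoadResult::AccountNotFound));
    assert!(matches!(
        classify(Some(Loader::V4 { retracted: false, slot: 42 })),
        ProgramAccountLoadResult::ProgramOfLoaderV4(42)
    ));
    assert!(matches!(
        classify(Some(Loader::V1orV2)),
        ProgramAccountLoadResult::ProgramOfLoaderV1orV2
    ));
}
```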
- } - - fn load_program_from_bytes( - load_program_metrics: &mut LoadProgramMetrics, - programdata: &[u8], - loader_key: &Pubkey, - account_size: usize, - deployment_slot: Slot, - program_runtime_environment: ProgramRuntimeEnvironment, - reloading: bool, - ) -> std::result::Result> { - if reloading { - // Safety: this is safe because the program is being reloaded in the cache. - unsafe { - LoadedProgram::reload( - loader_key, - program_runtime_environment.clone(), - deployment_slot, - deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - programdata, - account_size, - load_program_metrics, - ) - } - } else { - LoadedProgram::new( - loader_key, - program_runtime_environment.clone(), - deployment_slot, - deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - programdata, - account_size, - load_program_metrics, - ) - } - } - - pub fn load_program( - &self, - callbacks: &CB, - pubkey: &Pubkey, - reload: bool, - recompile: Option>, - ) -> Arc { - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let effective_epoch = if recompile.is_some() { - loaded_programs_cache.latest_root_epoch.saturating_add(1) - } else { - self.epoch - }; - let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); - let mut load_program_metrics = LoadProgramMetrics { - program_id: pubkey.to_string(), - ..LoadProgramMetrics::default() - }; - - let mut loaded_program = - match self.load_program_accounts(callbacks, pubkey, environments) { - ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::Closed, - )), - - ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), - - ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { - Self::load_program_from_bytes( - &mut load_program_metrics, - program_account.data(), - program_account.owner(), - program_account.data().len(), - 0, - environments.program_runtime_v1.clone(), - reload, - ) - .map_err(|_| (0, environments.program_runtime_v1.clone())) - } - ProgramAccountLoadResult::ProgramOfLoaderV3( - program_account, - programdata_account, - slot, - ) => programdata_account - .data() - .get(UpgradeableLoaderState::size_of_programdata_metadata()..) - .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|programdata| { - Self::load_program_from_bytes( - &mut load_program_metrics, - programdata, - program_account.owner(), - program_account - .data() - .len() - .saturating_add(programdata_account.data().len()), - slot, - environments.program_runtime_v1.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v1.clone())), - - ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { - program_account - .data() - .get(LoaderV4State::program_data_offset()..) 
- .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|elf_bytes| { - Self::load_program_from_bytes( - &mut load_program_metrics, - elf_bytes, - &loader_v4::id(), - program_account.data().len(), - slot, - environments.program_runtime_v2.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v2.clone())) - } - } - .unwrap_or_else(|(slot, env)| { - LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) - }); - - let mut timings = ExecuteDetailsTimings::default(); - load_program_metrics.submit_datapoint(&mut timings); - if let Some(recompile) = recompile { - loaded_program.effective_slot = loaded_program - .effective_slot - .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); - loaded_program.tx_usage_counter = - AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); - loaded_program.ix_usage_counter = - AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); - } - loaded_program.update_access_slot(self.slot); - Arc::new(loaded_program) - } -} - -impl Bank { pub fn clear_program_cache(&self) { self.loaded_programs_cache .write() .unwrap() .unload_all_programs(); } -} - -impl TransactionBatchProcessor { - /// Execute a transaction using the provided loaded accounts and update - /// the executors cache if the transaction was successful. - #[allow(clippy::too_many_arguments)] - fn execute_loaded_transaction( - &self, - callback: &CB, - tx: &SanitizedTransaction, - loaded_transaction: &mut LoadedTransaction, - compute_budget: ComputeBudget, - durable_nonce_fee: Option, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, - timings: &mut ExecuteTimings, - error_counters: &mut TransactionErrorMetrics, - log_messages_bytes_limit: Option, - programs_loaded_for_tx_batch: &LoadedProgramsForTxBatch, - ) -> TransactionExecutionResult { - let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts); - - fn transaction_accounts_lamports_sum( - accounts: &[(Pubkey, AccountSharedData)], - message: &SanitizedMessage, - ) -> Option { - let mut lamports_sum = 0u128; - for i in 0..message.account_keys().len() { - let (_, account) = accounts.get(i)?; - lamports_sum = lamports_sum.checked_add(u128::from(account.lamports()))?; - } - Some(lamports_sum) - } - - let lamports_before_tx = - transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); - - let mut transaction_context = TransactionContext::new( - transaction_accounts, - callback.get_rent_collector().rent.clone(), - compute_budget.max_invoke_stack_height, - compute_budget.max_instruction_trace_length, - ); - #[cfg(debug_assertions)] - transaction_context.set_signature(tx.signature()); - - let pre_account_state_info = TransactionAccountStateInfo::new( - &callback.get_rent_collector().rent, - &transaction_context, - tx.message(), - ); - - let log_collector = if enable_log_recording { - match log_messages_bytes_limit { - None => Some(LogCollector::new_ref()), - Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some( - log_messages_bytes_limit, - ))), - } - } else { - None - }; - - let (blockhash, lamports_per_signature) = - callback.get_last_blockhash_and_lamports_per_signature(); - - let mut executed_units = 0u64; - let mut programs_modified_by_tx = LoadedProgramsForTxBatch::new( - self.slot, - programs_loaded_for_tx_batch.environments.clone(), - ); - let mut process_message_time = Measure::start("process_message_time"); - let process_result = 
MessageProcessor::process_message( - tx.message(), - &loaded_transaction.program_indices, - &mut transaction_context, - log_collector.clone(), - programs_loaded_for_tx_batch, - &mut programs_modified_by_tx, - callback.get_feature_set(), - compute_budget, - timings, - &self.sysvar_cache.read().unwrap(), - blockhash, - lamports_per_signature, - &mut executed_units, - ); - process_message_time.stop(); - - saturating_add_assign!( - timings.execute_accessories.process_message_us, - process_message_time.as_us() - ); - - let mut status = process_result - .and_then(|info| { - let post_account_state_info = TransactionAccountStateInfo::new( - &callback.get_rent_collector().rent, - &transaction_context, - tx.message(), - ); - TransactionAccountStateInfo::verify_changes( - &pre_account_state_info, - &post_account_state_info, - &transaction_context, - ) - .map(|_| info) - }) - .map_err(|err| { - match err { - TransactionError::InvalidRentPayingAccount - | TransactionError::InsufficientFundsForRent { .. } => { - error_counters.invalid_rent_paying_account += 1; - } - TransactionError::InvalidAccountIndex => { - error_counters.invalid_account_index += 1; - } - _ => { - error_counters.instruction_error += 1; - } - } - err - }); - - let log_messages: Option = - log_collector.and_then(|log_collector| { - Rc::try_unwrap(log_collector) - .map(|log_collector| log_collector.into_inner().into_messages()) - .ok() - }); - - let inner_instructions = if enable_cpi_recording { - Some(inner_instructions_list_from_instruction_trace( - &transaction_context, - )) - } else { - None - }; - - let ExecutionRecord { - accounts, - return_data, - touched_account_count, - accounts_resize_delta: accounts_data_len_delta, - } = transaction_context.into(); - - if status.is_ok() - && transaction_accounts_lamports_sum(&accounts, tx.message()) - .filter(|lamports_after_tx| lamports_before_tx == *lamports_after_tx) - .is_none() - { - status = Err(TransactionError::UnbalancedTransaction); - } - let status = status.map(|_| ()); - - loaded_transaction.accounts = accounts; - saturating_add_assign!( - timings.details.total_account_count, - loaded_transaction.accounts.len() as u64 - ); - saturating_add_assign!(timings.details.changed_account_count, touched_account_count); - - let return_data = if enable_return_data_recording && !return_data.data.is_empty() { - Some(return_data) - } else { - None - }; - - TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - durable_nonce_fee, - return_data, - executed_units, - accounts_data_len_delta, - }, - programs_modified_by_tx: Box::new(programs_modified_by_tx), - } - } - - fn replenish_program_cache( - &self, - callback: &CB, - program_accounts_map: &HashMap, - ) -> LoadedProgramsForTxBatch { - let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = - if self.check_program_modification_slot { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - ( - *pubkey, - ( - self.program_modification_slot(callback, pubkey) - .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { - LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) - }), - *count, - ), - ) - }) - .collect() - } else { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - (*pubkey, (LoadedProgramMatchCriteria::NoCriteria, *count)) - }) - .collect() - }; - - let mut loaded_programs_for_txs = None; - let mut program_to_store = None; - loop { - let (program_to_load, task_cookie, task_waiter) = { - // Lock the global cache. 
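[`transaction_accounts_lamports_sum` and the `UnbalancedTransaction` check in the surrounding code enforce a conservation invariant: the total lamports across a transaction's accounts must be identical before and after execution, with sums accumulated in u128 via `checked_add` so nothing silently wraps. A standalone sketch of the invariant, using toy `(key, lamports)` tuples instead of real accounts:]

```rust
fn lamports_sum(accounts: &[(String, u64)]) -> Option<u128> {
    let mut sum = 0u128;
    for (_key, lamports) in accounts {
        // u128 accumulation of u64 balances will not realistically overflow,
        // but the real code still uses checked_add, so mirror that here.
        sum = sum.checked_add(u128::from(*lamports))?;
    }
    Some(sum)
}

fn check_balanced(before: &[(String, u64)], after: &[(String, u64)]) -> Result<(), &'static str> {
    let pre = lamports_sum(before).unwrap_or(0);
    match lamports_sum(after) {
        // Execution may move lamports between accounts, never create them.
        Some(post) if post == pre => Ok(()),
        _ => Err("UnbalancedTransaction"),
    }
}

fn main() {
    let before = vec![("payer".to_string(), 100), ("payee".to_string(), 0)];
    let transfer = vec![("payer".to_string(), 40), ("payee".to_string(), 60)];
    let minted = vec![("payer".to_string(), 100), ("payee".to_string(), 60)];
    assert!(check_balanced(&before, &transfer).is_ok());
    assert!(check_balanced(&before, &minted).is_err());
}
```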
- let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - // Initialize our local cache. - let is_first_round = loaded_programs_for_txs.is_none(); - if is_first_round { - loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( - self.slot, - loaded_programs_cache - .get_environments_for_epoch(self.epoch) - .clone(), - )); - } - // Submit our last completed loading task. - if let Some((key, program)) = program_to_store.take() { - loaded_programs_cache.finish_cooperative_loading_task(self.slot, key, program); - } - // Figure out which program needs to be loaded next. - let program_to_load = loaded_programs_cache.extract( - &mut missing_programs, - loaded_programs_for_txs.as_mut().unwrap(), - is_first_round, - ); - let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); - (program_to_load, task_waiter.cookie(), task_waiter) - // Unlock the global cache again. - }; - - if let Some((key, count)) = program_to_load { - // Load, verify and compile one program. - let program = self.load_program(callback, &key, false, None); - program.tx_usage_counter.store(count, Ordering::Relaxed); - program_to_store = Some((key, program)); - } else if missing_programs.is_empty() { - break; - } else { - // Sleep until the next finish_cooperative_loading_task() call. - // Once a task completes we'll wake up and try to load the - // missing programs inside the tx batch again. - let _new_cookie = task_waiter.wait(task_cookie); - } - } - - loaded_programs_for_txs.unwrap() - } - - /// Returns a hash map of executable program accounts (program accounts that are not writable - /// in the given transactions), and their owners, for the transactions with a valid - /// blockhash or nonce. - fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( - &self, - callbacks: &CB, - txs: &[SanitizedTransaction], - lock_results: &mut [TransactionCheckResult], - program_owners: &'a [Pubkey], - ) -> HashMap { - let mut result: HashMap = HashMap::new(); - lock_results.iter_mut().zip(txs).for_each(|etx| { - if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { - if lamports_per_signature.is_some() { - tx.message() - .account_keys() - .iter() - .for_each(|key| match result.entry(*key) { - Entry::Occupied(mut entry) => { - let (_, count) = entry.get_mut(); - saturating_add_assign!(*count, 1); - } - Entry::Vacant(entry) => { - if let Ok(index) = - callbacks.account_matches_owners(key, program_owners) - { - program_owners - .get(index) - .map(|owner| entry.insert((owner, 1))); - } - } - }); - } else { - // If the transaction's nonce account was not valid, and blockhash is not found, - // the transaction will fail to process. Let's not load any programs from the - // transaction, and update the status of the transaction. 
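[`filter_executable_program_accounts` above accumulates, per batch, how many valid transactions reference each program account: an already-seen key just gets its count bumped via the occupied entry, while an unseen key is inserted only if its on-chain owner is one of the recognized program loaders. A toy version of that counting logic with plain string keys in place of pubkeys:]

```rust
use std::collections::{hash_map::Entry, HashMap};

fn filter_program_accounts<'a>(
    txs: &[Vec<&'a str>],                       // account keys per transaction
    owners_by_key: &HashMap<&'a str, &'a str>,  // on-chain owner of each key
    program_owners: &[&'a str],                 // loaders we care about
) -> HashMap<&'a str, (&'a str, u64)> {
    let mut result: HashMap<&'a str, (&'a str, u64)> = HashMap::new();
    for tx_keys in txs {
        for &key in tx_keys {
            match result.entry(key) {
                // Already known to be a program account: bump the use count.
                Entry::Occupied(mut entry) => entry.get_mut().1 += 1,
                // Unknown key: insert only if its owner is a known loader.
                Entry::Vacant(entry) => {
                    if let Some(&owner) = owners_by_key.get(key) {
                        if program_owners.contains(&owner) {
                            entry.insert((owner, 1));
                        }
                    }
                }
            }
        }
    }
    result
}

fn main() {
    let owners = HashMap::from([("prog_a", "loader"), ("wallet", "system")]);
    let txs = vec![vec!["prog_a", "wallet"], vec!["prog_a"]];
    let programs = filter_program_accounts(&txs, &owners, &["loader"]);
    assert_eq!(programs["prog_a"], ("loader", 2));
    assert!(!programs.contains_key("wallet"));
}
```

[The counts feed the program cache so hot programs are prioritized, and as the removed comment notes, a transaction with an invalid nonce or unknown blockhash contributes nothing and is marked `BlockhashNotFound` instead.]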
- *etx.0 = (Err(TransactionError::BlockhashNotFound), None, None); - } - } - }); - result - } -} -impl Bank { #[allow(clippy::type_complexity)] pub fn load_and_execute_transactions( &self, @@ -5344,145 +4763,7 @@ impl Bank { error_counters, } } -} - -impl TransactionBatchProcessor { - #[allow(clippy::too_many_arguments)] - fn load_and_execute_sanitized_transactions<'a, CB: TransactionProcessingCallback>( - &self, - callbacks: &CB, - sanitized_txs: &[SanitizedTransaction], - check_results: &mut [TransactionCheckResult], - error_counters: &mut TransactionErrorMetrics, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, - timings: &mut ExecuteTimings, - account_overrides: Option<&AccountOverrides>, - builtin_programs: impl Iterator, - log_messages_bytes_limit: Option, - ) -> LoadAndExecuteSanitizedTransactionsOutput { - let mut program_accounts_map = self.filter_executable_program_accounts( - callbacks, - sanitized_txs, - check_results, - PROGRAM_OWNERS, - ); - let native_loader = native_loader::id(); - for builtin_program in builtin_programs { - program_accounts_map.insert(*builtin_program, (&native_loader, 0)); - } - - let programs_loaded_for_tx_batch = Rc::new(RefCell::new( - self.replenish_program_cache(callbacks, &program_accounts_map), - )); - - let mut load_time = Measure::start("accounts_load"); - let mut loaded_transactions = load_accounts( - callbacks, - sanitized_txs, - check_results, - error_counters, - &self.fee_structure, - account_overrides, - &program_accounts_map, - &programs_loaded_for_tx_batch.borrow(), - ); - load_time.stop(); - - let mut execution_time = Measure::start("execution_time"); - - let execution_results: Vec = loaded_transactions - .iter_mut() - .zip(sanitized_txs.iter()) - .map(|(accs, tx)| match accs { - (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), - (Ok(loaded_transaction), nonce) => { - let compute_budget = - if let Some(compute_budget) = self.runtime_config.compute_budget { - compute_budget - } else { - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let maybe_compute_budget = ComputeBudget::try_from_instructions( - tx.message().program_instructions_iter(), - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = maybe_compute_budget { - return TransactionExecutionResult::NotExecuted(err); - } - maybe_compute_budget.unwrap() - }; - - let result = self.execute_loaded_transaction( - callbacks, - tx, - loaded_transaction, - compute_budget, - nonce.as_ref().map(DurableNonceFee::from), - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, - timings, - error_counters, - log_messages_bytes_limit, - &programs_loaded_for_tx_batch.borrow(), - ); - if let TransactionExecutionResult::Executed { - details, - programs_modified_by_tx, - } = &result - { - // Update batch specific cache of the loaded programs with the modifications - // made by the transaction, if it executed successfully. 
- if details.status.is_ok() { - programs_loaded_for_tx_batch - .borrow_mut() - .merge(programs_modified_by_tx); - } - } - - result - } - }) - .collect(); - - execution_time.stop(); - - const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; - self.loaded_programs_cache - .write() - .unwrap() - .evict_using_2s_random_selection( - Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), - self.slot, - ); - - debug!( - "load: {}us execute: {}us txs_len={}", - load_time.as_us(), - execution_time.as_us(), - sanitized_txs.len(), - ); - - timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); - timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); - - LoadAndExecuteSanitizedTransactionsOutput { - loaded_transactions, - execution_results, - } - } -} - -impl Bank { /// Load the accounts data size, in bytes pub fn load_accounts_data_size(&self) -> u64 { self.accounts_data_size_initial @@ -8236,32 +7517,6 @@ impl Bank { } } -pub trait TransactionProcessingCallback { - fn account_matches_owners( - &self, - account: &Pubkey, - owners: &[Pubkey], - ) -> std::result::Result; - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; - - fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64); - - fn get_rent_collector(&self) -> &RentCollector; - - fn get_feature_set(&self) -> Arc; - - fn check_account_access( - &self, - _tx: &SanitizedTransaction, - _account_index: usize, - _account: &AccountSharedData, - _error_counters: &mut TransactionErrorMetrics, - ) -> Result<()> { - Ok(()) - } -} - impl TransactionProcessingCallback for Bank { fn account_matches_owners( &self, @@ -8315,63 +7570,6 @@ impl TransactionProcessingCallback for Bank { } } -#[derive(AbiExample, Debug)] -struct TransactionBatchProcessor { - /// Bank slot (i.e. 
block) - slot: Slot, - - /// Bank epoch - epoch: Epoch, - - /// initialized from genesis - epoch_schedule: EpochSchedule, - - /// Transaction fee structure - fee_structure: FeeStructure, - - pub check_program_modification_slot: bool, - - /// Optional config parameters that can override runtime behavior - runtime_config: Arc, - - sysvar_cache: RwLock, - - pub loaded_programs_cache: Arc>>, -} - -impl Default for TransactionBatchProcessor { - fn default() -> Self { - Self { - slot: Slot::default(), - epoch: Epoch::default(), - epoch_schedule: EpochSchedule::default(), - fee_structure: FeeStructure::default(), - check_program_modification_slot: false, - runtime_config: Arc::::default(), - sysvar_cache: RwLock::::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( - Slot::default(), - Epoch::default(), - ))), - } - } -} - -impl TransactionBatchProcessor { - fn new(bank: &Bank) -> Self { - Self { - slot: bank.slot(), - epoch: bank.epoch(), - epoch_schedule: bank.epoch_schedule.clone(), - fee_structure: bank.fee_structure.clone(), - check_program_modification_slot: false, - runtime_config: bank.runtime_config.clone(), - sysvar_cache: RwLock::::default(), - loaded_programs_cache: bank.loaded_programs_cache.clone(), - } - } -} - #[cfg(feature = "dev-context-only-utils")] impl Bank { pub fn wrap_with_bank_forks_for_tests(self) -> (Arc, Arc>) { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index c8378108928763..3757220a759f8f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -18,6 +18,10 @@ use { }, snapshot_bank_utils, snapshot_utils, status_cache::MAX_CACHE_ENTRIES, + svm::{ + account_loader::load_accounts, + transaction_account_state_info::TransactionAccountStateInfo, + }, }, assert_matches::assert_matches, crossbeam_channel::{bounded, unbounded}, @@ -38,6 +42,7 @@ use { partitioned_rewards::TestPartitionedEpochRewards, rent_collector::RENT_EXEMPT_RENT_EPOCH, transaction_error_metrics::TransactionErrorMetrics, + transaction_results::DurableNonceFee, }, solana_logger, solana_program_runtime::{ @@ -45,7 +50,10 @@ use { compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, declare_process_instruction, invoke_context::mock_process_instruction, - loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, + loaded_programs::{ + LoadedProgram, LoadedProgramType, LoadedProgramsForTxBatch, + DELAY_VISIBILITY_SLOT_OFFSET, + }, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, timings::ExecuteTimings, }, @@ -13741,8 +13749,7 @@ fn test_filter_executable_program_accounts() { let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); let owners = &[program1_pubkey, program2_pubkey]; - let transaction_processor = TransactionBatchProcessor::new(&bank); - let programs = transaction_processor.filter_executable_program_accounts( + let programs = TransactionBatchProcessor::filter_executable_program_accounts( &bank, &[sanitized_tx1, sanitized_tx2], &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], @@ -13837,8 +13844,7 @@ fn test_filter_executable_program_accounts_invalid_blockhash() { let owners = &[program1_pubkey, program2_pubkey]; let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; - let transaction_processor = TransactionBatchProcessor::new(&bank); - let programs = transaction_processor.filter_executable_program_accounts( + let programs = TransactionBatchProcessor::filter_executable_program_accounts( &bank, &[sanitized_tx1, sanitized_tx2], &mut 
lock_results, diff --git a/runtime/src/svm/account_loader.rs b/runtime/src/svm/account_loader.rs index e496f94b8061ac..2bf4b9f3d7d3d5 100644 --- a/runtime/src/svm/account_loader.rs +++ b/runtime/src/svm/account_loader.rs @@ -1,5 +1,7 @@ use { - crate::{bank::TransactionProcessingCallback, svm::account_rent_state::RentState}, + crate::svm::{ + account_rent_state::RentState, transaction_processor::TransactionProcessingCallback, + }, itertools::Itertools, log::warn, solana_accounts_db::{ diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs index d026b8f3abb26e..84ed57077383df 100644 --- a/runtime/src/svm/mod.rs +++ b/runtime/src/svm/mod.rs @@ -1,3 +1,4 @@ pub mod account_loader; pub mod account_rent_state; pub mod transaction_account_state_info; +pub mod transaction_processor; diff --git a/runtime/src/svm/transaction_processor.rs b/runtime/src/svm/transaction_processor.rs new file mode 100644 index 00000000000000..cbb4924c614ed7 --- /dev/null +++ b/runtime/src/svm/transaction_processor.rs @@ -0,0 +1,831 @@ +use { + crate::{ + bank::Bank, + bank_forks::BankForks, + runtime_config::RuntimeConfig, + svm::{ + account_loader::load_accounts, + transaction_account_state_info::TransactionAccountStateInfo, + }, + }, + log::debug, + percentage::Percentage, + solana_accounts_db::{ + account_overrides::AccountOverrides, + accounts::{LoadedTransaction, TransactionLoadResult}, + accounts_file::MatchAccountOwnerError, + rent_collector::RentCollector, + transaction_error_metrics::TransactionErrorMetrics, + transaction_results::{ + inner_instructions_list_from_instruction_trace, DurableNonceFee, + TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, + }, + }, + solana_measure::measure::Measure, + solana_program_runtime::{ + compute_budget::ComputeBudget, + loaded_programs::{ + LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, + LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, + }, + log_collector::LogCollector, + message_processor::MessageProcessor, + sysvar_cache::SysvarCache, + timings::{ExecuteDetailsTimings, ExecuteTimingType, ExecuteTimings}, + }, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS}, + account_utils::StateMut, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + clock::{Epoch, Slot}, + epoch_schedule::EpochSchedule, + feature_set::FeatureSet, + fee::FeeStructure, + hash::Hash, + instruction::InstructionError, + loader_v4::{self, LoaderV4State, LoaderV4Status}, + message::SanitizedMessage, + native_loader, + pubkey::Pubkey, + saturating_add_assign, + transaction::{self, SanitizedTransaction, TransactionError}, + transaction_context::{ExecutionRecord, TransactionContext}, + }, + std::{ + cell::RefCell, + collections::{hash_map::Entry, HashMap}, + rc::Rc, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, RwLock, + }, + }, +}; + +/// A list of log messages emitted during a transaction +pub type TransactionLogMessages = Vec; + +pub struct LoadAndExecuteSanitizedTransactionsOutput { + pub loaded_transactions: Vec, + // Vector of results indicating whether a transaction was executed or could not + // be executed. Note executed transactions can still have failed! 
+ pub execution_results: Vec, +} + +pub trait TransactionProcessingCallback { + fn account_matches_owners( + &self, + account: &Pubkey, + owners: &[Pubkey], + ) -> std::result::Result; + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64); + + fn get_rent_collector(&self) -> &RentCollector; + + fn get_feature_set(&self) -> Arc; + + fn check_account_access( + &self, + _tx: &SanitizedTransaction, + _account_index: usize, + _account: &AccountSharedData, + _error_counters: &mut TransactionErrorMetrics, + ) -> transaction::Result<()> { + Ok(()) + } +} + +enum ProgramAccountLoadResult { + AccountNotFound, + InvalidAccountData(ProgramRuntimeEnvironment), + ProgramOfLoaderV1orV2(AccountSharedData), + ProgramOfLoaderV3(AccountSharedData, AccountSharedData, Slot), + ProgramOfLoaderV4(AccountSharedData, Slot), +} + +#[derive(AbiExample, Debug)] +pub struct TransactionBatchProcessor { + /// Bank slot (i.e. block) + slot: Slot, + + /// Bank epoch + epoch: Epoch, + + /// initialized from genesis + epoch_schedule: EpochSchedule, + + /// Transaction fee structure + fee_structure: FeeStructure, + + pub check_program_modification_slot: bool, + + /// Optional config parameters that can override runtime behavior + runtime_config: Arc, + + pub sysvar_cache: RwLock, + + pub loaded_programs_cache: Arc>>, +} + +impl Default for TransactionBatchProcessor { + fn default() -> Self { + Self { + slot: Slot::default(), + epoch: Epoch::default(), + epoch_schedule: EpochSchedule::default(), + fee_structure: FeeStructure::default(), + check_program_modification_slot: false, + runtime_config: Arc::::default(), + sysvar_cache: RwLock::::default(), + loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + Slot::default(), + Epoch::default(), + ))), + } + } +} + +impl TransactionBatchProcessor { + pub fn new(bank: &Bank) -> Self { + Self { + slot: bank.slot(), + epoch: bank.epoch(), + epoch_schedule: bank.epoch_schedule.clone(), + fee_structure: bank.fee_structure.clone(), + check_program_modification_slot: false, + runtime_config: bank.runtime_config.clone(), + sysvar_cache: RwLock::::default(), + loaded_programs_cache: bank.loaded_programs_cache.clone(), + } + } + + #[allow(clippy::too_many_arguments)] + pub fn load_and_execute_sanitized_transactions<'a, CB: TransactionProcessingCallback>( + &self, + callbacks: &CB, + sanitized_txs: &[SanitizedTransaction], + check_results: &mut [TransactionCheckResult], + error_counters: &mut TransactionErrorMetrics, + enable_cpi_recording: bool, + enable_log_recording: bool, + enable_return_data_recording: bool, + timings: &mut ExecuteTimings, + account_overrides: Option<&AccountOverrides>, + builtin_programs: impl Iterator, + log_messages_bytes_limit: Option, + ) -> LoadAndExecuteSanitizedTransactionsOutput { + let mut program_accounts_map = Self::filter_executable_program_accounts( + callbacks, + sanitized_txs, + check_results, + PROGRAM_OWNERS, + ); + let native_loader = native_loader::id(); + for builtin_program in builtin_programs { + program_accounts_map.insert(*builtin_program, (&native_loader, 0)); + } + + let programs_loaded_for_tx_batch = Rc::new(RefCell::new( + self.replenish_program_cache(callbacks, &program_accounts_map), + )); + + let mut load_time = Measure::start("accounts_load"); + let mut loaded_transactions = load_accounts( + callbacks, + sanitized_txs, + check_results, + error_counters, + &self.fee_structure, + account_overrides, + &program_accounts_map, + 
&programs_loaded_for_tx_batch.borrow(), + ); + load_time.stop(); + + let mut execution_time = Measure::start("execution_time"); + + let execution_results: Vec = loaded_transactions + .iter_mut() + .zip(sanitized_txs.iter()) + .map(|(accs, tx)| match accs { + (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), + (Ok(loaded_transaction), nonce) => { + let compute_budget = + if let Some(compute_budget) = self.runtime_config.compute_budget { + compute_budget + } else { + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let maybe_compute_budget = ComputeBudget::try_from_instructions( + tx.message().program_instructions_iter(), + ); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = maybe_compute_budget { + return TransactionExecutionResult::NotExecuted(err); + } + maybe_compute_budget.unwrap() + }; + + let result = self.execute_loaded_transaction( + callbacks, + tx, + loaded_transaction, + compute_budget, + nonce.as_ref().map(DurableNonceFee::from), + enable_cpi_recording, + enable_log_recording, + enable_return_data_recording, + timings, + error_counters, + log_messages_bytes_limit, + &programs_loaded_for_tx_batch.borrow(), + ); + + if let TransactionExecutionResult::Executed { + details, + programs_modified_by_tx, + } = &result + { + // Update batch specific cache of the loaded programs with the modifications + // made by the transaction, if it executed successfully. + if details.status.is_ok() { + programs_loaded_for_tx_batch + .borrow_mut() + .merge(programs_modified_by_tx); + } + } + + result + } + }) + .collect(); + + execution_time.stop(); + + const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; + self.loaded_programs_cache + .write() + .unwrap() + .evict_using_2s_random_selection( + Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), + self.slot, + ); + + debug!( + "load: {}us execute: {}us txs_len={}", + load_time.as_us(), + execution_time.as_us(), + sanitized_txs.len(), + ); + + timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); + timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); + + LoadAndExecuteSanitizedTransactionsOutput { + loaded_transactions, + execution_results, + } + } + + /// Returns a hash map of executable program accounts (program accounts that are not writable + /// in the given transactions), and their owners, for the transactions with a valid + /// blockhash or nonce. 
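[The `evict_using_2s_random_selection(Percentage::from(90), ...)` call above trims the program cache back to 90% after each batch. Its exact policy is not shown in this diff; the name suggests the classic power-of-two-random-choices approximation of LRU, sampled here as a generic sketch under that assumption: repeatedly pick two random entries and evict the one accessed longer ago. The LCG and entry layout are invented stand-ins so the sketch has no dependencies.]

```rust
struct CacheEntry {
    last_access_slot: u64,
}

// Tiny deterministic LCG so the sketch needs no external rand crate.
struct Lcg(u64);
impl Lcg {
    fn next(&mut self, bound: usize) -> usize {
        self.0 = self
            .0
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        (self.0 >> 33) as usize % bound
    }
}

fn evict_to_percentage(entries: &mut Vec<CacheEntry>, target_percent: usize, rng: &mut Lcg) {
    let target_len = entries.len() * target_percent / 100;
    while entries.len() > target_len.max(1) {
        let (a, b) = (rng.next(entries.len()), rng.next(entries.len()));
        // Of the two sampled entries, drop the one accessed longer ago.
        let victim = if entries[a].last_access_slot <= entries[b].last_access_slot { a } else { b };
        entries.swap_remove(victim);
    }
}

fn main() {
    let mut rng = Lcg(42);
    let mut cache: Vec<CacheEntry> = (0..100u64)
        .map(|slot| CacheEntry { last_access_slot: slot })
        .collect();
    evict_to_percentage(&mut cache, 90, &mut rng);
    assert_eq!(cache.len(), 90);
}
```

[Two-random-choices eviction avoids maintaining a full recency ordering while still evicting cold entries with high probability, which suits a cache touched from many threads.]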
+ pub fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( + callbacks: &CB, + txs: &[SanitizedTransaction], + lock_results: &mut [TransactionCheckResult], + program_owners: &'a [Pubkey], + ) -> HashMap { + let mut result: HashMap = HashMap::new(); + lock_results.iter_mut().zip(txs).for_each(|etx| { + if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { + if lamports_per_signature.is_some() { + tx.message() + .account_keys() + .iter() + .for_each(|key| match result.entry(*key) { + Entry::Occupied(mut entry) => { + let (_, count) = entry.get_mut(); + saturating_add_assign!(*count, 1); + } + Entry::Vacant(entry) => { + if let Ok(index) = + callbacks.account_matches_owners(key, program_owners) + { + program_owners + .get(index) + .map(|owner| entry.insert((owner, 1))); + } + } + }); + } else { + // If the transaction's nonce account was not valid, and blockhash is not found, + // the transaction will fail to process. Let's not load any programs from the + // transaction, and update the status of the transaction. + *etx.0 = (Err(TransactionError::BlockhashNotFound), None, None); + } + } + }); + result + } + + fn replenish_program_cache( + &self, + callback: &CB, + program_accounts_map: &HashMap, + ) -> LoadedProgramsForTxBatch { + let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = + if self.check_program_modification_slot { + program_accounts_map + .iter() + .map(|(pubkey, (_, count))| { + ( + *pubkey, + ( + self.program_modification_slot(callback, pubkey) + .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { + LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) + }), + *count, + ), + ) + }) + .collect() + } else { + program_accounts_map + .iter() + .map(|(pubkey, (_, count))| { + (*pubkey, (LoadedProgramMatchCriteria::NoCriteria, *count)) + }) + .collect() + }; + + let mut loaded_programs_for_txs = None; + let mut program_to_store = None; + loop { + let (program_to_load, task_cookie, task_waiter) = { + // Lock the global cache. + let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + // Initialize our local cache. + let is_first_round = loaded_programs_for_txs.is_none(); + if is_first_round { + loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( + self.slot, + loaded_programs_cache + .get_environments_for_epoch(self.epoch) + .clone(), + )); + } + // Submit our last completed loading task. + if let Some((key, program)) = program_to_store.take() { + loaded_programs_cache.finish_cooperative_loading_task(self.slot, key, program); + } + // Figure out which program needs to be loaded next. + let program_to_load = loaded_programs_cache.extract( + &mut missing_programs, + loaded_programs_for_txs.as_mut().unwrap(), + is_first_round, + ); + let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); + (program_to_load, task_waiter.cookie(), task_waiter) + // Unlock the global cache again. + }; + + if let Some((key, count)) = program_to_load { + // Load, verify and compile one program. + let program = self.load_program(callback, &key, false, None); + program.tx_usage_counter.store(count, Ordering::Relaxed); + program_to_store = Some((key, program)); + } else if missing_programs.is_empty() { + break; + } else { + // Sleep until the next finish_cooperative_loading_task() call. + // Once a task completes we'll wake up and try to load the + // missing programs inside the tx batch again. 
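[The loop in `replenish_program_cache` implements cooperative loading: concurrent batch threads share one global cache, each thread claims one missing program at a time, compiles it outside the lock, publishes the result, and threads blocked on an in-flight key are woken to re-check. A dependency-free sketch of that coordination using a `Mutex`/`Condvar` pair in place of the real `LoadingTaskWaiter`, with compilation simulated by a string:]

```rust
use std::{
    collections::{HashMap, HashSet},
    sync::{Arc, Condvar, Mutex},
    thread,
};

struct Cache {
    loaded: HashMap<u64, String>,
    in_progress: HashSet<u64>,
}

fn get_or_load(shared: &Arc<(Mutex<Cache>, Condvar)>, key: u64) -> String {
    let (lock, cvar) = (&shared.0, &shared.1);
    loop {
        let mut cache = lock.lock().unwrap();
        if let Some(program) = cache.loaded.get(&key) {
            return program.clone(); // another thread already loaded it
        }
        if cache.in_progress.insert(key) {
            drop(cache); // release the lock for the expensive part
            let program = format!("compiled({key})"); // stand-in for load/verify/compile
            let mut cache = lock.lock().unwrap();
            cache.in_progress.remove(&key);
            cache.loaded.insert(key, program.clone());
            cvar.notify_all(); // wake threads waiting on a finished load
            return program;
        }
        // Someone else is loading this key: sleep until a load finishes,
        // then loop and re-check (also handles spurious wakeups).
        let _guard = cvar.wait(cache).unwrap();
    }
}

fn main() {
    let shared = Arc::new((
        Mutex::new(Cache { loaded: HashMap::new(), in_progress: HashSet::new() }),
        Condvar::new(),
    ));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || get_or_load(&shared, 7))
        })
        .collect();
    for h in handles {
        assert_eq!(h.join().unwrap(), "compiled(7)");
    }
}
```

[The payoff is that each missing program is verified and compiled exactly once per slot, no matter how many transaction batches request it simultaneously.]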
+ let _new_cookie = task_waiter.wait(task_cookie); + } + } + + loaded_programs_for_txs.unwrap() + } + + /// Execute a transaction using the provided loaded accounts and update + /// the executors cache if the transaction was successful. + #[allow(clippy::too_many_arguments)] + fn execute_loaded_transaction( + &self, + callback: &CB, + tx: &SanitizedTransaction, + loaded_transaction: &mut LoadedTransaction, + compute_budget: ComputeBudget, + durable_nonce_fee: Option, + enable_cpi_recording: bool, + enable_log_recording: bool, + enable_return_data_recording: bool, + timings: &mut ExecuteTimings, + error_counters: &mut TransactionErrorMetrics, + log_messages_bytes_limit: Option, + programs_loaded_for_tx_batch: &LoadedProgramsForTxBatch, + ) -> TransactionExecutionResult { + let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts); + + fn transaction_accounts_lamports_sum( + accounts: &[(Pubkey, AccountSharedData)], + message: &SanitizedMessage, + ) -> Option { + let mut lamports_sum = 0u128; + for i in 0..message.account_keys().len() { + let (_, account) = accounts.get(i)?; + lamports_sum = lamports_sum.checked_add(u128::from(account.lamports()))?; + } + Some(lamports_sum) + } + + let lamports_before_tx = + transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); + + let mut transaction_context = TransactionContext::new( + transaction_accounts, + callback.get_rent_collector().rent.clone(), + compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_trace_length, + ); + #[cfg(debug_assertions)] + transaction_context.set_signature(tx.signature()); + + let pre_account_state_info = TransactionAccountStateInfo::new( + &callback.get_rent_collector().rent, + &transaction_context, + tx.message(), + ); + + let log_collector = if enable_log_recording { + match log_messages_bytes_limit { + None => Some(LogCollector::new_ref()), + Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some( + log_messages_bytes_limit, + ))), + } + } else { + None + }; + + let (blockhash, lamports_per_signature) = + callback.get_last_blockhash_and_lamports_per_signature(); + + let mut executed_units = 0u64; + let mut programs_modified_by_tx = LoadedProgramsForTxBatch::new( + self.slot, + programs_loaded_for_tx_batch.environments.clone(), + ); + let mut process_message_time = Measure::start("process_message_time"); + let process_result = MessageProcessor::process_message( + tx.message(), + &loaded_transaction.program_indices, + &mut transaction_context, + log_collector.clone(), + programs_loaded_for_tx_batch, + &mut programs_modified_by_tx, + callback.get_feature_set(), + compute_budget, + timings, + &self.sysvar_cache.read().unwrap(), + blockhash, + lamports_per_signature, + &mut executed_units, + ); + process_message_time.stop(); + + saturating_add_assign!( + timings.execute_accessories.process_message_us, + process_message_time.as_us() + ); + + let mut status = process_result + .and_then(|info| { + let post_account_state_info = TransactionAccountStateInfo::new( + &callback.get_rent_collector().rent, + &transaction_context, + tx.message(), + ); + TransactionAccountStateInfo::verify_changes( + &pre_account_state_info, + &post_account_state_info, + &transaction_context, + ) + .map(|_| info) + }) + .map_err(|err| { + match err { + TransactionError::InvalidRentPayingAccount + | TransactionError::InsufficientFundsForRent { .. 
} => { + error_counters.invalid_rent_paying_account += 1; + } + TransactionError::InvalidAccountIndex => { + error_counters.invalid_account_index += 1; + } + _ => { + error_counters.instruction_error += 1; + } + } + err + }); + + let log_messages: Option = + log_collector.and_then(|log_collector| { + Rc::try_unwrap(log_collector) + .map(|log_collector| log_collector.into_inner().into_messages()) + .ok() + }); + + let inner_instructions = if enable_cpi_recording { + Some(inner_instructions_list_from_instruction_trace( + &transaction_context, + )) + } else { + None + }; + + let ExecutionRecord { + accounts, + return_data, + touched_account_count, + accounts_resize_delta: accounts_data_len_delta, + } = transaction_context.into(); + + if status.is_ok() + && transaction_accounts_lamports_sum(&accounts, tx.message()) + .filter(|lamports_after_tx| lamports_before_tx == *lamports_after_tx) + .is_none() + { + status = Err(TransactionError::UnbalancedTransaction); + } + let status = status.map(|_| ()); + + loaded_transaction.accounts = accounts; + saturating_add_assign!( + timings.details.total_account_count, + loaded_transaction.accounts.len() as u64 + ); + saturating_add_assign!(timings.details.changed_account_count, touched_account_count); + + let return_data = if enable_return_data_recording && !return_data.data.is_empty() { + Some(return_data) + } else { + None + }; + + TransactionExecutionResult::Executed { + details: TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + durable_nonce_fee, + return_data, + executed_units, + accounts_data_len_delta, + }, + programs_modified_by_tx: Box::new(programs_modified_by_tx), + } + } + + fn program_modification_slot( + &self, + callbacks: &CB, + pubkey: &Pubkey, + ) -> transaction::Result { + let program = callbacks + .get_account_shared_data(pubkey) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if bpf_loader_upgradeable::check_id(program.owner()) { + if let Ok(UpgradeableLoaderState::Program { + programdata_address, + }) = program.state() + { + let programdata = callbacks + .get_account_shared_data(&programdata_address) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if let Ok(UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: _, + }) = programdata.state() + { + return Ok(slot); + } + } + Err(TransactionError::ProgramAccountNotFound) + } else if loader_v4::check_id(program.owner()) { + let state = solana_loader_v4_program::get_state(program.data()) + .map_err(|_| TransactionError::ProgramAccountNotFound)?; + Ok(state.slot) + } else { + Ok(0) + } + } + + pub fn load_program( + &self, + callbacks: &CB, + pubkey: &Pubkey, + reload: bool, + recompile: Option>, + ) -> Arc { + let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); + let effective_epoch = if recompile.is_some() { + loaded_programs_cache.latest_root_epoch.saturating_add(1) + } else { + self.epoch + }; + let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); + let mut load_program_metrics = LoadProgramMetrics { + program_id: pubkey.to_string(), + ..LoadProgramMetrics::default() + }; + + let mut loaded_program = + match self.load_program_accounts(callbacks, pubkey, environments) { + ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( + self.slot, + LoadedProgramType::Closed, + )), + + ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), + + ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { + 
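[`program_modification_slot` above answers "when was this program last deployed?", and the answer lives in a different place per loader: the upgradeable loader records it in the programdata account's state, loader-v4 records it in the program account itself, and anything else is treated as slot 0. A simplified runnable model, with enum variants standing in for the real account-state deserialization:]

```rust
type Slot = u64;

enum ProgramState {
    // Upgradeable (v3): the deployment slot is in the programdata account,
    // which may be missing or malformed (modeled as None).
    UpgradeableWithProgramData { deployment_slot: Option<Slot> },
    // Loader-v4: the slot is stored in the program account's own state.
    LoaderV4 { slot: Slot },
    // Any other owner: treated as never modified.
    Other,
}

fn program_modification_slot(state: Option<ProgramState>) -> Result<Slot, &'static str> {
    match state.ok_or("ProgramAccountNotFound")? {
        ProgramState::UpgradeableWithProgramData { deployment_slot } => {
            deployment_slot.ok_or("ProgramAccountNotFound")
        }
        ProgramState::LoaderV4 { slot } => Ok(slot),
        ProgramState::Other => Ok(0),
    }
}

fn main() {
    assert_eq!(program_modification_slot(Some(ProgramState::Other)), Ok(0));
    assert_eq!(
        program_modification_slot(Some(ProgramState::LoaderV4 { slot: 11 })),
        Ok(11)
    );
    assert!(program_modification_slot(None).is_err());
}
```

[When `check_program_modification_slot` is set, `replenish_program_cache` uses this slot as a match criterion, so a bank created at startup refuses cached programs deployed after its root.]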
Self::load_program_from_bytes( + &mut load_program_metrics, + program_account.data(), + program_account.owner(), + program_account.data().len(), + 0, + environments.program_runtime_v1.clone(), + reload, + ) + .map_err(|_| (0, environments.program_runtime_v1.clone())) + } + + ProgramAccountLoadResult::ProgramOfLoaderV3( + program_account, + programdata_account, + slot, + ) => programdata_account + .data() + .get(UpgradeableLoaderState::size_of_programdata_metadata()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|programdata| { + Self::load_program_from_bytes( + &mut load_program_metrics, + programdata, + program_account.owner(), + program_account + .data() + .len() + .saturating_add(programdata_account.data().len()), + slot, + environments.program_runtime_v1.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v1.clone())), + + ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { + program_account + .data() + .get(LoaderV4State::program_data_offset()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|elf_bytes| { + Self::load_program_from_bytes( + &mut load_program_metrics, + elf_bytes, + &loader_v4::id(), + program_account.data().len(), + slot, + environments.program_runtime_v2.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v2.clone())) + } + } + .unwrap_or_else(|(slot, env)| { + LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) + }); + + let mut timings = ExecuteDetailsTimings::default(); + load_program_metrics.submit_datapoint(&mut timings); + if let Some(recompile) = recompile { + loaded_program.effective_slot = loaded_program + .effective_slot + .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); + loaded_program.tx_usage_counter = + AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); + loaded_program.ix_usage_counter = + AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); + } + loaded_program.update_access_slot(self.slot); + Arc::new(loaded_program) + } + + fn load_program_from_bytes( + load_program_metrics: &mut LoadProgramMetrics, + programdata: &[u8], + loader_key: &Pubkey, + account_size: usize, + deployment_slot: Slot, + program_runtime_environment: ProgramRuntimeEnvironment, + reloading: bool, + ) -> std::result::Result> { + if reloading { + // Safety: this is safe because the program is being reloaded in the cache. 
+ unsafe { + LoadedProgram::reload( + loader_key, + program_runtime_environment.clone(), + deployment_slot, + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + None, + programdata, + account_size, + load_program_metrics, + ) + } + } else { + LoadedProgram::new( + loader_key, + program_runtime_environment.clone(), + deployment_slot, + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + None, + programdata, + account_size, + load_program_metrics, + ) + } + } + + fn load_program_accounts( + &self, + callbacks: &CB, + pubkey: &Pubkey, + environments: &ProgramRuntimeEnvironments, + ) -> ProgramAccountLoadResult { + let program_account = match callbacks.get_account_shared_data(pubkey) { + None => return ProgramAccountLoadResult::AccountNotFound, + Some(account) => account, + }; + + debug_assert!(solana_bpf_loader_program::check_loader_id( + program_account.owner() + )); + + if loader_v4::check_id(program_account.owner()) { + return solana_loader_v4_program::get_state(program_account.data()) + .ok() + .and_then(|state| { + (!matches!(state.status, LoaderV4Status::Retracted)).then_some(state.slot) + }) + .map(|slot| ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot)) + .unwrap_or(ProgramAccountLoadResult::InvalidAccountData( + environments.program_runtime_v2.clone(), + )); + } + + if !bpf_loader_upgradeable::check_id(program_account.owner()) { + return ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account); + } + + if let Ok(UpgradeableLoaderState::Program { + programdata_address, + }) = program_account.state() + { + let programdata_account = match callbacks.get_account_shared_data(&programdata_address) + { + None => return ProgramAccountLoadResult::AccountNotFound, + Some(account) => account, + }; + + if let Ok(UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: _, + }) = programdata_account.state() + { + return ProgramAccountLoadResult::ProgramOfLoaderV3( + program_account, + programdata_account, + slot, + ); + } + } + ProgramAccountLoadResult::InvalidAccountData(environments.program_runtime_v1.clone()) + } +} From 440c3bb156d6eeca8e17656485699b945add89b3 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 5 Feb 2024 10:47:00 -0600 Subject: [PATCH 115/401] Avoid account index entry Arc clone in shrinking (#35010) * avoid account index entry Arc clone in shrink * use scan to addref * update code comments for scan fn * expect * warn * update log message --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 33 ++++++++++++++++++++++++++----- accounts-db/src/accounts_index.rs | 31 +++++++++++++++++------------ 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2089c508ea8b4e..2853bb7a05edb6 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3983,16 +3983,39 @@ impl AccountsDb { shrink_collect.alive_total_bytes as u64, shrink_collect.capacity, ) { + warn!( + "Unexpected shrink for slot {} alive {} capacity {}, \ + likely caused by a bug for calculating alive bytes.", + slot, shrink_collect.alive_total_bytes, shrink_collect.capacity + ); + self.shrink_stats .skipped_shrink .fetch_add(1, Ordering::Relaxed); - for pubkey in shrink_collect.unrefed_pubkeys { - if let Some(locked_entry) = self.accounts_index.get_account_read_entry(pubkey) { + + self.accounts_index.scan( + shrink_collect.unrefed_pubkeys.into_iter(), + |pubkey, _slot_refs, entry| { // pubkeys in `unrefed_pubkeys` were unref'd in 
`shrink_collect` above under the assumption that we would shrink everything.
+                    // Since shrink is not occurring, we need to addref the pubkeys to get the system back to the prior state since the account still exists at this slot.
+                    if let Some(entry) = entry {
+                        entry.addref();
+                    } else {
+                        // We also expect that the accounts index must contain an
+                        // entry for `pubkey`. Log a warning for now. In the future,
+                        // we will panic when this happens.
+                        warn!("pubkey {pubkey} in slot {slot} was NOT found in accounts index during shrink");
+                        datapoint_warn!(
+                            "accounts_db-shink_pubkey_missing_from_index",
+                            ("store_slot", slot, i64),
+                            ("pubkey", pubkey.to_string(), String),
+                        )
+                    }
+                    AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
+                },
+                None,
+                true,
+            );
             return;
         }
diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs
index fc389116d09b71..493bb3130a9e2d 100644
--- a/accounts-db/src/accounts_index.rs
+++ b/accounts-db/src/accounts_index.rs
@@ -1358,10 +1358,24 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         self.storage.get_startup_remaining_items_to_flush_estimate()
     }

-    /// For each pubkey, find the slot list in the accounts index
-    /// apply 'avoid_callback_result' if specified.
-    /// otherwise, call `callback`
-    /// if 'provide_entry_in_callback' is true, populate callback with the Arc of the entry itself.
+    /// Scan AccountsIndex for a given iterator of Pubkeys.
+    ///
+    /// This fn takes 4 arguments.
+    /// - an iterator of pubkeys to scan
+    /// - callback fn to run for each pubkey in the accounts index
+    /// - avoid_callback_result. If it is Some(default), then callback is ignored and
+    ///   default is returned instead.
+    /// - provide_entry_in_callback. If true, provide the ref of the Arc of the
+    ///   index entry to the `callback` fn. Otherwise, provide None.
+    ///
+    /// The `callback` fn must return `AccountsIndexScanResult`, which is
+    /// used to indicate whether the AccountsIndex entry should be added to the
+    /// in-memory cache. The `callback` fn takes in 3 arguments:
+    /// - the first is an immutable ref of the pubkey,
+    /// - the second is an Option of the SlotList and RefCount,
+    /// - the third is an Option of the AccountMapEntry, which is only populated
+    ///   when `provide_entry_in_callback` is true. Otherwise, it will be
+    ///   None.
     pub(crate) fn scan<'a, F, I>(
         &self,
         pubkeys: I,
@@ -1369,15 +1383,6 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         avoid_callback_result: Option<AccountsIndexScanResult>,
         provide_entry_in_callback: bool,
     ) where
-        // params:
-        // pubkey looked up
-        // slots_refs is Option<(slot_list, ref_count)>
-        //  None if 'pubkey' is not in accounts index.
-        // slot_list: comes from accounts index for 'pubkey'
-        // ref_count: refcount of entry in index
-        // entry, if 'provide_entry_in_callback' is true
-        // if 'avoid_callback_result' is Some(_), then callback is NOT called
-        // and _ is returned as if callback were called.
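For illustration, a minimal sketch of how a caller such as the shrink path above can drive this `scan` API to restore refcounts; the `accounts_index` handle and the `unrefed_pubkeys` iterator are assumed for the example and are not taken from this patch:

```rust
// Sketch only: assumes `accounts_index: &AccountsIndex<T, U>` and an
// iterator of `&Pubkey` values whose refcounts must be restored.
accounts_index.scan(
    unrefed_pubkeys.into_iter(),
    |_pubkey, _slot_refs, entry| {
        // `entry` is Some(..) here only because `provide_entry_in_callback`
        // is set to true below.
        if let Some(entry) = entry {
            entry.addref();
        }
        // Keep the index entry in the in-memory cache only if it is dirty.
        AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
    },
    None, // avoid_callback_result: None, so the callback actually runs
    true, // provide_entry_in_callback
);
```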
F: FnMut(
            &'a Pubkey,
            Option<(&SlotList<T>, RefCount)>,

From c3d1831b8ee5d20b6656d2c49202a92aee816948 Mon Sep 17 00:00:00 2001
From: drebaglioni <57418452+drebaglioni@users.noreply.github.com>
Date: Mon, 5 Feb 2024 10:13:04 -0800
Subject: [PATCH 116/401] Update SECURITY.md (#35048)

Removed language relating to the previous payment method
---
 SECURITY.md | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/SECURITY.md b/SECURITY.md
index 24093819f8d57c..99f4ac8c113bad 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -110,7 +110,6 @@ _Note: Payments will continue to be paid out in 12-month locked SOL._
 #### Loss of Funds:
-Current: $2,000,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 25,000 SOL tokens. Min: 6,250 SOL tokens_
 * Theft of funds without a user's signature from any account
@@ -118,14 +117,12 @@ _**As of 2/1/24:** Max: 25,000 SOL tokens. Min: 6,250 SOL tokens_
 * Theft of funds that requires a user's signature - creating a vote program that drains the delegated stakes.
 #### Consensus/Safety Violations:
-Current: $1,000,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 12,500 SOL tokens. Min: 3,125 SOL tokens_ * Consensus safety violation * Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc. #### Liveness / Loss of Availability: -Current: $400,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 5,000 SOL tokens. Min: 1,250 SOL tokens_ * Whereby consensus halts and requires human intervention @@ -133,19 +130,16 @@ _**As of 2/1/24:** Max: 5,000 SOL tokens. Min: 1,250 SOL tokens_ * Remote attacks that partition the network, #### DoS Attacks: -Current: $100,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 1,250 SOL tokens. Min: 315 SOL tokens_ * Remote resource exhaustion via Non-RPC protocols #### Supply Chain Attacks: -Current: $100,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 1,250 SOL tokens. Min: 315 SOL tokens_ * Non-social attacks against source code change management, automated testing, release build, release publication and release hosting infrastructure of the monorepo. #### RPC DoS/Crashes: -Current: $5,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 65 SOL tokens. Min: 20 SOL tokens_ * RPC attacks @@ -190,6 +184,4 @@ bi = 2 ^ (R - ri) / ((2^R) - 1) ### Payment of Bug Bounties: * Bounties are currently awarded on a rolling/weekly basis and paid out within 30 days upon receipt of an invoice. -* The SOL/USD conversion rate used for payments is the market price of SOL (denominated in USD) at the end of the day the invoice is submitted by the researcher. -* The reference for this price is the Closing Price given by Coingecko.com on that date given here: https://www.coingecko.com/en/coins/solana/historical_data/usd#panel * Bug bounties that are paid out in SOL are paid to stake accounts with a lockup expiring 12 months from the date of delivery of SOL. From 785dd2132ef7dca14612594d92f0b57bae7b9f87 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 5 Feb 2024 10:23:30 -0800 Subject: [PATCH 117/401] [TieredStorage] Enable hot-storage in TieredStorage::write_accounts() (#35049) #### Problem While the implementation of hot-storage reader and writer are mostly done, it is not yet connected to TieredStorage. #### Summary of Changes This PR enables hot-storage in TieredStorage::write_accounts(). #### Test Plan Completes the existing tests in TieredStorage to directly write and read from a TieredStorage with the hot storage format. --- accounts-db/src/tiered_storage.rs | 126 ++++++++++++++---------- accounts-db/src/tiered_storage/error.rs | 2 +- 2 files changed, 77 insertions(+), 51 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index a6a8dc5fb0471e..92a4f0869e0c2a 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -20,6 +20,7 @@ use { }, error::TieredStorageError, footer::{AccountBlockFormat, AccountMetaFormat}, + hot::{HotStorageWriter, HOT_FORMAT}, index::IndexBlockFormat, owners::OwnersBlockFormat, readable::TieredStorageReader, @@ -30,14 +31,13 @@ use { path::{Path, PathBuf}, sync::OnceLock, }, - writer::TieredStorageWriter, }; pub type TieredStorageResult = Result; /// The struct that defines the formats of all building blocks of a /// TieredStorage. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct TieredStorageFormat { pub meta_entry_size: usize, pub account_meta_format: AccountMetaFormat, @@ -115,19 +115,23 @@ impl TieredStorage { )); } - let result = { - let writer = TieredStorageWriter::new(&self.path, format)?; - writer.write_accounts(accounts, skip) - }; + if format == &HOT_FORMAT { + let result = { + let writer = HotStorageWriter::new(&self.path)?; + writer.write_accounts(accounts, skip) + }; + + // panic here if self.reader.get() is not None as self.reader can only be + // None since we have passed `is_read_only()` check previously, indicating + // self.reader is not yet set. + self.reader + .set(TieredStorageReader::new_from_path(&self.path)?) + .unwrap(); - // panic here if self.reader.get() is not None as self.reader can only be - // None since we have passed `is_read_only()` check previously, indicating - // self.reader is not yet set. - self.reader - .set(TieredStorageReader::new_from_path(&self.path)?) - .unwrap(); + return result; + } - result + Err(TieredStorageError::UnknownFormat(self.path.to_path_buf())) } /// Returns the underlying reader of the TieredStorage. 
None will be @@ -156,9 +160,11 @@ impl TieredStorage { mod tests { use { super::*, - crate::account_storage::meta::{StoredMeta, StoredMetaWriteVersion}, + crate::account_storage::meta::{StoredAccountMeta, StoredMeta, StoredMetaWriteVersion}, footer::{TieredStorageFooter, TieredStorageMagicNumber}, hot::HOT_FORMAT, + index::IndexOffset, + owners::OWNER_NO_OWNER, solana_accounts_db::rent_collector::RENT_EXEMPT_RENT_EPOCH, solana_sdk::{ account::{Account, AccountSharedData}, @@ -167,7 +173,10 @@ mod tests { pubkey::Pubkey, system_instruction::MAX_PERMITTED_DATA_LENGTH, }, - std::mem::ManuallyDrop, + std::{ + collections::{HashMap, HashSet}, + mem::ManuallyDrop, + }, tempfile::tempdir, }; @@ -201,6 +210,7 @@ mod tests { Err(TieredStorageError::AttemptToUpdateReadOnly(_)), ) => {} (Err(TieredStorageError::Unsupported()), Err(TieredStorageError::Unsupported())) => {} + (Ok(_), Ok(_)) => {} // we don't expect error type mis-match or other error types here _ => { panic!("actual: {result:?}, expected: {expected_result:?}"); @@ -229,10 +239,7 @@ mod tests { assert_eq!(tiered_storage.path(), tiered_storage_path); assert_eq!(tiered_storage.file_size().unwrap(), 0); - // Expect the result to be TieredStorageError::Unsupported as the feature - // is not yet fully supported, but we can still check its partial results - // in the test. - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); } let tiered_storage_readonly = TieredStorage::new_readonly(&tiered_storage_path).unwrap(); @@ -257,10 +264,7 @@ mod tests { let tiered_storage_path = temp_dir.path().join("test_write_accounts_twice"); let tiered_storage = TieredStorage::new_writable(&tiered_storage_path); - // Expect the result to be TieredStorageError::Unsupported as the feature - // is not yet fully supported, but we can still check its partial results - // in the test. - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); // Expect AttemptToUpdateReadOnly error as write_accounts can only // be invoked once. write_zero_accounts( @@ -278,7 +282,7 @@ mod tests { let tiered_storage_path = temp_dir.path().join("test_remove_on_drop"); { let tiered_storage = TieredStorage::new_writable(&tiered_storage_path); - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); } // expect the file does not exists as it has been removed on drop assert!(!tiered_storage_path.try_exists().unwrap()); @@ -286,7 +290,7 @@ mod tests { { let tiered_storage = ManuallyDrop::new(TieredStorage::new_writable(&tiered_storage_path)); - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); } // expect the file exists as we have ManuallyDrop this time. 
assert!(tiered_storage_path.try_exists().unwrap()); @@ -329,6 +333,35 @@ mod tests { (stored_meta, AccountSharedData::from(account)) } + fn verify_account( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + account_hash: &AccountHash, + ) { + let (lamports, owner, data, executable, account_hash) = account + .map(|acc| { + ( + acc.lamports(), + acc.owner(), + acc.data(), + acc.executable(), + // only persist rent_epoch for those rent-paying accounts + Some(*account_hash), + ) + }) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); + + assert_eq!(stored_meta.lamports(), lamports); + assert_eq!(stored_meta.data().len(), data.len()); + assert_eq!(stored_meta.data(), data); + assert_eq!(stored_meta.executable(), executable); + assert_eq!(stored_meta.owner(), owner); + assert_eq!( + *stored_meta.hash(), + account_hash.unwrap_or(AccountHash(Hash::default())) + ); + } + /// The helper function for all write_accounts tests. /// Currently only supports hot accounts. fn do_test_write_accounts( @@ -368,34 +401,27 @@ mod tests { let tiered_storage = TieredStorage::new_writable(tiered_storage_path); _ = tiered_storage.write_accounts(&storable_accounts, 0, &format); - verify_hot_storage(&tiered_storage, &accounts, format); - } - - /// Verify the generated tiered storage in the test. - fn verify_hot_storage( - tiered_storage: &TieredStorage, - expected_accounts: &[(StoredMeta, AccountSharedData)], - expected_format: TieredStorageFormat, - ) { let reader = tiered_storage.reader().unwrap(); - assert_eq!(reader.num_accounts(), expected_accounts.len()); - - let footer = reader.footer(); - let expected_footer = TieredStorageFooter { - account_meta_format: expected_format.account_meta_format, - owners_block_format: expected_format.owners_block_format, - index_block_format: expected_format.index_block_format, - account_block_format: expected_format.account_block_format, - account_entry_count: expected_accounts.len() as u32, - // Hash is not yet implemented, so we bypass the check - hash: footer.hash, - ..TieredStorageFooter::default() - }; + let num_accounts = storable_accounts.len(); + assert_eq!(reader.num_accounts(), num_accounts); - // TODO(yhchiang): verify account meta and data once the reader side - // is implemented in a separate PR. 
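As a companion to the tests above, a hedged sketch of the write-once contract they exercise; `storable_accounts` is assumed to be built the same way as in `do_test_write_accounts`, and only calls that appear in this patch are used:

```rust
// Sketch: a TieredStorage becomes read-only after its first successful
// write_accounts() call, so a second write must be rejected.
let temp_dir = tempdir().unwrap();
let path = temp_dir.path().join("write_once_example");
let tiered_storage = TieredStorage::new_writable(&path);
tiered_storage
    .write_accounts(&storable_accounts, 0, &HOT_FORMAT)
    .unwrap();
assert!(matches!(
    tiered_storage.write_accounts(&storable_accounts, 0, &HOT_FORMAT),
    Err(TieredStorageError::AttemptToUpdateReadOnly(_)),
));
```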
+ let mut expected_accounts_map = HashMap::new(); + for i in 0..num_accounts { + let (account, address, account_hash, _write_version) = storable_accounts.get(i); + expected_accounts_map.insert(address, (account, account_hash)); + } - assert_eq!(*footer, expected_footer); + let mut index_offset = IndexOffset(0); + let mut verified_accounts = HashSet::new(); + while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() { + if let Some((account, account_hash)) = expected_accounts_map.get(stored_meta.pubkey()) { + verify_account(&stored_meta, *account, account_hash); + verified_accounts.insert(stored_meta.pubkey()); + } + index_offset = next; + } + assert!(!verified_accounts.is_empty()); + assert_eq!(verified_accounts.len(), expected_accounts_map.len()) } #[test] diff --git a/accounts-db/src/tiered_storage/error.rs b/accounts-db/src/tiered_storage/error.rs index e0c8ffa5ca482d..145334574b4ea3 100644 --- a/accounts-db/src/tiered_storage/error.rs +++ b/accounts-db/src/tiered_storage/error.rs @@ -11,7 +11,7 @@ pub enum TieredStorageError { #[error("AttemptToUpdateReadOnly: attempted to update read-only file {0}")] AttemptToUpdateReadOnly(PathBuf), - #[error("UnknownFormat: the tiered storage format is unavailable for file {0}")] + #[error("UnknownFormat: the tiered storage format is unknown for file {0}")] UnknownFormat(PathBuf), #[error("Unsupported: the feature is not yet supported")] From 65701820f3a5a734c4abb529770e38985160e540 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 5 Feb 2024 11:48:42 -0800 Subject: [PATCH 118/401] SVM: remove dependency on bank and bank_forks (#35084) --- runtime/src/bank.rs | 29 ++++++++++-- runtime/src/bank/tests.rs | 4 +- runtime/src/svm/transaction_processor.rs | 56 +++++++++++++++++------- 3 files changed, 67 insertions(+), 22 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 92689644db1ac7..515b3684c29fef 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -804,7 +804,7 @@ pub struct Bank { epoch_reward_status: EpochRewardStatus, - transaction_processor: TransactionBatchProcessor, + transaction_processor: TransactionBatchProcessor, } struct VoteWithStakeDelegations { @@ -996,7 +996,14 @@ impl Bank { transaction_processor: TransactionBatchProcessor::default(), }; - bank.transaction_processor = TransactionBatchProcessor::new(&bank); + bank.transaction_processor = TransactionBatchProcessor::new( + bank.slot, + bank.epoch, + bank.epoch_schedule.clone(), + bank.fee_structure.clone(), + bank.runtime_config.clone(), + bank.loaded_programs_cache.clone(), + ); let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; bank.accounts_data_size_initial = accounts_data_size_initial; @@ -1307,7 +1314,14 @@ impl Bank { transaction_processor: TransactionBatchProcessor::default(), }; - new.transaction_processor = TransactionBatchProcessor::new(&new); + new.transaction_processor = TransactionBatchProcessor::new( + new.slot, + new.epoch, + new.epoch_schedule.clone(), + new.fee_structure.clone(), + new.runtime_config.clone(), + new.loaded_programs_cache.clone(), + ); let (_, ancestors_time_us) = measure_us!({ let mut ancestors = Vec::with_capacity(1 + new.parents().len()); @@ -1815,7 +1829,14 @@ impl Bank { transaction_processor: TransactionBatchProcessor::default(), }; - bank.transaction_processor = TransactionBatchProcessor::new(&bank); + bank.transaction_processor = TransactionBatchProcessor::new( + bank.slot, + bank.epoch, + bank.epoch_schedule.clone(), + bank.fee_structure.clone(), + 
bank.runtime_config.clone(), + bank.loaded_programs_cache.clone(), + ); bank.finish_init( genesis_config, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 3757220a759f8f..fca911f93d6e76 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13749,7 +13749,7 @@ fn test_filter_executable_program_accounts() { let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); let owners = &[program1_pubkey, program2_pubkey]; - let programs = TransactionBatchProcessor::filter_executable_program_accounts( + let programs = TransactionBatchProcessor::::filter_executable_program_accounts( &bank, &[sanitized_tx1, sanitized_tx2], &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], @@ -13844,7 +13844,7 @@ fn test_filter_executable_program_accounts_invalid_blockhash() { let owners = &[program1_pubkey, program2_pubkey]; let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; - let programs = TransactionBatchProcessor::filter_executable_program_accounts( + let programs = TransactionBatchProcessor::::filter_executable_program_accounts( &bank, &[sanitized_tx1, sanitized_tx2], &mut lock_results, diff --git a/runtime/src/svm/transaction_processor.rs b/runtime/src/svm/transaction_processor.rs index cbb4924c614ed7..b7ea1934b30f85 100644 --- a/runtime/src/svm/transaction_processor.rs +++ b/runtime/src/svm/transaction_processor.rs @@ -1,7 +1,5 @@ use { crate::{ - bank::Bank, - bank_forks::BankForks, runtime_config::RuntimeConfig, svm::{ account_loader::load_accounts, @@ -25,8 +23,8 @@ use { solana_program_runtime::{ compute_budget::ComputeBudget, loaded_programs::{ - LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + ForkGraph, LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, + LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, @@ -55,6 +53,7 @@ use { std::{ cell::RefCell, collections::{hash_map::Entry, HashMap}, + fmt::{Debug, Formatter}, rc::Rc, sync::{ atomic::{AtomicU64, Ordering}, @@ -107,8 +106,8 @@ enum ProgramAccountLoadResult { ProgramOfLoaderV4(AccountSharedData, Slot), } -#[derive(AbiExample, Debug)] -pub struct TransactionBatchProcessor { +#[derive(AbiExample)] +pub struct TransactionBatchProcessor { /// Bank slot (i.e. 
block) slot: Slot, @@ -128,10 +127,28 @@ pub struct TransactionBatchProcessor { pub sysvar_cache: RwLock, - pub loaded_programs_cache: Arc>>, + pub loaded_programs_cache: Arc>>, } -impl Default for TransactionBatchProcessor { +impl Debug for TransactionBatchProcessor { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TransactionBatchProcessor") + .field("slot", &self.slot) + .field("epoch", &self.epoch) + .field("epoch_schedule", &self.epoch_schedule) + .field("fee_structure", &self.fee_structure) + .field( + "check_program_modification_slot", + &self.check_program_modification_slot, + ) + .field("runtime_config", &self.runtime_config) + .field("sysvar_cache", &self.sysvar_cache) + .field("loaded_programs_cache", &self.loaded_programs_cache) + .finish() + } +} + +impl Default for TransactionBatchProcessor { fn default() -> Self { Self { slot: Slot::default(), @@ -149,17 +166,24 @@ impl Default for TransactionBatchProcessor { } } -impl TransactionBatchProcessor { - pub fn new(bank: &Bank) -> Self { +impl TransactionBatchProcessor { + pub fn new( + slot: Slot, + epoch: Epoch, + epoch_schedule: EpochSchedule, + fee_structure: FeeStructure, + runtime_config: Arc, + loaded_programs_cache: Arc>>, + ) -> Self { Self { - slot: bank.slot(), - epoch: bank.epoch(), - epoch_schedule: bank.epoch_schedule.clone(), - fee_structure: bank.fee_structure.clone(), + slot, + epoch, + epoch_schedule, + fee_structure, check_program_modification_slot: false, - runtime_config: bank.runtime_config.clone(), + runtime_config, sysvar_cache: RwLock::::default(), - loaded_programs_cache: bank.loaded_programs_cache.clone(), + loaded_programs_cache, } } From 3cf5dd2afb7f8c5fa7f5bf32774a1537782f0266 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 5 Feb 2024 13:49:36 -0800 Subject: [PATCH 119/401] SVM: Move RuntimeConfig to svm folder (#35085) --- core/src/validator.rs | 2 +- core/tests/epoch_accounts_hash.rs | 2 +- core/tests/snapshots.rs | 2 +- ledger-tool/src/args.rs | 2 +- ledger/src/blockstore_processor.rs | 2 +- program-test/src/lib.rs | 2 +- runtime/src/bank.rs | 8 +++++--- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/lib.rs | 1 - runtime/src/serde_snapshot.rs | 2 +- runtime/src/snapshot_bank_utils.rs | 2 +- runtime/src/svm/mod.rs | 1 + runtime/src/{ => svm}/runtime_config.rs | 0 runtime/src/svm/transaction_processor.rs | 9 +++------ test-validator/src/lib.rs | 2 +- validator/src/main.rs | 2 +- 16 files changed, 20 insertions(+), 21 deletions(-) rename runtime/src/{ => svm}/runtime_config.rs (100%) diff --git a/core/src/validator.rs b/core/src/validator.rs index 3adaa699beaa51..aed2731b5298dc 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -96,12 +96,12 @@ use { bank_forks::BankForks, commitment::BlockCommitmentCache, prioritization_fee_cache::PrioritizationFeeCache, - runtime_config::RuntimeConfig, snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, + svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::Slot, diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 106539034a2a81..af9c93ba147241 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -23,11 +23,11 @@ use { bank::{epoch_accounts_hash_utils, Bank}, bank_forks::BankForks, genesis_utils::{self, GenesisConfigInfo}, - 
runtime_config::RuntimeConfig, snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_utils, + svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::Slot, diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index a44c63fec66da9..7aee26a742b79a 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -25,7 +25,6 @@ use { bank::Bank, bank_forks::BankForks, genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - runtime_config::RuntimeConfig, snapshot_archive_info::FullSnapshotArchiveInfo, snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, snapshot_config::SnapshotConfig, @@ -36,6 +35,7 @@ use { SnapshotVersion::{self, V1_2_0}, }, status_cache::MAX_CACHE_ENTRIES, + svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::Slot, diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 1c6f9744437555..7ea5bed687f1a5 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -12,7 +12,7 @@ use { blockstore_processor::ProcessOptions, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, - solana_runtime::runtime_config::RuntimeConfig, + solana_runtime::svm::runtime_config::RuntimeConfig, solana_sdk::clock::Slot, std::{ collections::HashSet, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 2ee80b879eaaab..c30a3742f25662 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -41,7 +41,7 @@ use { commitment::VOTE_THRESHOLD_SIZE, installed_scheduler_pool::BankWithScheduler, prioritization_fee_cache::PrioritizationFeeCache, - runtime_config::RuntimeConfig, + svm::runtime_config::RuntimeConfig, transaction_batch::TransactionBatch, }, solana_sdk::{ diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 32dbb276ee2c7a..09c55fe793efba 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -25,7 +25,7 @@ use { bank_forks::BankForks, commitment::BlockCommitmentCache, genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo}, - runtime_config::RuntimeConfig, + svm::runtime_config::RuntimeConfig, }, solana_sdk::{ account::{create_account_shared_data_for_test, Account, AccountSharedData}, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 515b3684c29fef..261174329588f3 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -48,7 +48,6 @@ use { epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, - runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, stake_account::StakeAccount, @@ -59,8 +58,11 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, - svm::transaction_processor::{ - TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, + svm::{ + runtime_config::RuntimeConfig, + transaction_processor::{ + TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, + }, }, transaction_batch::TransactionBatch, }, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index df51d31e568cee..7a3c1a2c62439a 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -7,7 +7,6 @@ mod tests { StartBlockHeightAndRewards, }, genesis_utils::activate_all_features, - 
runtime_config::RuntimeConfig, serde_snapshot::{ reserialize_bank_with_new_accounts_hash, BankIncrementalSnapshotPersistence, SerdeAccountsHash, SerdeIncrementalAccountsHash, SerdeStyle, SnapshotStreams, @@ -18,6 +17,7 @@ mod tests { StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, }, status_cache::StatusCache, + svm::runtime_config::RuntimeConfig, }, assert_matches::assert_matches, solana_accounts_db::{ diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 0612ac0cca74d2..2e574ef7f89217 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -21,7 +21,6 @@ pub mod non_circulating_supply; pub mod prioritization_fee; pub mod prioritization_fee_cache; pub mod root_bank_cache; -pub mod runtime_config; pub mod serde_snapshot; pub mod snapshot_archive_info; pub mod snapshot_bank_utils; diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index ddcaef833b8275..b9f8300e400789 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -3,12 +3,12 @@ use { bank::{Bank, BankFieldsToDeserialize, BankRc}, builtins::BuiltinPrototype, epoch_stakes::EpochStakes, - runtime_config::RuntimeConfig, serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{ self, SnapshotError, StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, }, stakes::Stakes, + svm::runtime_config::RuntimeConfig, }, bincode::{self, config::Options, Error}, log::*, diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 5494eb1beb716c..d932ab8408fdb4 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -2,7 +2,6 @@ use { crate::{ bank::{Bank, BankFieldsToDeserialize, BankSlotDelta}, builtins::BuiltinPrototype, - runtime_config::RuntimeConfig, serde_snapshot::{ bank_from_streams, bank_to_stream, fields_from_streams, BankIncrementalSnapshotPersistence, SerdeStyle, @@ -24,6 +23,7 @@ use { UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, }, status_cache, + svm::runtime_config::RuntimeConfig, }, bincode::{config::Options, serialize_into}, log::*, diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs index 84ed57077383df..ae25f4b2ea63ac 100644 --- a/runtime/src/svm/mod.rs +++ b/runtime/src/svm/mod.rs @@ -1,4 +1,5 @@ pub mod account_loader; pub mod account_rent_state; +pub mod runtime_config; pub mod transaction_account_state_info; pub mod transaction_processor; diff --git a/runtime/src/runtime_config.rs b/runtime/src/svm/runtime_config.rs similarity index 100% rename from runtime/src/runtime_config.rs rename to runtime/src/svm/runtime_config.rs diff --git a/runtime/src/svm/transaction_processor.rs b/runtime/src/svm/transaction_processor.rs index b7ea1934b30f85..f9fe0a30511dab 100644 --- a/runtime/src/svm/transaction_processor.rs +++ b/runtime/src/svm/transaction_processor.rs @@ -1,10 +1,7 @@ use { - crate::{ - runtime_config::RuntimeConfig, - svm::{ - account_loader::load_accounts, - transaction_account_state_info::TransactionAccountStateInfo, - }, + crate::svm::{ + account_loader::load_accounts, runtime_config::RuntimeConfig, + transaction_account_state_info::TransactionAccountStateInfo, }, log::debug, percentage::Percentage, diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index f041d80e6148e3..270e8d9d816ffe 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -34,7 +34,7 @@ use { solana_rpc_client::{nonblocking, rpc_client::RpcClient}, solana_runtime::{ bank_forks::BankForks, 
genesis_utils::create_genesis_config_with_leader_ex, - runtime_config::RuntimeConfig, snapshot_config::SnapshotConfig, + snapshot_config::SnapshotConfig, svm::runtime_config::RuntimeConfig, }, solana_sdk::{ account::{Account, AccountSharedData}, diff --git a/validator/src/main.rs b/validator/src/main.rs index 94b663cacdf1dd..de1efeddbd111a 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -45,10 +45,10 @@ use { solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::config::RpcLeaderScheduleConfig, solana_runtime::{ - runtime_config::RuntimeConfig, snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::{SnapshotConfig, SnapshotUsage}, snapshot_utils::{self, ArchiveFormat, SnapshotVersion}, + svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::{Slot, DEFAULT_S_PER_SLOT}, From 0e4e81a44c599c5d2f1e558024a42d4cd228c8ee Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Mon, 5 Feb 2024 14:46:32 -0800 Subject: [PATCH 120/401] banking stage: remove spammy packet conversion metric (#35014) --- core/src/banking_stage/unprocessed_transaction_storage.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 7e6f882ed5e32d..ffc408a68971cd 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -775,7 +775,6 @@ impl ThreadLocalUnprocessedPackets { }) .unzip(); - inc_new_counter_info!("banking_stage-packet_conversion", 1); let filtered_count = packets_to_process.len().saturating_sub(transactions.len()); saturating_add_assign!(*total_dropped_packets, filtered_count); From ab30fb5f05d286c169e37c7439ec92e08aeed012 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Mon, 5 Feb 2024 16:16:47 -0800 Subject: [PATCH 121/401] SVM: Move AccountOverrides from accounts-db to SVM (#35091) --- accounts-db/src/lib.rs | 1 - runtime/src/bank.rs | 2 +- runtime/src/svm/account_loader.rs | 4 ++-- {accounts-db/src => runtime/src/svm}/account_overrides.rs | 0 runtime/src/svm/mod.rs | 1 + runtime/src/svm/transaction_processor.rs | 5 ++--- 6 files changed, 6 insertions(+), 7 deletions(-) rename {accounts-db/src => runtime/src/svm}/account_overrides.rs (100%) diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 74fdb8627193ee..ce9908f105ceeb 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -5,7 +5,6 @@ extern crate lazy_static; pub mod account_info; -pub mod account_overrides; pub mod account_storage; pub mod accounts; pub mod accounts_cache; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 261174329588f3..c4b87a917ac8da 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -59,6 +59,7 @@ use { stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, svm::{ + account_overrides::AccountOverrides, runtime_config::RuntimeConfig, transaction_processor::{ TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, @@ -77,7 +78,6 @@ use { }, serde::Serialize, solana_accounts_db::{ - account_overrides::AccountOverrides, accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot, TransactionLoadResult}, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, diff --git a/runtime/src/svm/account_loader.rs b/runtime/src/svm/account_loader.rs index 2bf4b9f3d7d3d5..4b093c75ead2fa 100644 --- a/runtime/src/svm/account_loader.rs +++ 
b/runtime/src/svm/account_loader.rs @@ -1,11 +1,11 @@ use { crate::svm::{ - account_rent_state::RentState, transaction_processor::TransactionProcessingCallback, + account_overrides::AccountOverrides, account_rent_state::RentState, + transaction_processor::TransactionProcessingCallback, }, itertools::Itertools, log::warn, solana_accounts_db::{ - account_overrides::AccountOverrides, accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, nonce_info::NonceFull, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, diff --git a/accounts-db/src/account_overrides.rs b/runtime/src/svm/account_overrides.rs similarity index 100% rename from accounts-db/src/account_overrides.rs rename to runtime/src/svm/account_overrides.rs diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs index ae25f4b2ea63ac..bec00cfd132e37 100644 --- a/runtime/src/svm/mod.rs +++ b/runtime/src/svm/mod.rs @@ -1,4 +1,5 @@ pub mod account_loader; +pub mod account_overrides; pub mod account_rent_state; pub mod runtime_config; pub mod transaction_account_state_info; diff --git a/runtime/src/svm/transaction_processor.rs b/runtime/src/svm/transaction_processor.rs index f9fe0a30511dab..56a3d9a774f58c 100644 --- a/runtime/src/svm/transaction_processor.rs +++ b/runtime/src/svm/transaction_processor.rs @@ -1,12 +1,11 @@ use { crate::svm::{ - account_loader::load_accounts, runtime_config::RuntimeConfig, - transaction_account_state_info::TransactionAccountStateInfo, + account_loader::load_accounts, account_overrides::AccountOverrides, + runtime_config::RuntimeConfig, transaction_account_state_info::TransactionAccountStateInfo, }, log::debug, percentage::Percentage, solana_accounts_db::{ - account_overrides::AccountOverrides, accounts::{LoadedTransaction, TransactionLoadResult}, accounts_file::MatchAccountOwnerError, rent_collector::RentCollector, From 9dca15a5b7a6e0938e45b3249156d84b51832c27 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 5 Feb 2024 16:41:01 -0800 Subject: [PATCH 122/401] Rename priority to compute_unit_price (#35062) * rename several priorities to compute_unit_price * TransactionPriorityDetails -> ComputeBudgetDetails * prioritization_fee_cache: fix comment * transaction_state: fix comments and variable names * immutable_deserialized_packet: fix comment --- core/src/banking_stage/consumer.rs | 6 +- .../immutable_deserialized_packet.rs | 28 ++++---- .../prio_graph_scheduler.rs | 16 +++-- .../scheduler_controller.rs | 10 +-- .../transaction_state.rs | 62 ++++++++-------- .../transaction_state_container.rs | 27 ++++--- .../unprocessed_packet_batches.rs | 27 ++++--- .../unprocessed_transaction_storage.rs | 12 ++-- program-runtime/src/prioritization_fee.rs | 24 +++---- ...y_details.rs => compute_budget_details.rs} | 70 +++++++++---------- runtime/src/lib.rs | 2 +- runtime/src/prioritization_fee.rs | 4 +- runtime/src/prioritization_fee_cache.rs | 21 +++--- 13 files changed, 156 insertions(+), 153 deletions(-) rename runtime/src/{transaction_priority_details.rs => compute_budget_details.rs} (74%) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 406243f21bc561..e7016b0bbb127a 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -24,9 +24,9 @@ use { }, solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, + compute_budget_details::GetComputeBudgetDetails, svm::account_loader::validate_fee_payer, transaction_batch::TransactionBatch, - transaction_priority_details::GetTransactionPriorityDetails, }, 
solana_sdk::{ clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, @@ -586,8 +586,8 @@ impl Consumer { .filter_map(|transaction| { let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set transaction - .get_transaction_priority_details(round_compute_unit_price_enabled) - .map(|details| details.priority) + .get_compute_budget_details(round_compute_unit_price_enabled) + .map(|details| details.compute_unit_price) }) .minmax(); let (min_prioritization_fees, max_prioritization_fees) = diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index 8a9d82e32a38c0..26ede7045d3480 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -1,8 +1,6 @@ use { solana_perf::packet::Packet, - solana_runtime::transaction_priority_details::{ - GetTransactionPriorityDetails, TransactionPriorityDetails, - }, + solana_runtime::compute_budget_details::{ComputeBudgetDetails, GetComputeBudgetDetails}, solana_sdk::{ feature_set, hash::Hash, @@ -42,7 +40,7 @@ pub struct ImmutableDeserializedPacket { transaction: SanitizedVersionedTransaction, message_hash: Hash, is_simple_vote: bool, - priority_details: TransactionPriorityDetails, + compute_budget_details: ComputeBudgetDetails, } impl ImmutableDeserializedPacket { @@ -54,13 +52,13 @@ impl ImmutableDeserializedPacket { let is_simple_vote = packet.meta().is_simple_vote_tx(); // drop transaction if prioritization fails. - let mut priority_details = sanitized_transaction - .get_transaction_priority_details(packet.meta().round_compute_unit_price()) + let mut compute_budget_details = sanitized_transaction + .get_compute_budget_details(packet.meta().round_compute_unit_price()) .ok_or(DeserializedPacketError::PrioritizationFailure)?; - // set priority to zero for vote transactions + // set compute unit price to zero for vote transactions if is_simple_vote { - priority_details.priority = 0; + compute_budget_details.compute_unit_price = 0; }; Ok(Self { @@ -68,7 +66,7 @@ impl ImmutableDeserializedPacket { transaction: sanitized_transaction, message_hash, is_simple_vote, - priority_details, + compute_budget_details, }) } @@ -88,16 +86,16 @@ impl ImmutableDeserializedPacket { self.is_simple_vote } - pub fn priority(&self) -> u64 { - self.priority_details.priority + pub fn compute_unit_price(&self) -> u64 { + self.compute_budget_details.compute_unit_price } pub fn compute_unit_limit(&self) -> u64 { - self.priority_details.compute_unit_limit + self.compute_budget_details.compute_unit_limit } - pub fn priority_details(&self) -> TransactionPriorityDetails { - self.priority_details.clone() + pub fn compute_budget_details(&self) -> ComputeBudgetDetails { + self.compute_budget_details.clone() } // This function deserializes packets into transactions, computes the blake3 hash of transaction @@ -131,7 +129,7 @@ impl PartialOrd for ImmutableDeserializedPacket { impl Ord for ImmutableDeserializedPacket { fn cmp(&self, other: &Self) -> Ordering { - self.priority().cmp(&other.priority()) + self.compute_unit_price().cmp(&other.compute_unit_price()) } } diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index e17f34d3223411..f1be7339f3cd73 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ 
b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -491,7 +491,7 @@ mod tests { crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, solana_cost_model::cost_model::CostModel, - solana_runtime::transaction_priority_details::TransactionPriorityDetails, + solana_runtime::compute_budget_details::ComputeBudgetDetails, solana_sdk::{ compute_budget::ComputeBudgetInstruction, feature_set::FeatureSet, hash::Hash, message::Message, pubkey::Pubkey, signature::Keypair, signer::Signer, @@ -562,12 +562,16 @@ mod tests { >, ) -> TransactionStateContainer { let mut container = TransactionStateContainer::with_capacity(10 * 1024); - for (index, (from_keypair, to_pubkeys, lamports, priority)) in + for (index, (from_keypair, to_pubkeys, lamports, compute_unit_price)) in tx_infos.into_iter().enumerate() { let id = TransactionId::new(index as u64); - let transaction = - prioritized_tranfers(from_keypair.borrow(), to_pubkeys, lamports, priority); + let transaction = prioritized_tranfers( + from_keypair.borrow(), + to_pubkeys, + lamports, + compute_unit_price, + ); let transaction_cost = CostModel::calculate_cost(&transaction, &FeatureSet::default()); let transaction_ttl = SanitizedTransactionTTL { transaction, @@ -576,8 +580,8 @@ mod tests { container.insert_new_transaction( id, transaction_ttl, - TransactionPriorityDetails { - priority, + ComputeBudgetDetails { + compute_unit_price, compute_unit_limit: 1, }, transaction_cost, diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index ed2b807431f45b..394479042557f6 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -311,12 +311,12 @@ impl SchedulerController { let mut error_counts = TransactionErrorMetrics::default(); for chunk in packets.chunks(CHUNK_SIZE) { let mut post_sanitization_count: usize = 0; - let (transactions, priority_details): (Vec<_>, Vec<_>) = chunk + let (transactions, compute_budget_details): (Vec<_>, Vec<_>) = chunk .iter() .filter_map(|packet| { packet .build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) - .map(|tx| (tx, packet.priority_details())) + .map(|tx| (tx, packet.compute_budget_details())) }) .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) .filter(|(tx, _)| { @@ -337,9 +337,9 @@ impl SchedulerController { let post_lock_validation_count = transactions.len(); let mut post_transaction_check_count: usize = 0; - for ((transaction, priority_details), _) in transactions + for ((transaction, compute_budget_details), _) in transactions .into_iter() - .zip(priority_details) + .zip(compute_budget_details) .zip(check_results) .filter(|(_, check_result)| check_result.0.is_ok()) { @@ -355,7 +355,7 @@ impl SchedulerController { if self.container.insert_new_transaction( transaction_id, transaction_ttl, - priority_details, + compute_budget_details, transaction_cost, ) { saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index 650ffa1cd3ce7e..e8878e25c006f3 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -1,6 +1,6 @@ use { solana_cost_model::transaction_cost::TransactionCost, - 
solana_runtime::transaction_priority_details::TransactionPriorityDetails, + solana_runtime::compute_budget_details::ComputeBudgetDetails, solana_sdk::{slot_history::Slot, transaction::SanitizedTransaction}, }; @@ -34,13 +34,13 @@ pub(crate) enum TransactionState { /// The transaction is available for scheduling. Unprocessed { transaction_ttl: SanitizedTransactionTTL, - transaction_priority_details: TransactionPriorityDetails, + compute_budget_details: ComputeBudgetDetails, transaction_cost: TransactionCost, forwarded: bool, }, /// The transaction is currently scheduled or being processed. Pending { - transaction_priority_details: TransactionPriorityDetails, + compute_budget_details: ComputeBudgetDetails, transaction_cost: TransactionCost, forwarded: bool, }, @@ -50,28 +50,28 @@ impl TransactionState { /// Creates a new `TransactionState` in the `Unprocessed` state. pub(crate) fn new( transaction_ttl: SanitizedTransactionTTL, - transaction_priority_details: TransactionPriorityDetails, + compute_budget_details: ComputeBudgetDetails, transaction_cost: TransactionCost, ) -> Self { Self::Unprocessed { transaction_ttl, - transaction_priority_details, + compute_budget_details, transaction_cost, forwarded: false, } } - /// Returns a reference to the priority details of the transaction. - pub(crate) fn transaction_priority_details(&self) -> &TransactionPriorityDetails { + /// Returns a reference to the compute budget details of the transaction. + pub(crate) fn compute_budget_details(&self) -> &ComputeBudgetDetails { match self { Self::Unprocessed { - transaction_priority_details, + compute_budget_details, .. - } => transaction_priority_details, + } => compute_budget_details, Self::Pending { - transaction_priority_details, + compute_budget_details, .. - } => transaction_priority_details, + } => compute_budget_details, } } @@ -87,9 +87,9 @@ impl TransactionState { } } - /// Returns the priority of the transaction. - pub(crate) fn priority(&self) -> u64 { - self.transaction_priority_details().priority + /// Returns the compute unit price of the transaction. + pub(crate) fn compute_unit_price(&self) -> u64 { + self.compute_budget_details().compute_unit_price } /// Returns whether or not the transaction has already been forwarded. @@ -119,12 +119,12 @@ impl TransactionState { match self.take() { TransactionState::Unprocessed { transaction_ttl, - transaction_priority_details, + compute_budget_details, transaction_cost, forwarded, } => { *self = TransactionState::Pending { - transaction_priority_details, + compute_budget_details, transaction_cost, forwarded, }; @@ -146,13 +146,13 @@ impl TransactionState { match self.take() { TransactionState::Unprocessed { .. 
} => panic!("already unprocessed"), TransactionState::Pending { - transaction_priority_details, + compute_budget_details, transaction_cost, forwarded, } => { *self = Self::Unprocessed { transaction_ttl, - transaction_priority_details, + compute_budget_details, transaction_cost, forwarded, } @@ -179,8 +179,8 @@ impl TransactionState { core::mem::replace( self, Self::Pending { - transaction_priority_details: TransactionPriorityDetails { - priority: 0, + compute_budget_details: ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: 0, }, transaction_cost: TransactionCost::SimpleVote { @@ -203,7 +203,7 @@ mod tests { }, }; - fn create_transaction_state(priority: u64) -> TransactionState { + fn create_transaction_state(compute_unit_price: u64) -> TransactionState { let from_keypair = Keypair::new(); let ixs = vec![ system_instruction::transfer( @@ -211,7 +211,7 @@ mod tests { &solana_sdk::pubkey::new_rand(), 1, ), - ComputeBudgetInstruction::set_compute_unit_price(priority), + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), ]; let message = Message::new(&ixs, Some(&from_keypair.pubkey())); let tx = Transaction::new(&[&from_keypair], message, Hash::default()); @@ -227,8 +227,8 @@ mod tests { TransactionState::new( transaction_ttl, - TransactionPriorityDetails { - priority, + ComputeBudgetDetails { + compute_unit_price, compute_unit_limit: 0, }, transaction_cost, @@ -294,16 +294,16 @@ mod tests { } #[test] - fn test_transaction_priority_details() { - let priority = 15; - let mut transaction_state = create_transaction_state(priority); - assert_eq!(transaction_state.priority(), priority); + fn test_compute_unit_price() { + let compute_unit_price = 15; + let mut transaction_state = create_transaction_state(compute_unit_price); + assert_eq!(transaction_state.compute_unit_price(), compute_unit_price); - // ensure priority is not lost through state transitions + // ensure compute unit price is not lost through state transitions let transaction_ttl = transaction_state.transition_to_pending(); - assert_eq!(transaction_state.priority(), priority); + assert_eq!(transaction_state.compute_unit_price(), compute_unit_price); transaction_state.transition_to_unprocessed(transaction_ttl); - assert_eq!(transaction_state.priority(), priority); + assert_eq!(transaction_state.compute_unit_price(), compute_unit_price); } #[test] diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index f0688dee67bb5f..e314a3e49cda83 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -7,7 +7,7 @@ use { itertools::MinMaxResult, min_max_heap::MinMaxHeap, solana_cost_model::transaction_cost::TransactionCost, - solana_runtime::transaction_priority_details::TransactionPriorityDetails, + solana_runtime::compute_budget_details::ComputeBudgetDetails, std::collections::HashMap, }; @@ -99,18 +99,14 @@ impl TransactionStateContainer { &mut self, transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, - transaction_priority_details: TransactionPriorityDetails, + compute_budget_details: ComputeBudgetDetails, transaction_cost: TransactionCost, ) -> bool { let priority_id = - TransactionPriorityId::new(transaction_priority_details.priority, transaction_id); + TransactionPriorityId::new(compute_budget_details.compute_unit_price, transaction_id); 
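To make the resulting queue order concrete, a small sketch with made-up ids and prices; it assumes `TransactionPriorityId`'s derived ordering compares the price field first, which is what the max-heap usage above relies on:

```rust
// Sketch: the container's priority queue pops the entry with the
// highest compute unit price first.
let high = TransactionPriorityId::new(200, TransactionId::new(1));
let low = TransactionPriorityId::new(100, TransactionId::new(2));
assert!(high > low);
```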
self.id_to_transaction_state.insert( transaction_id, - TransactionState::new( - transaction_ttl, - transaction_priority_details, - transaction_cost, - ), + TransactionState::new(transaction_ttl, compute_budget_details, transaction_cost), ); self.push_id_into_queue(priority_id) } @@ -125,7 +121,8 @@ impl TransactionStateContainer { let transaction_state = self .get_mut_transaction_state(&transaction_id) .expect("transaction must exist"); - let priority_id = TransactionPriorityId::new(transaction_state.priority(), transaction_id); + let priority_id = + TransactionPriorityId::new(transaction_state.compute_unit_price(), transaction_id); transaction_state.transition_to_unprocessed(transaction_ttl); self.push_id_into_queue(priority_id); } @@ -184,7 +181,7 @@ mod tests { priority: u64, ) -> ( SanitizedTransactionTTL, - TransactionPriorityDetails, + ComputeBudgetDetails, TransactionCost, ) { let from_keypair = Keypair::new(); @@ -209,8 +206,8 @@ mod tests { }; ( transaction_ttl, - TransactionPriorityDetails { - priority, + ComputeBudgetDetails { + compute_unit_price: priority, compute_unit_limit: 0, }, transaction_cost, @@ -220,12 +217,12 @@ mod tests { fn push_to_container(container: &mut TransactionStateContainer, num: usize) { for id in 0..num as u64 { let priority = id; - let (transaction_ttl, transaction_priority_details, transaction_cost) = + let (transaction_ttl, compute_budget_details, transaction_cost) = test_transaction(priority); container.insert_new_transaction( TransactionId::new(id), transaction_ttl, - transaction_priority_details, + compute_budget_details, transaction_cost, ); } @@ -251,7 +248,7 @@ mod tests { container .id_to_transaction_state .iter() - .map(|ts| ts.1.priority()) + .map(|ts| ts.1.compute_unit_price()) .next() .unwrap(), 4 diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index 9341fd4a54ec61..b87cfef291b991 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -49,8 +49,8 @@ impl PartialOrd for DeserializedPacket { impl Ord for DeserializedPacket { fn cmp(&self, other: &Self) -> Ordering { self.immutable_section() - .priority() - .cmp(&other.immutable_section().priority()) + .compute_unit_price() + .cmp(&other.immutable_section().compute_unit_price()) } } @@ -193,12 +193,16 @@ impl UnprocessedPacketBatches { self.packet_priority_queue.is_empty() } - pub fn get_min_priority(&self) -> Option { - self.packet_priority_queue.peek_min().map(|x| x.priority()) + pub fn get_min_compute_unit_price(&self) -> Option { + self.packet_priority_queue + .peek_min() + .map(|x| x.compute_unit_price()) } - pub fn get_max_priority(&self) -> Option { - self.packet_priority_queue.peek_max().map(|x| x.priority()) + pub fn get_max_compute_unit_price(&self) -> Option { + self.packet_priority_queue + .peek_max() + .map(|x| x.compute_unit_price()) } fn push_internal(&mut self, deserialized_packet: DeserializedPacket) { @@ -325,12 +329,15 @@ mod tests { DeserializedPacket::new(packet).unwrap() } - fn packet_with_priority_details(priority: u64, compute_unit_limit: u64) -> DeserializedPacket { + fn packet_with_compute_budget_details( + compute_unit_price: u64, + compute_unit_limit: u64, + ) -> DeserializedPacket { let from_account = solana_sdk::pubkey::new_rand(); let tx = Transaction::new_unsigned(Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit as u32), - ComputeBudgetInstruction::set_compute_unit_price(priority), + 
ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), system_instruction::transfer(&from_account, &solana_sdk::pubkey::new_rand(), 1), ], Some(&from_account), @@ -356,10 +363,10 @@ mod tests { #[test] fn test_unprocessed_packet_batches_insert_minimum_packet_over_capacity() { let heavier_packet_weight = 2; - let heavier_packet = packet_with_priority_details(heavier_packet_weight, 200_000); + let heavier_packet = packet_with_compute_budget_details(heavier_packet_weight, 200_000); let lesser_packet_weight = heavier_packet_weight - 1; - let lesser_packet = packet_with_priority_details(lesser_packet_weight, 200_000); + let lesser_packet = packet_with_compute_budget_details(lesser_packet_weight, 200_000); // Test that the heavier packet is actually heavier let mut unprocessed_packet_batches = UnprocessedPacketBatches::with_capacity(2); diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index ffc408a68971cd..3ed633d982e9b1 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -286,7 +286,7 @@ impl UnprocessedTransactionStorage { match self { Self::VoteStorage(_) => None, Self::LocalTransactionStorage(transaction_storage) => { - transaction_storage.get_min_priority() + transaction_storage.get_min_compute_unit_price() } } } @@ -295,7 +295,7 @@ impl UnprocessedTransactionStorage { match self { Self::VoteStorage(_) => None, Self::LocalTransactionStorage(transaction_storage) => { - transaction_storage.get_max_priority() + transaction_storage.get_max_compute_unit_price() } } } @@ -547,12 +547,12 @@ impl ThreadLocalUnprocessedPackets { self.unprocessed_packet_batches.len() } - pub fn get_min_priority(&self) -> Option { - self.unprocessed_packet_batches.get_min_priority() + pub fn get_min_compute_unit_price(&self) -> Option { + self.unprocessed_packet_batches.get_min_compute_unit_price() } - pub fn get_max_priority(&self) -> Option { - self.unprocessed_packet_batches.get_max_priority() + pub fn get_max_compute_unit_price(&self) -> Option { + self.unprocessed_packet_batches.get_max_compute_unit_price() } fn max_receive_size(&self) -> usize { diff --git a/program-runtime/src/prioritization_fee.rs b/program-runtime/src/prioritization_fee.rs index e77ae15aac7f21..398b8d310be854 100644 --- a/program-runtime/src/prioritization_fee.rs +++ b/program-runtime/src/prioritization_fee.rs @@ -10,15 +10,15 @@ pub enum PrioritizationFeeType { #[derive(Default, Debug, PartialEq, Eq)] pub struct PrioritizationFeeDetails { fee: u64, - priority: u64, + compute_unit_price: u64, } impl PrioritizationFeeDetails { pub fn new(fee_type: PrioritizationFeeType, compute_unit_limit: u64) -> Self { match fee_type { - PrioritizationFeeType::ComputeUnitPrice(cu_price) => { + PrioritizationFeeType::ComputeUnitPrice(compute_unit_price) => { let micro_lamport_fee: MicroLamports = - (cu_price as u128).saturating_mul(compute_unit_limit as u128); + (compute_unit_price as u128).saturating_mul(compute_unit_limit as u128); let fee = micro_lamport_fee .saturating_add(MICRO_LAMPORTS_PER_LAMPORT.saturating_sub(1) as u128) .checked_div(MICRO_LAMPORTS_PER_LAMPORT as u128) @@ -27,7 +27,7 @@ impl PrioritizationFeeDetails { Self { fee, - priority: cu_price, + compute_unit_price, } } } @@ -37,8 +37,8 @@ impl PrioritizationFeeDetails { self.fee } - pub fn get_priority(&self) -> u64 { - self.priority + pub fn get_compute_unit_price(&self) -> u64 { + self.compute_unit_price } } @@ 
-62,7 +62,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(MICRO_LAMPORTS_PER_LAMPORT - 1), 1), FeeDetails { fee: 1, - priority: MICRO_LAMPORTS_PER_LAMPORT - 1, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT - 1, }, "should round up (<1.0) lamport fee to 1 lamport" ); @@ -71,7 +71,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(MICRO_LAMPORTS_PER_LAMPORT), 1), FeeDetails { fee: 1, - priority: MICRO_LAMPORTS_PER_LAMPORT, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT, }, ); @@ -79,7 +79,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(MICRO_LAMPORTS_PER_LAMPORT + 1), 1), FeeDetails { fee: 2, - priority: MICRO_LAMPORTS_PER_LAMPORT + 1, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT + 1, }, "should round up (>1.0) lamport fee to 2 lamports" ); @@ -88,7 +88,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(200), 100_000), FeeDetails { fee: 20, - priority: 200, + compute_unit_price: 200, }, ); @@ -99,7 +99,7 @@ mod test { ), FeeDetails { fee: u64::MAX, - priority: MICRO_LAMPORTS_PER_LAMPORT, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT, }, ); @@ -107,7 +107,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(u64::MAX), u64::MAX), FeeDetails { fee: u64::MAX, - priority: u64::MAX, + compute_unit_price: u64::MAX, }, ); } diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/compute_budget_details.rs similarity index 74% rename from runtime/src/transaction_priority_details.rs rename to runtime/src/compute_budget_details.rs index 284acb791a2e6a..69756d4567ff70 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/compute_budget_details.rs @@ -8,34 +8,34 @@ use { }; #[derive(Clone, Debug, PartialEq, Eq)] -pub struct TransactionPriorityDetails { - pub priority: u64, +pub struct ComputeBudgetDetails { + pub compute_unit_price: u64, pub compute_unit_limit: u64, } -pub trait GetTransactionPriorityDetails { - fn get_transaction_priority_details( +pub trait GetComputeBudgetDetails { + fn get_compute_budget_details( &self, round_compute_unit_price_enabled: bool, - ) -> Option; + ) -> Option; fn process_compute_budget_instruction<'a>( instructions: impl Iterator, _round_compute_unit_price_enabled: bool, - ) -> Option { + ) -> Option { let compute_budget_limits = process_compute_budget_instructions(instructions).ok()?; - Some(TransactionPriorityDetails { - priority: compute_budget_limits.compute_unit_price, + Some(ComputeBudgetDetails { + compute_unit_price: compute_budget_limits.compute_unit_price, compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), }) } } -impl GetTransactionPriorityDetails for SanitizedVersionedTransaction { - fn get_transaction_priority_details( +impl GetComputeBudgetDetails for SanitizedVersionedTransaction { + fn get_compute_budget_details( &self, round_compute_unit_price_enabled: bool, - ) -> Option { + ) -> Option { Self::process_compute_budget_instruction( self.get_message().program_instructions_iter(), round_compute_unit_price_enabled, @@ -43,11 +43,11 @@ impl GetTransactionPriorityDetails for SanitizedVersionedTransaction { } } -impl GetTransactionPriorityDetails for SanitizedTransaction { - fn get_transaction_priority_details( +impl GetComputeBudgetDetails for SanitizedTransaction { + fn get_compute_budget_details( &self, round_compute_unit_price_enabled: bool, - ) -> Option { + ) -> Option { Self::process_compute_budget_instruction( self.message().program_instructions_iter(), round_compute_unit_price_enabled, @@ -70,7 +70,7 @@ mod tests { }; #[test] - fn 
test_get_priority_with_valid_request_heap_frame_tx() { + fn test_get_compute_budget_details_with_valid_request_heap_frame_tx() { let keypair = Keypair::new(); let transaction = Transaction::new_unsigned(Message::new( &[ @@ -85,9 +85,9 @@ mod tests { let sanitized_versioned_transaction = SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_versioned_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, @@ -98,9 +98,9 @@ mod tests { let sanitized_transaction = SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, @@ -109,7 +109,7 @@ mod tests { } #[test] - fn test_get_priority_with_valid_set_compute_units_limit() { + fn test_get_compute_budget_details_with_valid_set_compute_units_limit() { let requested_cu = 101u32; let keypair = Keypair::new(); let transaction = Transaction::new_unsigned(Message::new( @@ -125,9 +125,9 @@ mod tests { let sanitized_versioned_transaction = SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_versioned_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: requested_cu as u64, }) ); @@ -136,16 +136,16 @@ mod tests { let sanitized_transaction = SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: requested_cu as u64, }) ); } #[test] - fn test_get_priority_with_valid_set_compute_unit_price() { + fn test_get_compute_budget_details_with_valid_set_compute_unit_price() { let requested_price = 1_000; let keypair = Keypair::new(); let transaction = Transaction::new_unsigned(Message::new( @@ -161,9 +161,9 @@ mod tests { let sanitized_versioned_transaction = SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: requested_price, + sanitized_versioned_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: requested_price, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, @@ -174,9 +174,9 @@ mod tests { let sanitized_transaction = SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: requested_price, + sanitized_transaction.get_compute_budget_details(false), + 
Some(ComputeBudgetDetails { + compute_unit_price: requested_price, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 2e574ef7f89217..ba6ca17d427931 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -11,6 +11,7 @@ pub mod bank_forks; pub mod bank_utils; pub mod builtins; pub mod commitment; +pub mod compute_budget_details; mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; @@ -37,7 +38,6 @@ pub mod static_ids; pub mod status_cache; pub mod svm; pub mod transaction_batch; -pub mod transaction_priority_details; #[macro_use] extern crate solana_metrics; diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index bb5f7632c97e01..90cc66b981ce3a 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -124,9 +124,9 @@ pub enum PrioritizationFeeError { // minimum fees. FailGetTransactionAccountLocks, - // Not able to read priority details, including compute-unit price, from transaction. + // Not able to read compute budget details, including compute-unit price, from transaction. // Compute-unit price is required to update block minimum fees. - FailGetTransactionPriorityDetails, + FailGetComputeBudgetDetails, // Block is already finalized, trying to finalize it again is usually unexpected BlockIsAlreadyFinalized, diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index ece749387a9147..839519020ff42f 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -1,8 +1,5 @@ use { - crate::{ - bank::Bank, prioritization_fee::*, - transaction_priority_details::GetTransactionPriorityDetails, - }, + crate::{bank::Bank, compute_budget_details::GetComputeBudgetDetails, prioritization_fee::*}, crossbeam_channel::{unbounded, Receiver, Sender}, dashmap::DashMap, log::*, @@ -208,8 +205,8 @@ impl PrioritizationFeeCache { } } - /// Update with a list of non-vote transactions' tx_priority_details and tx_account_locks; Only - /// transactions have both valid priority_detail and account_locks will be used to update + /// Update with a list of non-vote transactions' compute_budget_details and account_locks; Only + /// transactions have both valid compute_budget_details and account_locks will be used to update /// fee_cache asynchronously. 
pub fn update<'a>(&self, bank: &Bank, txs: impl Iterator) { let (_, send_updates_time) = measure!( @@ -222,19 +219,19 @@ impl PrioritizationFeeCache { } let round_compute_unit_price_enabled = false; // TODO: bank.feture_set.is_active(round_compute_unit_price) - let priority_details = sanitized_transaction - .get_transaction_priority_details(round_compute_unit_price_enabled); + let compute_budget_details = sanitized_transaction + .get_compute_budget_details(round_compute_unit_price_enabled); let account_locks = sanitized_transaction .get_account_locks(bank.get_transaction_account_lock_limit()); - if priority_details.is_none() || account_locks.is_err() { + if compute_budget_details.is_none() || account_locks.is_err() { continue; } - let priority_details = priority_details.unwrap(); + let compute_budget_details = compute_budget_details.unwrap(); // filter out any transaction that requests zero compute_unit_limit // since its priority fee amount is not instructive - if priority_details.compute_unit_limit == 0 { + if compute_budget_details.compute_unit_limit == 0 { continue; } @@ -251,7 +248,7 @@ impl PrioritizationFeeCache { .send(CacheServiceUpdate::TransactionUpdate { slot: bank.slot(), bank_id: bank.bank_id(), - transaction_fee: priority_details.priority, + transaction_fee: compute_budget_details.compute_unit_price, writable_accounts, }) .unwrap_or_else(|err| { From f01f361f13694d2aeec67de520acac5ad568aa4d Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Fri, 2 Feb 2024 10:22:20 -0700 Subject: [PATCH 123/401] Revert "Add rpc support for partitioned rewards (#34773)" This reverts commit 22500c23dbb2e36c9b7c67a21c0172e509c94ca2. --- rpc/src/rpc.rs | 157 +++++---------------- validator/src/bin/solana-test-validator.rs | 5 +- 2 files changed, 38 insertions(+), 124 deletions(-) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 16d78a913bc90b..5cc5b82344e0d1 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -7,7 +7,7 @@ use { base64::{prelude::BASE64_STANDARD, Engine}, bincode::{config::Options, serialize}, crossbeam_channel::{unbounded, Receiver, Sender}, - jsonrpc_core::{futures::future, types::error, BoxFuture, Error, ErrorCode, Metadata, Result}, + jsonrpc_core::{futures::future, types::error, BoxFuture, Error, Metadata, Result}, jsonrpc_derive::rpc, solana_account_decoder::{ parse_token::{is_known_spl_token_id, token_amount_to_ui_amount, UiTokenAmount}, @@ -62,10 +62,6 @@ use { clock::{Slot, UnixTimestamp, MAX_RECENT_BLOCKHASHES}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, - epoch_rewards_hasher::EpochRewardsHasher, - epoch_rewards_partition_data::{ - get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, - }, epoch_schedule::EpochSchedule, exit::Exit, feature_set, @@ -523,38 +519,6 @@ impl JsonRpcRequestProcessor { }) } - async fn get_reward_map( - &self, - slot: Slot, - addresses: &[String], - reward_type_filter: &F, - config: &RpcEpochConfig, - ) -> Result> - where - F: Fn(RewardType) -> bool, - { - let Ok(Some(block)) = self - .get_block( - slot, - Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), - ) - .await - else { - return Err(RpcCustomError::BlockNotAvailable { slot }.into()); - }; - - Ok(block - .rewards - .unwrap_or_default() - .into_iter() - .filter(|reward| { - reward.reward_type.is_some_and(reward_type_filter) - && addresses.contains(&reward.pubkey) - }) - .map(|reward| (reward.clone().pubkey, (reward, slot))) - .collect()) - } - pub async fn get_inflation_reward( &self, addresses: Vec, @@ 
-563,20 +527,18 @@ impl JsonRpcRequestProcessor { let config = config.unwrap_or_default(); let epoch_schedule = self.get_epoch_schedule(); let first_available_block = self.get_first_available_block().await; - let slot_context = RpcContextConfig { - commitment: config.commitment, - min_context_slot: config.min_context_slot, - }; let epoch = match config.epoch { Some(epoch) => epoch, None => epoch_schedule - .get_epoch(self.get_slot(slot_context)?) + .get_epoch(self.get_slot(RpcContextConfig { + commitment: config.commitment, + min_context_slot: config.min_context_slot, + })?) .saturating_sub(1), }; - // Rewards for this epoch are found in the first confirmed block of the next epoch - let rewards_epoch = epoch.saturating_add(1); - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(rewards_epoch); + // Rewards for this epoch are found in the first confirmed block of the next epoch + let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch.saturating_add(1)); if first_slot_in_epoch < first_available_block { if self.bigtable_ledger_storage.is_some() { return Err(RpcCustomError::LongTermStorageSlotSkipped { @@ -592,8 +554,6 @@ impl JsonRpcRequestProcessor { } } - let bank = self.get_bank_with_config(slot_context)?; - let first_confirmed_block_in_epoch = *self .get_blocks_with_limit(first_slot_in_epoch, 1, config.commitment) .await? @@ -601,94 +561,44 @@ impl JsonRpcRequestProcessor { .ok_or(RpcCustomError::BlockNotAvailable { slot: first_slot_in_epoch, })?; - let partitioned_epoch_reward_enabled_slot = bank - .feature_set - .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()); - let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot - .map(|slot| slot <= first_confirmed_block_in_epoch) - .unwrap_or(false); - - let mut reward_map: HashMap = { - let addresses: Vec = - addresses.iter().map(|pubkey| pubkey.to_string()).collect(); - self.get_reward_map( + let Ok(Some(first_confirmed_block)) = self + .get_block( first_confirmed_block_in_epoch, - &addresses, - &|reward_type| -> bool { - reward_type == RewardType::Voting - || (!partitioned_epoch_reward_enabled && reward_type == RewardType::Staking) - }, - &config, + Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), ) - .await? 
+ .await + else { + return Err(RpcCustomError::BlockNotAvailable { + slot: first_confirmed_block_in_epoch, + } + .into()); }; - if partitioned_epoch_reward_enabled { - let partition_data_address = get_epoch_rewards_partition_data_address(rewards_epoch); - let partition_data_account = - bank.get_account(&partition_data_address) - .ok_or_else(|| Error { - code: ErrorCode::InternalError, - message: format!( - "Partition data account not found for epoch {:?} at {:?}", - epoch, partition_data_address - ), - data: None, - })?; - let EpochRewardsPartitionDataVersion::V0(partition_data) = - bincode::deserialize(partition_data_account.data()) - .map_err(|_| Error::internal_error())?; - let hasher = EpochRewardsHasher::new( - partition_data.num_partitions, - &partition_data.parent_blockhash, - ); - let mut partition_index_addresses: HashMap> = HashMap::new(); - for address in addresses.iter() { - let address_string = address.to_string(); - // Skip this address if (Voting) rewards were already found in - // the first block of the epoch - if !reward_map.contains_key(&address_string) { - let partition_index = hasher.clone().hash_address_to_partition(address); - partition_index_addresses - .entry(partition_index) - .and_modify(|list| list.push(address_string.clone())) - .or_insert(vec![address_string]); - } - } + let addresses: Vec = addresses + .into_iter() + .map(|pubkey| pubkey.to_string()) + .collect(); - let block_list = self - .get_blocks_with_limit( - first_confirmed_block_in_epoch + 1, - partition_data.num_partitions, - config.commitment, - ) - .await?; - - for (partition_index, addresses) in partition_index_addresses.iter() { - let slot = *block_list - .get(*partition_index) - .ok_or_else(Error::internal_error)?; - - let index_reward_map = self - .get_reward_map( - slot, - addresses, - &|reward_type| -> bool { reward_type == RewardType::Staking }, - &config, - ) - .await?; - reward_map.extend(index_reward_map); - } - } + let reward_hash: HashMap = first_confirmed_block + .rewards + .unwrap_or_default() + .into_iter() + .filter_map(|reward| match reward.reward_type? 
{ + RewardType::Staking | RewardType::Voting => addresses + .contains(&reward.pubkey) + .then(|| (reward.clone().pubkey, reward)), + _ => None, + }) + .collect(); let rewards = addresses .iter() .map(|address| { - if let Some((reward, slot)) = reward_map.get(&address.to_string()) { + if let Some(reward) = reward_hash.get(address) { return Some(RpcInflationReward { epoch, - effective_slot: *slot, + effective_slot: first_confirmed_block_in_epoch, amount: reward.lamports.unsigned_abs(), post_balance: reward.post_balance, commission: reward.commission, @@ -697,6 +607,7 @@ impl JsonRpcRequestProcessor { None }) .collect(); + Ok(rewards) } diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 3c851e7788e2c3..aee5fc039df410 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -19,6 +19,7 @@ use { account::AccountSharedData, clock::Slot, epoch_schedule::EpochSchedule, + feature_set, native_token::sol_to_lamports, pubkey::Pubkey, rent::Rent, @@ -348,7 +349,9 @@ fn main() { exit(1); }); - let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); + let mut features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); + // Remove this when client support is ready for the enable_partitioned_epoch_reward feature + features_to_deactivate.push(feature_set::enable_partitioned_epoch_reward::id()); if TestValidatorGenesis::ledger_exists(&ledger_path) { for (name, long) in &[ From e76da4a8fee235a9009fdb91629a01860dd4f6e5 Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Fri, 2 Feb 2024 10:22:56 -0700 Subject: [PATCH 124/401] Revert "Support json parsing of epoch-rewards partition data sysvar accounts (#34914)" This reverts commit b9947bd327ef44e0bcc921c76dcf851f04677e63. 
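For reference, the account payload that the parse_sysvar.rs hunk below stops parsing is a bincode-encoded, version-tagged enum. A minimal round-trip sketch of that pattern, using stand-in types rather than the actual sdk definitions (the real PartitionData carries a Hash, simplified here to a byte array; assumes serde, serde_derive, and bincode as dependencies):

    use serde_derive::{Deserialize, Serialize};

    // Stand-ins mirroring the shape of the sdk types referenced in the diffs below.
    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
    enum EpochRewardsPartitionDataVersion {
        V0(PartitionData),
    }

    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
    struct PartitionData {
        num_partitions: usize,
        parent_blockhash: [u8; 32],
    }

    fn main() {
        let original = EpochRewardsPartitionDataVersion::V0(PartitionData {
            num_partitions: 10,
            parent_blockhash: [0u8; 32],
        });
        let bytes = bincode::serialize(&original).unwrap();
        // The removed parser matched on the deserialized version tag like this:
        let EpochRewardsPartitionDataVersion::V0(data) =
            bincode::deserialize(&bytes).unwrap();
        assert_eq!(data.num_partitions, 10);
    }

Tagging the enum variant up front is what lets later versions of the account data be added without breaking old readers.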
--- account-decoder/src/parse_sysvar.rs | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs index 3fda8e8560c623..35746949c7f9ef 100644 --- a/account-decoder/src/parse_sysvar.rs +++ b/account-decoder/src/parse_sysvar.rs @@ -9,7 +9,6 @@ use { bv::BitVec, solana_sdk::{ clock::{Clock, Epoch, Slot, UnixTimestamp}, - epoch_rewards_partition_data::EpochRewardsPartitionDataVersion, epoch_schedule::EpochSchedule, pubkey::Pubkey, rent::Rent, @@ -97,24 +96,7 @@ pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result(data) - { - let EpochRewardsPartitionDataVersion::V0(partition_data) = - epoch_rewards_partition_data; - Some(SysvarAccountType::EpochRewardsPartitionData( - UiEpochRewardsPartitionData { - version: 0, - num_partitions: partition_data.num_partitions as u64, - parent_blockhash: partition_data.parent_blockhash.to_string(), - }, - )) - } else { - None - } + None } }; parsed_account.ok_or(ParseAccountError::AccountNotParsable( @@ -138,7 +120,6 @@ pub enum SysvarAccountType { StakeHistory(Vec), LastRestartSlot(UiLastRestartSlot), EpochRewards(EpochRewards), - EpochRewardsPartitionData(UiEpochRewardsPartitionData), } #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] @@ -258,14 +239,6 @@ pub struct UiLastRestartSlot { pub last_restart_slot: Slot, } -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] -#[serde(rename_all = "camelCase")] -pub struct UiEpochRewardsPartitionData { - pub version: u32, - pub num_partitions: u64, - pub parent_blockhash: String, -} - #[cfg(test)] mod test { #[allow(deprecated)] From 57634e44194a3e3cb95ba622319fe7d37bf75bc6 Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Fri, 2 Feb 2024 10:23:29 -0700 Subject: [PATCH 125/401] Revert "Fix epoch rewards partition-data program owner (#34913)" This reverts commit 7ebe0bccd69abf0c5cad253bb916e59d5fbb23a4. --- runtime/src/bank.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c4b87a917ac8da..d92f71b21ebb9c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3609,7 +3609,7 @@ impl Bank { let new_account = AccountSharedData::new_data( account_balance, &epoch_rewards_partition_data, - &solana_sdk::sysvar::id(), + &solana_sdk::stake::program::id(), ) .unwrap(); From d7179e4fa0c19402c28eb2f83bc7cb8f6d79dbbc Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Fri, 2 Feb 2024 10:23:53 -0700 Subject: [PATCH 126/401] Revert "Define epoch-rewards partition data program id (#34862)" This reverts commit 8aa726bfdfbdb630196815204e5538ebcb3e19ff. 
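The bank.rs and epoch_rewards_partition_data.rs hunks below move the partition-data PDA back to the stake program id and the single "EpochRewardsPartitionData" seed. A quick standalone illustration of that post-revert derivation (assumes solana-sdk as a dependency; the epoch value is an arbitrary example):

    use solana_sdk::pubkey::Pubkey;

    // Same seeds and owner as the reverted-to get_epoch_rewards_partition_data_address.
    fn partition_data_address(epoch: u64) -> Pubkey {
        let (address, _bump_seed) = Pubkey::find_program_address(
            &[b"EpochRewardsPartitionData", &epoch.to_le_bytes()],
            &solana_sdk::stake::program::id(),
        );
        address
    }

    fn main() {
        // find_program_address walks bump seeds until it finds an off-curve
        // result, so a given (seeds, program id) pair always yields the same
        // deterministic address.
        println!("epoch 540 -> {}", partition_data_address(540));
    }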
--- runtime/src/bank.rs | 3 ++- sdk/program/src/epoch_rewards_partition_data.rs | 15 +++++---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d92f71b21ebb9c..6301b306f22bc2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -132,7 +132,7 @@ use { }, epoch_info::EpochInfo, epoch_rewards_partition_data::{ - get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, + get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, HasherKind, PartitionData, }, epoch_schedule::EpochSchedule, @@ -3601,6 +3601,7 @@ impl Bank { let epoch_rewards_partition_data = EpochRewardsPartitionDataVersion::V0(PartitionData { num_partitions, parent_blockhash, + hasher_kind: HasherKind::Sip13, }); let address = get_epoch_rewards_partition_data_address(self.epoch()); diff --git a/sdk/program/src/epoch_rewards_partition_data.rs b/sdk/program/src/epoch_rewards_partition_data.rs index 2ff511af8fb72b..62e75ca5112d5a 100644 --- a/sdk/program/src/epoch_rewards_partition_data.rs +++ b/sdk/program/src/epoch_rewards_partition_data.rs @@ -8,14 +8,7 @@ pub enum EpochRewardsPartitionDataVersion { V0(PartitionData), } -impl EpochRewardsPartitionDataVersion { - pub fn get_hasher_kind(&self) -> HasherKind { - match self { - EpochRewardsPartitionDataVersion::V0(_) => HasherKind::Sip13, - } - } -} - +#[repr(u8)] #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] pub enum HasherKind { Sip13, @@ -28,12 +21,14 @@ pub struct PartitionData { pub num_partitions: usize, /// Blockhash of the last block of the previous epoch, used to create EpochRewardsHasher pub parent_blockhash: Hash, + /// Kind of hasher used to generate partitions + pub hasher_kind: HasherKind, } pub fn get_epoch_rewards_partition_data_address(epoch: u64) -> Pubkey { let (address, _bump_seed) = Pubkey::find_program_address( - &[b"EpochRewards", b"PartitionData", &epoch.to_le_bytes()], - &crate::sysvar::id(), + &[b"EpochRewardsPartitionData", &epoch.to_le_bytes()], + &crate::stake::program::id(), ); address } From 57bbd3363c62e616538fa622417670b0e083d80d Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Fri, 2 Feb 2024 10:25:37 -0700 Subject: [PATCH 127/401] Revert "Populate partitioned-rewards PDA during calculation (#34624)" This reverts commit 4385ed11b1a955642944a6b11353c09b1fba9187. 
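Context for the epoch_rewards_hasher.rs hunk below: stake rewards are credited over several blocks, so each stake address is hashed to a partition index and its reward lands in that bucket. A self-contained sketch of that bucketing shape, with std's DefaultHasher plus a modulo reduction standing in for the SipHash-1-3 EpochRewardsHasher and its partition mapping, and a plain u64 seed standing in for the parent blockhash:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn hash_into_partitions(
        addresses: Vec<String>,
        seed: u64,
        num_partitions: usize,
    ) -> Vec<Vec<String>> {
        assert!(num_partitions > 0);
        let mut partitions: Vec<Vec<String>> = vec![vec![]; num_partitions];
        for address in addresses {
            // Re-seed per address, analogous to cloning the hasher so its
            // seeded state is reused for each hash_address_to_partition call.
            let mut hasher = DefaultHasher::new();
            seed.hash(&mut hasher);
            address.hash(&mut hasher);
            let index = (hasher.finish() as usize) % num_partitions;
            partitions[index].push(address);
        }
        partitions
    }

    fn main() {
        let addresses: Vec<String> = (0..8).map(|i| format!("stake_{i}")).collect();
        for (index, bucket) in hash_into_partitions(addresses, 42, 3).iter().enumerate() {
            println!("partition {index}: {bucket:?}");
        }
    }

Seeding from the parent blockhash is what makes the assignment deterministic across validators while still spreading accounts evenly across partitions.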
--- runtime/src/bank.rs | 52 +------------------ runtime/src/epoch_rewards_hasher.rs | 6 +-- .../src/epoch_rewards_partition_data.rs | 34 ------------ sdk/program/src/lib.rs | 1 - sdk/src/lib.rs | 4 +- 5 files changed, 6 insertions(+), 91 deletions(-) delete mode 100644 sdk/program/src/epoch_rewards_partition_data.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6301b306f22bc2..9c97632f5b3306 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -131,10 +131,6 @@ use { UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5, UPDATED_HASHES_PER_TICK6, }, epoch_info::EpochInfo, - epoch_rewards_partition_data::{ - get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, HasherKind, - PartitionData, - }, epoch_schedule::EpochSchedule, feature, feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, @@ -864,7 +860,6 @@ struct PartitionedRewardsCalculation { foundation_rate: f64, prev_epoch_duration_in_years: f64, capitalization: u64, - parent_blockhash: Hash, } /// result of calculating the stake rewards at beginning of new epoch @@ -882,8 +877,6 @@ struct CalculateRewardsAndDistributeVoteRewardsResult { distributed_rewards: u64, /// stake rewards that still need to be distributed, grouped by partition stake_rewards_by_partition: Vec, - /// blockhash of parent, used to create EpochRewardsHasher - parent_blockhash: Hash, } pub(crate) type StakeRewards = Vec; @@ -1591,7 +1584,6 @@ impl Bank { total_rewards, distributed_rewards, stake_rewards_by_partition, - parent_blockhash, } = self.calculate_rewards_and_distribute_vote_rewards( parent_epoch, reward_calc_tracer, @@ -1599,11 +1591,9 @@ impl Bank { rewards_metrics, ); - let num_partitions = stake_rewards_by_partition.len(); - let slot = self.slot(); let credit_start = self.block_height() + self.get_reward_calculation_num_blocks(); - let credit_end_exclusive = credit_start + num_partitions as u64; + let credit_end_exclusive = credit_start + stake_rewards_by_partition.len() as u64; self.set_epoch_reward_status_active(stake_rewards_by_partition); @@ -1611,8 +1601,6 @@ impl Bank { // (total_rewards, distributed_rewards, credit_end_exclusive), total capital will increase by (total_rewards - distributed_rewards) self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_end_exclusive); - self.create_epoch_rewards_partition_data_account(num_partitions, parent_blockhash); - datapoint_info!( "epoch-rewards-status-update", ("start_slot", slot, i64), @@ -2387,7 +2375,6 @@ impl Bank { foundation_rate, prev_epoch_duration_in_years, capitalization, - parent_blockhash, } } @@ -2408,7 +2395,6 @@ impl Bank { foundation_rate, prev_epoch_duration_in_years, capitalization, - parent_blockhash, } = self.calculate_rewards_for_partitioning( prev_epoch, reward_calc_tracer, @@ -2478,7 +2464,6 @@ impl Bank { total_rewards: validator_rewards_paid + total_stake_rewards_lamports, distributed_rewards: validator_rewards_paid, stake_rewards_by_partition, - parent_blockhash, } } @@ -3592,41 +3577,6 @@ impl Bank { self.log_epoch_rewards_sysvar("update"); } - /// Create the persistent PDA containing the epoch-rewards data - fn create_epoch_rewards_partition_data_account( - &self, - num_partitions: usize, - parent_blockhash: Hash, - ) { - let epoch_rewards_partition_data = EpochRewardsPartitionDataVersion::V0(PartitionData { - num_partitions, - parent_blockhash, - hasher_kind: HasherKind::Sip13, - }); - let address = get_epoch_rewards_partition_data_address(self.epoch()); - - let data_len = 
bincode::serialized_size(&epoch_rewards_partition_data).unwrap() as usize; - let account_balance = self.get_minimum_balance_for_rent_exemption(data_len); - let new_account = AccountSharedData::new_data( - account_balance, - &epoch_rewards_partition_data, - &solana_sdk::stake::program::id(), - ) - .unwrap(); - - info!( - "create epoch rewards partition data account {} {address} \ - {epoch_rewards_partition_data:?}", - self.slot - ); - - // Skip storing data account when we are testing partitioned - // rewards but feature is not yet active - if !self.force_partition_rewards_in_first_block_of_epoch() { - self.store_account_and_update_capitalization(&address, &new_account); - } - } - fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { diff --git a/runtime/src/epoch_rewards_hasher.rs b/runtime/src/epoch_rewards_hasher.rs index 120bb0c2c98500..b594b05a5cfe3b 100644 --- a/runtime/src/epoch_rewards_hasher.rs +++ b/runtime/src/epoch_rewards_hasher.rs @@ -9,7 +9,7 @@ pub(crate) fn hash_rewards_into_partitions( num_partitions: usize, ) -> Vec { let hasher = EpochRewardsHasher::new(num_partitions, parent_blockhash); - let mut rewards = vec![vec![]; num_partitions]; + let mut result = vec![vec![]; num_partitions]; for reward in stake_rewards { // clone here so the hasher's state is re-used on each call to `hash_address_to_partition`. @@ -18,9 +18,9 @@ pub(crate) fn hash_rewards_into_partitions( let partition_index = hasher .clone() .hash_address_to_partition(&reward.stake_pubkey); - rewards[partition_index].push(reward); + result[partition_index].push(reward); } - rewards + result } #[cfg(test)] diff --git a/sdk/program/src/epoch_rewards_partition_data.rs b/sdk/program/src/epoch_rewards_partition_data.rs deleted file mode 100644 index 62e75ca5112d5a..00000000000000 --- a/sdk/program/src/epoch_rewards_partition_data.rs +++ /dev/null @@ -1,34 +0,0 @@ -use { - crate::{hash::Hash, pubkey::Pubkey}, - serde_derive::{Deserialize, Serialize}, -}; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub enum EpochRewardsPartitionDataVersion { - V0(PartitionData), -} - -#[repr(u8)] -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub enum HasherKind { - Sip13, -} - -/// Data about a rewards partitions for an epoch -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct PartitionData { - /// Number of partitions used for epoch rewards this epoch - pub num_partitions: usize, - /// Blockhash of the last block of the previous epoch, used to create EpochRewardsHasher - pub parent_blockhash: Hash, - /// Kind of hasher used to generate partitions - pub hasher_kind: HasherKind, -} - -pub fn get_epoch_rewards_partition_data_address(epoch: u64) -> Pubkey { - let (address, _bump_seed) = Pubkey::find_program_address( - &[b"EpochRewardsPartitionData", &epoch.to_le_bytes()], - &crate::stake::program::id(), - ); - address -} diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 016585d403ae2a..54de9d817205a8 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -491,7 +491,6 @@ pub mod ed25519_program; pub mod entrypoint; pub mod entrypoint_deprecated; pub mod epoch_rewards; -pub mod epoch_rewards_partition_data; pub mod epoch_schedule; pub mod feature; pub mod fee_calculator; diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index e64d6ddc57d0fd..4bf36a5d271929 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -48,8 +48,8 @@ 
pub use solana_program::{ account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, borsh, borsh0_10, borsh0_9, borsh1, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default, custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, - declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_rewards_partition_data, - epoch_schedule, fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, + declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_schedule, + fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, loader_v4, loader_v4_instruction, message, msg, native_token, nonce, poseidon, program, program_error, program_memory, program_option, program_pack, rent, sanitize, sdk_ids, secp256k1_program, secp256k1_recover, serde_varint, From adc9da5f120dcf857b4d26bcbe5e018b1adc4d7c Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Tue, 6 Feb 2024 03:31:14 -0500 Subject: [PATCH 128/401] cli: add transaction retry pool max size (#35080) * cli: add transaction retry pool max size * Update send-transaction-service/src/send_transaction_service.rs Co-authored-by: Tyera * rename transaction_retry_pool_max_size --------- Co-authored-by: Tyera --- .../src/send_transaction_service.rs | 9 ++++++--- validator/src/cli.rs | 13 +++++++++++++ validator/src/main.rs | 5 +++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index dd09ccc69698f5..4e4ba9956f760f 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -28,8 +28,8 @@ use { }, }; -/// Maximum size of the transaction queue -const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day +/// Maximum size of the transaction retry pool +const MAX_TRANSACTION_RETRY_POOL_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day /// Default retry interval const DEFAULT_RETRY_RATE_MS: u64 = 2_000; @@ -114,6 +114,8 @@ pub struct Config { pub batch_size: usize, /// How frequently batches are sent pub batch_send_rate_ms: u64, + /// When the retry pool exceeds this max size, new transactions are dropped after their first broadcast attempt + pub retry_pool_max_size: usize, } impl Default for Config { @@ -125,6 +127,7 @@ impl Default for Config { service_max_retries: DEFAULT_SERVICE_MAX_RETRIES, batch_size: DEFAULT_TRANSACTION_BATCH_SIZE, batch_send_rate_ms: DEFAULT_BATCH_SEND_RATE_MS, + retry_pool_max_size: MAX_TRANSACTION_RETRY_POOL_SIZE, } } } @@ -477,7 +480,7 @@ impl SendTransactionService { let retry_len = retry_transactions.len(); let entry = retry_transactions.entry(signature); if let Entry::Vacant(_) = entry { - if retry_len >= MAX_TRANSACTION_QUEUE_SIZE { + if retry_len >= config.retry_pool_max_size { datapoint_warn!("send_transaction_service-queue-overflow"); break; } else { diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 08a7288843d803..958cdc4ec947de 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1045,6 +1045,15 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .default_value(&default_args.rpc_send_transaction_batch_size) .help("The size of transactions to be sent in batch."), ) + .arg( + 
Arg::with_name("rpc_send_transaction_retry_pool_max_size") + .long("rpc-send-transaction-retry-pool-max-size") + .value_name("NUMBER") + .takes_value(true) + .validator(is_parsable::) + .default_value(&default_args.rpc_send_transaction_retry_pool_max_size) + .help("The maximum size of transactions retry pool.") + ) .arg( Arg::with_name("rpc_scan_and_fix_roots") .long("rpc-scan-and-fix-roots") @@ -1957,6 +1966,7 @@ pub struct DefaultArgs { pub rpc_send_transaction_leader_forward_count: String, pub rpc_send_transaction_service_max_retries: String, pub rpc_send_transaction_batch_size: String, + pub rpc_send_transaction_retry_pool_max_size: String, pub rpc_threads: String, pub rpc_niceness_adjustment: String, pub rpc_bigtable_timeout: String, @@ -2042,6 +2052,9 @@ impl DefaultArgs { rpc_send_transaction_batch_size: default_send_transaction_service_config .batch_size .to_string(), + rpc_send_transaction_retry_pool_max_size: default_send_transaction_service_config + .retry_pool_max_size + .to_string(), rpc_threads: num_cpus::get().to_string(), rpc_niceness_adjustment: "0".to_string(), rpc_bigtable_timeout: "30".to_string(), diff --git a/validator/src/main.rs b/validator/src/main.rs index de1efeddbd111a..0cff3139a9d22c 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1412,6 +1412,11 @@ pub fn main() { ), batch_send_rate_ms: rpc_send_batch_send_rate_ms, batch_size: rpc_send_batch_size, + retry_pool_max_size: value_t_or_exit!( + matches, + "rpc_send_transaction_retry_pool_max_size", + usize + ), }, no_poh_speed_test: matches.is_present("no_poh_speed_test"), no_os_memory_stats_reporting: matches.is_present("no_os_memory_stats_reporting"), From 99760e519ad7ba55f7b67d155e63e7f697c223b0 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 6 Feb 2024 08:38:24 -0800 Subject: [PATCH 129/401] SVM: move `inner_instructions_list_from_instruction_trace` to SVM (#35099) SVM: move inner_instructions_list_from_instruction_trace to SVM --- accounts-db/src/transaction_results.rs | 109 +------------------- runtime/src/svm/transaction_processor.rs | 126 ++++++++++++++++++++++- 2 files changed, 123 insertions(+), 112 deletions(-) diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index bc0a330f507399..79efa66425aabd 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -11,9 +11,8 @@ use { }, solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ - instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, transaction::{self, TransactionError}, - transaction_context::{TransactionContext, TransactionReturnData}, + transaction_context::TransactionReturnData, }, }; @@ -110,109 +109,3 @@ impl DurableNonceFee { } } } - -/// Extract the InnerInstructionsList from a TransactionContext -pub fn inner_instructions_list_from_instruction_trace( - transaction_context: &TransactionContext, -) -> InnerInstructionsList { - debug_assert!(transaction_context - .get_instruction_context_at_index_in_trace(0) - .map(|instruction_context| instruction_context.get_stack_height() - == TRANSACTION_LEVEL_STACK_HEIGHT) - .unwrap_or(true)); - let mut outer_instructions = Vec::new(); - for index_in_trace in 0..transaction_context.get_instruction_trace_length() { - if let Ok(instruction_context) = - transaction_context.get_instruction_context_at_index_in_trace(index_in_trace) - { - let stack_height = instruction_context.get_stack_height(); - if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT { - 
outer_instructions.push(Vec::new()); - } else if let Some(inner_instructions) = outer_instructions.last_mut() { - let stack_height = u8::try_from(stack_height).unwrap_or(u8::MAX); - let instruction = CompiledInstruction::new_from_raw_parts( - instruction_context - .get_index_of_program_account_in_transaction( - instruction_context - .get_number_of_program_accounts() - .saturating_sub(1), - ) - .unwrap_or_default() as u8, - instruction_context.get_instruction_data().to_vec(), - (0..instruction_context.get_number_of_instruction_accounts()) - .map(|instruction_account_index| { - instruction_context - .get_index_of_instruction_account_in_transaction( - instruction_account_index, - ) - .unwrap_or_default() as u8 - }) - .collect(), - ); - inner_instructions.push(InnerInstruction { - instruction, - stack_height, - }); - } else { - debug_assert!(false); - } - } else { - debug_assert!(false); - } - } - outer_instructions -} - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{sysvar::rent::Rent, transaction_context::TransactionContext}, - }; - - #[test] - fn test_inner_instructions_list_from_instruction_trace() { - let instruction_trace = [1, 2, 1, 1, 2, 3, 2]; - let mut transaction_context = - TransactionContext::new(vec![], Rent::default(), 3, instruction_trace.len()); - for (index_in_trace, stack_height) in instruction_trace.into_iter().enumerate() { - while stack_height <= transaction_context.get_instruction_context_stack_height() { - transaction_context.pop().unwrap(); - } - if stack_height > transaction_context.get_instruction_context_stack_height() { - transaction_context - .get_next_instruction_context() - .unwrap() - .configure(&[], &[], &[index_in_trace as u8]); - transaction_context.push().unwrap(); - } - } - let inner_instructions = - inner_instructions_list_from_instruction_trace(&transaction_context); - - assert_eq!( - inner_instructions, - vec![ - vec![InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![1], vec![]), - stack_height: 2, - }], - vec![], - vec![ - InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![4], vec![]), - stack_height: 2, - }, - InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![5], vec![]), - stack_height: 3, - }, - InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![6], vec![]), - stack_height: 2, - }, - ] - ] - ); - } -} diff --git a/runtime/src/svm/transaction_processor.rs b/runtime/src/svm/transaction_processor.rs index 56a3d9a774f58c..e72caca4d3c85a 100644 --- a/runtime/src/svm/transaction_processor.rs +++ b/runtime/src/svm/transaction_processor.rs @@ -11,8 +11,8 @@ use { rent_collector::RentCollector, transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ - inner_instructions_list_from_instruction_trace, DurableNonceFee, - TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, + DurableNonceFee, TransactionCheckResult, TransactionExecutionDetails, + TransactionExecutionResult, }, }, solana_measure::measure::Measure, @@ -37,7 +37,8 @@ use { feature_set::FeatureSet, fee::FeeStructure, hash::Hash, - instruction::InstructionError, + inner_instruction::{InnerInstruction, InnerInstructionsList}, + instruction::{CompiledInstruction, InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT}, loader_v4::{self, LoaderV4State, LoaderV4Status}, message::SanitizedMessage, native_loader, @@ -569,7 +570,7 @@ impl TransactionBatchProcessor { }); let inner_instructions = if enable_cpi_recording { - 
Some(inner_instructions_list_from_instruction_trace( + Some(Self::inner_instructions_list_from_instruction_trace( &transaction_context, )) } else { @@ -848,4 +849,121 @@ impl TransactionBatchProcessor { } ProgramAccountLoadResult::InvalidAccountData(environments.program_runtime_v1.clone()) } + + /// Extract the InnerInstructionsList from a TransactionContext + fn inner_instructions_list_from_instruction_trace( + transaction_context: &TransactionContext, + ) -> InnerInstructionsList { + debug_assert!(transaction_context + .get_instruction_context_at_index_in_trace(0) + .map(|instruction_context| instruction_context.get_stack_height() + == TRANSACTION_LEVEL_STACK_HEIGHT) + .unwrap_or(true)); + let mut outer_instructions = Vec::new(); + for index_in_trace in 0..transaction_context.get_instruction_trace_length() { + if let Ok(instruction_context) = + transaction_context.get_instruction_context_at_index_in_trace(index_in_trace) + { + let stack_height = instruction_context.get_stack_height(); + if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT { + outer_instructions.push(Vec::new()); + } else if let Some(inner_instructions) = outer_instructions.last_mut() { + let stack_height = u8::try_from(stack_height).unwrap_or(u8::MAX); + let instruction = CompiledInstruction::new_from_raw_parts( + instruction_context + .get_index_of_program_account_in_transaction( + instruction_context + .get_number_of_program_accounts() + .saturating_sub(1), + ) + .unwrap_or_default() as u8, + instruction_context.get_instruction_data().to_vec(), + (0..instruction_context.get_number_of_instruction_accounts()) + .map(|instruction_account_index| { + instruction_context + .get_index_of_instruction_account_in_transaction( + instruction_account_index, + ) + .unwrap_or_default() as u8 + }) + .collect(), + ); + inner_instructions.push(InnerInstruction { + instruction, + stack_height, + }); + } else { + debug_assert!(false); + } + } else { + debug_assert!(false); + } + } + outer_instructions + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_program_runtime::loaded_programs::BlockRelation, + solana_sdk::{sysvar::rent::Rent, transaction_context::TransactionContext}, + }; + + struct TestForkGraph {} + + impl ForkGraph for TestForkGraph { + fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation { + BlockRelation::Unknown + } + } + + #[test] + fn test_inner_instructions_list_from_instruction_trace() { + let instruction_trace = [1, 2, 1, 1, 2, 3, 2]; + let mut transaction_context = + TransactionContext::new(vec![], Rent::default(), 3, instruction_trace.len()); + for (index_in_trace, stack_height) in instruction_trace.into_iter().enumerate() { + while stack_height <= transaction_context.get_instruction_context_stack_height() { + transaction_context.pop().unwrap(); + } + if stack_height > transaction_context.get_instruction_context_stack_height() { + transaction_context + .get_next_instruction_context() + .unwrap() + .configure(&[], &[], &[index_in_trace as u8]); + transaction_context.push().unwrap(); + } + } + let inner_instructions = + TransactionBatchProcessor::::inner_instructions_list_from_instruction_trace( + &transaction_context, + ); + + assert_eq!( + inner_instructions, + vec![ + vec![InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![1], vec![]), + stack_height: 2, + }], + vec![], + vec![ + InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![4], vec![]), + stack_height: 2, + }, + InnerInstruction { + instruction: 
CompiledInstruction::new_from_raw_parts(0, vec![5], vec![]), + stack_height: 3, + }, + InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![6], vec![]), + stack_height: 2, + }, + ] + ] + ); + } } From 3e24b410fbb0bb15de44075983619b04e99e9942 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 6 Feb 2024 11:09:59 -0800 Subject: [PATCH 130/401] replay: votes made before restart are eligible for refresh (#34737) * replay: votes made before restart are eligible for refresh * pr feedback: rename to mark * pr feedback: limit scope to non voting validators --- core/src/consensus.rs | 25 ++++++-- core/src/consensus/tower1_14_11.rs | 6 +- core/src/consensus/tower1_7_14.rs | 5 +- core/src/consensus/tower_storage.rs | 4 +- core/src/replay_stage.rs | 96 +++++++++++++++++++---------- 5 files changed, 89 insertions(+), 47 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 54312baf30d9ec..4f129b18282218 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -210,6 +210,17 @@ impl TowerVersions { } } +#[derive(PartialEq, Eq, Debug, Default, Clone, Copy, AbiExample)] +pub(crate) enum BlockhashStatus { + /// No vote since restart + #[default] + Uninitialized, + /// Non voting validator + NonVoting, + /// Successfully generated vote tx with blockhash + Blockhash(Hash), +} + #[frozen_abi(digest = "iZi6s9BvytU3HbRsibrAD71jwMLvrqHdCjVk6qKcVvd")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower { @@ -223,8 +234,8 @@ pub struct Tower { // blockhash of the voted block itself, depending if the vote slot was refreshed. // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. 
- // For non voting validators this is None - last_vote_tx_blockhash: Option, + // For non voting validators this is NonVoting + last_vote_tx_blockhash: BlockhashStatus, last_timestamp: BlockTimestamp, #[serde(skip)] // Restored last voted slot which cannot be found in SlotHistory at replayed root @@ -247,7 +258,7 @@ impl Default for Tower { vote_state: VoteState::default(), last_vote: VoteTransaction::from(VoteStateUpdate::default()), last_timestamp: BlockTimestamp::default(), - last_vote_tx_blockhash: None, + last_vote_tx_blockhash: BlockhashStatus::default(), stray_restored_slot: Option::default(), last_switch_threshold_check: Option::default(), }; @@ -486,7 +497,7 @@ impl Tower { self.vote_state.tower() } - pub fn last_vote_tx_blockhash(&self) -> Option { + pub(crate) fn last_vote_tx_blockhash(&self) -> BlockhashStatus { self.last_vote_tx_blockhash } @@ -530,7 +541,11 @@ impl Tower { } pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) { - self.last_vote_tx_blockhash = Some(new_vote_tx_blockhash); + self.last_vote_tx_blockhash = BlockhashStatus::Blockhash(new_vote_tx_blockhash); + } + + pub(crate) fn mark_last_vote_tx_blockhash_non_voting(&mut self) { + self.last_vote_tx_blockhash = BlockhashStatus::NonVoting; } pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option { diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs index befce935034eff..22c396e0975e59 100644 --- a/core/src/consensus/tower1_14_11.rs +++ b/core/src/consensus/tower1_14_11.rs @@ -1,6 +1,6 @@ use { - crate::consensus::SwitchForkDecision, - solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + crate::consensus::{BlockhashStatus, SwitchForkDecision}, + solana_sdk::{clock::Slot, pubkey::Pubkey}, solana_vote_program::vote_state::{ vote_state_1_14_11::VoteState1_14_11, BlockTimestamp, VoteTransaction, }, @@ -19,7 +19,7 @@ pub struct Tower1_14_11 { // blockhash of the voted block itself, depending if the vote slot was refreshed. // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. - pub(crate) last_vote_tx_blockhash: Option, + pub(crate) last_vote_tx_blockhash: BlockhashStatus, pub(crate) last_timestamp: BlockTimestamp, #[serde(skip)] // Restored last voted slot which cannot be found in SlotHistory at replayed root diff --git a/core/src/consensus/tower1_7_14.rs b/core/src/consensus/tower1_7_14.rs index 62e5870b4efbb6..725b781924d8c9 100644 --- a/core/src/consensus/tower1_7_14.rs +++ b/core/src/consensus/tower1_7_14.rs @@ -1,8 +1,7 @@ use { - crate::consensus::{Result, SwitchForkDecision, TowerError}, + crate::consensus::{BlockhashStatus, Result, SwitchForkDecision, TowerError}, solana_sdk::{ clock::Slot, - hash::Hash, pubkey::Pubkey, signature::{Signature, Signer}, }, @@ -22,7 +21,7 @@ pub struct Tower1_7_14 { // blockhash of the voted block itself, depending if the vote slot was refreshed. // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. 
- pub(crate) last_vote_tx_blockhash: Option, + pub(crate) last_vote_tx_blockhash: BlockhashStatus, pub(crate) last_timestamp: BlockTimestamp, #[serde(skip)] // Restored last voted slot which cannot be found in SlotHistory at replayed root diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index 61f3c07245105c..1e81f28f47ce46 100644 --- a/core/src/consensus/tower_storage.rs +++ b/core/src/consensus/tower_storage.rs @@ -372,7 +372,7 @@ pub mod test { super::*, crate::consensus::{ tower1_7_14::{SavedTower1_7_14, Tower1_7_14}, - Tower, + BlockhashStatus, Tower, }, solana_sdk::{hash::Hash, signature::Keypair}, solana_vote_program::vote_state::{ @@ -403,7 +403,7 @@ pub mod test { vote_state: VoteState1_14_11::from(vote_state), last_vote: vote.clone(), last_timestamp: BlockTimestamp::default(), - last_vote_tx_blockhash: None, + last_vote_tx_blockhash: BlockhashStatus::Uninitialized, stray_restored_slot: Some(2), last_switch_threshold_check: Option::default(), }; diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 0ba4cdc505e154..27c30b9e52eb7b 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -15,8 +15,8 @@ use { latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, progress_map::{ForkProgress, ProgressMap, PropagatedStats, ReplaySlotStats}, tower_storage::{SavedTower, SavedTowerVersions, TowerStorage}, - ComputedBankState, Stake, SwitchForkDecision, ThresholdDecision, Tower, VotedStakes, - SWITCH_FORK_THRESHOLD, + BlockhashStatus, ComputedBankState, Stake, SwitchForkDecision, ThresholdDecision, + Tower, VotedStakes, SWITCH_FORK_THRESHOLD, }, cost_update_service::CostUpdate, repair::{ @@ -137,6 +137,20 @@ enum ConfirmationType { DuplicateConfirmed, } +enum GenerateVoteTxResult { + // non voting validator, not eligible for refresh + NonVoting, + // failed generation, eligible for refresh + Failed, + Tx(Transaction), +} + +impl GenerateVoteTxResult { + fn is_non_voting(&self) -> bool { + matches!(self, Self::NonVoting) + } +} + #[derive(PartialEq, Eq, Debug)] struct ConfirmedSlot { slot: Slot, @@ -2321,18 +2335,18 @@ impl ReplayStage { vote_signatures: &mut Vec, has_new_vote_been_rooted: bool, wait_to_vote_slot: Option, - ) -> Option { + ) -> GenerateVoteTxResult { if !bank.is_startup_verification_complete() { info!("startup verification incomplete, so unable to vote"); - return None; + return GenerateVoteTxResult::Failed; } if authorized_voter_keypairs.is_empty() { - return None; + return GenerateVoteTxResult::NonVoting; } if let Some(slot) = wait_to_vote_slot { if bank.slot() < slot { - return None; + return GenerateVoteTxResult::Failed; } } let vote_account = match bank.get_vote_account(vote_account_pubkey) { @@ -2341,7 +2355,7 @@ impl ReplayStage { "Vote account {} does not exist. Unable to vote", vote_account_pubkey, ); - return None; + return GenerateVoteTxResult::Failed; } Some(vote_account) => vote_account, }; @@ -2352,7 +2366,7 @@ impl ReplayStage { "Vote account {} is unreadable. 
Unable to vote", vote_account_pubkey, ); - return None; + return GenerateVoteTxResult::Failed; } Ok(vote_state) => vote_state, }; @@ -2363,7 +2377,7 @@ impl ReplayStage { vote_state.node_pubkey, node_keypair.pubkey() ); - return None; + return GenerateVoteTxResult::Failed; } let Some(authorized_voter_pubkey) = vote_state.get_authorized_voter(bank.epoch()) else { @@ -2372,7 +2386,7 @@ impl ReplayStage { vote_account_pubkey, bank.epoch() ); - return None; + return GenerateVoteTxResult::Failed; }; let authorized_voter_keypair = match authorized_voter_keypairs @@ -2382,7 +2396,7 @@ impl ReplayStage { None => { warn!("The authorized keypair {} for vote account {} is not available. Unable to vote", authorized_voter_pubkey, vote_account_pubkey); - return None; + return GenerateVoteTxResult::NonVoting; } Some(authorized_voter_keypair) => authorized_voter_keypair, }; @@ -2418,7 +2432,7 @@ impl ReplayStage { vote_signatures.clear(); } - Some(vote_tx) + GenerateVoteTxResult::Tx(vote_tx) } #[allow(clippy::too_many_arguments)] @@ -2457,13 +2471,23 @@ impl ReplayStage { // If we are a non voting validator or have an incorrect setup preventing us from // generating vote txs, no need to refresh - let Some(last_vote_tx_blockhash) = tower.last_vote_tx_blockhash() else { - return; + let last_vote_tx_blockhash = match tower.last_vote_tx_blockhash() { + // Since the checks in vote generation are deterministic, if we were non voting + // on the original vote, the refresh will also fail. No reason to refresh. + BlockhashStatus::NonVoting => return, + // In this case we have not voted since restart, it is unclear if we are non voting. + // Attempt to refresh. + BlockhashStatus::Uninitialized => None, + // Refresh if the blockhash is expired + BlockhashStatus::Blockhash(blockhash) => Some(blockhash), }; if my_latest_landed_vote >= last_voted_slot - || heaviest_bank_on_same_fork - .is_hash_valid_for_age(&last_vote_tx_blockhash, MAX_PROCESSING_AGE) + || { + last_vote_tx_blockhash.is_some() + && heaviest_bank_on_same_fork + .is_hash_valid_for_age(&last_vote_tx_blockhash.unwrap(), MAX_PROCESSING_AGE) + } || { // In order to avoid voting on multiple forks all past MAX_PROCESSING_AGE that don't // include the last voted blockhash @@ -2480,7 +2504,7 @@ impl ReplayStage { // Update timestamp for refreshed vote tower.refresh_last_vote_timestamp(heaviest_bank_on_same_fork.slot()); - let vote_tx = Self::generate_vote_tx( + let vote_tx_result = Self::generate_vote_tx( identity_keypair, heaviest_bank_on_same_fork, vote_account_pubkey, @@ -2492,7 +2516,7 @@ impl ReplayStage { wait_to_vote_slot, ); - if let Some(vote_tx) = vote_tx { + if let GenerateVoteTxResult::Tx(vote_tx) = vote_tx_result { let recent_blockhash = vote_tx.message.recent_blockhash; tower.refresh_last_vote_tx_blockhash(recent_blockhash); @@ -2511,6 +2535,8 @@ impl ReplayStage { }) .unwrap_or_else(|err| warn!("Error: {:?}", err)); last_vote_refresh_time.last_refresh_time = Instant::now(); + } else if vote_tx_result.is_non_voting() { + tower.mark_last_vote_tx_blockhash_non_voting(); } } @@ -2529,7 +2555,7 @@ impl ReplayStage { wait_to_vote_slot: Option, ) { let mut generate_time = Measure::start("generate_vote"); - let vote_tx = Self::generate_vote_tx( + let vote_tx_result = Self::generate_vote_tx( identity_keypair, bank, vote_account_pubkey, @@ -2542,7 +2568,7 @@ impl ReplayStage { ); generate_time.stop(); replay_timing.generate_vote_us += generate_time.as_us(); - if let Some(vote_tx) = vote_tx { + if let GenerateVoteTxResult::Tx(vote_tx) = vote_tx_result { 
tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash); let saved_tower = SavedTower::new(tower, identity_keypair).unwrap_or_else(|err| { @@ -2558,6 +2584,8 @@ impl ReplayStage { saved_tower: SavedTowerVersions::from(saved_tower), }) .unwrap_or_else(|err| warn!("Error: {:?}", err)); + } else if vote_tx_result.is_non_voting() { + tower.mark_last_vote_tx_blockhash_non_voting(); } } @@ -7480,8 +7508,8 @@ pub(crate) mod tests { let vote_tx = &votes[0]; assert_eq!(vote_tx.message.recent_blockhash, bank0.last_blockhash()); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank0.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank0.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 0); bank1.process_transaction(vote_tx).unwrap(); @@ -7517,8 +7545,8 @@ pub(crate) mod tests { assert!(votes.is_empty()); // Tower's latest vote tx blockhash hasn't changed either assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank0.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank0.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 0); } @@ -7553,8 +7581,8 @@ pub(crate) mod tests { let vote_tx = &votes[0]; assert_eq!(vote_tx.message.recent_blockhash, bank1.last_blockhash()); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank1.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank1.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); @@ -7578,8 +7606,8 @@ pub(crate) mod tests { let votes = cluster_info.get_votes(&mut cursor); assert!(votes.is_empty()); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank1.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank1.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); @@ -7641,8 +7669,8 @@ pub(crate) mod tests { expired_bank.last_blockhash() ); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - expired_bank.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(expired_bank.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); @@ -7700,8 +7728,8 @@ pub(crate) mod tests { expired_bank.last_blockhash() ); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - expired_bank.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(expired_bank.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); } @@ -7758,8 +7786,8 @@ pub(crate) mod tests { parent_bank.last_blockhash() ); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - parent_bank.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(parent_bank.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), parent_bank.slot()); let bank = new_bank_from_parent_with_bank_forks( From 10defb161f755097bc95501b08742d202eb567da Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 6 Feb 2024 11:15:48 -0800 Subject: [PATCH 131/401] SVM: Move TransactionErrorMetrics to SVM folder (#35112) --- accounts-db/src/lib.rs | 1 - core/src/banking_stage/consume_worker.rs | 3 +-- core/src/banking_stage/consumer.rs | 9 ++++----- core/src/banking_stage/leader_slot_metrics.rs | 2 +- .../transaction_scheduler/scheduler_controller.rs | 5 +++-- .../src/banking_stage/unprocessed_transaction_storage.rs | 3 +-- runtime/src/bank.rs | 2 +- runtime/src/bank/tests.rs | 2 +- runtime/src/svm/account_loader.rs | 2 +- runtime/src/svm/mod.rs | 1 + .../src => runtime/src/svm}/transaction_error_metrics.rs 
| 0 runtime/src/svm/transaction_processor.rs | 2 +- 12 files changed, 15 insertions(+), 17 deletions(-) rename {accounts-db/src => runtime/src/svm}/transaction_error_metrics.rs (100%) diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index ce9908f105ceeb..deec44048f78ef 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -43,7 +43,6 @@ pub mod sorted_storages; pub mod stake_rewards; pub mod storable_accounts; pub mod tiered_storage; -pub mod transaction_error_metrics; pub mod transaction_results; pub mod utils; mod verify_accounts_hash_in_background; diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 7744a399e565bc..930a9e8a47d605 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -5,9 +5,8 @@ use { scheduler_messages::{ConsumeWork, FinishedConsumeWork}, }, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, - solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, solana_poh::leader_bank_notifier::LeaderBankNotifier, - solana_runtime::bank::Bank, + solana_runtime::{bank::Bank, svm::transaction_error_metrics::TransactionErrorMetrics}, solana_sdk::timing::AtomicInterval, std::{ sync::{ diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index e7016b0bbb127a..526acb57700964 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -9,10 +9,7 @@ use { BankingStageStats, }, itertools::Itertools, - solana_accounts_db::{ - transaction_error_metrics::TransactionErrorMetrics, - transaction_results::TransactionCheckResult, - }, + solana_accounts_db::transaction_results::TransactionCheckResult, solana_ledger::token_balances::collect_token_balances, solana_measure::{measure::Measure, measure_us}, solana_poh::poh_recorder::{ @@ -25,7 +22,9 @@ use { solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, compute_budget_details::GetComputeBudgetDetails, - svm::account_loader::validate_fee_payer, + svm::{ + account_loader::validate_fee_payer, transaction_error_metrics::TransactionErrorMetrics, + }, transaction_batch::TransactionBatch, }, solana_sdk::{ diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index 1e250c5b69a17b..33b6e7a55a631f 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -5,8 +5,8 @@ use { InsertPacketBatchSummary, UnprocessedTransactionStorage, }, }, - solana_accounts_db::transaction_error_metrics::*, solana_poh::poh_recorder::BankStart, + solana_runtime::svm::transaction_error_metrics::*, solana_sdk::{clock::Slot, saturating_add_assign}, std::time::Instant, }; diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 394479042557f6..df58ccdaa3a6fa 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -18,10 +18,11 @@ use { }, crossbeam_channel::RecvTimeoutError, itertools::MinMaxResult, - solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, solana_cost_model::cost_model::CostModel, solana_measure::measure_us, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::{ + bank::Bank, bank_forks::BankForks, svm::transaction_error_metrics::TransactionErrorMetrics, + }, solana_sdk::{ 
clock::MAX_PROCESSING_AGE, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 3ed633d982e9b1..65a5b09f9667ac 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -17,9 +17,8 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, - solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, solana_measure::{measure, measure_us}, - solana_runtime::bank::Bank, + solana_runtime::{bank::Bank, svm::transaction_error_metrics::TransactionErrorMetrics}, solana_sdk::{ clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, saturating_add_assign, transaction::SanitizedTransaction, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9c97632f5b3306..d035eb773fc3bf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -61,6 +61,7 @@ use { svm::{ account_overrides::AccountOverrides, runtime_config::RuntimeConfig, + transaction_error_metrics::TransactionErrorMetrics, transaction_processor::{ TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, }, @@ -100,7 +101,6 @@ use { sorted_storages::SortedStorages, stake_rewards::{RewardInfo, StakeReward}, storable_accounts::StorableAccounts, - transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index fca911f93d6e76..75d4a244639304 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -21,6 +21,7 @@ use { svm::{ account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, + transaction_error_metrics::TransactionErrorMetrics, }, }, assert_matches::assert_matches, @@ -41,7 +42,6 @@ use { nonce_info::NonceFull, partitioned_rewards::TestPartitionedEpochRewards, rent_collector::RENT_EXEMPT_RENT_EPOCH, - transaction_error_metrics::TransactionErrorMetrics, transaction_results::DurableNonceFee, }, solana_logger, diff --git a/runtime/src/svm/account_loader.rs b/runtime/src/svm/account_loader.rs index 4b093c75ead2fa..19f59992410bf3 100644 --- a/runtime/src/svm/account_loader.rs +++ b/runtime/src/svm/account_loader.rs @@ -1,6 +1,7 @@ use { crate::svm::{ account_overrides::AccountOverrides, account_rent_state::RentState, + transaction_error_metrics::TransactionErrorMetrics, transaction_processor::TransactionProcessingCallback, }, itertools::Itertools, @@ -10,7 +11,6 @@ use { nonce_info::NonceFull, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, - transaction_error_metrics::TransactionErrorMetrics, transaction_results::TransactionCheckResult, }, solana_program_runtime::{ diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs index bec00cfd132e37..441989781dcf9c 100644 --- a/runtime/src/svm/mod.rs +++ b/runtime/src/svm/mod.rs @@ -3,4 +3,5 @@ pub mod account_overrides; pub mod account_rent_state; pub mod runtime_config; pub mod transaction_account_state_info; +pub mod transaction_error_metrics; pub mod transaction_processor; diff --git a/accounts-db/src/transaction_error_metrics.rs b/runtime/src/svm/transaction_error_metrics.rs similarity index 100% rename from accounts-db/src/transaction_error_metrics.rs rename to runtime/src/svm/transaction_error_metrics.rs 
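The rename above is content-identical (100% similarity); what downstream code
sees from this patch is the new import path. A representative before/after,
mirroring the hunks earlier in this diff:

// Before this patch (the metrics type lived in accounts-db):
use solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics;
// After this patch (it lives under the runtime's svm module):
use solana_runtime::svm::transaction_error_metrics::TransactionErrorMetrics;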
diff --git a/runtime/src/svm/transaction_processor.rs b/runtime/src/svm/transaction_processor.rs index e72caca4d3c85a..9768fb40e803da 100644 --- a/runtime/src/svm/transaction_processor.rs +++ b/runtime/src/svm/transaction_processor.rs @@ -2,6 +2,7 @@ use { crate::svm::{ account_loader::load_accounts, account_overrides::AccountOverrides, runtime_config::RuntimeConfig, transaction_account_state_info::TransactionAccountStateInfo, + transaction_error_metrics::TransactionErrorMetrics, }, log::debug, percentage::Percentage, @@ -9,7 +10,6 @@ use { accounts::{LoadedTransaction, TransactionLoadResult}, accounts_file::MatchAccountOwnerError, rent_collector::RentCollector, - transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ DurableNonceFee, TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, From d52b1ac7954ddaea4404325d902979178aec0b82 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 6 Feb 2024 13:45:51 -0600 Subject: [PATCH 132/401] update rent epoch max test (#35092) * update rent epoch max test * invert diff --- runtime/src/bank/tests.rs | 101 +++++++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 34 deletions(-) diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 75d4a244639304..b26060b61c4da2 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1656,7 +1656,6 @@ impl Bank { #[test_case(false; "disable rent fees collection")] fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); for feature_id in FeatureSet::default().inactive { if feature_id != solana_sdk::feature_set::set_exempt_rent_epoch_max::id() @@ -6481,49 +6480,83 @@ fn test_fuzz_instructions() { info!("results: {:?}", results); } -#[test] -fn test_bank_hash_consistency() { +#[test_case(true; "set_rent_epoch_max")] +#[test_case(false; "disable_set_rent_epoch_max")] +fn test_bank_hash_consistency(set_rent_epoch_max: bool) { solana_logger::setup(); - let mut genesis_config = GenesisConfig::new( - &[( - Pubkey::from([42; 32]), - AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()), - )], - &[], - ); + let account = AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()); + if !set_rent_epoch_max { + assert_eq!(account.rent_epoch(), 0); + } + let mut genesis_config = GenesisConfig::new(&[(Pubkey::from([42; 32]), account)], &[]); genesis_config.creation_time = 0; genesis_config.cluster_type = ClusterType::MainnetBeta; genesis_config.rent.burn_percent = 100; + if set_rent_epoch_max { + activate_feature( + &mut genesis_config, + solana_sdk::feature_set::set_exempt_rent_epoch_max::id(), + ); + } + let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); // Check a few slots, cross an epoch boundary assert_eq!(bank.get_slots_in_epoch(0), 32); loop { goto_end_of_slot(bank.clone()); - if bank.slot == 0 { - assert_eq!( - bank.hash().to_string(), - "trdzvRDTAXAqo1i2GX4JfK9ReixV1NYNG7DRaVq43Do", - ); - } - if bank.slot == 32 { - assert_eq!( - bank.hash().to_string(), - "2rdj8QEnDnBSyMv81rCmncss4UERACyXXB3pEvkep8eS", - ); - } - if bank.slot == 64 { - assert_eq!( - bank.hash().to_string(), - "7g3ofXVQB3reFt9ki8zLA8S4w1GdmEWsWuWrwkPN3SSv" - ); - } - if bank.slot == 128 { - assert_eq!( - bank.hash().to_string(), - "4uX1AZFbqwjwWBACWbAW3V8rjbWH4N3ZRTbNysSLAzj2" - ); - break; + if !set_rent_epoch_max { + if bank.slot == 0 { + assert_eq!( + bank.hash().to_string(), + 
"trdzvRDTAXAqo1i2GX4JfK9ReixV1NYNG7DRaVq43Do", + ); + } + if bank.slot == 32 { + assert_eq!( + bank.hash().to_string(), + "2rdj8QEnDnBSyMv81rCmncss4UERACyXXB3pEvkep8eS", + ); + } + if bank.slot == 64 { + assert_eq!( + bank.hash().to_string(), + "7g3ofXVQB3reFt9ki8zLA8S4w1GdmEWsWuWrwkPN3SSv" + ); + } + if bank.slot == 128 { + assert_eq!( + bank.hash().to_string(), + "4uX1AZFbqwjwWBACWbAW3V8rjbWH4N3ZRTbNysSLAzj2" + ); + break; + } + } else { + if bank.slot == 0 { + assert_eq!( + bank.hash().to_string(), + "3VqF5pMe3XABLqzUaYw2UVXfAokMJgMkrdfvneFQkHbB", + ); + } + if bank.slot == 32 { + assert_eq!( + bank.hash().to_string(), + "B8GsaBJ9aJrQcbhTTfgNVuV4uwb4v8nKT86HUjDLvNgk", + ); + } + if bank.slot == 64 { + assert_eq!( + bank.hash().to_string(), + "Eg9VRE3zUwarxWyHXhitX9wLkg1vfNeiVqVQxSif6qEC" + ); + } + if bank.slot == 128 { + assert_eq!( + bank.hash().to_string(), + "5rLmK24zyxdeb8aLn5LDEnHLDQmxRd5gWZDVJGgsFX1c" + ); + break; + } } bank = Arc::new(new_from_parent(bank)); } From 8d0ca9db7897145b61c2c20f2ff0b2a415be4e48 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 6 Feb 2024 20:02:38 +0000 Subject: [PATCH 133/401] chains Merkle shreds in broadcast fake shreds (#35061) The commit migrates turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs to use chained Merkle shreds variant. --- ledger/src/shred.rs | 2 +- turbine/src/broadcast_stage.rs | 8 ++++ .../broadcast_fake_shreds_run.rs | 21 +++++++++-- .../src/broadcast_stage/broadcast_utils.rs | 37 +++++++++++++++++-- 4 files changed, 61 insertions(+), 7 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 54c27e237da980..e3c896f71befa8 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -692,7 +692,7 @@ pub mod layout { Ok(flags & ShredFlags::SHRED_TICK_REFERENCE_MASK.bits()) } - pub(crate) fn get_merkle_root(shred: &[u8]) -> Option { + pub fn get_merkle_root(shred: &[u8]) -> Option { match get_shred_variant(shred).ok()? 
{ ShredVariant::LegacyCode | ShredVariant::LegacyData => None, ShredVariant::MerkleCode(proof_size, chained) => { diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index d799c0d9b62005..7538754539ff43 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -66,6 +66,8 @@ pub enum Error { Blockstore(#[from] solana_ledger::blockstore::BlockstoreError), #[error(transparent)] ClusterInfo(#[from] solana_gossip::cluster_info::ClusterInfoError), + #[error("Invalid Merkle root, slot: {slot}, index: {index}")] + InvalidMerkleRoot { slot: Slot, index: u64 }, #[error(transparent)] Io(#[from] std::io::Error), #[error(transparent)] @@ -76,8 +78,14 @@ pub enum Error { Send, #[error(transparent)] Serialize(#[from] std::boxed::Box), + #[error("Shred not found, slot: {slot}, index: {index}")] + ShredNotFound { slot: Slot, index: u64 }, #[error(transparent)] TransportError(#[from] solana_sdk::transport::TransportError), + #[error("Unknown last index, slot: {0}")] + UnknownLastIndex(Slot), + #[error("Unknown slot meta, slot: {0}")] + UnknownSlotMeta(Slot), } type Result = std::result::Result; diff --git a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs index 20d141dee01a73..b82ca324b61820 100644 --- a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -1,7 +1,7 @@ use { super::*, solana_entry::entry::Entry, - solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, + solana_ledger::shred::{self, ProcessShredsStats, ReedSolomonCache, Shredder}, solana_sdk::{hash::Hash, signature::Keypair}, }; @@ -45,6 +45,21 @@ impl BroadcastRun for BroadcastFakeShredsRun { .expect("Database error") .map(|meta| meta.consumed) .unwrap_or(0) as u32; + let chained_merkle_root = match next_shred_index.checked_sub(1) { + None => broadcast_utils::get_chained_merkle_root_from_parent( + bank.slot(), + bank.parent_slot(), + blockstore, + ) + .unwrap(), + Some(index) => { + let shred = blockstore + .get_data_shred(bank.slot(), u64::from(index)) + .unwrap() + .unwrap(); + shred::layout::get_merkle_root(&shred).unwrap() + } + }; let num_entries = receive_results.entries.len(); @@ -60,7 +75,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height(), - None, // chained_merkle_root + Some(chained_merkle_root), next_shred_index, self.next_code_index, true, // merkle_variant @@ -82,7 +97,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &fake_entries, last_tick_height == bank.max_tick_height(), - None, // chained_merkle_root + Some(chained_merkle_root), next_shred_index, self.next_code_index, true, // merkle_variant diff --git a/turbine/src/broadcast_stage/broadcast_utils.rs b/turbine/src/broadcast_stage/broadcast_utils.rs index fe99077091f516..3468a86dfd64ff 100644 --- a/turbine/src/broadcast_stage/broadcast_utils.rs +++ b/turbine/src/broadcast_stage/broadcast_utils.rs @@ -1,12 +1,15 @@ use { - super::Result, + super::{Error, Result}, bincode::serialized_size, crossbeam_channel::Receiver, solana_entry::entry::Entry, - solana_ledger::shred::ShredData, + solana_ledger::{ + blockstore::Blockstore, + shred::{self, ShredData}, + }, solana_poh::poh_recorder::WorkingBankEntry, solana_runtime::bank::Bank, - solana_sdk::clock::Slot, + solana_sdk::{clock::Slot, hash::Hash}, std::{ sync::Arc, time::{Duration, Instant}, @@ -96,6 +99,34 @@ pub(super) fn 
recv_slot_entries(receiver: &Receiver) -> Result }) } +// Returns the Merkle root of the last erasure batch of the parent slot. +pub(super) fn get_chained_merkle_root_from_parent( + slot: Slot, + parent: Slot, + blockstore: &Blockstore, +) -> Result { + if slot == parent { + debug_assert_eq!(slot, 0u64); + return Ok(Hash::default()); + } + debug_assert!(parent < slot, "parent: {parent} >= slot: {slot}"); + let index = blockstore + .meta(parent)? + .ok_or_else(|| Error::UnknownSlotMeta(parent))? + .last_index + .ok_or_else(|| Error::UnknownLastIndex(parent))?; + let shred = blockstore + .get_data_shred(parent, index)? + .ok_or(Error::ShredNotFound { + slot: parent, + index, + })?; + shred::layout::get_merkle_root(&shred).ok_or(Error::InvalidMerkleRoot { + slot: parent, + index, + }) +} + #[cfg(test)] mod tests { use { From 8fb389fe8de3b66ea6cae974a629737319e88020 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 6 Feb 2024 20:04:47 +0000 Subject: [PATCH 134/401] chains Merkle shreds in fail-entry-verification broadcast (#35060) The commit migrates turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs to use chained Merkle shreds variant. --- .../fail_entry_verification_broadcast_run.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index b98972690c78a8..e9ed6a1a6eeed4 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -15,6 +15,7 @@ pub(super) struct FailEntryVerificationBroadcastRun { shred_version: u16, good_shreds: Vec, current_slot: Slot, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, cluster_nodes_cache: Arc>, @@ -31,6 +32,7 @@ impl FailEntryVerificationBroadcastRun { shred_version, good_shreds: vec![], current_slot: 0, + chained_merkle_root: Hash::default(), next_shred_index: 0, next_code_index: 0, cluster_nodes_cache, @@ -54,6 +56,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { let last_tick_height = receive_results.last_tick_height; if bank.slot() != self.current_slot { + self.chained_merkle_root = broadcast_utils::get_chained_merkle_root_from_parent( + bank.slot(), + bank.parent_slot(), + blockstore, + ) + .unwrap(); self.next_shred_index = 0; self.next_code_index = 0; self.current_slot = bank.slot(); @@ -92,7 +100,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), - None, // chained_merkle_root + Some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -100,6 +108,9 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { &mut ProcessShredsStats::default(), ); + if let Some(shred) = data_shreds.iter().max_by_key(|shred| shred.index()) { + self.chained_merkle_root = shred.merkle_root().unwrap(); + } self.next_shred_index += data_shreds.len() as u32; if let Some(index) = coding_shreds.iter().map(Shred::index).max() { self.next_code_index = index + 1; @@ -109,7 +120,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[good_last_entry], true, - None, // chained_merkle_root + Some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -123,13 +134,15 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { 
keypair, &[bad_last_entry], false, - None, // chained_merkle_root + Some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); + assert_eq!(good_last_data_shred.len(), 1); + self.chained_merkle_root = good_last_data_shred.last().unwrap().merkle_root().unwrap(); self.next_shred_index += 1; (good_last_data_shred, bad_last_data_shred) }); From 19454bf56efb41cebd354a942d3b63235541292a Mon Sep 17 00:00:00 2001 From: edgerunnergit Date: Wed, 7 Feb 2024 02:02:06 +0530 Subject: [PATCH 135/401] Show error instead of panic on passphrase mismatch during solana-keygen (#35072) * Show error instead of panic on passphrase mismatch * Format code --- keygen/src/keygen.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 21f430daff9781..32d57a4c2f1333 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -488,7 +488,8 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { let derivation_path = acquire_derivation_path(matches)?; let mnemonic = Mnemonic::new(mnemonic_type, language); - let (passphrase, passphrase_message) = acquire_passphrase_and_message(matches).unwrap(); + let (passphrase, passphrase_message) = acquire_passphrase_and_message(matches) + .map_err(|err| format!("Unable to acquire passphrase: {err}"))?; let seed = Seed::new(&mnemonic, &passphrase); let keypair = match derivation_path { From fddfc8431eebe980e023e547d39074292b0f561a Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 6 Feb 2024 16:38:05 -0600 Subject: [PATCH 136/401] Reorder fields in shred_insert_is_full datapoint (#35117) Put the slot as the first field to make grep'ing for datapoints for a specific slot in logs easier. 
This does not affect the datapoint submission / presentation in the metrics
database
---
 ledger/src/slot_stats.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ledger/src/slot_stats.rs b/ledger/src/slot_stats.rs
index 9033c3d1600f89..14e363960645a7 100644
--- a/ledger/src/slot_stats.rs
+++ b/ledger/src/slot_stats.rs
@@ -131,8 +131,8 @@ impl SlotsStats {
             .unwrap_or(-1);
         datapoint_info!(
             "shred_insert_is_full",
-            ("total_time_ms", total_time_ms, i64),
             ("slot", slot, i64),
+            ("total_time_ms", total_time_ms, i64),
             ("last_index", last_index, i64),
             ("num_repaired", num_repaired, i64),
             ("num_recovered", num_recovered, i64),

From 46b95866309f63c2ce6376e14e048c59c186d31a Mon Sep 17 00:00:00 2001
From: Pankaj Garg
Date: Tue, 6 Feb 2024 16:06:32 -0800
Subject: [PATCH 137/401] SVM: Move SVM code to its own crate folder (#35119)

---
 Cargo.lock | 28 ++++++++++++
 Cargo.toml | 2 +
 core/Cargo.toml | 1 +
 core/src/banking_stage/consume_worker.rs | 3 +-
 core/src/banking_stage/consumer.rs | 6 +--
 core/src/banking_stage/leader_slot_metrics.rs | 2 +-
 .../scheduler_controller.rs | 5 +--
 .../unprocessed_transaction_storage.rs | 3 +-
 core/src/validator.rs | 2 +-
 core/tests/epoch_accounts_hash.rs | 2 +-
 core/tests/snapshots.rs | 2 +-
 ledger-tool/Cargo.toml | 1 +
 ledger-tool/src/args.rs | 2 +-
 ledger/Cargo.toml | 1 +
 ledger/src/blockstore_processor.rs | 2 +-
 program-test/Cargo.toml | 1 +
 program-test/src/lib.rs | 2 +-
 programs/sbf/Cargo.lock | 26 +++++++++++
 runtime/Cargo.toml | 1 +
 runtime/src/bank.rs | 16 +++----
 runtime/src/bank/fee_distribution.rs | 2 +-
 runtime/src/bank/serde_snapshot.rs | 2 +-
 runtime/src/bank/tests.rs | 9 ++--
 runtime/src/lib.rs | 1 -
 runtime/src/serde_snapshot.rs | 2 +-
 runtime/src/snapshot_bank_utils.rs | 2 +-
 svm/Cargo.toml | 43 +++++++++++++++++++
 svm/build.rs | 1 +
 .../src/svm => svm/src}/account_loader.rs | 4 +-
 .../src/svm => svm/src}/account_overrides.rs | 0
 .../src/svm => svm/src}/account_rent_state.rs | 2 +-
 runtime/src/svm/mod.rs => svm/src/lib.rs | 9 ++++
 .../src/svm => svm/src}/runtime_config.rs | 0
 .../src}/transaction_account_state_info.rs | 6 +--
 .../src}/transaction_error_metrics.rs | 0
 .../svm => svm/src}/transaction_processor.rs | 2 +-
 test-validator/Cargo.toml | 1 +
 test-validator/src/lib.rs | 3 +-
 validator/Cargo.toml | 1 +
 validator/src/main.rs | 2 +-
 40 files changed, 158 insertions(+), 42 deletions(-)
 create mode 100644 svm/Cargo.toml
 create mode 120000 svm/build.rs
 rename {runtime/src/svm => svm/src}/account_loader.rs (99%)
 rename {runtime/src/svm => svm/src}/account_overrides.rs (100%)
 rename {runtime/src/svm => svm/src}/account_rent_state.rs (99%)
 rename runtime/src/svm/mod.rs => svm/src/lib.rs (50%)
 rename {runtime/src/svm => svm/src}/runtime_config.rs (100%)
 rename {runtime/src/svm => svm/src}/transaction_account_state_info.rs (94%)
 rename {runtime/src/svm => svm/src}/transaction_error_metrics.rs (100%)
 rename {runtime/src/svm => svm/src}/transaction_processor.rs (99%)

diff --git a/Cargo.lock b/Cargo.lock
index 69ad4b3411c709..1409037320f776 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5924,6 +5924,7 @@ dependencies = [
 "solana-send-transaction-service",
 "solana-stake-program",
 "solana-streamer",
+ "solana-svm",
 "solana-tpu-client",
 "solana-transaction-status",
 "solana-turbine",
@@ -6328,6 +6329,7 @@ dependencies = [
 "solana-stake-program",
 "solana-storage-bigtable",
 "solana-storage-proto",
+ "solana-svm",
 "solana-transaction-status",
 "solana-vote",
 "solana-vote-program",
@@ -6386,6 +6388,7 @@ dependencies = [
 "solana-stake-program",
"solana-storage-bigtable", "solana-streamer", + "solana-svm", "solana-transaction-status", "solana-unified-scheduler-pool", "solana-version", @@ -6743,6 +6746,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-stake-program", + "solana-svm", "solana-vote-program", "solana_rbpf", "test-case", @@ -7046,6 +7050,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-stake-program", + "solana-svm", "solana-system-program", "solana-version", "solana-vote", @@ -7291,6 +7296,27 @@ dependencies = [ "x509-parser", ] +[[package]] +name = "solana-svm" +version = "1.18.0" +dependencies = [ + "itertools", + "log", + "percentage", + "rustc_version 0.4.0", + "solana-accounts-db", + "solana-bpf-loader-program", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-loader-v4-program", + "solana-logger", + "solana-measure", + "solana-metrics", + "solana-program-runtime", + "solana-sdk", + "solana-system-program", +] + [[package]] name = "solana-system-program" version = "1.18.0" @@ -7331,6 +7357,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", + "solana-svm", "solana-tpu-client", "tokio", ] @@ -7589,6 +7616,7 @@ dependencies = [ "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", + "solana-svm", "solana-test-validator", "solana-tpu-client", "solana-version", diff --git a/Cargo.toml b/Cargo.toml index 52e342089375f9..64a64e58ddb31e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,6 +100,7 @@ members = [ "storage-bigtable/build-proto", "storage-proto", "streamer", + "svm", "test-validator", "thin-client", "tokens", @@ -373,6 +374,7 @@ solana-stake-program = { path = "programs/stake", version = "=1.18.0" } solana-storage-bigtable = { path = "storage-bigtable", version = "=1.18.0" } solana-storage-proto = { path = "storage-proto", version = "=1.18.0" } solana-streamer = { path = "streamer", version = "=1.18.0" } +solana-svm = { path = "svm", version = "=1.18.0" } solana-system-program = { path = "programs/system", version = "=1.18.0" } solana-test-validator = { path = "test-validator", version = "=1.18.0" } solana-thin-client = { path = "thin-client", version = "=1.18.0" } diff --git a/core/Cargo.toml b/core/Cargo.toml index fa6c7cd2052aea..e2a936cdabc4c1 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -65,6 +65,7 @@ solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-turbine = { workspace = true } diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 930a9e8a47d605..92fb07ddfab18c 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -6,8 +6,9 @@ use { }, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, solana_poh::leader_bank_notifier::LeaderBankNotifier, - solana_runtime::{bank::Bank, svm::transaction_error_metrics::TransactionErrorMetrics}, + solana_runtime::bank::Bank, solana_sdk::timing::AtomicInterval, + solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 526acb57700964..01432baa447793 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -22,9 +22,6 @@ use { 
solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, compute_budget_details::GetComputeBudgetDetails, - svm::{ - account_loader::validate_fee_payer, transaction_error_metrics::TransactionErrorMetrics, - }, transaction_batch::TransactionBatch, }, solana_sdk::{ @@ -35,6 +32,9 @@ use { timing::timestamp, transaction::{self, AddressLoader, SanitizedTransaction, TransactionError}, }, + solana_svm::{ + account_loader::validate_fee_payer, transaction_error_metrics::TransactionErrorMetrics, + }, std::{ sync::{atomic::Ordering, Arc}, time::Instant, diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index 33b6e7a55a631f..88ea6b5ee340cf 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -6,8 +6,8 @@ use { }, }, solana_poh::poh_recorder::BankStart, - solana_runtime::svm::transaction_error_metrics::*, solana_sdk::{clock::Slot, saturating_add_assign}, + solana_svm::transaction_error_metrics::*, std::time::Instant, }; diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index df58ccdaa3a6fa..aaf2753597b8ea 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -20,13 +20,12 @@ use { itertools::MinMaxResult, solana_cost_model::cost_model::CostModel, solana_measure::measure_us, - solana_runtime::{ - bank::Bank, bank_forks::BankForks, svm::transaction_error_metrics::TransactionErrorMetrics, - }, + solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ clock::MAX_PROCESSING_AGE, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, }, + solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ sync::{Arc, RwLock}, time::Duration, diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 65a5b09f9667ac..adfb11f0b28fc2 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -18,11 +18,12 @@ use { itertools::Itertools, min_max_heap::MinMaxHeap, solana_measure::{measure, measure_us}, - solana_runtime::{bank::Bank, svm::transaction_error_metrics::TransactionErrorMetrics}, + solana_runtime::bank::Bank, solana_sdk::{ clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, saturating_add_assign, transaction::SanitizedTransaction, }, + solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ collections::HashMap, sync::{atomic::Ordering, Arc}, diff --git a/core/src/validator.rs b/core/src/validator.rs index aed2731b5298dc..a90044881ee458 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -101,7 +101,6 @@ use { snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, - svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::Slot, @@ -116,6 +115,7 @@ use { }, solana_send_transaction_service::send_transaction_service, solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, + solana_svm::runtime_config::RuntimeConfig, solana_turbine::{self, broadcast_stage::BroadcastStageType}, solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::vote_state, diff --git a/core/tests/epoch_accounts_hash.rs 
b/core/tests/epoch_accounts_hash.rs index af9c93ba147241..b0dd111676af79 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -27,7 +27,6 @@ use { snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_utils, - svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::Slot, @@ -39,6 +38,7 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, std::{ mem::ManuallyDrop, sync::{ diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 7aee26a742b79a..2694f7294a7217 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -35,7 +35,6 @@ use { SnapshotVersion::{self, V1_2_0}, }, status_cache::MAX_CACHE_ENTRIES, - svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::Slot, @@ -50,6 +49,7 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, fs, diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index ddc1ca9b564e94..6da42940a4ba7f 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -44,6 +44,7 @@ solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-transaction-status = { workspace = true } solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 7ea5bed687f1a5..80ea6f9715bf35 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -12,8 +12,8 @@ use { blockstore_processor::ProcessOptions, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, - solana_runtime::svm::runtime_config::RuntimeConfig, solana_sdk::clock::Slot, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, path::{Path, PathBuf}, diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 87ba0c39235a12..7665428981ed82 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -54,6 +54,7 @@ solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-storage-proto = { workspace = true } +solana-svm = { workspace = true } solana-transaction-status = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index c30a3742f25662..ee66f697eb705a 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -41,7 +41,6 @@ use { commitment::VOTE_THRESHOLD_SIZE, installed_scheduler_pool::BankWithScheduler, prioritization_fee_cache::PrioritizationFeeCache, - svm::runtime_config::RuntimeConfig, transaction_batch::TransactionBatch, }, solana_sdk::{ @@ -58,6 +57,7 @@ use { VersionedTransaction, }, }, + solana_svm::runtime_config::RuntimeConfig, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::{vote_account::VoteAccountsHashMap, vote_sender_types::ReplayVoteSender}, std::{ diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index c4ab4507b27eae..b8b4fcdb332a09 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -26,6 +26,7 @@ solana-logger = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-svm = { 
workspace = true } solana-vote-program = { workspace = true } solana_rbpf = { workspace = true } test-case = { workspace = true } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 09c55fe793efba..20b9f5806e29c3 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -25,7 +25,6 @@ use { bank_forks::BankForks, commitment::BlockCommitmentCache, genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo}, - svm::runtime_config::RuntimeConfig, }, solana_sdk::{ account::{create_account_shared_data_for_test, Account, AccountSharedData}, @@ -46,6 +45,7 @@ use { stable_layout::stable_instruction::StableInstruction, sysvar::{Sysvar, SysvarId}, }, + solana_svm::runtime_config::RuntimeConfig, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{ cell::RefCell, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 016c30266ba038..fa0abfb607c3ea 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4972,6 +4972,7 @@ dependencies = [ "solana-sdk", "solana-send-transaction-service", "solana-streamer", + "solana-svm", "solana-tpu-client", "solana-transaction-status", "solana-turbine", @@ -5243,6 +5244,7 @@ dependencies = [ "solana-stake-program", "solana-storage-bigtable", "solana-storage-proto", + "solana-svm", "solana-transaction-status", "solana-vote", "solana-vote-program", @@ -5476,6 +5478,7 @@ dependencies = [ "solana-program-runtime", "solana-runtime", "solana-sdk", + "solana-svm", "solana-vote-program", "solana_rbpf", "test-case", @@ -5724,6 +5727,7 @@ dependencies = [ "solana-rayon-threadlimit", "solana-sdk", "solana-stake-program", + "solana-svm", "solana-system-program", "solana-version", "solana-vote", @@ -6334,6 +6338,26 @@ dependencies = [ "x509-parser", ] +[[package]] +name = "solana-svm" +version = "1.18.0" +dependencies = [ + "itertools", + "log", + "percentage", + "rustc_version", + "solana-accounts-db", + "solana-bpf-loader-program", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-loader-v4-program", + "solana-measure", + "solana-metrics", + "solana-program-runtime", + "solana-sdk", + "solana-system-program", +] + [[package]] name = "solana-system-program" version = "1.18.0" @@ -6372,6 +6396,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", + "solana-svm", "solana-tpu-client", "tokio", ] @@ -6557,6 +6582,7 @@ dependencies = [ "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", + "solana-svm", "solana-test-validator", "solana-tpu-client", "solana-version", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 355c858597895f..b14ffab2076ca3 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -64,6 +64,7 @@ solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } +solana-svm = { workspace = true } solana-system-program = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d035eb773fc3bf..eb040b3b79cade 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -58,14 +58,6 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, - svm::{ - account_overrides::AccountOverrides, - runtime_config::RuntimeConfig, - transaction_error_metrics::TransactionErrorMetrics, - transaction_processor::{ - TransactionBatchProcessor, 
TransactionLogMessages, TransactionProcessingCallback, - }, - }, transaction_batch::TransactionBatch, }, byteorder::{ByteOrder, LittleEndian}, @@ -168,6 +160,14 @@ use { solana_stake_program::stake_state::{ self, InflationPointCalculationEvent, PointValue, StakeStateV2, }, + solana_svm::{ + account_overrides::AccountOverrides, + runtime_config::RuntimeConfig, + transaction_error_metrics::TransactionErrorMetrics, + transaction_processor::{ + TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, + }, + }, solana_system_program::{get_system_account_kind, SystemAccountKind}, solana_vote::vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 85d68c07fd7448..fc6d16f3b5683d 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,6 +1,5 @@ use { super::Bank, - crate::svm::account_rent_state::RentState, log::{debug, warn}, solana_accounts_db::stake_rewards::RewardInfo, solana_sdk::{ @@ -9,6 +8,7 @@ use { reward_type::RewardType, system_program, }, + solana_svm::account_rent_state::RentState, solana_vote::vote_account::VoteAccountsHashMap, std::{result::Result, sync::atomic::Ordering::Relaxed}, thiserror::Error, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 7a3c1a2c62439a..ba2f24c553ceef 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -17,7 +17,6 @@ mod tests { StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, }, status_cache::StatusCache, - svm::runtime_config::RuntimeConfig, }, assert_matches::assert_matches, solana_accounts_db::{ @@ -39,6 +38,7 @@ mod tests { pubkey::Pubkey, signature::{Keypair, Signer}, }, + solana_svm::runtime_config::RuntimeConfig, std::{ io::{Cursor, Read, Write}, num::NonZeroUsize, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index b26060b61c4da2..61b10454dceda5 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -18,11 +18,6 @@ use { }, snapshot_bank_utils, snapshot_utils, status_cache::MAX_CACHE_ENTRIES, - svm::{ - account_loader::load_accounts, - transaction_account_state_info::TransactionAccountStateInfo, - transaction_error_metrics::TransactionErrorMetrics, - }, }, assert_matches::assert_matches, crossbeam_channel::{bounded, unbounded}, @@ -113,6 +108,10 @@ use { transaction_context::{TransactionAccount, TransactionContext}, }, solana_stake_program::stake_state::{self, StakeStateV2}, + solana_svm::{ + account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, + transaction_error_metrics::TransactionErrorMetrics, + }, solana_vote_program::{ vote_instruction, vote_state::{ diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ba6ca17d427931..fac4169301004d 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -36,7 +36,6 @@ pub mod stake_weighted_timestamp; pub mod stakes; pub mod static_ids; pub mod status_cache; -pub mod svm; pub mod transaction_batch; #[macro_use] diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index b9f8300e400789..e38ea904686b40 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -8,7 +8,6 @@ use { self, SnapshotError, StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, }, stakes::Stakes, - svm::runtime_config::RuntimeConfig, }, bincode::{self, config::Options, Error}, log::*, 
@@ -40,6 +39,7 @@ use { inflation::Inflation, pubkey::Pubkey, }, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::{HashMap, HashSet}, io::{self, BufReader, BufWriter, Read, Write}, diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index d932ab8408fdb4..dfeda8e59e0fe1 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -23,7 +23,6 @@ use { UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, }, status_cache, - svm::runtime_config::RuntimeConfig, }, bincode::{config::Options, serialize_into}, log::*, @@ -46,6 +45,7 @@ use { pubkey::Pubkey, slot_history::{Check, SlotHistory}, }, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, fs, diff --git a/svm/Cargo.toml b/svm/Cargo.toml new file mode 100644 index 00000000000000..4fdf7d9cb1a0b4 --- /dev/null +++ b/svm/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "solana-svm" +description = "Solana SVM" +documentation = "https://docs.rs/solana-svm" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +itertools = { workspace = true } +log = { workspace = true } +percentage = { workspace = true } +solana-accounts-db = { workspace = true } +solana-bpf-loader-program = { workspace = true } +solana-frozen-abi = { workspace = true } +solana-frozen-abi-macro = { workspace = true } +solana-loader-v4-program = { workspace = true } +solana-measure = { workspace = true } +solana-metrics = { workspace = true } +solana-program-runtime = { workspace = true } +solana-sdk = { workspace = true } +solana-system-program = { workspace = true } + +[lib] +crate-type = ["lib"] +name = "solana_svm" + +[dev-dependencies] +solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } +solana-logger = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +rustc_version = { workspace = true } + +[features] +dev-context-only-utils = [] diff --git a/svm/build.rs b/svm/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/svm/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/runtime/src/svm/account_loader.rs b/svm/src/account_loader.rs similarity index 99% rename from runtime/src/svm/account_loader.rs rename to svm/src/account_loader.rs index 19f59992410bf3..f945672169ca88 100644 --- a/runtime/src/svm/account_loader.rs +++ b/svm/src/account_loader.rs @@ -1,5 +1,5 @@ use { - crate::svm::{ + crate::{ account_overrides::AccountOverrides, account_rent_state::RentState, transaction_error_metrics::TransactionErrorMetrics, transaction_processor::TransactionProcessingCallback, @@ -38,7 +38,7 @@ use { std::{collections::HashMap, num::NonZeroUsize}, }; -pub(crate) fn load_accounts( +pub fn load_accounts( callbacks: &CB, txs: &[SanitizedTransaction], lock_results: &[TransactionCheckResult], diff --git a/runtime/src/svm/account_overrides.rs b/svm/src/account_overrides.rs similarity index 100% rename from runtime/src/svm/account_overrides.rs rename to svm/src/account_overrides.rs diff --git a/runtime/src/svm/account_rent_state.rs b/svm/src/account_rent_state.rs similarity index 99% rename from runtime/src/svm/account_rent_state.rs rename to svm/src/account_rent_state.rs index 
3fc71ac6a27686..38cda820f8ceb7 100644 --- a/runtime/src/svm/account_rent_state.rs +++ b/svm/src/account_rent_state.rs @@ -10,7 +10,7 @@ use { }; #[derive(Debug, PartialEq, Eq)] -pub(crate) enum RentState { +pub enum RentState { /// account.lamports == 0 Uninitialized, /// 0 < account.lamports < rent-exempt-minimum diff --git a/runtime/src/svm/mod.rs b/svm/src/lib.rs similarity index 50% rename from runtime/src/svm/mod.rs rename to svm/src/lib.rs index 441989781dcf9c..ff28128edca36d 100644 --- a/runtime/src/svm/mod.rs +++ b/svm/src/lib.rs @@ -1,3 +1,6 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![allow(clippy::arithmetic_side_effects)] + pub mod account_loader; pub mod account_overrides; pub mod account_rent_state; @@ -5,3 +8,9 @@ pub mod runtime_config; pub mod transaction_account_state_info; pub mod transaction_error_metrics; pub mod transaction_processor; + +#[macro_use] +extern crate solana_metrics; + +#[macro_use] +extern crate solana_frozen_abi_macro; diff --git a/runtime/src/svm/runtime_config.rs b/svm/src/runtime_config.rs similarity index 100% rename from runtime/src/svm/runtime_config.rs rename to svm/src/runtime_config.rs diff --git a/runtime/src/svm/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs similarity index 94% rename from runtime/src/svm/transaction_account_state_info.rs rename to svm/src/transaction_account_state_info.rs index 48a6a63994e341..02d6f0228de2a7 100644 --- a/runtime/src/svm/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -1,5 +1,5 @@ use { - crate::svm::account_rent_state::RentState, + crate::account_rent_state::RentState, solana_sdk::{ account::ReadableAccount, message::SanitizedMessage, @@ -10,12 +10,12 @@ use { }, }; -pub(crate) struct TransactionAccountStateInfo { +pub struct TransactionAccountStateInfo { rent_state: Option, // None: readonly account } impl TransactionAccountStateInfo { - pub(crate) fn new( + pub fn new( rent: &Rent, transaction_context: &TransactionContext, message: &SanitizedMessage, diff --git a/runtime/src/svm/transaction_error_metrics.rs b/svm/src/transaction_error_metrics.rs similarity index 100% rename from runtime/src/svm/transaction_error_metrics.rs rename to svm/src/transaction_error_metrics.rs diff --git a/runtime/src/svm/transaction_processor.rs b/svm/src/transaction_processor.rs similarity index 99% rename from runtime/src/svm/transaction_processor.rs rename to svm/src/transaction_processor.rs index 9768fb40e803da..f16891a85fb8b2 100644 --- a/runtime/src/svm/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -1,5 +1,5 @@ use { - crate::svm::{ + crate::{ account_loader::load_accounts, account_overrides::AccountOverrides, runtime_config::RuntimeConfig, transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 60f299d01e58a0..2bc8deb5fc200e 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -32,6 +32,7 @@ solana-rpc-client = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-tpu-client = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 270e8d9d816ffe..c658b53305bf74 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -34,7 +34,7 @@ 
use { solana_rpc_client::{nonblocking, rpc_client::RpcClient}, solana_runtime::{ bank_forks::BankForks, genesis_utils::create_genesis_config_with_leader_ex, - snapshot_config::SnapshotConfig, svm::runtime_config::RuntimeConfig, + snapshot_config::SnapshotConfig, }, solana_sdk::{ account::{Account, AccountSharedData}, @@ -54,6 +54,7 @@ use { signature::{read_keypair_file, write_keypair_file, Keypair, Signer}, }, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::{ DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC, }, diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 6c7f691c27b5fa..4028221cd7ce68 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -58,6 +58,7 @@ solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-test-validator = { workspace = true } solana-tpu-client = { workspace = true } solana-version = { workspace = true } diff --git a/validator/src/main.rs b/validator/src/main.rs index 0cff3139a9d22c..56b17e5d29c32e 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -48,7 +48,6 @@ use { snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::{SnapshotConfig, SnapshotUsage}, snapshot_utils::{self, ArchiveFormat, SnapshotVersion}, - svm::runtime_config::RuntimeConfig, }, solana_sdk::{ clock::{Slot, DEFAULT_S_PER_SLOT}, @@ -59,6 +58,7 @@ use { }, solana_send_transaction_service::send_transaction_service, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, solana_validator::{ admin_rpc_service, From 070a5a36b826c7c9252b705ebe4aef8ab843e89d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 7 Feb 2024 02:38:21 +0100 Subject: [PATCH 138/401] Cleanup - Removes `LoadedProgram::maybe_expiration_slot` (#35023) Removes LoadedProgram::maybe_expiration_slot. 
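For reference, a minimal sketch of what the cache lookup reduces to once the expiration slot is gone (Entry and MatchCriteria below are simplified, illustrative stand-ins, not the real LoadedProgram / LoadedProgramMatchCriteria definitions in program-runtime): entries are matched on criteria alone, with no current-slot expiry test.

    // Simplified sketch; the real types live in
    // program-runtime/src/loaded_programs.rs.
    type Slot = u64;

    // Hypothetical stand-in for LoadedProgram after this change: there is
    // no maybe_expiration_slot field left to consult.
    struct Entry {
        deployment_slot: Slot,
        is_tombstone: bool,
    }

    enum MatchCriteria {
        NoCriteria,
        Tombstone,
        DeployedOnOrAfterSlot(Slot),
    }

    // Previously, is_entry_usable(entry, current_slot, criteria) first
    // rejected entries whose expiration_slot <= current_slot and only then
    // delegated to the criteria check. With expiration removed, the criteria
    // check below is the whole test and no longer needs the current slot.
    fn matches_criteria(entry: &Entry, criteria: &MatchCriteria) -> bool {
        match criteria {
            MatchCriteria::NoCriteria => true,
            MatchCriteria::Tombstone => entry.is_tombstone,
            MatchCriteria::DeployedOnOrAfterSlot(slot) => entry.deployment_slot >= *slot,
        }
    }

    fn main() {
        let entry = Entry { deployment_slot: 11, is_tombstone: false };
        // The same entry now matches at any slot; only the criteria matter.
        assert!(matches_criteria(&entry, &MatchCriteria::NoCriteria));
        assert!(matches_criteria(&entry, &MatchCriteria::DeployedOnOrAfterSlot(10)));
        assert!(!matches_criteria(&entry, &MatchCriteria::DeployedOnOrAfterSlot(12)));
    }

The only remaining producer of an expiration slot was the DelayVisibility tombstone path removed in the diff below, which is why extract() and prune() can drop their slot-sensitive expiry filtering.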
--- program-runtime/src/loaded_programs.rs | 385 +++++-------------------- programs/bpf_loader/src/lib.rs | 4 - programs/loader-v4/src/lib.rs | 2 - svm/src/transaction_processor.rs | 2 - 4 files changed, 74 insertions(+), 319 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index e8e3b9ee325c2c..19f5f7486ea330 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -137,8 +137,6 @@ pub struct LoadedProgram { pub deployment_slot: Slot, /// Slot in which this entry will become active (can be in the future) pub effective_slot: Slot, - /// Optional expiration slot for this entry, after which it is treated as non-existent - pub maybe_expiration_slot: Option, /// How often this entry was used by a transaction pub tx_usage_counter: AtomicU64, /// How often this entry was used by an instruction @@ -282,7 +280,6 @@ impl LoadedProgram { program_runtime_environment: ProgramRuntimeEnvironment, deployment_slot: Slot, effective_slot: Slot, - maybe_expiration_slot: Option, elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, @@ -292,7 +289,6 @@ impl LoadedProgram { program_runtime_environment, deployment_slot, effective_slot, - maybe_expiration_slot, elf_bytes, account_size, metrics, @@ -313,7 +309,6 @@ impl LoadedProgram { program_runtime_environment: Arc>>, deployment_slot: Slot, effective_slot: Slot, - maybe_expiration_slot: Option, elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, @@ -323,7 +318,6 @@ impl LoadedProgram { program_runtime_environment, deployment_slot, effective_slot, - maybe_expiration_slot, elf_bytes, account_size, metrics, @@ -336,7 +330,6 @@ impl LoadedProgram { program_runtime_environment: Arc>>, deployment_slot: Slot, effective_slot: Slot, - maybe_expiration_slot: Option, elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, @@ -381,7 +374,6 @@ impl LoadedProgram { deployment_slot, account_size, effective_slot, - maybe_expiration_slot, tx_usage_counter: AtomicU64::new(0), program, ix_usage_counter: AtomicU64::new(0), @@ -395,7 +387,6 @@ impl LoadedProgram { account_size: self.account_size, deployment_slot: self.deployment_slot, effective_slot: self.effective_slot, - maybe_expiration_slot: self.maybe_expiration_slot, tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)), latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)), @@ -416,7 +407,6 @@ impl LoadedProgram { deployment_slot, account_size, effective_slot: deployment_slot, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(0), program: LoadedProgramType::Builtin(BuiltinProgram::new_builtin(function_registry)), ix_usage_counter: AtomicU64::new(0), @@ -425,14 +415,11 @@ impl LoadedProgram { } pub fn new_tombstone(slot: Slot, reason: LoadedProgramType) -> Self { - let maybe_expiration_slot = matches!(reason, LoadedProgramType::DelayVisibility) - .then_some(slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET)); let tombstone = Self { program: reason, account_size: 0, deployment_slot: slot, effective_slot: slot, - maybe_expiration_slot, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::new(0), @@ -835,12 +822,6 @@ impl LoadedPrograms { } }) .filter(|entry| { - // Remove expired - if let Some(expiration) = entry.maybe_expiration_slot { - if expiration <= new_root_slot 
{ - return false; - } - } // Remove outdated environment of previous feature set if recompilation_phase_ends && !Self::matches_environment(entry, &self.environments) @@ -885,25 +866,6 @@ impl LoadedPrograms { } } - fn is_entry_usable( - entry: &Arc, - current_slot: Slot, - match_criteria: &LoadedProgramMatchCriteria, - ) -> bool { - if entry - .maybe_expiration_slot - .map(|expiration_slot| expiration_slot <= current_slot) - .unwrap_or(false) - { - // Found an entry that's already expired. Any further entries in the list - // are older than the current one. So treat the program as missing in the - // cache and return early. - return false; - } - - Self::matches_loaded_program_criteria(entry, match_criteria) - } - /// Extracts a subset of the programs relevant to a transaction batch /// and returns which program accounts the accounts DB needs to load. pub fn extract( @@ -933,14 +895,9 @@ impl LoadedPrograms { entry, &loaded_programs_for_tx_batch.environments, ) { - if !Self::is_entry_usable( - entry, - loaded_programs_for_tx_batch.slot, - match_criteria, - ) { + if !Self::matches_loaded_program_criteria(entry, match_criteria) { break; } - if let LoadedProgramType::Unloaded(_environment) = &entry.program { break; } @@ -1246,27 +1203,12 @@ mod tests { deployment_slot: Slot, effective_slot: Slot, usage_counter: AtomicU64, - ) -> Arc { - new_test_loaded_program_with_usage_and_expiry( - deployment_slot, - effective_slot, - usage_counter, - None, - ) - } - - fn new_test_loaded_program_with_usage_and_expiry( - deployment_slot: Slot, - effective_slot: Slot, - usage_counter: AtomicU64, - expiry: Option, ) -> Arc { Arc::new(LoadedProgram { program: LoadedProgramType::TestLoaded(MOCK_ENVIRONMENT.get().unwrap().clone()), account_size: 0, deployment_slot, effective_slot, - maybe_expiration_slot: expiry, tx_usage_counter: usage_counter, ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::new(deployment_slot), @@ -1279,7 +1221,6 @@ mod tests { account_size: 0, deployment_slot, effective_slot, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), @@ -1308,7 +1249,6 @@ mod tests { account_size: 0, deployment_slot: slot, effective_slot: slot.saturating_add(1), - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), @@ -1905,7 +1845,6 @@ mod tests { account_size: 0, deployment_slot: 20, effective_slot: 20, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), @@ -2192,58 +2131,6 @@ mod tests { assert!(match_missing(&missing, &program3, false)); - // The following is a special case, where there's an expiration slot - let test_program = Arc::new(LoadedProgram { - program: LoadedProgramType::DelayVisibility, - account_size: 0, - deployment_slot: 19, - effective_slot: 19, - maybe_expiration_slot: Some(21), - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), - latest_access_slot: AtomicU64::default(), - }); - assert!(!cache.replenish(program4, test_program).0); - - // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, 
(LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted, true); - - assert!(match_slot(&extracted, &program1, 0, 19)); - assert!(match_slot(&extracted, &program2, 11, 19)); - // Program4 deployed at slot 19 should not be expired yet - assert!(match_slot(&extracted, &program4, 19, 19)); - - assert!(match_missing(&missing, &program3, false)); - - // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 - // This would cause program4 deployed at slot 19 to be expired. - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted, true); - - assert!(match_slot(&extracted, &program1, 0, 21)); - assert!(match_slot(&extracted, &program2, 11, 21)); - - assert!(match_missing(&missing, &program3, false)); - assert!(match_missing(&missing, &program4, false)); - - // Remove the expired entry to let the rest of the test continue - if let Some(second_level) = cache.entries.get_mut(&program4) { - second_level.slot_versions.pop(); - } - cache.prune(5, 0); // Fork graph after pruning @@ -2499,117 +2386,6 @@ mod tests { assert!(match_missing(&missing, &program3, true)); } - #[test] - fn test_prune_expired() { - let mut cache = new_mock_cache::(); - - // Fork graph created for the test - // 0 - // / \ - // 10 5 - // | | - // 20 11 - // | | \ - // 22 15 25 - // | | - // 16 27 - // | - // 19 - // | - // 23 - - let mut fork_graph = TestForkGraphSpecific::default(); - fork_graph.insert_fork(&[0, 10, 20, 22]); - fork_graph.insert_fork(&[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); - fork_graph.insert_fork(&[0, 5, 11, 25, 27]); - let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); - - let program1 = Pubkey::new_unique(); - assert!(!cache.replenish(program1, new_test_loaded_program(10, 11)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); - - let program2 = Pubkey::new_unique(); - assert!(!cache.replenish(program2, new_test_loaded_program(5, 6)).0); - assert!(!cache.replenish(program2, new_test_loaded_program(11, 12)).0); - - let program3 = Pubkey::new_unique(); - assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); - - // The following is a special case, where there's an expiration slot - let test_program = Arc::new(LoadedProgram { - program: LoadedProgramType::DelayVisibility, - account_size: 0, - deployment_slot: 11, - effective_slot: 11, - maybe_expiration_slot: Some(15), - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), - latest_access_slot: AtomicU64::default(), - }); - assert!(!cache.replenish(program1, test_program).0); - - // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted, true); - - // Program1 deployed at slot 11 should not be expired yet - 
assert!(match_slot(&extracted, &program1, 11, 12)); - assert!(match_slot(&extracted, &program2, 11, 12)); - - assert!(match_missing(&missing, &program3, false)); - - // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15 - // This would cause program4 deployed at slot 15 to be expired. - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted, true); - - assert!(match_slot(&extracted, &program2, 11, 15)); - - assert!(match_missing(&missing, &program1, false)); - assert!(match_missing(&missing, &program3, false)); - - // Test that the program still exists in the cache, even though it is expired. - assert_eq!( - cache - .entries - .get(&program1) - .expect("Didn't find program1") - .slot_versions - .len(), - 3 - ); - - // New root 5 should not evict the expired entry for program1 - cache.prune(5, 0); - assert_eq!( - cache - .entries - .get(&program1) - .expect("Didn't find program1") - .slot_versions - .len(), - 1 - ); - - // Unlock the cooperative loading lock so that the subsequent prune can do its job - cache.finish_cooperative_loading_task(15, program1, new_test_loaded_program(0, 1)); - - // New root 15 should evict the expired entry for program1 - cache.prune(15, 0); - assert!(cache.entries.get(&program1).is_none()); - } - #[test] fn test_fork_prune_find_first_ancestor() { let mut cache = new_mock_cache::(); @@ -2741,109 +2517,96 @@ mod tests { new_mock_cache::(); let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::Tombstone - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::NoCriteria + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::NoCriteria - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::Tombstone + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) + ) + ); let program = new_test_loaded_program(0, 1); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::Tombstone - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::NoCriteria + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::NoCriteria - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::Tombstone + ) + ); - 
assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) + ) + ); - let program = Arc::new(new_test_loaded_program_with_usage_and_expiry( + let program = Arc::new(new_test_loaded_program_with_usage( 0, 1, AtomicU64::default(), - Some(2), - )); - - assert!(LoadedPrograms::::is_entry_usable( - &program, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::NoCriteria )); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::Tombstone - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::NoCriteria + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 2, - &LoadedProgramMatchCriteria::NoCriteria - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::Tombstone + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) + ) + ); } } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 48d44b7187a658..21a7b5fed77257 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -81,7 +81,6 @@ pub fn load_program_from_bytes( program_runtime_environment, deployment_slot, effective_slot, - None, programdata, account_size, load_program_metrics, @@ -93,7 +92,6 @@ pub fn load_program_from_bytes( program_runtime_environment, deployment_slot, effective_slot, - None, programdata, account_size, load_program_metrics, @@ -4004,7 +4002,6 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(100), ix_usage_counter: AtomicU64::new(100), latest_access_slot: AtomicU64::new(0), @@ -4045,7 +4042,6 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(100), ix_usage_counter: AtomicU64::new(100), latest_access_slot: AtomicU64::new(0), diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 20b413d23e7416..4764b23fe65e50 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -419,7 +419,6 @@ pub fn process_instruction_deploy( .clone(), deployment_slot, effective_slot, - None, programdata, buffer.get_data().len(), &mut load_program_metrics, @@ -660,7 +659,6 @@ mod tests { .clone(), 0, 0, - None, programdata, account.data().len(), &mut load_program_metrics, diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index f16891a85fb8b2..837dc5e7fd4ce8 100644 
--- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -774,7 +774,6 @@ impl TransactionBatchProcessor { program_runtime_environment.clone(), deployment_slot, deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, programdata, account_size, load_program_metrics, @@ -786,7 +785,6 @@ impl TransactionBatchProcessor { program_runtime_environment.clone(), deployment_slot, deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, programdata, account_size, load_program_metrics, From b36d1e227f4ae00254bd0d85b26502de016afa1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 13:17:48 +0800 Subject: [PATCH 139/401] build(deps): bump tempfile from 3.9.0 to 3.10.0 (#35102) * build(deps): bump tempfile from 3.9.0 to 3.10.0 Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.9.0 to 3.10.0. - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.9.0...v3.10.0) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 22 ++++++---------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 ++++++---------------- 3 files changed, 13 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1409037320f776..f5cf46a44fb670 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1922,9 +1922,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fd-lock" @@ -4414,15 +4414,6 @@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_users" version = "0.4.0" @@ -4638,9 +4629,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.2", "errno", @@ -8267,13 +8258,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.4.1", "rustix", "windows-sys 0.52.0", ] diff --git a/Cargo.toml b/Cargo.toml index 64a64e58ddb31e..ef2cc3487e1ee5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -410,7 +410,7 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" -tempfile = "3.9.0" +tempfile = "3.10.0" test-case = "3.3.1" thiserror = "1.0.56" tiny-bip39 = 
"0.8.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index fa0abfb607c3ea..02db34e35c0c1d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1617,9 +1617,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fd-lock" @@ -3900,15 +3900,6 @@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_users" version = "0.4.0" @@ -4109,9 +4100,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.2", "errno", @@ -7146,13 +7137,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.4.1", "rustix", "windows-sys 0.52.0", ] From 8363ebfd64dd2d55e876715c760fbb0f7d53fb26 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 7 Feb 2024 15:17:33 +0800 Subject: [PATCH 140/401] ci: merge docker images (#35078) * ci: add new ci docker image * ci: use the new docker image * fix shellcheck * add readme for the new docker image * remove old docker images * remove unused check in docs/build.sh * use the new image in net.sh --- .buildkite/scripts/build-bench.sh | 4 +-- .buildkite/scripts/build-stable.sh | 6 ++-- ci/buildkite-pipeline.sh | 16 +++++----- ci/buildkite-secondary.yml | 2 +- ci/buildkite-solana-private.sh | 10 +++---- ci/dependabot-pr.sh | 2 +- ci/docker-run-default-image.sh | 10 +++++++ ci/docker-rust-nightly/Dockerfile | 21 -------------- ci/docker-rust-nightly/README.md | 42 --------------------------- ci/docker-rust-nightly/build.sh | 20 ------------- ci/docker-rust/README.md | 11 ------- ci/docker-rust/build.sh | 19 ------------ ci/{docker-rust => docker}/Dockerfile | 26 +++++++++++++++-- ci/docker/README.md | 11 +++++++ ci/docker/build.sh | 23 +++++++++++++++ ci/publish-crate.sh | 2 +- ci/rust-version.sh | 4 +-- ci/test.sh | 9 ++++++ docs/build.sh | 6 ++-- net/net.sh | 2 +- sdk/docker-solana/build.sh | 3 +- 21 files changed, 103 insertions(+), 146 deletions(-) create mode 100755 ci/docker-run-default-image.sh delete mode 100644 ci/docker-rust-nightly/Dockerfile delete mode 100644 ci/docker-rust-nightly/README.md delete mode 100755 ci/docker-rust-nightly/build.sh delete mode 100644 ci/docker-rust/README.md delete mode 100755 ci/docker-rust/build.sh rename ci/{docker-rust => docker}/Dockerfile (78%) create mode 100644 ci/docker/README.md create mode 100755 ci/docker/build.sh create mode 100644 ci/test.sh diff --git a/.buildkite/scripts/build-bench.sh b/.buildkite/scripts/build-bench.sh index a19e4291bc1426..27f156c141fe03 100755 --- 
a/.buildkite/scripts/build-bench.sh +++ b/.buildkite/scripts/build-bench.sh @@ -22,5 +22,5 @@ EOF # shellcheck disable=SC2016 group "bench" \ - "$(build_steps "bench-part-1" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part1.sh")" \ - "$(build_steps "bench-part-2" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part2.sh")" + "$(build_steps "bench-part-1" "ci/docker-run-default-image.sh ci/bench/part1.sh")" \ + "$(build_steps "bench-part-2" "ci/docker-run-default-image.sh ci/bench/part2.sh")" diff --git a/.buildkite/scripts/build-stable.sh b/.buildkite/scripts/build-stable.sh index e1d774e1669ab8..f20ca1db358402 100755 --- a/.buildkite/scripts/build-stable.sh +++ b/.buildkite/scripts/build-stable.sh @@ -12,7 +12,7 @@ partitions=$( cat <> "$output_file" <<"EOF" - - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" + - command: "ci/docker-run-default-image.sh ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" @@ -226,7 +226,7 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + command_step wasm "ci/docker-run-default-image.sh ci/test-wasm.sh" 20 else annotate --style info \ "wasm skipped as no relevant files were modified" @@ -258,7 +258,7 @@ EOF ^ci/test-coverage.sh \ ^scripts/coverage.sh \ ; then - command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 80 + command_step coverage "ci/docker-run-default-image.sh ci/test-coverage.sh" 80 else annotate --style info --context test-coverage \ "Coverage skipped as no .rs files were modified" @@ -296,7 +296,7 @@ pull_or_push_steps() { if [ -z "$diff_other_than_version_bump" ]; then echo "Diff only contains version bump." - command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20 + command_step checks "ci/docker-run-default-image.sh ci/test-checks.sh" 20 exit 0 fi fi diff --git a/ci/buildkite-secondary.yml b/ci/buildkite-secondary.yml index c8bf7b4fd9fd57..c43c7ee449e758 100644 --- a/ci/buildkite-secondary.yml +++ b/ci/buildkite-secondary.yml @@ -3,7 +3,7 @@ # Pull requests to not run these steps. steps: - name: "cargo audit" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/do-audit.sh" + command: "ci/docker-run-default-image.sh ci/do-audit.sh" agents: queue: "release-build" timeout_in_minutes: 10 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index eeb087d323ee9a..70d8e4bfe4f59f 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -134,7 +134,7 @@ wait_step() { } all_test_steps() { - command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20 + command_step checks "ci/docker-run-default-image.sh ci/test-checks.sh" 20 wait_step # Full test suite @@ -146,7 +146,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 + command_step doctest "ci/docker-run-default-image.sh ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -168,7 +168,7 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: ". 
ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" + - command: "ci/docker-run-default-image.sh ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" @@ -208,7 +208,7 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + command_step wasm "ci/docker-run-default-image.sh ci/test-wasm.sh" 20 else annotate --style info \ "wasm skipped as no relevant files were modified" @@ -238,7 +238,7 @@ EOF ^ci/test-coverage.sh \ ^scripts/coverage.sh \ ; then - command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 80 + command_step coverage "ci/docker-run-default-image.sh ci/test-coverage.sh" 80 else annotate --style info --context test-coverage \ "Coverage skipped as no .rs files were modified" diff --git a/ci/dependabot-pr.sh b/ci/dependabot-pr.sh index 9ef6816cec5b96..91ecd5948c9a43 100755 --- a/ci/dependabot-pr.sh +++ b/ci/dependabot-pr.sh @@ -11,7 +11,7 @@ fi source ci/rust-version.sh stable -ci/docker-run.sh $rust_nightly_docker_image ci/dependabot-updater.sh +ci/docker-run-default-image.sh ci/dependabot-updater.sh if [[ $(git status --short :**/Cargo.lock | wc -l) -eq 0 ]]; then echo --- ok diff --git a/ci/docker-run-default-image.sh b/ci/docker-run-default-image.sh new file mode 100755 index 00000000000000..927167cc8ef1ac --- /dev/null +++ b/ci/docker-run-default-image.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# shellcheck disable=SC1091 +source "$here/rust-version.sh" + +"$here/docker-run.sh" "${ci_docker_image:?}" "$@" diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile deleted file mode 100644 index 60d48cc22594f4..00000000000000 --- a/ci/docker-rust-nightly/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM solanalabs/rust:1.75.0 - -ARG date -ARG GRCOV_VERSION=v0.8.18 - -RUN \ - rustup install nightly-$date && \ - rustup component add clippy --toolchain=nightly-$date && \ - rustup component add rustfmt --toolchain=nightly-$date && \ - rustup show && \ - rustc --version && \ - cargo --version && \ - # grcov - curl -LOsS "https://github.com/mozilla/grcov/releases/download/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" && \ - tar -xf grcov-x86_64-unknown-linux-musl.tar.bz2 && \ - mv ./grcov $CARGO_HOME/bin && \ - rm grcov-x86_64-unknown-linux-musl.tar.bz2 && \ - # codecov - curl -Os https://uploader.codecov.io/latest/linux/codecov && \ - chmod +x codecov && \ - mv codecov /usr/bin diff --git a/ci/docker-rust-nightly/README.md b/ci/docker-rust-nightly/README.md deleted file mode 100644 index f4e7931f3d8511..00000000000000 --- a/ci/docker-rust-nightly/README.md +++ /dev/null @@ -1,42 +0,0 @@ -Docker image containing rust nightly and some preinstalled crates used in CI. - -This image may be manually updated by running `CI=true ./build.sh` if you are a member -of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub -organization. - -## Moving to a newer nightly - -NOTE: Follow instructions in docker-rust/README.md before this when updating the stable -rust version as well. - -We pin the version of nightly (see the `ARG nightly=xyz` line in `Dockerfile`) -to avoid the build breaking at unexpected times, as occasionally nightly will -introduce breaking changes. - -To update the pinned version: -1. 
Edit `Dockerfile` to match the desired stable rust version to base on if needed. -1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally, - or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a - specific YYYY-MM-DD that is desired (default is today's build). - Check https://rust-lang.github.io/rustup-components-history/ for build - status -1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD` -1. Run `SOLANA_ALLOCATE_TTY=1 SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-checks.sh` - and `SOLANA_ALLOCATE_TTY=1 SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh [args]...` - to confirm the new nightly image builds. Fix any issues as needed -1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. -1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com. -1. Send a PR with the `ci/rust-version.sh` change and any codebase adjustments needed. - -## Troubleshooting - -### Resource is denied - -When running `CI=true ci/docker-rust-nightly/build.sh`, you see: - -``` -denied: requested access to the resource is denied -``` - -Run `docker login` to enable pushing images to Docker Hub. Contact @mvines or @garious -to get write access. diff --git a/ci/docker-rust-nightly/build.sh b/ci/docker-rust-nightly/build.sh deleted file mode 100755 index dad7221e5dbecf..00000000000000 --- a/ci/docker-rust-nightly/build.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -ex - -cd "$(dirname "$0")" - -platform=() -if [[ $(uname -m) = arm64 ]]; then - # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr - platform+=(--platform linux/amd64) -fi - -nightlyDate=${1:-$(date +%Y-%m-%d)} -docker build "${platform[@]}" -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" . - -maybeEcho= -if [[ -z $CI ]]; then - echo "Not CI, skipping |docker push|" - maybeEcho="echo" -fi -$maybeEcho docker push solanalabs/rust-nightly:"$nightlyDate" diff --git a/ci/docker-rust/README.md b/ci/docker-rust/README.md deleted file mode 100644 index 3f818476867be3..00000000000000 --- a/ci/docker-rust/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Docker image containing rust and some preinstalled packages used in CI. - -NOTE: Recreate rust-nightly docker image after this when updating the stable rust -version! Both docker images must be updated in tandem. - -This image is manually maintained: -1. Edit `Dockerfile` to match the desired rust version -1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. -1. Run `./build.sh` to publish the new image, if you are a member of the [Solana - Labs](https://hub.docker.com/u/solanalabs/) Docker Hub organization. - diff --git a/ci/docker-rust/build.sh b/ci/docker-rust/build.sh deleted file mode 100755 index 360bbbcbe3bcb3..00000000000000 --- a/ci/docker-rust/build.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -set -ex - -cd "$(dirname "$0")" - - -platform=() -if [[ $(uname -m) = arm64 ]]; then - # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr - platform+=(--platform linux/amd64) -fi - -docker build "${platform[@]}" -t solanalabs/rust . 
- -read -r rustc version _ < <(docker run solanalabs/rust rustc --version) -[[ $rustc = rustc ]] -docker tag solanalabs/rust:latest solanalabs/rust:"$version" -docker push solanalabs/rust:"$version" -docker push solanalabs/rust:latest diff --git a/ci/docker-rust/Dockerfile b/ci/docker/Dockerfile similarity index 78% rename from ci/docker-rust/Dockerfile rename to ci/docker/Dockerfile index 227d5f55d7753b..cee80877c6db5d 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker/Dockerfile @@ -1,10 +1,12 @@ FROM ubuntu:20.04 ARG \ - RUST_VERSION=1.75.0 \ + RUST_VERSION= \ + RUST_NIGHTLY_VERSION= \ GOLANG_VERSION=1.21.3 \ NODE_MAJOR=18 \ - SCCACHE_VERSION=v0.5.4 + SCCACHE_VERSION=v0.5.4 \ + GRCOV_VERSION=v0.8.18 SHELL ["/bin/bash", "-o", "pipefail", "-c"] @@ -21,7 +23,10 @@ ENV \ CARGO_HOME=/usr/local/cargo \ PATH="$PATH:/usr/local/cargo/bin" -RUN apt-get update && \ +RUN \ + if [ -z "$RUST_VERSION" ]; then echo "ERROR: The RUST_VERSION argument is required!" && exit 1; fi && \ + if [ -z "$RUST_NIGHTLY_VERSION" ]; then echo "ERROR: The RUST_NIGHTLY_VERSION argument is required!" && exit 1; fi && \ + apt-get update && \ apt-get install --no-install-recommends -y \ # basic tzdata \ @@ -65,6 +70,9 @@ RUN apt-get update && \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --no-modify-path --profile minimal --default-toolchain $RUST_VERSION -y && \ rustup component add rustfmt && \ rustup component add clippy && \ + rustup install $RUST_NIGHTLY_VERSION && \ + rustup component add clippy --toolchain=$RUST_NIGHTLY_VERSION && \ + rustup component add rustfmt --toolchain=$RUST_NIGHTLY_VERSION && \ rustup target add wasm32-unknown-unknown && \ cargo install cargo-audit && \ cargo install cargo-hack && \ @@ -74,6 +82,9 @@ RUN apt-get update && \ cargo install svgbob_cli && \ cargo install wasm-pack && \ cargo install rustfilt && \ + rustup show && \ + rustc --version && \ + cargo --version && \ chmod -R a+w $CARGO_HOME $RUSTUP_HOME && \ rm -rf $CARGO_HOME/registry && \ # sccache @@ -101,5 +112,14 @@ RUN apt-get update && \ chmod -R a+w /.config && \ mkdir /.npm && \ chmod -R a+w /.npm && \ + # grcov + curl -LOsS "https://github.com/mozilla/grcov/releases/download/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" && \ + tar -xf grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + mv ./grcov $CARGO_HOME/bin && \ + rm grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + # codecov + curl -Os https://uploader.codecov.io/latest/linux/codecov && \ + chmod +x codecov && \ + mv codecov /usr/bin && \ # clean lists rm -rf /var/lib/apt/lists/* diff --git a/ci/docker/README.md b/ci/docker/README.md new file mode 100644 index 00000000000000..58bd9accdb14a1 --- /dev/null +++ b/ci/docker/README.md @@ -0,0 +1,11 @@ +Docker image containing rust, rust nightly and some preinstalled packages used in CI + +This image is manually maintained: + +#### CLI + +1. Edit + 1. `ci/rust-version.sh` for rust and rust nightly version + 2. `ci/docker/Dockerfile` for other packages +2. Ensure you're a member of the [Solana Docker Hub Organization](https://hub.docker.com/u/solanalabs/) and already `docker login` +3. 
Run `ci/docker/build.sh` to build/publish the new image diff --git a/ci/docker/build.sh b/ci/docker/build.sh new file mode 100755 index 00000000000000..0c20c5e928d94d --- /dev/null +++ b/ci/docker/build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -e + +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# shellcheck disable=SC1091 +source "$here/../rust-version.sh" + +platform=() +if [[ $(uname -m) = arm64 ]]; then + # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr + platform+=(--platform linux/amd64) +fi + +echo "build image: ${ci_docker_image:?}" +docker build "${platform[@]}" \ + -f "$here/Dockerfile" \ + --build-arg "RUST_VERSION=${rust_stable:?}" \ + --build-arg "RUST_NIGHTLY_VERSION=${rust_nightly:?}" \ + -t "$ci_docker_image" . + +docker push "$ci_docker_image" diff --git a/ci/publish-crate.sh b/ci/publish-crate.sh index 099d02129e3cb8..5d7f3b1e1e1c50 100755 --- a/ci/publish-crate.sh +++ b/ci/publish-crate.sh @@ -72,7 +72,7 @@ for Cargo_toml in $Cargo_tomls; do echo "Attempt ${i} of ${numRetries}" # The rocksdb package does not build with the stock rust docker image so use # the solana rust docker image - if ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"; then + if ci/docker-run-default-image.sh bash -exc "cd $crate; $cargoCommand"; then break fi diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 3db1a843fa105b..3321f1d5ecb6a1 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -34,10 +34,10 @@ fi export rust_stable="$stable_version" -export rust_stable_docker_image=solanalabs/rust:"$stable_version" export rust_nightly=nightly-"$nightly_version" -export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version" + +export ci_docker_image="solanalabs/ci:rust_${rust_stable}_${rust_nightly}" [[ -z $1 ]] || ( diff --git a/ci/test.sh b/ci/test.sh new file mode 100644 index 00000000000000..987f2a6cf36153 --- /dev/null +++ b/ci/test.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Get the directory of the current script +script_dir_by_bash_source=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) + +script_dir_by_0=$(cd "$(dirname "$0")" && pwd) + +echo "script_dir_by_bash_source = $script_dir_by_bash_source" +echo "script_dir_by_0 = $script_dir_by_0" diff --git a/docs/build.sh b/docs/build.sh index 5cb2ee6eebf7a7..6269eabdbb78b0 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -6,12 +6,10 @@ cd "$(dirname "$0")" # shellcheck source=ci/env.sh source ../ci/env.sh -: "${rust_stable_docker_image:=}" # Pacify shellcheck - # shellcheck source=ci/rust-version.sh source ../ci/rust-version.sh -../ci/docker-run.sh "$rust_stable_docker_image" docs/build-cli-usage.sh -../ci/docker-run.sh "$rust_stable_docker_image" docs/convert-ascii-to-svg.sh +../ci/docker-run-default-image.sh docs/build-cli-usage.sh +../ci/docker-run-default-image.sh docs/convert-ascii-to-svg.sh ./set-solana-release-tag.sh # Get current channel diff --git a/net/net.sh b/net/net.sh index fd25d429be3aa6..fe52116250545d 100755 --- a/net/net.sh +++ b/net/net.sh @@ -191,7 +191,7 @@ build() { if [[ $(uname) != Linux || ! 
" ${supported[*]} " =~ $(lsb_release -sr) ]]; then # shellcheck source=ci/rust-version.sh source "$SOLANA_ROOT"/ci/rust-version.sh - MAYBE_DOCKER="ci/docker-run.sh $rust_stable_docker_image" + MAYBE_DOCKER="ci/docker-run.sh ${ci_docker_image:?}" fi SECONDS=0 ( diff --git a/sdk/docker-solana/build.sh b/sdk/docker-solana/build.sh index 77160d73edbc38..f1c8ee265d6d56 100755 --- a/sdk/docker-solana/build.sh +++ b/sdk/docker-solana/build.sh @@ -20,8 +20,7 @@ fi cd "$(dirname "$0")" rm -rf usr/ -../../ci/docker-run.sh "$rust_stable_docker_image" \ - scripts/cargo-install-all.sh sdk/docker-solana/usr +../../ci/docker-run-default-image.sh scripts/cargo-install-all.sh sdk/docker-solana/usr cp -f ../../scripts/run.sh usr/bin/solana-run.sh cp -f ../../fetch-spl.sh usr/bin/ From af9dd35cc5816e6712663541bd2fc6efb19dfa9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 23:06:35 +0800 Subject: [PATCH 141/401] build(deps): bump js-sys from 0.3.67 to 0.3.68 (#35127) * build(deps): bump js-sys from 0.3.67 to 0.3.68 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.67 to 0.3.68. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 24 ++++++++++++------------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f5cf46a44fb670..36bae39e0743f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2748,9 +2748,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -9017,9 +9017,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -9027,9 +9027,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", @@ -9054,9 +9054,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = 
"b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9064,9 +9064,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", @@ -9077,9 +9077,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" diff --git a/Cargo.toml b/Cargo.toml index ef2cc3487e1ee5..bd8bf23891a60f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -228,7 +228,7 @@ itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.67" +js-sys = "0.3.68" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 02db34e35c0c1d..dbdeb7ad1bba60 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2350,9 +2350,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -7833,9 +7833,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7843,9 +7843,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", @@ -7870,9 +7870,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7880,9 +7880,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", @@ -7893,9 +7893,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.91" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" From fc727a3ac52d21ed58e4c6762cda4f9129f8e4d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 23:06:57 +0800 Subject: [PATCH 142/401] build(deps): bump wasm-bindgen from 0.2.90 to 0.2.91 (#35128) * build(deps): bump wasm-bindgen from 0.2.90 to 0.2.91 Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.90 to 0.2.91. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/compare/0.2.90...0.2.91) --- updated-dependencies: - dependency-name: wasm-bindgen dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite From 99247d150dea28409df082210ce0337b435380ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 23:07:17 +0800 Subject: [PATCH 143/401] build(deps): bump bytemuck from 1.14.1 to 1.14.2 (#35129) * build(deps): bump bytemuck from 1.14.1 to 1.14.2 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.14.1 to 1.14.2. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.14.1...v1.14.2) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36bae39e0743f3..b762b0dc7e98f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -911,9 +911,9 @@ checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "bytemuck" -version = "1.14.1" +version = "1.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9" +checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index bd8bf23891a60f..d0bc79a53f23f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -164,7 +164,7 @@ bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.7" -bytemuck = "1.14.1" +bytemuck = "1.14.2" byteorder = "1.5.0" bytes = "1.5" bzip2 = "0.4.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dbdeb7ad1bba60..2c80edb3065b1e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -834,9 +834,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.14.1" +version = "1.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9" +checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" dependencies = [ "bytemuck_derive", ] From 2aa8b829900aa04b0d50d130cd91a626bc38018e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 7 Feb 2024 09:20:31 -0600 Subject: [PATCH 144/401] remove activated feature set_exempt_rent_epoch_max (#35082) * remove activated feature set_exempt_rent_epoch_max * fix test_rent_eager_collect_rent_in_partition test * update hash values for test_bank_hash_consistency * clean up commas --- accounts-db/src/rent_collector.rs | 460 +++++++++++++---------------- runtime/src/bank.rs | 10 +- runtime/src/bank/tests.rs | 463 +++++++++++------------------- svm/src/account_loader.rs | 25 +- 4 files changed, 379 insertions(+), 579 deletions(-) diff --git a/accounts-db/src/rent_collector.rs b/accounts-db/src/rent_collector.rs index 1a72cac88308b3..0bdb03291e8c5f 100644 --- a/accounts-db/src/rent_collector.rs +++ b/accounts-db/src/rent_collector.rs @@ -111,13 +111,10 @@ impl RentCollector { &self, address: &Pubkey, account: &mut AccountSharedData, - set_exempt_rent_epoch_max: bool, ) -> CollectedInfo { match self.calculate_rent_result(address, account) { RentResult::Exempt => { - if set_exempt_rent_epoch_max { - account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); CollectedInfo::default() } RentResult::NoRentCollectionNow => CollectedInfo::default(), @@ -219,314 +216,255 @@ mod tests { &self, address: &Pubkey, account: &mut AccountSharedData, - set_exempt_rent_epoch_max: bool, ) -> CollectedInfo { // initialize rent_epoch as created at this epoch account.set_rent_epoch(self.epoch); - self.collect_from_existing_account(address, account, set_exempt_rent_epoch_max) + self.collect_from_existing_account(address, account) } } #[test] fn test_calculate_rent_result() { - for 
set_exempt_rent_epoch_max in [false, true] { - let mut rent_collector = RentCollector::default(); + let mut rent_collector = RentCollector::default(); - let mut account = AccountSharedData::default(); - assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::NoRentCollectionNow + let mut account = AccountSharedData::default(); + assert_matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::NoRentCollectionNow + ); + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() ); - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account); - } + assert_eq!(account_clone, account); + } - account.set_executable(true); - assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::Exempt + account.set_executable(true); + assert_matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::Exempt + ); + { + let mut account_clone = account.clone(); + let mut account_expected = account.clone(); + account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } + assert_eq!(account_clone, account_expected); + } - account.set_executable(false); - assert_matches!( - rent_collector.calculate_rent_result(&incinerator::id(), &account), - RentResult::Exempt + account.set_executable(false); + assert_matches!( + rent_collector.calculate_rent_result(&incinerator::id(), &account), + RentResult::Exempt + ); + { + let mut account_clone = account.clone(); + let mut account_expected = account.clone(); + account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + rent_collector + .collect_from_existing_account(&incinerator::id(), &mut account_clone), + CollectedInfo::default() ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &incinerator::id(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } - - // try a few combinations of rent collector rent epoch and collecting rent - for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { - rent_collector.epoch = rent_epoch; - account.set_lamports(10); - account.set_rent_epoch(1); - let new_rent_epoch_expected = rent_collector.epoch + 1; - assert!( - matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, - ), - "{:?}", - 
rent_collector.calculate_rent_result(&Pubkey::default(), &account) - ); - - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo { - rent_amount: rent_due_expected, - account_data_len_reclaimed: 0 - } - ); - let mut account_expected = account.clone(); - account_expected.set_lamports(account.lamports() - rent_due_expected); - account_expected.set_rent_epoch(new_rent_epoch_expected); - assert_eq!(account_clone, account_expected); - } - } + assert_eq!(account_clone, account_expected); + } - // enough lamports to make us exempt - account.set_lamports(1_000_000); - let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); + // try a few combinations of rent collector rent epoch and collecting rent + for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { + rent_collector.epoch = rent_epoch; + account.set_lamports(10); + account.set_rent_epoch(1); + let new_rent_epoch_expected = rent_collector.epoch + 1; assert!( - matches!(result, RentResult::Exempt), - "{result:?}, set_exempt_rent_epoch_max: {set_exempt_rent_epoch_max}", + matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + ), + "{:?}", + rent_collector.calculate_rent_result(&Pubkey::default(), &account) ); + { let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo { + rent_amount: rent_due_expected, + account_data_len_reclaimed: 0 + } ); + let mut account_expected = account.clone(); + account_expected.set_lamports(account.lamports() - rent_due_expected); + account_expected.set_rent_epoch(new_rent_epoch_expected); assert_eq!(account_clone, account_expected); } + } + + // enough lamports to make us exempt + account.set_lamports(1_000_000); + let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); + assert!(matches!(result, RentResult::Exempt), "{result:?}",); + { + let mut account_clone = account.clone(); + let mut account_expected = account.clone(); + account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() + ); + assert_eq!(account_clone, account_expected); + } - // enough lamports to make us exempt - // but, our rent_epoch is set in the future, so we can't know if we are exempt yet or not. - // We don't calculate rent amount vs data if the rent_epoch is already in the future. - account.set_rent_epoch(1_000_000); - assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::NoRentCollectionNow + // enough lamports to make us exempt + // but, our rent_epoch is set in the future, so we can't know if we are exempt yet or not. + // We don't calculate rent amount vs data if the rent_epoch is already in the future. 
+ account.set_rent_epoch(1_000_000); + assert_matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::NoRentCollectionNow + ); + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() ); - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account); - } + assert_eq!(account_clone, account); } } #[test] fn test_collect_from_account_created_and_existing() { - for set_exempt_rent_epoch_max in [false, true] { - let old_lamports = 1000; - let old_epoch = 1; - let new_epoch = 2; - - let (mut created_account, mut existing_account) = { - let account = AccountSharedData::from(Account { - lamports: old_lamports, - rent_epoch: old_epoch, - ..Account::default() - }); - - (account.clone(), account) - }; - - let rent_collector = default_rent_collector_clone_with_epoch(new_epoch); - - // collect rent on a newly-created account - let collected = rent_collector.collect_from_created_account( - &solana_sdk::pubkey::new_rand(), - &mut created_account, - set_exempt_rent_epoch_max, - ); - assert!(created_account.lamports() < old_lamports); - assert_eq!( - created_account.lamports() + collected.rent_amount, - old_lamports - ); - assert_ne!(created_account.rent_epoch(), old_epoch); - assert_eq!(collected.account_data_len_reclaimed, 0); - - // collect rent on a already-existing account - let collected = rent_collector.collect_from_existing_account( - &solana_sdk::pubkey::new_rand(), - &mut existing_account, - set_exempt_rent_epoch_max, - ); - assert!(existing_account.lamports() < old_lamports); - assert_eq!( - existing_account.lamports() + collected.rent_amount, - old_lamports - ); - assert_ne!(existing_account.rent_epoch(), old_epoch); - assert_eq!(collected.account_data_len_reclaimed, 0); + let old_lamports = 1000; + let old_epoch = 1; + let new_epoch = 2; + + let (mut created_account, mut existing_account) = { + let account = AccountSharedData::from(Account { + lamports: old_lamports, + rent_epoch: old_epoch, + ..Account::default() + }); - // newly created account should be collected for less rent; thus more remaining balance - assert!(created_account.lamports() > existing_account.lamports()); - assert_eq!(created_account.rent_epoch(), existing_account.rent_epoch()); - } + (account.clone(), account) + }; + + let rent_collector = default_rent_collector_clone_with_epoch(new_epoch); + + // collect rent on a newly-created account + let collected = rent_collector + .collect_from_created_account(&solana_sdk::pubkey::new_rand(), &mut created_account); + assert!(created_account.lamports() < old_lamports); + assert_eq!( + created_account.lamports() + collected.rent_amount, + old_lamports + ); + assert_ne!(created_account.rent_epoch(), old_epoch); + assert_eq!(collected.account_data_len_reclaimed, 0); + + // collect rent on a already-existing account + let collected = rent_collector + .collect_from_existing_account(&solana_sdk::pubkey::new_rand(), &mut existing_account); + assert!(existing_account.lamports() < old_lamports); + assert_eq!( + existing_account.lamports() + collected.rent_amount, + old_lamports + ); + assert_ne!(existing_account.rent_epoch(), old_epoch); + assert_eq!(collected.account_data_len_reclaimed, 0); + + // newly created account should be collected for 
less rent; thus more remaining balance + assert!(created_account.lamports() > existing_account.lamports()); + assert_eq!(created_account.rent_epoch(), existing_account.rent_epoch()); } #[test] fn test_rent_exempt_temporal_escape() { - for set_exempt_rent_epoch_max in [false, true] { - for pass in 0..2 { - let mut account = AccountSharedData::default(); - let epoch = 3; - let huge_lamports = 123_456_789_012; - let tiny_lamports = 789_012; - let pubkey = solana_sdk::pubkey::new_rand(); - - assert_eq!(account.rent_epoch(), 0); - - // create a tested rent collector - let rent_collector = default_rent_collector_clone_with_epoch(epoch); - - if pass == 0 { - account.set_lamports(huge_lamports); - // first mark account as being collected while being rent-exempt - let collected = rent_collector.collect_from_existing_account( - &pubkey, - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(account.lamports(), huge_lamports); - assert_eq!(collected, CollectedInfo::default()); - continue; - } + for pass in 0..2 { + let mut account = AccountSharedData::default(); + let epoch = 3; + let huge_lamports = 123_456_789_012; + let tiny_lamports = 789_012; + let pubkey = solana_sdk::pubkey::new_rand(); - // decrease the balance not to be rent-exempt - // In a real validator, it is not legal to reduce an account's lamports such that the account becomes rent paying. - // So, pass == 0 above tests the case of rent that is exempt. pass == 1 tests the case where we are rent paying. - account.set_lamports(tiny_lamports); + assert_eq!(account.rent_epoch(), 0); - // ... and trigger another rent collection on the same epoch and check that rent is working - let collected = rent_collector.collect_from_existing_account( - &pubkey, - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); - assert_ne!(collected, CollectedInfo::default()); + // create a tested rent collector + let rent_collector = default_rent_collector_clone_with_epoch(epoch); + + if pass == 0 { + account.set_lamports(huge_lamports); + // first mark account as being collected while being rent-exempt + let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account); + assert_eq!(account.lamports(), huge_lamports); + assert_eq!(collected, CollectedInfo::default()); + continue; } + + // decrease the balance not to be rent-exempt + // In a real validator, it is not legal to reduce an account's lamports such that the account becomes rent paying. + // So, pass == 0 above tests the case of rent that is exempt. pass == 1 tests the case where we are rent paying. + account.set_lamports(tiny_lamports); + + // ... 
and trigger another rent collection on the same epoch and check that rent is working + let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account); + assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); + assert_ne!(collected, CollectedInfo::default()); } } #[test] fn test_rent_exempt_sysvar() { - for set_exempt_rent_epoch_max in [false, true] { - let tiny_lamports = 1; - let mut account = AccountSharedData::default(); - account.set_owner(sysvar::id()); - account.set_lamports(tiny_lamports); + let tiny_lamports = 1; + let mut account = AccountSharedData::default(); + account.set_owner(sysvar::id()); + account.set_lamports(tiny_lamports); - let pubkey = solana_sdk::pubkey::new_rand(); + let pubkey = solana_sdk::pubkey::new_rand(); - assert_eq!(account.rent_epoch(), 0); + assert_eq!(account.rent_epoch(), 0); - let epoch = 3; - let rent_collector = default_rent_collector_clone_with_epoch(epoch); + let epoch = 3; + let rent_collector = default_rent_collector_clone_with_epoch(epoch); - let collected = rent_collector.collect_from_existing_account( - &pubkey, - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(account.lamports(), 0); - assert_eq!(collected.rent_amount, 1); - } + let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account); + assert_eq!(account.lamports(), 0); + assert_eq!(collected.rent_amount, 1); } /// Ensure that when an account is "rent collected" away, its data len is returned. #[test] fn test_collect_cleans_up_account() { - for set_exempt_rent_epoch_max in [false, true] { - solana_logger::setup(); - let account_lamports = 1; // must be *below* rent amount - let account_data_len = 567; - let account_rent_epoch = 11; - let mut account = AccountSharedData::from(Account { - lamports: account_lamports, // <-- must be below rent-exempt amount - data: vec![u8::default(); account_data_len], - rent_epoch: account_rent_epoch, - ..Account::default() - }); - let rent_collector = default_rent_collector_clone_with_epoch(account_rent_epoch + 1); - - let collected = rent_collector.collect_from_existing_account( - &Pubkey::new_unique(), - &mut account, - set_exempt_rent_epoch_max, - ); - - assert_eq!(collected.rent_amount, account_lamports); - assert_eq!( - collected.account_data_len_reclaimed, - account_data_len as u64 - ); - assert_eq!(account, AccountSharedData::default()); - } + solana_logger::setup(); + let account_lamports = 1; // must be *below* rent amount + let account_data_len = 567; + let account_rent_epoch = 11; + let mut account = AccountSharedData::from(Account { + lamports: account_lamports, // <-- must be below rent-exempt amount + data: vec![u8::default(); account_data_len], + rent_epoch: account_rent_epoch, + ..Account::default() + }); + let rent_collector = default_rent_collector_clone_with_epoch(account_rent_epoch + 1); + + let collected = + rent_collector.collect_from_existing_account(&Pubkey::new_unique(), &mut account); + + assert_eq!(collected.rent_amount, account_lamports); + assert_eq!( + collected.account_data_len_reclaimed, + account_data_len as u64 + ); + assert_eq!(account, AccountSharedData::default()); } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index eb040b3b79cade..630dbb67f415c2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5240,15 +5240,12 @@ impl Bank { .accounts .accounts_db .test_skip_rewrites_but_include_in_bank_hash; - let set_exempt_rent_epoch_max: bool = self - .feature_set - 
.is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let rent_collected_info = if self.should_collect_rent() { let (rent_collected_info, measure) = measure!(self .rent_collector - .collect_from_existing_account(pubkey, account, set_exempt_rent_epoch_max,)); + .collect_from_existing_account(pubkey, account)); time_collecting_rent_us += measure.as_us(); rent_collected_info } else { @@ -5256,9 +5253,8 @@ impl Bank { // are any rent paying accounts, their `rent_epoch` won't change either. However, if the // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. - if set_exempt_rent_epoch_max - && (account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && self.rent_collector.get_rent_due(account) == RentDue::Exempt) + if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && self.rent_collector.get_rent_due(account) == RentDue::Exempt { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 61b10454dceda5..8c1f35e2d99ac0 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -454,124 +454,120 @@ fn rent_with_exemption_threshold(exemption_threshold: f64) -> Rent { /// one thing being tested here is that a failed tx (due to rent collection using up all lamports) followed by rent collection /// results in the same state as if just rent collection ran (and emptied the accounts that have too few lamports) fn test_credit_debit_rent_no_side_effect_on_hash() { - for set_exempt_rent_epoch_max in [false, true] { - solana_logger::setup(); + solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config_no_tx_fee(10); + let (mut genesis_config, _mint_keypair) = create_genesis_config_no_tx_fee(10); - genesis_config.rent = rent_with_exemption_threshold(21.0); + genesis_config.rent = rent_with_exemption_threshold(21.0); - let slot = years_as_slots( - 2.0, - &genesis_config.poh_config.target_tick_duration, - genesis_config.ticks_per_slot, - ) as u64; - let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank = new_bank_from_parent_with_bank_forks( - bank_forks_1.as_ref(), - root_bank, - &Pubkey::default(), - slot, - ); + let slot = years_as_slots( + 2.0, + &genesis_config.poh_config.target_tick_duration, + genesis_config.ticks_per_slot, + ) as u64; + let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks_1.as_ref(), + root_bank, + &Pubkey::default(), + slot, + ); - let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank_with_success_txs = new_bank_from_parent_with_bank_forks( - bank_forks_2.as_ref(), - root_bank_2, - &Pubkey::default(), - slot, - ); + let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank_with_success_txs = new_bank_from_parent_with_bank_forks( + bank_forks_2.as_ref(), + root_bank_2, + &Pubkey::default(), + slot, + ); - assert_eq!(bank.last_blockhash(), genesis_config.hash()); - - let plenty_of_lamports = 264; - let too_few_lamports = 10; - // Initialize credit-debit and credit only accounts - let accounts = [ - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 1, 
&Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), - // Transaction between these two accounts will fail - AccountSharedData::new(too_few_lamports, 0, &Pubkey::default()), - AccountSharedData::new(too_few_lamports, 1, &Pubkey::default()), - ]; - - let keypairs = accounts.iter().map(|_| Keypair::new()).collect::>(); - { - // make sure rent and epoch change are such that we collect all lamports in accounts 4 & 5 - let mut account_copy = accounts[4].clone(); - let expected_rent = bank.rent_collector().collect_from_existing_account( - &keypairs[4].pubkey(), - &mut account_copy, - set_exempt_rent_epoch_max, - ); - assert_eq!(expected_rent.rent_amount, too_few_lamports); - assert_eq!(account_copy.lamports(), 0); - } + assert_eq!(bank.last_blockhash(), genesis_config.hash()); - for i in 0..accounts.len() { - let account = &accounts[i]; - bank.store_account(&keypairs[i].pubkey(), account); - bank_with_success_txs.store_account(&keypairs[i].pubkey(), account); - } + let plenty_of_lamports = 264; + let too_few_lamports = 10; + // Initialize credit-debit and credit only accounts + let accounts = [ + AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), + // Transaction between these two accounts will fail + AccountSharedData::new(too_few_lamports, 0, &Pubkey::default()), + AccountSharedData::new(too_few_lamports, 1, &Pubkey::default()), + ]; - // Make builtin instruction loader rent exempt - let system_program_id = system_program::id(); - let mut system_program_account = bank.get_account(&system_program_id).unwrap(); - system_program_account.set_lamports( - bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()), - ); - bank.store_account(&system_program_id, &system_program_account); - bank_with_success_txs.store_account(&system_program_id, &system_program_account); + let keypairs = accounts.iter().map(|_| Keypair::new()).collect::>(); + { + // make sure rent and epoch change are such that we collect all lamports in accounts 4 & 5 + let mut account_copy = accounts[4].clone(); + let expected_rent = bank + .rent_collector() + .collect_from_existing_account(&keypairs[4].pubkey(), &mut account_copy); + assert_eq!(expected_rent.rent_amount, too_few_lamports); + assert_eq!(account_copy.lamports(), 0); + } - let t1 = system_transaction::transfer( - &keypairs[0], - &keypairs[1].pubkey(), - 1, - genesis_config.hash(), - ); - let t2 = system_transaction::transfer( - &keypairs[2], - &keypairs[3].pubkey(), - 1, - genesis_config.hash(), - ); - // the idea is this transaction will result in both accounts being drained of all lamports due to rent collection - let t3 = system_transaction::transfer( - &keypairs[4], - &keypairs[5].pubkey(), - 1, - genesis_config.hash(), - ); + for i in 0..accounts.len() { + let account = &accounts[i]; + bank.store_account(&keypairs[i].pubkey(), account); + bank_with_success_txs.store_account(&keypairs[i].pubkey(), account); + } - let txs = vec![t1.clone(), t2.clone(), t3]; - let res = bank.process_transactions(txs.iter()); + // Make builtin instruction loader rent exempt + let system_program_id = system_program::id(); + let mut system_program_account = bank.get_account(&system_program_id).unwrap(); + system_program_account.set_lamports( + 
bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()), + ); + bank.store_account(&system_program_id, &system_program_account); + bank_with_success_txs.store_account(&system_program_id, &system_program_account); - assert_eq!(res.len(), 3); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); - assert_eq!(res[2], Err(TransactionError::AccountNotFound)); + let t1 = system_transaction::transfer( + &keypairs[0], + &keypairs[1].pubkey(), + 1, + genesis_config.hash(), + ); + let t2 = system_transaction::transfer( + &keypairs[2], + &keypairs[3].pubkey(), + 1, + genesis_config.hash(), + ); + // the idea is this transaction will result in both accounts being drained of all lamports due to rent collection + let t3 = system_transaction::transfer( + &keypairs[4], + &keypairs[5].pubkey(), + 1, + genesis_config.hash(), + ); - bank.freeze(); + let txs = vec![t1.clone(), t2.clone(), t3]; + let res = bank.process_transactions(txs.iter()); - let rwlockguard_bank_hash = bank.hash.read().unwrap(); - let bank_hash = rwlockguard_bank_hash.as_ref(); + assert_eq!(res.len(), 3); + assert_eq!(res[0], Ok(())); + assert_eq!(res[1], Ok(())); + assert_eq!(res[2], Err(TransactionError::AccountNotFound)); - let txs = vec![t2, t1]; - let res = bank_with_success_txs.process_transactions(txs.iter()); + bank.freeze(); - assert_eq!(res.len(), 2); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); + let rwlockguard_bank_hash = bank.hash.read().unwrap(); + let bank_hash = rwlockguard_bank_hash.as_ref(); - bank_with_success_txs.freeze(); + let txs = vec![t2, t1]; + let res = bank_with_success_txs.process_transactions(txs.iter()); - let rwlockguard_bank_with_success_txs_hash = bank_with_success_txs.hash.read().unwrap(); - let bank_with_success_txs_hash = rwlockguard_bank_with_success_txs_hash.as_ref(); + assert_eq!(res.len(), 2); + assert_eq!(res[0], Ok(())); + assert_eq!(res[1], Ok(())); - assert_eq!(bank_with_success_txs_hash, bank_hash); - } + bank_with_success_txs.freeze(); + + let rwlockguard_bank_with_success_txs_hash = bank_with_success_txs.hash.read().unwrap(); + let bank_with_success_txs_hash = rwlockguard_bank_with_success_txs_hash.as_ref(); + + assert_eq!(bank_with_success_txs_hash, bank_hash); } fn store_accounts_for_rent_test( @@ -1657,7 +1653,7 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { solana_logger::setup(); let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); for feature_id in FeatureSet::default().inactive { - if feature_id != solana_sdk::feature_set::set_exempt_rent_epoch_max::id() + if feature_id != solana_sdk::feature_set::skip_rent_rewrites::id() && (!should_collect_rent || feature_id != solana_sdk::feature_set::disable_rent_fees_collection::id()) { @@ -1739,11 +1735,9 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { bank.get_account(&rent_exempt_pubkey).unwrap().lamports(), large_lamports ); - // Once preserve_rent_epoch_for_rent_exempt_accounts is activated, - // rent_epoch of rent-exempt accounts will no longer advance. 
assert_eq!( bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch(), - 0 + RENT_EXEMPT_RENT_EPOCH ); assert_eq!( bank.slots_by_pubkey(&rent_due_pubkey, &ancestors), @@ -6479,83 +6473,51 @@ fn test_fuzz_instructions() { info!("results: {:?}", results); } -#[test_case(true; "set_rent_epoch_max")] -#[test_case(false; "disable_set_rent_epoch_max")] -fn test_bank_hash_consistency(set_rent_epoch_max: bool) { +#[test] +fn test_bank_hash_consistency() { solana_logger::setup(); let account = AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()); - if !set_rent_epoch_max { - assert_eq!(account.rent_epoch(), 0); - } + assert_eq!(account.rent_epoch(), 0); let mut genesis_config = GenesisConfig::new(&[(Pubkey::from([42; 32]), account)], &[]); genesis_config.creation_time = 0; genesis_config.cluster_type = ClusterType::MainnetBeta; genesis_config.rent.burn_percent = 100; - if set_rent_epoch_max { - activate_feature( - &mut genesis_config, - solana_sdk::feature_set::set_exempt_rent_epoch_max::id(), - ); - } + activate_feature( + &mut genesis_config, + solana_sdk::feature_set::set_exempt_rent_epoch_max::id(), + ); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); // Check a few slots, cross an epoch boundary assert_eq!(bank.get_slots_in_epoch(0), 32); loop { goto_end_of_slot(bank.clone()); - if !set_rent_epoch_max { - if bank.slot == 0 { - assert_eq!( - bank.hash().to_string(), - "trdzvRDTAXAqo1i2GX4JfK9ReixV1NYNG7DRaVq43Do", - ); - } - if bank.slot == 32 { - assert_eq!( - bank.hash().to_string(), - "2rdj8QEnDnBSyMv81rCmncss4UERACyXXB3pEvkep8eS", - ); - } - if bank.slot == 64 { - assert_eq!( - bank.hash().to_string(), - "7g3ofXVQB3reFt9ki8zLA8S4w1GdmEWsWuWrwkPN3SSv" - ); - } - if bank.slot == 128 { - assert_eq!( - bank.hash().to_string(), - "4uX1AZFbqwjwWBACWbAW3V8rjbWH4N3ZRTbNysSLAzj2" - ); - break; - } - } else { - if bank.slot == 0 { - assert_eq!( - bank.hash().to_string(), - "3VqF5pMe3XABLqzUaYw2UVXfAokMJgMkrdfvneFQkHbB", - ); - } - if bank.slot == 32 { - assert_eq!( - bank.hash().to_string(), - "B8GsaBJ9aJrQcbhTTfgNVuV4uwb4v8nKT86HUjDLvNgk", - ); - } - if bank.slot == 64 { - assert_eq!( - bank.hash().to_string(), - "Eg9VRE3zUwarxWyHXhitX9wLkg1vfNeiVqVQxSif6qEC" - ); - } - if bank.slot == 128 { - assert_eq!( - bank.hash().to_string(), - "5rLmK24zyxdeb8aLn5LDEnHLDQmxRd5gWZDVJGgsFX1c" - ); - break; - } + + if bank.slot == 0 { + assert_eq!( + bank.hash().to_string(), + "3VqF5pMe3XABLqzUaYw2UVXfAokMJgMkrdfvneFQkHbB", + ); + } + if bank.slot == 32 { + assert_eq!( + bank.hash().to_string(), + "B8GsaBJ9aJrQcbhTTfgNVuV4uwb4v8nKT86HUjDLvNgk", + ); + } + if bank.slot == 64 { + assert_eq!( + bank.hash().to_string(), + "Eg9VRE3zUwarxWyHXhitX9wLkg1vfNeiVqVQxSif6qEC" + ); + } + if bank.slot == 128 { + assert_eq!( + bank.hash().to_string(), + "5rLmK24zyxdeb8aLn5LDEnHLDQmxRd5gWZDVJGgsFX1c" + ); + break; } bank = Arc::new(new_from_parent(bank)); } @@ -11635,57 +11597,53 @@ fn test_get_rent_paying_pubkeys() { #[test_case(true; "enable rent fees collection")] #[test_case(false; "disable rent fees collection")] fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { - for set_exempt_rent_epoch_max in [false, true] { - let GenesisConfigInfo { - mut genesis_config, .. - } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); - genesis_config.rent = Rent::default(); - if should_collect_rent { - genesis_config - .accounts - .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); - } + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); + genesis_config.rent = Rent::default(); + if should_collect_rent { + genesis_config + .accounts + .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); + } - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let slot = bank.slot() + bank.slot_count_per_normal_epoch(); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + let slot = bank.slot() + bank.slot_count_per_normal_epoch(); + let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - // make another bank so that any reclaimed accounts from the previous bank do not impact - // this test - let slot = bank.slot() + bank.slot_count_per_normal_epoch(); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + // make another bank so that any reclaimed accounts from the previous bank do not impact + // this test + let slot = bank.slot() + bank.slot_count_per_normal_epoch(); + let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - // Store an account into the bank that is rent-paying and has data - let data_size = 123; - let mut account = AccountSharedData::new(1, data_size, &Pubkey::default()); - let keypair = Keypair::new(); - bank.store_account(&keypair.pubkey(), &account); + // Store an account into the bank that is rent-paying and has data + let data_size = 123; + let mut account = AccountSharedData::new(1, data_size, &Pubkey::default()); + let keypair = Keypair::new(); + bank.store_account(&keypair.pubkey(), &account); - // Ensure if we collect rent from the account that it will be reclaimed - { - let info = bank.rent_collector.collect_from_existing_account( - &keypair.pubkey(), - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(info.account_data_len_reclaimed, data_size as u64); - } + // Ensure if we collect rent from the account that it will be reclaimed + { + let info = bank + .rent_collector + .collect_from_existing_account(&keypair.pubkey(), &mut account); + assert_eq!(info.account_data_len_reclaimed, data_size as u64); + } - // Collect rent for real - assert_eq!(should_collect_rent, bank.should_collect_rent()); - let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); - bank.collect_rent_eagerly(); - let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); + // Collect rent for real + assert_eq!(should_collect_rent, bank.should_collect_rent()); + let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); + bank.collect_rent_eagerly(); + let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); - let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent - - accounts_data_size_delta_before_collecting_rent; - assert!(!should_collect_rent || accounts_data_size_delta_delta < 0); - let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize; + let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent + - accounts_data_size_delta_before_collecting_rent; + assert!(!should_collect_rent || accounts_data_size_delta_delta < 0); + let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize; - // Ensure the account is reclaimed by rent collection - assert!(!should_collect_rent || reclaimed_data_size == data_size); - } + // Ensure the account is reclaimed by 
rent collection + assert!(!should_collect_rent || reclaimed_data_size == data_size); } #[test] @@ -11895,87 +11853,6 @@ fn test_feature_hashes_per_tick() { assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK6)); } -#[test_case(true)] -#[test_case(false)] -fn test_stake_account_consistency_with_rent_epoch_max_feature( - rent_epoch_max_enabled_initially: bool, -) { - // this test can be removed once set_exempt_rent_epoch_max gets activated - solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config(100 * LAMPORTS_PER_SOL); - genesis_config.rent = Rent::default(); - let mut bank = Bank::new_for_tests(&genesis_config); - let expected_initial_rent_epoch = if rent_epoch_max_enabled_initially { - bank.activate_feature(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); - RENT_EXEMPT_RENT_EPOCH - } else { - Epoch::default() - }; - - let mut pubkey_bytes_early = [0u8; 32]; - pubkey_bytes_early[31] = 2; - let stake_id1 = Pubkey::from(pubkey_bytes_early); - let vote_id = solana_sdk::pubkey::new_rand(); - let stake_account1 = crate::stakes::tests::create_stake_account(12300000, &vote_id, &stake_id1); - - // set up accounts - bank.store_account_and_update_capitalization(&stake_id1, &stake_account1); - - // create banks at a few slots - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - 0 // manually created, so default is 0 - ); - let slot = 1; - let slots_per_epoch = bank.epoch_schedule().get_slots_in_epoch(0); - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); - if !rent_epoch_max_enabled_initially { - bank.activate_feature(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); - } - let bank = Arc::new(bank); - - let slot = slots_per_epoch - 1; - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - // rent has been collected, so if rent epoch is max is activated, this will be max by now - expected_initial_rent_epoch - ); - let mut bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - - let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(1); - let slot = last_slot_in_epoch - 2; - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - expected_initial_rent_epoch - ); - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - expected_initial_rent_epoch - ); - let slot = last_slot_in_epoch - 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - RENT_EXEMPT_RENT_EPOCH - ); -} - #[test] fn test_calculate_fee_with_congestion_multiplier() { let lamports_scale: u64 = 5; diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index f945672169ca88..689fb652eeb68a 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -132,9 +132,6 @@ fn load_transaction_accounts( let mut rent_debits = RentDebits::default(); let rent_collector = callbacks.get_rent_collector(); - let set_exempt_rent_epoch_max = - feature_set.is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); - let requested_loaded_accounts_data_size_limit = get_requested_loaded_accounts_data_size_limit(tx)?; let mut accumulated_accounts_data_size: usize = 0; @@ -179,11 +176,7 @@ fn load_transaction_accounts( 
.is_active(&feature_set::disable_rent_fees_collection::id()) { let rent_due = rent_collector - .collect_from_existing_account( - key, - &mut account, - set_exempt_rent_epoch_max, - ) + .collect_from_existing_account(key, &mut account) .rent_amount; (account.data().len(), account, rent_due) @@ -192,10 +185,8 @@ fn load_transaction_accounts( // are any rent paying accounts, their `rent_epoch` won't change either. However, if the // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. - if set_exempt_rent_epoch_max - && (account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && rent_collector.get_rent_due(&account) - == RentDue::Exempt) + if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && rent_collector.get_rent_due(&account) == RentDue::Exempt { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } @@ -208,12 +199,10 @@ fn load_transaction_accounts( .unwrap_or_else(|| { account_found = false; let mut default_account = AccountSharedData::default(); - if set_exempt_rent_epoch_max { - // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). - // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account - // with this field already set would allow us to skip rent collection for these accounts. - default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } + // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). + // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account + // with this field already set would allow us to skip rent collection for these accounts. + default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); (default_account.data().len(), default_account, 0) }) }; From 56391f655d8257f61715ae345bf0f084e7a30fe8 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 7 Feb 2024 07:40:17 -0800 Subject: [PATCH 145/401] Remove unnecessary usage of RentCollector (#35121) --- accounts-db/src/accounts.rs | 27 +++------------------------ runtime/src/bank.rs | 1 - 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 0c0058703503d0..446b1df9cceb2b 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -7,7 +7,6 @@ use { accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport}, ancestors::Ancestors, nonce_info::{NonceFull, NonceInfo}, - rent_collector::RentCollector, rent_debits::RentDebits, storable_accounts::StorableAccounts, transaction_results::TransactionExecutionResult, @@ -655,18 +654,11 @@ impl Accounts { txs: &[SanitizedTransaction], res: &[TransactionExecutionResult], loaded: &mut [TransactionLoadResult], - rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, ) { - let (accounts_to_store, transactions) = self.collect_accounts_to_store( - txs, - res, - loaded, - rent_collector, - durable_nonce, - lamports_per_signature, - ); + let (accounts_to_store, transactions) = + self.collect_accounts_to_store(txs, res, loaded, durable_nonce, lamports_per_signature); self.accounts_db .store_cached_inline_update_index((slot, &accounts_to_store[..]), Some(&transactions)); } @@ -689,7 +681,6 @@ impl Accounts { txs: &'a [SanitizedTransaction], execution_results: &'a [TransactionExecutionResult], load_results: &'a mut [TransactionLoadResult], - _rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, 
) -> ( @@ -813,10 +804,7 @@ fn prepare_if_nonce_account( mod tests { use { super::*, - crate::{ - rent_collector::RentCollector, - transaction_results::{DurableNonceFee, TransactionExecutionDetails}, - }, + crate::transaction_results::{DurableNonceFee, TransactionExecutionDetails}, assert_matches::assert_matches, solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ @@ -1512,8 +1500,6 @@ mod tests { let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); - let rent_collector = RentCollector::default(); - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -1581,7 +1567,6 @@ mod tests { &txs, &execution_results, loaded.as_mut_slice(), - &rent_collector, &DurableNonce::default(), 0, ); @@ -1884,8 +1869,6 @@ mod tests { #[test] fn test_nonced_failure_accounts_rollback_from_pays() { - let rent_collector = RentCollector::default(); - let nonce_address = Pubkey::new_unique(); let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let from = keypair_from_seed(&[1; 32]).unwrap(); @@ -1962,7 +1945,6 @@ mod tests { &txs, &execution_results, loaded.as_mut_slice(), - &rent_collector, &durable_nonce, 0, ); @@ -1994,8 +1976,6 @@ mod tests { #[test] fn test_nonced_failure_accounts_rollback_nonce_pays() { - let rent_collector = RentCollector::default(); - let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let nonce_address = nonce_authority.pubkey(); let from = keypair_from_seed(&[1; 32]).unwrap(); @@ -2071,7 +2051,6 @@ mod tests { &txs, &execution_results, loaded.as_mut_slice(), - &rent_collector, &durable_nonce, 0, ); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 630dbb67f415c2..c1dffa59e4a445 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4911,7 +4911,6 @@ impl Bank { sanitized_txs, &execution_results, loaded_txs, - &self.rent_collector, &durable_nonce, lamports_per_signature, ); From 7a95e4fa90208b281018dab7f7e87f0aeb203c74 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 7 Feb 2024 16:02:16 +0000 Subject: [PATCH 146/401] uses Merkle shreds in broadcast duplicates (#35115) The commit migrates away from legacy shreds in duplicate shreds tests. 
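At each call site the change simply flips the `merkle_variant` flag from `false` to `true` (the `chained_merkle_root` argument stays `None` here). As a rough sketch of the call shape, with argument roles taken from the inline comments in the hunks below; `shredder`, `keypair`, and `entries` are assumed to be in scope and the module paths are approximate, so treat this as illustration rather than the exact in-tree code:

    use solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache};

    // Produces the data/coding shreds for one batch of entries; passing
    // `true` for `merkle_variant` selects Merkle shreds over legacy shreds.
    let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
        &keypair,
        &entries,
        true,  // is_full_slot
        None,  // chained_merkle_root: not chained at these call sites
        0,     // next_shred_index
        0,     // next_code_index
        true,  // merkle_variant
        &ReedSolomonCache::default(),
        &mut ProcessShredsStats::default(),
    );
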
--- local-cluster/tests/local_cluster.rs | 10 +++++----- .../src/broadcast_stage/broadcast_duplicates_run.rs | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index f9640066345745..6f7de16df296b1 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -5608,11 +5608,11 @@ fn test_invalid_forks_persisted_on_restart() { .entries_to_shreds( &majority_keypair, &entries, - true, // is_full_slot - None, // chained_merkle_root - 0, // next_shred_index, - 0, // next_code_index - false, // merkle_variant + true, // is_full_slot + None, // chained_merkle_root + 0, // next_shred_index, + 0, // next_code_index + true, // merkle_variant &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ) diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 8bee47068ac499..3190c039a116d0 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -176,7 +176,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { None, // chained_merkle_root self.next_shred_index, self.next_code_index, - false, // merkle_variant + true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); @@ -194,7 +194,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { None, // chained_merkle_root self.next_shred_index, self.next_code_index, - false, // merkle_variant + true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); @@ -208,7 +208,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { None, // chained_merkle_root self.next_shred_index, self.next_code_index, - false, // merkle_variant + true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); From 2c0001b530b17822c48622e5bb7588132e027421 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 7 Feb 2024 13:55:39 -0500 Subject: [PATCH 147/401] SVM: Move RewardInfo from accounts-db to Solana SDK (#35120) --- accounts-db/src/rent_debits.rs | 3 +-- accounts-db/src/stake_rewards.rs | 15 ++------------- core/src/rewards_recorder_service.rs | 3 +-- .../src/block_metadata_notifier.rs | 3 +-- .../src/block_metadata_notifier_interface.rs | 3 +-- runtime/src/bank.rs | 3 ++- runtime/src/bank/fee_distribution.rs | 2 +- runtime/src/bank/serde_snapshot.rs | 2 +- sdk/src/lib.rs | 1 + sdk/src/reward_info.rs | 12 ++++++++++++ 10 files changed, 23 insertions(+), 24 deletions(-) create mode 100644 sdk/src/reward_info.rs diff --git a/accounts-db/src/rent_debits.rs b/accounts-db/src/rent_debits.rs index 75d8eddec10dbd..588f7c67a2a929 100644 --- a/accounts-db/src/rent_debits.rs +++ b/accounts-db/src/rent_debits.rs @@ -1,6 +1,5 @@ use { - crate::stake_rewards::RewardInfo, - solana_sdk::{pubkey::Pubkey, reward_type::RewardType}, + solana_sdk::{pubkey::Pubkey, reward_info::RewardInfo, reward_type::RewardType}, std::collections::HashMap, }; diff --git a/accounts-db/src/stake_rewards.rs b/accounts-db/src/stake_rewards.rs index 9918c84747e465..712f2cb9957f1e 100644 --- a/accounts-db/src/stake_rewards.rs +++ b/accounts-db/src/stake_rewards.rs @@ -3,21 +3,10 @@ use { crate::storable_accounts::StorableAccounts, solana_sdk::{ - account::AccountSharedData, clock::Slot, pubkey::Pubkey, reward_type::RewardType, + account::AccountSharedData, clock::Slot, pubkey::Pubkey, reward_info::RewardInfo, }, }; -#[derive(Debug, PartialEq, Eq, 
Serialize, Deserialize, AbiExample, Clone, Copy)] -pub struct RewardInfo { - pub reward_type: RewardType, - /// Reward amount - pub lamports: i64, - /// Account balance in lamports after `lamports` was applied - pub post_balance: u64, - /// Vote account commission when the reward was credited, only present for voting and staking rewards - pub commission: Option, -} - #[derive(AbiExample, Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct StakeReward { pub stake_pubkey: Pubkey, @@ -94,7 +83,7 @@ impl StakeReward { Self { stake_pubkey: Pubkey::new_unique(), stake_reward_info: RewardInfo { - reward_type: RewardType::Staking, + reward_type: solana_sdk::reward_type::RewardType::Staking, lamports: rng.gen_range(1..200), post_balance: 0, /* unused atm */ commission: None, /* unused atm */ diff --git a/core/src/rewards_recorder_service.rs b/core/src/rewards_recorder_service.rs index f78b8bab260b65..3fc2c8dc5b5149 100644 --- a/core/src/rewards_recorder_service.rs +++ b/core/src/rewards_recorder_service.rs @@ -1,8 +1,7 @@ use { crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, - solana_accounts_db::stake_rewards::RewardInfo, solana_ledger::blockstore::Blockstore, - solana_sdk::{clock::Slot, pubkey::Pubkey}, + solana_sdk::{clock::Slot, pubkey::Pubkey, reward_info::RewardInfo}, solana_transaction_status::Reward, std::{ sync::{ diff --git a/geyser-plugin-manager/src/block_metadata_notifier.rs b/geyser-plugin-manager/src/block_metadata_notifier.rs index ab56cf3be81701..76d203c5e0ed44 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier.rs @@ -4,13 +4,12 @@ use { geyser_plugin_manager::GeyserPluginManager, }, log::*, - solana_accounts_db::stake_rewards::RewardInfo, solana_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaBlockInfoV3, ReplicaBlockInfoVersions, }, solana_measure::measure::Measure, solana_metrics::*, - solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey}, + solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo}, solana_transaction_status::{Reward, Rewards}, std::sync::{Arc, RwLock}, }; diff --git a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs index 465f700efe3275..bb0ffe4c7f7513 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs @@ -1,6 +1,5 @@ use { - solana_accounts_db::stake_rewards::RewardInfo, - solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey}, + solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo}, std::sync::{Arc, RwLock}, }; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c1dffa59e4a445..eeb2c63b65bf0d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -91,7 +91,7 @@ use { rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, sorted_storages::SortedStorages, - stake_rewards::{RewardInfo, StakeReward}, + stake_rewards::StakeReward, storable_accounts::StorableAccounts, transaction_results::{ TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, @@ -143,6 +143,7 @@ use { precompiles::get_precompiles, pubkey::Pubkey, rent::RentDue, + reward_info::RewardInfo, saturating_add_assign, signature::{Keypair, Signature}, slot_hashes::SlotHashes, diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index fc6d16f3b5683d..5a53a1278881fa 100644 --- 
a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,10 +1,10 @@ use { super::Bank, log::{debug, warn}, - solana_accounts_db::stake_rewards::RewardInfo, solana_sdk::{ account::{ReadableAccount, WritableAccount}, pubkey::Pubkey, + reward_info::RewardInfo, reward_type::RewardType, system_program, }, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index ba2f24c553ceef..6af86976dde926 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -605,7 +605,7 @@ mod tests { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "12WNiuA7qeLU8JFweQszX5sCnCj1fYnYV4i9DeACqhQD")] + #[frozen_abi(digest = "77zuTwvAGH5Rf28XHUNkRWsrcJ8uMyARMCZZMg9BBu5S")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 4bf36a5d271929..68fa200418fa16 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -92,6 +92,7 @@ pub mod program_utils; pub mod pubkey; pub mod quic; pub mod recent_blockhashes_account; +pub mod reward_info; pub mod reward_type; pub mod rpc_port; pub mod secp256k1_instruction; diff --git a/sdk/src/reward_info.rs b/sdk/src/reward_info.rs new file mode 100644 index 00000000000000..b3b3d4a121c3ef --- /dev/null +++ b/sdk/src/reward_info.rs @@ -0,0 +1,12 @@ +use crate::reward_type::RewardType; + +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, AbiExample, Clone, Copy)] +pub struct RewardInfo { + pub reward_type: RewardType, + /// Reward amount + pub lamports: i64, + /// Account balance in lamports after `lamports` was applied + pub post_balance: u64, + /// Vote account commission when the reward was credited, only present for voting and staking rewards + pub commission: Option, +} From 7c59786f103337e53ff8dee66764d23d7a78da3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 19:02:20 +0000 Subject: [PATCH 148/401] build(deps): bump indexmap from 2.1.0 to 2.2.2 (#35125) * build(deps): bump indexmap from 2.1.0 to 2.2.2 Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.1.0 to 2.2.2. - [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.1.0...2.2.2) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-minor ... 
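Beyond the lockfile churn, the indexmap 2.2 line deprecates the order-ambiguous removal methods (`remove_entry` has always been the swap-based variant), which is what the "call swap_remove_entry directly" change to streamer/src/nonblocking/quic.rs below addresses. A small sketch of the two removal semantics, using illustrative values and the plain indexmap API:

    use indexmap::IndexMap;

    let mut m: IndexMap<&str, u8> = IndexMap::from([("a", 1), ("b", 2), ("c", 3)]);
    // O(1): the last entry is swapped into the vacated slot, so order changes.
    m.swap_remove("a");
    assert_eq!(m.keys().copied().collect::<Vec<_>>(), ["c", "b"]);
    // shift_remove would instead preserve insertion order, at O(n) cost.
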
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files * call swap_remove_entry directly --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite Co-authored-by: yihau --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- streamer/src/nonblocking/quic.rs | 2 +- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b762b0dc7e98f8..7f944969a62b75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2298,7 +2298,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util 0.7.1", @@ -2674,9 +2674,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4931,7 +4931,7 @@ version = "0.9.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "itoa", "ryu", "serde", @@ -5275,7 +5275,7 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "lazy_static", "libsecp256k1", @@ -5755,7 +5755,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "quinn", @@ -5836,7 +5836,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "rand 0.8.5", @@ -6175,7 +6175,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "log", "lru", @@ -7267,7 +7267,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "libc", "log", @@ -7378,7 +7378,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "pickledb", "serde", @@ -7407,7 +7407,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "rayon", @@ -8618,7 +8618,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "toml_datetime", "winnow", ] @@ -8629,7 +8629,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index d0bc79a53f23f4..e522d8bb485bfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -222,7 +222,7 @@ hyper = "0.14.28" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.11" -indexmap = "2.1.0" +indexmap = "2.2.2" indicatif = "0.17.7" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2c80edb3065b1e..110c353b67c03f 100644 --- a/programs/sbf/Cargo.lock +++ 
b/programs/sbf/Cargo.lock @@ -1936,7 +1936,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util 0.7.1", @@ -2287,9 +2287,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -4348,7 +4348,7 @@ version = "0.9.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "itoa", "ryu", "serde", @@ -4619,7 +4619,7 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "lazy_static", "log", @@ -4846,7 +4846,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "quinn", @@ -4896,7 +4896,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "log", "rand 0.8.5", "rayon", @@ -5147,7 +5147,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "log", "lru", @@ -6310,7 +6310,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "libc", "log", @@ -6412,7 +6412,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "rayon", @@ -7468,7 +7468,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "toml_datetime", "winnow", ] diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index f6f2357c7702e4..225412dd08b315 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -1126,7 +1126,7 @@ impl ConnectionTable { }); let new_size = e_ref.len(); if e_ref.is_empty() { - e.remove_entry(); + e.swap_remove_entry(); } let connections_removed = old_size.saturating_sub(new_size); self.total_size = self.total_size.saturating_sub(connections_removed); From bc735fad3efe44c9bd137cfbd065d9573c887b5f Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 8 Feb 2024 03:04:16 +0800 Subject: [PATCH 149/401] chore: bump toml from 0.8.8 to 0.8.10 (#35126) --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f944969a62b75..09e2e21f5a2941 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4872,9 +4872,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -5597,7 +5597,7 @@ dependencies = [ "tar", "tempfile", "tokio", - "toml 0.8.8", + "toml 0.8.10", ] [[package]] @@ -8593,14 +8593,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.21.0", + "toml_edit 0.22.4", ] [[package]] @@ -8625,9 +8625,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.21.0" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" dependencies = [ "indexmap 2.2.2", "serde", diff --git a/Cargo.toml b/Cargo.toml index e522d8bb485bfc..a2a606d691f3a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -420,7 +420,7 @@ tokio-serde = "0.8" tokio-stream = "0.1.14" tokio-tungstenite = "0.20.1" tokio-util = "0.6" -toml = "0.8.8" +toml = "0.8.10" tonic = "0.9.2" tonic-build = "0.9.2" trees = "0.4.2" From 3ddd2352a1ccbdc4233236a68c568512936b2d3b Mon Sep 17 00:00:00 2001 From: Joe C Date: Wed, 7 Feb 2024 15:35:29 -0600 Subject: [PATCH 150/401] sdk: add `Immutable` and `IncorrectAuthority` to `ProgramError` (#35113) --- sdk/program/src/program_error.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index 0840ee16b901d7..9881ef345f0159 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -63,6 +63,10 @@ pub enum ProgramError { InvalidAccountOwner, #[error("Program arithmetic overflowed")] ArithmeticOverflow, + #[error("Account is immutable")] + Immutable, + #[error("Incorrect authority provided")] + IncorrectAuthority, } pub trait PrintProgramError { @@ -113,6 +117,8 @@ impl PrintProgramError for ProgramError { } Self::InvalidAccountOwner => msg!("Error: InvalidAccountOwner"), Self::ArithmeticOverflow => msg!("Error: ArithmeticOverflow"), + Self::Immutable => msg!("Error: Immutable"), + Self::IncorrectAuthority => msg!("Error: IncorrectAuthority"), } } } @@ -149,6 +155,8 @@ pub const MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED: u64 = to_builtin!(21); pub const BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS: u64 = to_builtin!(22); pub const INVALID_ACCOUNT_OWNER: u64 = to_builtin!(23); pub const ARITHMETIC_OVERFLOW: u64 = to_builtin!(24); +pub const IMMUTABLE: u64 = to_builtin!(25); +pub const INCORRECT_AUTHORITY: u64 = to_builtin!(26); // Warning: Any new program errors added here must also be: // - Added to the below conversions // - Added as an equivalent to InstructionError @@ -187,6 +195,8 @@ impl From for u64 { } ProgramError::InvalidAccountOwner => INVALID_ACCOUNT_OWNER, ProgramError::ArithmeticOverflow => ARITHMETIC_OVERFLOW, + ProgramError::Immutable => IMMUTABLE, + ProgramError::IncorrectAuthority => INCORRECT_AUTHORITY, ProgramError::Custom(error) => { if error == 0 { CUSTOM_ZERO @@ -227,6 +237,8 @@ impl From for ProgramError { } INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, + IMMUTABLE => Self::Immutable, + INCORRECT_AUTHORITY => Self::IncorrectAuthority, _ => Self::Custom(error as u32), } } @@ -267,6 +279,8 @@ impl TryFrom for ProgramError { } Self::Error::InvalidAccountOwner => Ok(Self::InvalidAccountOwner), Self::Error::ArithmeticOverflow => Ok(Self::ArithmeticOverflow), + Self::Error::Immutable => Ok(Self::Immutable), + Self::Error::IncorrectAuthority => Ok(Self::IncorrectAuthority), _ => Err(error), } } @@ -305,6 +319,8 @@ where } INVALID_ACCOUNT_OWNER => 
Self::InvalidAccountOwner, ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, + IMMUTABLE => Self::Immutable, + INCORRECT_AUTHORITY => Self::IncorrectAuthority, _ => { // A valid custom error has no bits set in the upper 32 if error >> BUILTIN_BIT_SHIFT == 0 { From 1b9dfd447e10e72061433fbcb41a269adf7d38ab Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 7 Feb 2024 22:14:31 +0000 Subject: [PATCH 151/401] chains Merkle shreds in broadcast duplicates (#35058) The commit migrates turbine/src/broadcast_stage/broadcast_duplicates_run.rs to use chained Merkle shreds variant. --- .../broadcast_duplicates_run.rs | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 3190c039a116d0..adca69ed4938cd 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -37,6 +37,7 @@ pub struct BroadcastDuplicatesConfig { pub(super) struct BroadcastDuplicatesRun { config: BroadcastDuplicatesConfig, current_slot: Slot, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, shred_version: u16, @@ -57,6 +58,7 @@ impl BroadcastDuplicatesRun { )); Self { config, + chained_merkle_root: Hash::default(), next_shred_index: u32::MAX, next_code_index: 0, shred_version, @@ -76,7 +78,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { fn run( &mut self, keypair: &Keypair, - _blockstore: &Blockstore, + blockstore: &Blockstore, receiver: &Receiver, socket_sender: &Sender<(Arc>, Option)>, blockstore_sender: &Sender<(Arc>, Option)>, @@ -87,6 +89,12 @@ impl BroadcastRun for BroadcastDuplicatesRun { let last_tick_height = receive_results.last_tick_height; if bank.slot() != self.current_slot { + self.chained_merkle_root = broadcast_utils::get_chained_merkle_root_from_parent( + bank.slot(), + bank.parent_slot(), + blockstore, + ) + .unwrap(); self.next_shred_index = 0; self.next_code_index = 0; self.current_slot = bank.slot(); @@ -169,18 +177,25 @@ impl BroadcastRun for BroadcastDuplicatesRun { ) .expect("Expected to create a new shredder"); + // Chained Merkle shreds are always discarded in epoch 0, due to + // feature_set::enable_chained_merkle_shreds. Below can be removed once + // the feature gated code is removed. 
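+        // When this flag is false, the `then_some(..)` calls below yield
+        // `None`, so the shredder keeps producing unchained Merkle shreds
+        // exactly as before this change.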
+ let should_chain_merkle_shreds = bank.epoch() > 0; + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), - None, // chained_merkle_root + should_chain_merkle_shreds.then_some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); - + if let Some(shred) = data_shreds.iter().max_by_key(|shred| shred.index()) { + self.chained_merkle_root = shred.merkle_root().unwrap(); + } self.next_shred_index += data_shreds.len() as u32; if let Some(index) = coding_shreds.iter().map(Shred::index).max() { self.next_code_index = index + 1; @@ -191,7 +206,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &[original_last_entry], true, - None, // chained_merkle_root + should_chain_merkle_shreds.then_some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -205,7 +220,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &duplicate_extra_last_entries, true, - None, // chained_merkle_root + should_chain_merkle_shreds.then_some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -222,6 +237,8 @@ impl BroadcastRun for BroadcastDuplicatesRun { sigs, ); + assert_eq!(original_last_data_shred.len(), 1); + assert_eq!(partition_last_data_shred.len(), 1); self.next_shred_index += 1; (original_last_data_shred, partition_last_data_shred) }); From b9ee3b475b1debbc88a78d486526bcd2c45c09f9 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 7 Feb 2024 18:10:17 -0500 Subject: [PATCH 152/401] SVM: Move RentDebits from accounts-db to Solana SDK (#35135) --- accounts-db/src/accounts.rs | 2 +- accounts-db/src/lib.rs | 1 - accounts-db/src/nonce_info.rs | 18 ++++++++---------- accounts-db/src/transaction_results.rs | 6 ++---- ledger/src/blockstore_processor.rs | 2 +- rpc/src/transaction_status_service.rs | 6 ++---- runtime/src/bank.rs | 2 +- sdk/src/lib.rs | 1 + {accounts-db => sdk}/src/rent_debits.rs | 0 svm/src/account_loader.rs | 2 +- 10 files changed, 17 insertions(+), 23 deletions(-) rename {accounts-db => sdk}/src/rent_debits.rs (100%) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 446b1df9cceb2b..6dd5b6ba90df09 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -7,7 +7,6 @@ use { accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport}, ancestors::Ancestors, nonce_info::{NonceFull, NonceInfo}, - rent_debits::RentDebits, storable_accounts::StorableAccounts, transaction_results::TransactionExecutionResult, }, @@ -24,6 +23,7 @@ use { State as NonceState, }, pubkey::Pubkey, + rent_debits::RentDebits, slot_hashes::SlotHashes, transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError}, transaction_context::{IndexOfAccount, TransactionAccount}, diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index deec44048f78ef..1af013aab982f7 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -35,7 +35,6 @@ pub mod partitioned_rewards; mod pubkey_bins; mod read_only_accounts_cache; pub mod rent_collector; -pub mod rent_debits; mod rolling_bit_field; pub mod secondary_index; pub mod shared_buffer_reader; diff --git a/accounts-db/src/nonce_info.rs b/accounts-db/src/nonce_info.rs index 8a6d3a40fc7ecc..12079b6ce641b1 100644 --- a/accounts-db/src/nonce_info.rs +++ 
b/accounts-db/src/nonce_info.rs @@ -1,13 +1,11 @@ -use { - crate::rent_debits::RentDebits, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - message::SanitizedMessage, - nonce_account, - pubkey::Pubkey, - transaction::{self, TransactionError}, - transaction_context::TransactionAccount, - }, +use solana_sdk::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + message::SanitizedMessage, + nonce_account, + pubkey::Pubkey, + rent_debits::RentDebits, + transaction::{self, TransactionError}, + transaction_context::TransactionAccount, }; pub trait NonceInfo { diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index 79efa66425aabd..f2f51170a690ac 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -5,12 +5,10 @@ )] pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList}; use { - crate::{ - nonce_info::{NonceFull, NonceInfo, NoncePartial}, - rent_debits::RentDebits, - }, + crate::nonce_info::{NonceFull, NonceInfo, NoncePartial}, solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ + rent_debits::RentDebits, transaction::{self, TransactionError}, transaction_context::TransactionReturnData, }, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index ee66f697eb705a..b450caaa54577b 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -20,7 +20,6 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, epoch_accounts_hash::EpochAccountsHash, - rent_debits::RentDebits, transaction_results::{ TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, }, @@ -49,6 +48,7 @@ use { genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, + rent_debits::RentDebits, saturating_add_assign, signature::{Keypair, Signature}, timing, diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 68640362b2182c..028ebcfb2a9047 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -212,10 +212,7 @@ pub(crate) mod tests { crossbeam_channel::unbounded, dashmap::DashMap, solana_account_decoder::parse_token::token_amount_to_ui_amount, - solana_accounts_db::{ - nonce_info::{NonceFull, NoncePartial}, - rent_debits::RentDebits, - }, + solana_accounts_db::nonce_info::{NonceFull, NoncePartial}, solana_ledger::{genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete}, solana_runtime::bank::{Bank, TransactionBalancesSet}, solana_sdk::{ @@ -227,6 +224,7 @@ pub(crate) mod tests { nonce::{self, state::DurableNonce}, nonce_account, pubkey::Pubkey, + rent_debits::RentDebits, signature::{Keypair, Signature, Signer}, system_transaction, transaction::{ diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index eeb2c63b65bf0d..8a25cdd31a23be 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -89,7 +89,6 @@ use { nonce_info::{NonceInfo, NoncePartial}, partitioned_rewards::PartitionedEpochRewardsConfig, rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, sorted_storages::SortedStorages, stake_rewards::StakeReward, storable_accounts::StorableAccounts, @@ -143,6 +142,7 @@ use { precompiles::get_precompiles, pubkey::Pubkey, rent::RentDue, + rent_debits::RentDebits, reward_info::RewardInfo, saturating_add_assign, signature::{Keypair, Signature}, diff --git 
a/sdk/src/lib.rs b/sdk/src/lib.rs index 68fa200418fa16..98576e4f36bfb6 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -92,6 +92,7 @@ pub mod program_utils; pub mod pubkey; pub mod quic; pub mod recent_blockhashes_account; +pub mod rent_debits; pub mod reward_info; pub mod reward_type; pub mod rpc_port; diff --git a/accounts-db/src/rent_debits.rs b/sdk/src/rent_debits.rs similarity index 100% rename from accounts-db/src/rent_debits.rs rename to sdk/src/rent_debits.rs diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 689fb652eeb68a..fd0b975df74764 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -10,7 +10,6 @@ use { accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, nonce_info::NonceFull, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, transaction_results::TransactionCheckResult, }, solana_program_runtime::{ @@ -29,6 +28,7 @@ use { nonce::State as NonceState, pubkey::Pubkey, rent::RentDue, + rent_debits::RentDebits, saturating_add_assign, sysvar::{self, instructions::construct_instructions_data}, transaction::{Result, SanitizedTransaction, TransactionError}, From 28a320d81571f776c439a4090aedcceffaf5fbbb Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 7 Feb 2024 16:38:42 -0800 Subject: [PATCH 153/401] SVM: Move `RentCollector` to sdk (#35122) --- accounts-bench/src/main.rs | 4 ++-- accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/accounts_hash.rs | 2 +- accounts-db/src/lib.rs | 1 - accounts-db/src/tiered_storage.rs | 2 +- accounts-db/src/tiered_storage/hot.rs | 6 ++++-- runtime/benches/accounts.rs | 2 +- runtime/src/bank.rs | 2 +- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/bank/tests.rs | 2 +- runtime/src/serde_snapshot.rs | 2 +- runtime/src/serde_snapshot/tests.rs | 2 +- runtime/src/snapshot_package.rs | 6 ++++-- sdk/src/lib.rs | 1 + {accounts-db => sdk}/src/rent_collector.rs | 17 ++++++++++++----- svm/src/account_loader.rs | 5 +++-- svm/src/transaction_processor.rs | 2 +- 17 files changed, 36 insertions(+), 24 deletions(-) rename {accounts-db => sdk}/src/rent_collector.rs (97%) diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 88d15ea72482aa..9437485e1e6533 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -14,11 +14,11 @@ use { }, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, - rent_collector::RentCollector, }, solana_measure::measure::Measure, solana_sdk::{ - genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule, + genesis_config::ClusterType, pubkey::Pubkey, rent_collector::RentCollector, + sysvar::epoch_schedule::EpochSchedule, }, std::{env, fs, path::PathBuf, sync::Arc}, }; diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2853bb7a05edb6..c89cf45e320971 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -67,7 +67,6 @@ use { partitioned_rewards::{PartitionedEpochRewardsConfig, TestPartitionedEpochRewards}, pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, - rent_collector::RentCollector, sorted_storages::SortedStorages, storable_accounts::StorableAccounts, u64_align, utils, @@ -92,6 +91,7 @@ use { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, + rent_collector::RentCollector, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs 
index 78662a04157744..cb75369d52d182 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -4,7 +4,6 @@ use { active_stats::{ActiveStatItem, ActiveStats}, ancestors::Ancestors, pubkey_bins::PubkeyBinCalculator24, - rent_collector::RentCollector, }, bytemuck::{Pod, Zeroable}, log::*, @@ -14,6 +13,7 @@ use { solana_sdk::{ hash::{Hash, Hasher}, pubkey::Pubkey, + rent_collector::RentCollector, slot_history::Slot, sysvar::epoch_schedule::EpochSchedule, }, diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 1af013aab982f7..fe20e0aab2766c 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -34,7 +34,6 @@ pub mod nonce_info; pub mod partitioned_rewards; mod pubkey_bins; mod read_only_accounts_cache; -pub mod rent_collector; mod rolling_bit_field; pub mod secondary_index; pub mod shared_buffer_reader; diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 92a4f0869e0c2a..f0a23150e2fa70 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -165,12 +165,12 @@ mod tests { hot::HOT_FORMAT, index::IndexOffset, owners::OWNER_NO_OWNER, - solana_accounts_db::rent_collector::RENT_EXEMPT_RENT_EPOCH, solana_sdk::{ account::{Account, AccountSharedData}, clock::Slot, hash::Hash, pubkey::Pubkey, + rent_collector::RENT_EXEMPT_RENT_EPOCH, system_instruction::MAX_PERMITTED_DATA_LENGTH, }, std::{ diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 54091313cb9de7..7db9e90d65d353 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -5,7 +5,6 @@ use { account_storage::meta::{StoredAccountInfo, StoredAccountMeta}, accounts_file::MatchAccountOwnerError, accounts_hash::AccountHash, - rent_collector::RENT_EXEMPT_RENT_EPOCH, tiered_storage::{ byte_block, file::TieredStorageFile, @@ -22,7 +21,10 @@ use { bytemuck::{Pod, Zeroable}, memmap2::{Mmap, MmapOptions}, modular_bitfield::prelude::*, - solana_sdk::{account::ReadableAccount, pubkey::Pubkey, stake_history::Epoch}, + solana_sdk::{ + account::ReadableAccount, pubkey::Pubkey, rent_collector::RENT_EXEMPT_RENT_EPOCH, + stake_history::Epoch, + }, std::{borrow::Borrow, fs::OpenOptions, option::Option, path::Path}, }; diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 7efc0a11ac0d75..fb81ce4716553e 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -16,7 +16,6 @@ use { accounts_index::{AccountSecondaryIndexes, ScanConfig}, ancestors::Ancestors, epoch_accounts_hash::EpochAccountsHash, - rent_collector::RentCollector, }, solana_runtime::bank::*, solana_sdk::{ @@ -25,6 +24,7 @@ use { hash::Hash, lamports::LamportsError, pubkey::Pubkey, + rent_collector::RentCollector, sysvar::epoch_schedule::EpochSchedule, }, std::{ diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8a25cdd31a23be..52546ec4efe703 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -88,7 +88,6 @@ use { epoch_accounts_hash::EpochAccountsHash, nonce_info::{NonceInfo, NoncePartial}, partitioned_rewards::PartitionedEpochRewardsConfig, - rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, sorted_storages::SortedStorages, stake_rewards::StakeReward, storable_accounts::StorableAccounts, @@ -142,6 +141,7 @@ use { precompiles::get_precompiles, pubkey::Pubkey, rent::RentDue, + rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, reward_info::RewardInfo, 
saturating_add_assign, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 6af86976dde926..8b78efbcf3e11a 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -605,7 +605,7 @@ mod tests { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "77zuTwvAGH5Rf28XHUNkRWsrcJ8uMyARMCZZMg9BBu5S")] + #[frozen_abi(digest = "7BH2s2Y1yKy396c3ixC4TTyvvpkyenAvWDSiZvY5yb7P")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 8c1f35e2d99ac0..19eca1d61ad8a8 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -36,7 +36,6 @@ use { inline_spl_token, nonce_info::NonceFull, partitioned_rewards::TestPartitionedEpochRewards, - rent_collector::RENT_EXEMPT_RENT_EPOCH, transaction_results::DurableNonceFee, }, solana_logger, @@ -88,6 +87,7 @@ use { program::MAX_RETURN_DATA, pubkey::Pubkey, rent::Rent, + rent_collector::RENT_EXEMPT_RENT_EPOCH, reward_type::RewardType, secp256k1_program, signature::{keypair_from_seed, Keypair, Signature, Signer}, diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index e38ea904686b40..4b066976d49048 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -25,7 +25,6 @@ use { accounts_update_notifier_interface::AccountsUpdateNotifier, blockhash_queue::BlockhashQueue, epoch_accounts_hash::EpochAccountsHash, - rent_collector::RentCollector, }, solana_measure::measure::Measure, solana_sdk::{ @@ -38,6 +37,7 @@ use { hash::Hash, inflation::Inflation, pubkey::Pubkey, + rent_collector::RentCollector, }, solana_svm::runtime_config::RuntimeConfig, std::{ diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index f9d45b372f5fc4..510069c92662fc 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -23,7 +23,6 @@ mod serde_snapshot_tests { accounts_hash::AccountsHash, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, - rent_collector::RentCollector, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -32,6 +31,7 @@ mod serde_snapshot_tests { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, + rent_collector::RentCollector, }, std::{ io::{BufReader, Cursor, Read, Write}, diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 99af3ebbe6ee2a..55a4b13744b4f4 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -11,9 +11,11 @@ use { accounts_db::{AccountStorageEntry, AccountsDb}, accounts_hash::{AccountsHash, AccountsHashKind}, epoch_accounts_hash::EpochAccountsHash, - rent_collector::RentCollector, }, - solana_sdk::{clock::Slot, feature_set, sysvar::epoch_schedule::EpochSchedule}, + solana_sdk::{ + clock::Slot, feature_set, rent_collector::RentCollector, + sysvar::epoch_schedule::EpochSchedule, + }, std::{ path::{Path, PathBuf}, sync::Arc, diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 98576e4f36bfb6..233154ce72bcae 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -92,6 +92,7 @@ pub mod program_utils; pub mod pubkey; pub mod quic; pub mod recent_blockhashes_account; +pub mod rent_collector; pub mod rent_debits; pub mod reward_info; pub mod reward_type; diff --git a/accounts-db/src/rent_collector.rs b/sdk/src/rent_collector.rs similarity index 
97% rename from accounts-db/src/rent_collector.rs rename to sdk/src/rent_collector.rs index 0bdb03291e8c5f..1de6ce19950dbd 100644 --- a/accounts-db/src/rent_collector.rs +++ b/sdk/src/rent_collector.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "full")] + //! calculate and collect rent from Accounts use solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, @@ -87,7 +89,10 @@ impl RentCollector { } else { let account_rent_epoch = account.rent_epoch(); let slots_elapsed: u64 = (account_rent_epoch..=self.epoch) - .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1)) + .map(|epoch| { + self.epoch_schedule + .get_slots_in_epoch(epoch.saturating_add(1)) + }) .sum(); // avoid infinite rent in rust 1.45 @@ -165,7 +170,7 @@ impl RentCollector { RentDue::Paying(0) => RentResult::NoRentCollectionNow, // Rent is collected for next epoch. RentDue::Paying(rent_due) => RentResult::CollectRent { - new_rent_epoch: self.epoch + 1, + new_rent_epoch: self.epoch.saturating_add(1), rent_due, }, } @@ -185,14 +190,16 @@ impl std::ops::Add for CollectedInfo { type Output = Self; fn add(self, other: Self) -> Self { Self { - rent_amount: self.rent_amount + other.rent_amount, - account_data_len_reclaimed: self.account_data_len_reclaimed - + other.account_data_len_reclaimed, + rent_amount: self.rent_amount.saturating_add(other.rent_amount), + account_data_len_reclaimed: self + .account_data_len_reclaimed + .saturating_add(other.account_data_len_reclaimed), } } } impl std::ops::AddAssign for CollectedInfo { + #![allow(clippy::arithmetic_side_effects)] fn add_assign(&mut self, other: Self) { *self = *self + other; } diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index fd0b975df74764..947d77dd2ac150 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -9,7 +9,6 @@ use { solana_accounts_db::{ accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, nonce_info::NonceFull, - rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, transaction_results::TransactionCheckResult, }, solana_program_runtime::{ @@ -28,6 +27,7 @@ use { nonce::State as NonceState, pubkey::Pubkey, rent::RentDue, + rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, saturating_add_assign, sysvar::{self, instructions::construct_instructions_data}, @@ -453,7 +453,7 @@ mod tests { nonce::state::Versions as NonceVersions, solana_accounts_db::{ accounts::Accounts, accounts_db::AccountsDb, accounts_file::MatchAccountOwnerError, - ancestors::Ancestors, rent_collector::RentCollector, + ancestors::Ancestors, }, solana_program_runtime::{ compute_budget_processor, @@ -470,6 +470,7 @@ mod tests { message::{Message, SanitizedMessage}, nonce, rent::Rent, + rent_collector::RentCollector, signature::{Keypair, Signer}, system_program, sysvar, transaction::{Result, Transaction, TransactionError}, diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 837dc5e7fd4ce8..be874dc9f9128f 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -9,7 +9,6 @@ use { solana_accounts_db::{ accounts::{LoadedTransaction, TransactionLoadResult}, accounts_file::MatchAccountOwnerError, - rent_collector::RentCollector, transaction_results::{ DurableNonceFee, TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, @@ -43,6 +42,7 @@ use { message::SanitizedMessage, native_loader, pubkey::Pubkey, + rent_collector::RentCollector, saturating_add_assign, transaction::{self, SanitizedTransaction, 
TransactionError}, transaction_context::{ExecutionRecord, TransactionContext}, From 9b63ac8d7afe4253a7d0a99efaed9a9b907047a7 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Thu, 8 Feb 2024 07:32:19 -0800 Subject: [PATCH 154/401] SVM: Remove dependency on MatchAccountOwnerError (#35144) --- runtime/src/bank.rs | 8 ++------ svm/src/account_loader.rs | 13 +++---------- svm/src/transaction_processor.rs | 9 ++------- 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 52546ec4efe703..a58d28770e18ad 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -76,7 +76,6 @@ use { AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, }, - accounts_file::MatchAccountOwnerError, accounts_hash::{ AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, }, @@ -7488,15 +7487,12 @@ impl Bank { } impl TransactionProcessingCallback for Bank { - fn account_matches_owners( - &self, - account: &Pubkey, - owners: &[Pubkey], - ) -> std::result::Result { + fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { self.rc .accounts .accounts_db .account_matches_owners(&self.ancestors, account, owners) + .ok() } fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 947d77dd2ac150..8bae7368d8aec2 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -451,10 +451,7 @@ mod tests { use { super::*, nonce::state::Versions as NonceVersions, - solana_accounts_db::{ - accounts::Accounts, accounts_db::AccountsDb, accounts_file::MatchAccountOwnerError, - ancestors::Ancestors, - }, + solana_accounts_db::{accounts::Accounts, accounts_db::AccountsDb, ancestors::Ancestors}, solana_program_runtime::{ compute_budget_processor, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, @@ -487,12 +484,8 @@ mod tests { } impl TransactionProcessingCallback for TestCallbacks { - fn account_matches_owners( - &self, - _account: &Pubkey, - _owners: &[Pubkey], - ) -> std::result::Result { - Err(MatchAccountOwnerError::UnableToLoad) + fn account_matches_owners(&self, _account: &Pubkey, _owners: &[Pubkey]) -> Option { + None } fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index be874dc9f9128f..98efdefe582b4e 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -8,7 +8,6 @@ use { percentage::Percentage, solana_accounts_db::{ accounts::{LoadedTransaction, TransactionLoadResult}, - accounts_file::MatchAccountOwnerError, transaction_results::{ DurableNonceFee, TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, @@ -70,11 +69,7 @@ pub struct LoadAndExecuteSanitizedTransactionsOutput { } pub trait TransactionProcessingCallback { - fn account_matches_owners( - &self, - account: &Pubkey, - owners: &[Pubkey], - ) -> std::result::Result; + fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option; fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; @@ -340,7 +335,7 @@ impl TransactionBatchProcessor { saturating_add_assign!(*count, 1); } Entry::Vacant(entry) => { - if let Ok(index) = + if let Some(index) = callbacks.account_matches_owners(key, program_owners) { program_owners From eeb0cf1ea8db812e1ac65571962f6b0e918dabba Mon Sep 17 00:00:00 2001 
From: Dmitri Makarov Date: Thu, 8 Feb 2024 15:01:15 -0500 Subject: [PATCH 155/401] SVM: Move nonce_info from accounts-db to Solana SDK (#35138) --- accounts-db/src/accounts.rs | 2 +- accounts-db/src/lib.rs | 1 - accounts-db/src/transaction_results.rs | 2 +- rpc/src/transaction_status_service.rs | 2 +- runtime/src/bank.rs | 2 +- runtime/src/bank/tests.rs | 2 +- sdk/src/lib.rs | 1 + {accounts-db => sdk}/src/nonce_info.rs | 5 +++-- svm/src/account_loader.rs | 2 +- 9 files changed, 10 insertions(+), 9 deletions(-) rename {accounts-db => sdk}/src/nonce_info.rs (99%) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 6dd5b6ba90df09..9b65fc803d937e 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -6,7 +6,6 @@ use { }, accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport}, ancestors::Ancestors, - nonce_info::{NonceFull, NonceInfo}, storable_accounts::StorableAccounts, transaction_results::TransactionExecutionResult, }, @@ -22,6 +21,7 @@ use { state::{DurableNonce, Versions as NonceVersions}, State as NonceState, }, + nonce_info::{NonceFull, NonceInfo}, pubkey::Pubkey, rent_debits::RentDebits, slot_hashes::SlotHashes, diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index fe20e0aab2766c..3016c6252ac612 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -30,7 +30,6 @@ pub mod hardened_unpack; pub mod in_mem_accounts_index; pub mod inline_spl_token; pub mod inline_spl_token_2022; -pub mod nonce_info; pub mod partitioned_rewards; mod pubkey_bins; mod read_only_accounts_cache; diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index f2f51170a690ac..15d95592808028 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -5,9 +5,9 @@ )] pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList}; use { - crate::nonce_info::{NonceFull, NonceInfo, NoncePartial}, solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ + nonce_info::{NonceFull, NonceInfo, NoncePartial}, rent_debits::RentDebits, transaction::{self, TransactionError}, transaction_context::TransactionReturnData, diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 028ebcfb2a9047..82c7d48f01f21e 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -212,7 +212,6 @@ pub(crate) mod tests { crossbeam_channel::unbounded, dashmap::DashMap, solana_account_decoder::parse_token::token_amount_to_ui_amount, - solana_accounts_db::nonce_info::{NonceFull, NoncePartial}, solana_ledger::{genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete}, solana_runtime::bank::{Bank, TransactionBalancesSet}, solana_sdk::{ @@ -223,6 +222,7 @@ pub(crate) mod tests { message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, nonce::{self, state::DurableNonce}, nonce_account, + nonce_info::{NonceFull, NoncePartial}, pubkey::Pubkey, rent_debits::RentDebits, signature::{Keypair, Signature, Signer}, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index a58d28770e18ad..f8291f682c2256 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -85,7 +85,6 @@ use { ancestors::{Ancestors, AncestorsForSerialization}, blockhash_queue::BlockhashQueue, epoch_accounts_hash::EpochAccountsHash, - nonce_info::{NonceInfo, NoncePartial}, partitioned_rewards::PartitionedEpochRewardsConfig, sorted_storages::SortedStorages, 
stake_rewards::StakeReward, @@ -136,6 +135,7 @@ use { native_token::LAMPORTS_PER_SOL, nonce::{self, state::DurableNonce, NONCED_TX_MARKER_IX_INDEX}, nonce_account, + nonce_info::{NonceInfo, NoncePartial}, packet::PACKET_DATA_SIZE, precompiles::get_precompiles, pubkey::Pubkey, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 19eca1d61ad8a8..f6db68d3c0c0c1 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -34,7 +34,6 @@ use { accounts_partition::{self, PartitionIndex, RentPayingAccountsByPartition}, ancestors::Ancestors, inline_spl_token, - nonce_info::NonceFull, partitioned_rewards::TestPartitionedEpochRewards, transaction_results::DurableNonceFee, }, @@ -82,6 +81,7 @@ use { native_loader, native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, nonce::{self, state::DurableNonce}, + nonce_info::NonceFull, packet::PACKET_DATA_SIZE, poh_config::PohConfig, program::MAX_RETURN_DATA, diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 233154ce72bcae..7c6b643884e449 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -84,6 +84,7 @@ pub mod log; pub mod native_loader; pub mod net; pub mod nonce_account; +pub mod nonce_info; pub mod offchain_message; pub mod packet; pub mod poh_config; diff --git a/accounts-db/src/nonce_info.rs b/sdk/src/nonce_info.rs similarity index 99% rename from accounts-db/src/nonce_info.rs rename to sdk/src/nonce_info.rs index 12079b6ce641b1..585f9fa2e3a687 100644 --- a/accounts-db/src/nonce_info.rs +++ b/sdk/src/nonce_info.rs @@ -1,4 +1,5 @@ -use solana_sdk::{ +#![cfg(feature = "full")] +use crate::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, message::SanitizedMessage, nonce_account, @@ -118,7 +119,7 @@ impl NonceInfo for NonceFull { mod tests { use { super::*, - solana_sdk::{ + crate::{ hash::Hash, instruction::Instruction, message::Message, diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 8bae7368d8aec2..5a3b0d7cd590a9 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -8,7 +8,6 @@ use { log::warn, solana_accounts_db::{ accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, - nonce_info::NonceFull, transaction_results::TransactionCheckResult, }, solana_program_runtime::{ @@ -25,6 +24,7 @@ use { message::SanitizedMessage, native_loader, nonce::State as NonceState, + nonce_info::NonceFull, pubkey::Pubkey, rent::RentDue, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, From 0cfb06f745d0fc8e54281ffbd6dfc1f70b00f027 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 8 Feb 2024 23:06:00 +0000 Subject: [PATCH 156/401] adds rollout path for chained Merkle shreds (#35076) The commit adds should_chain_merkle_shreds to incrementally roll out chained Merkle shreds to clusters. --- ledger/src/shred/stats.rs | 9 +++ .../src/broadcast_stage/broadcast_utils.rs | 1 + .../broadcast_stage/standard_broadcast_run.rs | 60 ++++++++++++++++--- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/ledger/src/shred/stats.rs b/ledger/src/shred/stats.rs index 5b4a75a2489bbb..60dfa9a79859c2 100644 --- a/ledger/src/shred/stats.rs +++ b/ledger/src/shred/stats.rs @@ -23,6 +23,8 @@ pub struct ProcessShredsStats { num_data_shreds_hist: [usize; 5], // If the blockstore already has shreds for the broadcast slot. pub num_extant_slots: u64, + // When looking up chained merkle root from parent slot fails. 
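+    // Broadcast then falls back to `Hash::default()` as the chained root;
+    // see `standard_broadcast_run.rs` later in this patch, which increments
+    // this counter.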
+ pub err_unknown_chained_merkle_root: u64, pub(crate) data_buffer_residual: usize, pub num_merkle_data_shreds: usize, pub num_merkle_coding_shreds: usize, @@ -89,6 +91,11 @@ impl ProcessShredsStats { ("sign_coding_time", self.sign_coding_elapsed, i64), ("coding_send_time", self.coding_send_elapsed, i64), ("num_extant_slots", self.num_extant_slots, i64), + ( + "err_unknown_chained_merkle_root", + self.err_unknown_chained_merkle_root, + i64 + ), ("data_buffer_residual", self.data_buffer_residual, i64), ("num_data_shreds_07", self.num_data_shreds_hist[0], i64), ("num_data_shreds_15", self.num_data_shreds_hist[1], i64), @@ -161,6 +168,7 @@ impl AddAssign for ProcessShredsStats { coalesce_elapsed, num_data_shreds_hist, num_extant_slots, + err_unknown_chained_merkle_root, data_buffer_residual, num_merkle_data_shreds, num_merkle_coding_shreds, @@ -175,6 +183,7 @@ impl AddAssign for ProcessShredsStats { self.get_leader_schedule_elapsed += get_leader_schedule_elapsed; self.coalesce_elapsed += coalesce_elapsed; self.num_extant_slots += num_extant_slots; + self.err_unknown_chained_merkle_root += err_unknown_chained_merkle_root; self.data_buffer_residual += data_buffer_residual; self.num_merkle_data_shreds += num_merkle_data_shreds; self.num_merkle_coding_shreds += num_merkle_coding_shreds; diff --git a/turbine/src/broadcast_stage/broadcast_utils.rs b/turbine/src/broadcast_stage/broadcast_utils.rs index 3468a86dfd64ff..be231581e7fbfe 100644 --- a/turbine/src/broadcast_stage/broadcast_utils.rs +++ b/turbine/src/broadcast_stage/broadcast_utils.rs @@ -28,6 +28,7 @@ pub(super) struct ReceiveResults { #[derive(Clone)] pub struct UnfinishedSlotInfo { + pub(super) chained_merkle_root: Hash, pub next_shred_index: u32, pub(crate) next_code_index: u32, pub slot: Slot, diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index e2b8871b4bc3c2..6378c0df40a8d3 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -14,6 +14,8 @@ use { shred::{shred_code, ProcessShredsStats, ReedSolomonCache, Shred, ShredFlags, Shredder}, }, solana_sdk::{ + genesis_config::ClusterType, + hash::Hash, signature::Keypair, timing::{duration_as_us, AtomicInterval}, }, @@ -69,6 +71,7 @@ impl StandardBroadcastRun { &mut self, keypair: &Keypair, max_ticks_in_slot: u8, + cluster_type: ClusterType, stats: &mut ProcessShredsStats, ) -> Vec { const SHRED_TICK_REFERENCE_MASK: u8 = ShredFlags::SHRED_TICK_REFERENCE_MASK.bits(); @@ -85,7 +88,8 @@ impl StandardBroadcastRun { keypair, &[], // entries true, // is_last_in_slot, - None, // chained_merkle_root + should_chain_merkle_shreds(state.slot, cluster_type) + .then_some(state.chained_merkle_root), state.next_shred_index, state.next_code_index, true, // merkle_variant @@ -110,6 +114,7 @@ impl StandardBroadcastRun { blockstore: &Blockstore, reference_tick: u8, is_slot_end: bool, + cluster_type: ClusterType, process_stats: &mut ProcessShredsStats, max_data_shreds_per_slot: u32, max_code_shreds_per_slot: u32, @@ -121,8 +126,12 @@ impl StandardBroadcastRun { BroadcastError, > { let (slot, parent_slot) = self.current_slot_and_parent.unwrap(); - let (next_shred_index, next_code_index) = match &self.unfinished_slot { - Some(state) => (state.next_shred_index, state.next_code_index), + let (next_shred_index, next_code_index, chained_merkle_root) = match &self.unfinished_slot { + Some(state) => ( + state.next_shred_index, + state.next_code_index, + 
state.chained_merkle_root, + ), None => { // If the blockstore has shreds for the slot, it should not // recreate the slot: @@ -135,7 +144,17 @@ impl StandardBroadcastRun { return Ok((Vec::default(), Vec::default())); } } - (0u32, 0u32) + let chained_merkle_root = broadcast_utils::get_chained_merkle_root_from_parent( + slot, + parent_slot, + blockstore, + ) + .unwrap_or_else(|err| { + error!("Unknown chained Merkle root: {err}"); + process_stats.err_unknown_chained_merkle_root += 1; + Hash::default() + }); + (0u32, 0u32, chained_merkle_root) } }; let shredder = @@ -144,7 +163,7 @@ impl StandardBroadcastRun { keypair, entries, is_slot_end, - None, // chained_merkle_root + should_chain_merkle_shreds(slot, cluster_type).then_some(chained_merkle_root), next_shred_index, next_code_index, true, // merkle_variant @@ -153,6 +172,10 @@ impl StandardBroadcastRun { ); process_stats.num_merkle_data_shreds += data_shreds.len(); process_stats.num_merkle_coding_shreds += coding_shreds.len(); + let chained_merkle_root = match data_shreds.iter().max_by_key(|shred| shred.index()) { + None => chained_merkle_root, + Some(shred) => shred.merkle_root().unwrap(), + }; let next_shred_index = match data_shreds.iter().map(Shred::index).max() { Some(index) => index + 1, None => next_shred_index, @@ -169,6 +192,7 @@ impl StandardBroadcastRun { return Err(BroadcastError::TooManyShreds); } self.unfinished_slot = Some(UnfinishedSlotInfo { + chained_merkle_root, next_shred_index, next_code_index, slot, @@ -232,10 +256,15 @@ impl StandardBroadcastRun { let mut process_stats = ProcessShredsStats::default(); let mut to_shreds_time = Measure::start("broadcast_to_shreds"); + let cluster_type = bank.cluster_type(); // 1) Check if slot was interrupted - let prev_slot_shreds = - self.finish_prev_slot(keypair, bank.ticks_per_slot() as u8, &mut process_stats); + let prev_slot_shreds = self.finish_prev_slot( + keypair, + bank.ticks_per_slot() as u8, + cluster_type, + &mut process_stats, + ); // 2) Convert entries to shreds and coding shreds let is_last_in_slot = last_tick_height == bank.max_tick_height(); @@ -247,6 +276,7 @@ impl StandardBroadcastRun { blockstore, reference_tick as u8, is_last_in_slot, + cluster_type, &mut process_stats, blockstore::MAX_DATA_SHREDS_PER_SLOT as u32, shred_code::MAX_CODE_SHREDS_PER_SLOT as u32, @@ -497,10 +527,15 @@ impl BroadcastRun for StandardBroadcastRun { } } +fn should_chain_merkle_shreds(_slot: Slot, _cluster_type: ClusterType) -> bool { + false +} + #[cfg(test)] mod test { use { super::*, + rand::Rng, solana_entry::entry::create_ticks, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -510,6 +545,7 @@ mod test { solana_runtime::bank::Bank, solana_sdk::{ genesis_config::GenesisConfig, + hash::Hash, signature::{Keypair, Signer}, }, solana_streamer::socket::SocketAddrSpace, @@ -569,6 +605,7 @@ mod test { let slot = 1; let parent = 0; run.unfinished_slot = Some(UnfinishedSlotInfo { + chained_merkle_root: Hash::new_from_array(rand::thread_rng().gen()), next_shred_index, next_code_index: 17, slot, @@ -580,7 +617,12 @@ mod test { run.current_slot_and_parent = Some((4, 2)); // Slot 2 interrupted slot 1 - let shreds = run.finish_prev_slot(&keypair, 0, &mut ProcessShredsStats::default()); + let shreds = run.finish_prev_slot( + &keypair, + 0, // max_ticks_in_slot + ClusterType::Development, + &mut ProcessShredsStats::default(), + ); let shred = shreds .first() .expect("Expected a shred that signals an interrupt"); @@ -831,6 +873,7 @@ mod test { &blockstore, 0, false, + 
ClusterType::Development, &mut stats, 1000, 1000, @@ -846,6 +889,7 @@ mod test { &blockstore, 0, false, + ClusterType::Development, &mut stats, 10, 10, From 245d1c408735fdfc999276696dda1697e56c2795 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Thu, 8 Feb 2024 21:13:00 -0500 Subject: [PATCH 157/401] SVM: Move TransactionCheckResult definition from accounts-db to SVM (#35153) --- accounts-db/src/transaction_results.rs | 4 +--- core/src/banking_stage/consumer.rs | 4 ++-- runtime/src/bank.rs | 4 ++-- svm/src/account_loader.rs | 11 +++++------ svm/src/transaction_processor.rs | 9 +++++---- 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index 15d95592808028..d213d7dab264e0 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -7,15 +7,13 @@ pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList} use { solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ - nonce_info::{NonceFull, NonceInfo, NoncePartial}, + nonce_info::{NonceFull, NonceInfo}, rent_debits::RentDebits, transaction::{self, TransactionError}, transaction_context::TransactionReturnData, }, }; -pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); - pub struct TransactionResults { pub fee_collection_results: Vec>, pub execution_results: Vec, diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 01432baa447793..938a5dd52a2549 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -9,7 +9,6 @@ use { BankingStageStats, }, itertools::Itertools, - solana_accounts_db::transaction_results::TransactionCheckResult, solana_ledger::token_balances::collect_token_balances, solana_measure::{measure::Measure, measure_us}, solana_poh::poh_recorder::{ @@ -33,7 +32,8 @@ use { transaction::{self, AddressLoader, SanitizedTransaction, TransactionError}, }, solana_svm::{ - account_loader::validate_fee_payer, transaction_error_metrics::TransactionErrorMetrics, + account_loader::{validate_fee_payer, TransactionCheckResult}, + transaction_error_metrics::TransactionErrorMetrics, }, std::{ sync::{atomic::Ordering, Arc}, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f8291f682c2256..57febbdfda95c2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -90,8 +90,7 @@ use { stake_rewards::StakeReward, storable_accounts::StorableAccounts, transaction_results::{ - TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, - TransactionResults, + TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, }, }, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, @@ -161,6 +160,7 @@ use { self, InflationPointCalculationEvent, PointValue, StakeStateV2, }, solana_svm::{ + account_loader::TransactionCheckResult, account_overrides::AccountOverrides, runtime_config::RuntimeConfig, transaction_error_metrics::TransactionErrorMetrics, diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 5a3b0d7cd590a9..cfe0b069f156ae 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -6,10 +6,7 @@ use { }, itertools::Itertools, log::warn, - solana_accounts_db::{ - accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, - transaction_results::TransactionCheckResult, - }, + solana_accounts_db::accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, 
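+    // `TransactionCheckResult` is no longer imported from accounts-db; it is
+    // now defined as a type alias in this module (see below).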
solana_program_runtime::{ compute_budget_processor::process_compute_budget_instructions, loaded_programs::LoadedProgramsForTxBatch, @@ -24,20 +21,22 @@ use { message::SanitizedMessage, native_loader, nonce::State as NonceState, - nonce_info::NonceFull, + nonce_info::{NonceFull, NoncePartial}, pubkey::Pubkey, rent::RentDue, rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, saturating_add_assign, sysvar::{self, instructions::construct_instructions_data}, - transaction::{Result, SanitizedTransaction, TransactionError}, + transaction::{self, Result, SanitizedTransaction, TransactionError}, transaction_context::IndexOfAccount, }, solana_system_program::{get_system_account_kind, SystemAccountKind}, std::{collections::HashMap, num::NonZeroUsize}, }; +pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); + pub fn load_accounts( callbacks: &CB, txs: &[SanitizedTransaction], diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 98efdefe582b4e..71fc4e8e8a46b2 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -1,7 +1,9 @@ use { crate::{ - account_loader::load_accounts, account_overrides::AccountOverrides, - runtime_config::RuntimeConfig, transaction_account_state_info::TransactionAccountStateInfo, + account_loader::{load_accounts, TransactionCheckResult}, + account_overrides::AccountOverrides, + runtime_config::RuntimeConfig, + transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, }, log::debug, @@ -9,8 +11,7 @@ use { solana_accounts_db::{ accounts::{LoadedTransaction, TransactionLoadResult}, transaction_results::{ - DurableNonceFee, TransactionCheckResult, TransactionExecutionDetails, - TransactionExecutionResult, + DurableNonceFee, TransactionExecutionDetails, TransactionExecutionResult, }, }, solana_measure::measure::Measure, From 41f97d7d09f2cfbcea0350375afade836bc4695e Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 8 Feb 2024 20:43:11 -0600 Subject: [PATCH 158/401] ledger-tool: Add additional modes for accounts subcommand (#34925) - Add mode to output individual pubkeys - Add mode to output program accounts --- ledger-tool/src/main.rs | 38 +++++++++++++- ledger-tool/src/output.rs | 101 +++++++++++++++++++++++++++++++------- runtime/src/bank.rs | 10 +++- 3 files changed, 128 insertions(+), 21 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index d4a5a3eb18ea69..500a64173a25c4 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -6,7 +6,9 @@ use { blockstore::*, ledger_path::*, ledger_utils::*, - output::{output_account, AccountsOutputConfig, AccountsOutputStreamer}, + output::{ + output_account, AccountsOutputConfig, AccountsOutputMode, AccountsOutputStreamer, + }, program::*, }, clap::{ @@ -1312,6 +1314,7 @@ fn main() { .arg(&geyser_plugin_args) .arg(&accounts_data_encoding_arg) .arg(&use_snapshot_archives_at_startup) + .arg(&max_genesis_archive_unpacked_size_arg) .arg( Arg::with_name("include_sysvars") .long("include-sysvars") @@ -1333,7 +1336,27 @@ fn main() { .takes_value(false) .help("Do not print account data when printing account contents."), ) - .arg(&max_genesis_archive_unpacked_size_arg), + .arg( + Arg::with_name("account") + .long("account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help( + "Limit output to accounts corresponding to the specified pubkey(s), \ + may be specified multiple times", + ), + ) + .arg( 
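+                    // Like --account above, the value is validated as a
+                    // pubkey; `conflicts_with` makes the two modes mutually
+                    // exclusive.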
+ Arg::with_name("program_accounts") + .long("program-accounts") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .conflicts_with("account") + .help("Limit output to accounts owned by the provided program pubkey"), + ), ) .subcommand( SubCommand::with_name("capitalization") @@ -2179,7 +2202,18 @@ fn main() { let include_account_contents = !arg_matches.is_present("no_account_contents"); let include_account_data = !arg_matches.is_present("no_account_data"); let account_data_encoding = parse_encoding_format(arg_matches); + let mode = if let Some(pubkeys) = pubkeys_of(arg_matches, "account") { + info!("Scanning individual accounts: {pubkeys:?}"); + AccountsOutputMode::Individual(pubkeys) + } else if let Some(pubkey) = pubkey_of(arg_matches, "program_accounts") { + info!("Scanning program accounts for {pubkey}"); + AccountsOutputMode::Program(pubkey) + } else { + info!("Scanning all accounts"); + AccountsOutputMode::All + }; let config = AccountsOutputConfig { + mode, include_sysvars, include_account_contents, include_account_data, diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index e21676771d598f..2de702ef44ce5d 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -6,6 +6,7 @@ use { Deserialize, Serialize, }, solana_account_decoder::{UiAccount, UiAccountData, UiAccountEncoding}, + solana_accounts_db::accounts_index::ScanConfig, solana_cli_output::{ display::writeln_transaction, CliAccount, CliAccountNewConfig, OutputFormat, QuietDisplay, VerboseDisplay, @@ -572,7 +573,14 @@ pub struct AccountsOutputStreamer { output_format: OutputFormat, } +pub enum AccountsOutputMode { + All, + Individual(Vec), + Program(Pubkey), +} + pub struct AccountsOutputConfig { + pub mode: AccountsOutputMode, pub include_sysvars: bool, pub include_account_contents: bool, pub include_account_data: bool, @@ -608,7 +616,10 @@ impl AccountsOutputStreamer { .serialize_field("summary", &*self.total_accounts_stats.borrow()) .map_err(|err| format!("unable to serialize accounts summary: {err}"))?; SerializeStruct::end(struct_serializer) - .map_err(|err| format!("unable to end serialization: {err}")) + .map_err(|err| format!("unable to end serialization: {err}"))?; + // The serializer doesn't give us a trailing newline so do it ourselves + println!(); + Ok(()) } _ => { // The compiler needs a placeholder type to satisfy the generic @@ -637,6 +648,33 @@ impl AccountsScanner { && (self.config.include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey)) } + fn maybe_output_account( + &self, + seq_serializer: &mut Option, + pubkey: &Pubkey, + account: &AccountSharedData, + slot: Option, + cli_account_new_config: &CliAccountNewConfig, + ) where + S: SerializeSeq, + { + if self.config.include_account_contents { + if let Some(serializer) = seq_serializer { + let cli_account = + CliAccount::new_with_config(pubkey, account, cli_account_new_config); + serializer.serialize_element(&cli_account).unwrap(); + } else { + output_account( + pubkey, + account, + slot, + self.config.include_account_data, + self.config.account_data_encoding, + ); + } + } + } + pub fn output(&self, seq_serializer: &mut Option) where S: SerializeSeq, @@ -654,26 +692,53 @@ impl AccountsScanner { .filter(|(pubkey, account, _)| self.should_process_account(account, pubkey)) { total_accounts_stats.accumulate_account(pubkey, &account, rent_collector); - - if self.config.include_account_contents { - if let Some(serializer) = seq_serializer { - let cli_account = - CliAccount::new_with_config(pubkey, &account, 
&cli_account_new_config); - serializer.serialize_element(&cli_account).unwrap(); - } else { - output_account( - pubkey, - &account, - Some(slot), - self.config.include_account_data, - self.config.account_data_encoding, - ); - } - } + self.maybe_output_account( + seq_serializer, + pubkey, + &account, + Some(slot), + &cli_account_new_config, + ); } }; - self.bank.scan_all_accounts(scan_func).unwrap(); + match &self.config.mode { + AccountsOutputMode::All => { + self.bank.scan_all_accounts(scan_func).unwrap(); + } + AccountsOutputMode::Individual(pubkeys) => pubkeys.iter().for_each(|pubkey| { + if let Some((account, slot)) = self + .bank + .get_account_modified_slot_with_fixed_root(pubkey) + .filter(|(account, _)| self.should_process_account(account, pubkey)) + { + total_accounts_stats.accumulate_account(pubkey, &account, rent_collector); + self.maybe_output_account( + seq_serializer, + pubkey, + &account, + Some(slot), + &cli_account_new_config, + ); + } + }), + AccountsOutputMode::Program(program_pubkey) => self + .bank + .get_program_accounts(program_pubkey, &ScanConfig::default()) + .unwrap() + .iter() + .filter(|(pubkey, account)| self.should_process_account(account, pubkey)) + .for_each(|(pubkey, account)| { + total_accounts_stats.accumulate_account(pubkey, account, rent_collector); + self.maybe_output_account( + seq_serializer, + pubkey, + account, + None, + &cli_account_new_config, + ); + }), + } } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 57febbdfda95c2..f2722983dcdbdf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6039,10 +6039,18 @@ impl Bank { // pro: safer assertion can be enabled inside AccountsDb // con: panics!() if called from off-chain processing pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option { - self.load_slow_with_fixed_root(&self.ancestors, pubkey) + self.get_account_modified_slot_with_fixed_root(pubkey) .map(|(acc, _slot)| acc) } + // See note above get_account_with_fixed_root() about when to prefer this function + pub fn get_account_modified_slot_with_fixed_root( + &self, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { + self.load_slow_with_fixed_root(&self.ancestors, pubkey) + } + pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.load_slow(&self.ancestors, pubkey) } From 5df5ee5ccfbae3b26d2bddd097a04f69e96a966a Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 8 Feb 2024 22:19:56 -0500 Subject: [PATCH 159/401] Upgrades Rust to 1.76.0 (#35148) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7897a24d1a1be7..624eb0ea639014 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.75.0" +channel = "1.76.0" From 90f639a89f610d586687dc3dbec8b13b56b344fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 11:41:27 +0800 Subject: [PATCH 160/401] build(deps): bump serde_yaml from 0.9.30 to 0.9.31 (#35034) * build(deps): bump serde_yaml from 0.9.30 to 0.9.31 Bumps [serde_yaml](https://github.com/dtolnay/serde-yaml) from 0.9.30 to 0.9.31. - [Release notes](https://github.com/dtolnay/serde-yaml/releases) - [Commits](https://github.com/dtolnay/serde-yaml/compare/0.9.30...0.9.31) --- updated-dependencies: - dependency-name: serde_yaml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09e2e21f5a2941..f1dd80930689c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4927,9 +4927,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.30" +version = "0.9.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" dependencies = [ "indexmap 2.2.2", "itoa", @@ -5443,7 +5443,7 @@ dependencies = [ "rand 0.8.5", "rayon", "serde_json", - "serde_yaml 0.9.30", + "serde_yaml 0.9.31", "serial_test", "solana-clap-utils", "solana-cli-config", @@ -5713,7 +5713,7 @@ dependencies = [ "lazy_static", "serde", "serde_derive", - "serde_yaml 0.9.30", + "serde_yaml 0.9.31", "solana-clap-utils", "solana-sdk", "url 2.5.0", @@ -6104,7 +6104,7 @@ dependencies = [ "itertools", "serde", "serde_json", - "serde_yaml 0.9.30", + "serde_yaml 0.9.31", "solana-accounts-db", "solana-clap-utils", "solana-cli-config", @@ -6237,7 +6237,7 @@ dependencies = [ "semver 1.0.21", "serde", "serde_yaml 0.8.26", - "serde_yaml 0.9.30", + "serde_yaml 0.9.31", "solana-clap-utils", "solana-config-program", "solana-logger", @@ -7579,7 +7579,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_yaml 0.9.30", + "serde_yaml 0.9.31", "signal-hook", "solana-account-decoder", "solana-accounts-db", diff --git a/Cargo.toml b/Cargo.toml index a2a606d691f3a2..13cb89983bda9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -298,7 +298,7 @@ serde_bytes = "0.11.14" serde_derive = "1.0.103" serde_json = "1.0.113" serde_with = { version = "2.3.3", default-features = false } -serde_yaml = "0.9.30" +serde_yaml = "0.9.31" serial_test = "2.0.0" sha2 = "0.10.8" sha3 = "0.10.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 110c353b67c03f..c0d7bfe2295f4f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4344,9 +4344,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.30" +version = "0.9.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" dependencies = [ "indexmap 2.2.2", "itoa", From 677e4c45752cf3b5e22844b4fdca9be7f017d5e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 11:42:11 +0800 Subject: [PATCH 161/401] build(deps): bump num-derive from 0.4.1 to 0.4.2 (#35142) * build(deps): bump num-derive from 0.4.1 to 0.4.2 Bumps [num-derive](https://github.com/rust-num/num-derive) from 0.4.1 to 0.4.2. - [Changelog](https://github.com/rust-num/num-derive/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-derive/compare/num-derive-0.4.1...num-derive-0.4.2) --- updated-dependencies: - dependency-name: num-derive dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 30 +++++++++++++++--------------- programs/sbf/Cargo.lock | 30 +++++++++++++++--------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1dd80930689c2..56899b5e53b19b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3378,9 +3378,9 @@ dependencies = [ [[package]] name = "num-derive" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", @@ -5284,7 +5284,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_cpus", "num_enum 0.7.2", @@ -5331,7 +5331,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rustc_version 0.4.0", "serde", @@ -6663,7 +6663,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -6699,7 +6699,7 @@ dependencies = [ "libc", "libsecp256k1", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "percentage", "rand 0.8.5", @@ -6813,7 +6813,7 @@ dependencies = [ "dialoguer", "hidapi", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "parking_lot 0.12.1", "qstring", @@ -7007,7 +7007,7 @@ dependencies = [ "memoffset 0.9.0", "mockall", "modular-bitfield", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_cpus", "num_enum 0.7.2", @@ -7101,7 +7101,7 @@ dependencies = [ "libsecp256k1", "log", "memmap2", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_enum 0.7.2", "pbkdf2 0.11.0", @@ -7658,7 +7658,7 @@ dependencies = [ "assert_matches", "bincode", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rustc_version 0.4.0", "serde", @@ -7741,7 +7741,7 @@ dependencies = [ "bytemuck", "criterion", "curve25519-dalek", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program-runtime", "solana-sdk", @@ -7774,7 +7774,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rand 0.7.3", "serde", @@ -7828,7 +7828,7 @@ checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" dependencies = [ "assert_matches", "borsh 0.10.3", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program", "spl-token", @@ -7909,7 +7909,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" dependencies = [ - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program", "spl-program-error-derive", @@ -7965,7 +7965,7 @@ checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_enum 0.7.2", "solana-program", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c0d7bfe2295f4f..ca6a8c39e49291 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3010,9 +3010,9 @@ dependencies = [ 
[[package]] name = "num-derive" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", @@ -4626,7 +4626,7 @@ dependencies = [ "lz4", "memmap2", "modular-bitfield", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_cpus", "num_enum 0.7.2", @@ -4669,7 +4669,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rustc_version", "serde", @@ -5401,7 +5401,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -5433,7 +5433,7 @@ dependencies = [ "itertools", "libc", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "percentage", "rand 0.8.5", @@ -5539,7 +5539,7 @@ dependencies = [ "console", "dialoguer", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "parking_lot 0.12.1", "qstring", @@ -5687,7 +5687,7 @@ dependencies = [ "memmap2", "mockall", "modular-bitfield", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_cpus", "num_enum 0.7.2", @@ -6182,7 +6182,7 @@ dependencies = [ "libsecp256k1 0.6.0", "log", "memmap2", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_enum 0.7.2", "pbkdf2 0.11.0", @@ -6620,7 +6620,7 @@ version = "1.18.0" dependencies = [ "bincode", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rustc_version", "serde", @@ -6658,7 +6658,7 @@ name = "solana-zk-token-proof-program" version = "1.18.0" dependencies = [ "bytemuck", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program-runtime", "solana-sdk", @@ -6679,7 +6679,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rand 0.7.3", "serde", @@ -6731,7 +6731,7 @@ checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" dependencies = [ "assert_matches", "borsh 0.10.3", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program", "spl-token", @@ -6802,7 +6802,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" dependencies = [ - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program", "spl-program-error-derive", @@ -6858,7 +6858,7 @@ checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_enum 0.7.2", "solana-program", From eec025fa10a6373a4ff3b2e8dda3ea32b710a280 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 11:43:08 +0800 Subject: [PATCH 162/401] build(deps): bump hidapi from 2.5.1 to 2.6.0 (#35141) Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 2.5.1 to 2.6.0. - [Release notes](https://github.com/ruabmbua/hidapi-rs/releases) - [Commits](https://github.com/ruabmbua/hidapi-rs/compare/v2.5.1...v2.6.0) --- updated-dependencies: - dependency-name: hidapi dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56899b5e53b19b..d65aaac98cf122 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2407,9 +2407,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hidapi" -version = "2.5.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830eccace7c861211d0ad04288e5dad690d6711b0db152084da58882ee7a840a" +checksum = "9a722fb137d008dbf264f54612457f8eb6a299efbcb0138178964a0809035d74" dependencies = [ "cc", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 13cb89983bda9d..d4c48c23d59e54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -213,7 +213,7 @@ gethostname = "0.2.3" getrandom = "0.2.10" goauth = "0.13.1" hex = "0.4.3" -hidapi = { version = "2.5.1", default-features = false } +hidapi = { version = "2.6.0", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.11" From 864f29e93812c393ff5d221698cd72143a640cd0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 11:43:39 +0800 Subject: [PATCH 163/401] build(deps): bump num-traits from 0.2.17 to 0.2.18 (#35140) * build(deps): bump num-traits from 0.2.17 to 0.2.18 Bumps [num-traits](https://github.com/rust-num/num-traits) from 0.2.17 to 0.2.18. - [Changelog](https://github.com/rust-num/num-traits/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-traits/compare/num-traits-0.2.17...num-traits-0.2.18) --- updated-dependencies: - dependency-name: num-traits dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d65aaac98cf122..cbba36ccc8fff9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3422,9 +3422,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ca6a8c39e49291..5af01021f6a1cf 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3054,9 +3054,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] From 1517d22ecc9817c265bc1506ad6bc40f77f8deb3 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 9 Feb 2024 08:51:21 -0800 Subject: [PATCH 164/401] Scheduler - prioritization fees/cost (#34888) --- .../prio_graph_scheduler.rs | 18 +-- .../scheduler_controller.rs | 103 +++++++++---- .../transaction_state.rs | 137 +++++------------- .../transaction_state_container.rs | 47 ++---- 4 files changed, 126 insertions(+), 179 deletions(-) diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index f1be7339f3cd73..e0b53a97ab020e 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -191,7 +191,7 @@ impl PrioGraphScheduler { saturating_add_assign!(num_scheduled, 1); let sanitized_transaction_ttl = transaction_state.transition_to_pending(); - let cost = transaction_state.transaction_cost().sum(); + let cost = transaction_state.cost(); let SanitizedTransactionTTL { transaction, @@ -490,12 +490,9 @@ mod tests { crate::banking_stage::consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, - solana_cost_model::cost_model::CostModel, - solana_runtime::compute_budget_details::ComputeBudgetDetails, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, feature_set::FeatureSet, hash::Hash, - message::Message, pubkey::Pubkey, signature::Keypair, signer::Signer, - system_instruction, transaction::Transaction, + compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, pubkey::Pubkey, + signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, }, std::borrow::Borrow, }; @@ -572,19 +569,16 @@ mod tests { lamports, compute_unit_price, ); - let transaction_cost = CostModel::calculate_cost(&transaction, &FeatureSet::default()); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age_slot: Slot::MAX, }; + const TEST_TRANSACTION_COST: u64 = 5000; container.insert_new_transaction( id, transaction_ttl, - ComputeBudgetDetails { - compute_unit_price, - compute_unit_limit: 1, 
- }, - transaction_cost, + compute_unit_price, + TEST_TRANSACTION_COST, ); } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index aaf2753597b8ea..a5c0fa134f5369 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -20,10 +20,12 @@ use { itertools::MinMaxResult, solana_cost_model::cost_model::CostModel, solana_measure::measure_us, + solana_program_runtime::compute_budget_processor::process_compute_budget_instructions, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ - clock::MAX_PROCESSING_AGE, saturating_add_assign, timing::AtomicInterval, - transaction::SanitizedTransaction, + clock::MAX_PROCESSING_AGE, + feature_set::include_loaded_accounts_data_size_in_fee_calculation, fee::FeeBudgetLimits, + saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, }, solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ @@ -100,7 +102,7 @@ impl SchedulerController { // Reset intervals when appropriate, regardless of report. let should_report = self.count_metrics.has_data(); self.count_metrics - .update_prioritization_stats(self.container.get_min_max_prioritization_fees()); + .update_priority_stats(self.container.get_min_max_priority()); self.count_metrics.maybe_report_and_reset(should_report); self.timing_metrics.maybe_report_and_reset(should_report); self.worker_metrics @@ -311,21 +313,24 @@ impl SchedulerController { let mut error_counts = TransactionErrorMetrics::default(); for chunk in packets.chunks(CHUNK_SIZE) { let mut post_sanitization_count: usize = 0; - let (transactions, compute_budget_details): (Vec<_>, Vec<_>) = chunk + let (transactions, fee_budget_limits_vec): (Vec<_>, Vec<_>) = chunk .iter() .filter_map(|packet| { - packet - .build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) - .map(|tx| (tx, packet.compute_budget_details())) + packet.build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) }) .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) - .filter(|(tx, _)| { + .filter(|tx| { SanitizedTransaction::validate_account_locks( tx.message(), transaction_account_lock_limit, ) .is_ok() }) + .filter_map(|tx| { + process_compute_budget_instructions(tx.message().program_instructions_iter()) + .map(|compute_budget| (tx, compute_budget.into())) + .ok() + }) .unzip(); let check_results = bank.check_transactions( @@ -337,16 +342,17 @@ impl SchedulerController { let post_lock_validation_count = transactions.len(); let mut post_transaction_check_count: usize = 0; - for ((transaction, compute_budget_details), _) in transactions + for ((transaction, fee_budget_limits), _) in transactions .into_iter() - .zip(compute_budget_details) + .zip(fee_budget_limits_vec) .zip(check_results) .filter(|(_, check_result)| check_result.0.is_ok()) { saturating_add_assign!(post_transaction_check_count, 1); let transaction_id = self.transaction_id_generator.next(); - let transaction_cost = CostModel::calculate_cost(&transaction, &bank.feature_set); + let (priority, cost) = + Self::calculate_priority_and_cost(&transaction, &fee_budget_limits, &bank); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age_slot: last_slot_in_epoch, @@ -355,8 +361,8 @@ impl SchedulerController { if self.container.insert_new_transaction( transaction_id, transaction_ttl, - compute_budget_details, - transaction_cost, + 
priority, + cost, ) { saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); } @@ -384,6 +390,51 @@ impl SchedulerController { ); } } + + /// Calculate priority and cost for a transaction: + /// + /// Cost is calculated through the `CostModel`, + /// and priority is calculated through a formula here that attempts to sell + /// blockspace to the highest bidder. + /// + /// The priority is calculated as: + /// P = R / (1 + C) + /// where P is the priority, R is the reward, + /// and C is the cost towards block-limits. + /// + /// Current minimum costs are on the order of several hundred, + /// so the denominator is effectively C, and the +1 is simply + /// to avoid any division by zero due to a bug - these costs + /// are calculated by the cost-model and are not direct + /// from user input. They should never be zero. + /// Any difference in the prioritization is negligible for + /// the current transaction costs. + fn calculate_priority_and_cost( + transaction: &SanitizedTransaction, + fee_budget_limits: &FeeBudgetLimits, + bank: &Bank, + ) -> (u64, u64) { + let cost = CostModel::calculate_cost(transaction, &bank.feature_set).sum(); + let fee = bank.fee_structure.calculate_fee( + transaction.message(), + 5_000, // this just needs to be non-zero + fee_budget_limits, + bank.feature_set + .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + ); + + // We need a multiplier here to avoid rounding down too aggressively. + // For many transactions, the cost will be greater than the fees in terms of raw lamports. + // For the purposes of calculating prioritization, we multiply the fees by a large number so that + // the cost is a small fraction. + // An offset of 1 is used in the denominator to explicitly avoid division by zero. + const MULTIPLIER: u64 = 1_000_000; + ( + fee.saturating_mul(MULTIPLIER) + .saturating_div(cost.saturating_add(1)), + cost, + ) + } } #[derive(Default)] @@ -475,16 +526,8 @@ impl SchedulerCountMetrics { i64 ), ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64), - ( - "min_prioritization_fees", - self.get_min_prioritization_fees(), - i64 - ), - ( - "max_prioritization_fees", - self.get_max_prioritization_fees(), - i64 - ) + ("min_priority", self.get_min_priority(), i64), + ("max_priority", self.get_max_priority(), i64) ); } @@ -524,8 +567,8 @@ impl SchedulerCountMetrics { self.max_prioritization_fees = 0; } - pub fn update_prioritization_stats(&mut self, min_max_fees: MinMaxResult) { - // update min/max priotization fees + pub fn update_priority_stats(&mut self, min_max_fees: MinMaxResult) { + // update min/max priority match min_max_fees { itertools::MinMaxResult::NoElements => { // do nothing @@ -541,7 +584,7 @@ impl SchedulerCountMetrics { } } - pub fn get_min_prioritization_fees(&self) -> u64 { + pub fn get_min_priority(&self) -> u64 { // to avoid getting u64::max recorded by metrics / in case of edge cases if self.min_prioritization_fees != u64::MAX { self.min_prioritization_fees @@ -550,7 +593,7 @@ impl SchedulerCountMetrics { } } - pub fn get_max_prioritization_fees(&self) -> u64 { + pub fn get_max_priority(&self) -> u64 { self.max_prioritization_fees } } @@ -728,7 +771,7 @@ mod tests { from_keypair: &Keypair, to_pubkey: &Pubkey, lamports: u64, - priority: u64, + compute_unit_price: u64, recent_blockhash: Hash, ) -> Transaction { // Fund the sending key, so that the transaction does not get filtered by the fee-payer check. 
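// Illustrative sketch (not part of this patch): a standalone, runnable
// demonstration of the priority formula introduced above in
// calculate_priority_and_cost(), P = fee * MULTIPLIER / (cost + 1).
// The fee and cost inputs below are made-up numbers chosen only to show
// how the formula orders transactions; they are not real fee-structure
// or cost-model outputs.
fn priority(fee: u64, cost: u64) -> u64 {
    const MULTIPLIER: u64 = 1_000_000;
    fee.saturating_mul(MULTIPLIER)
        .saturating_div(cost.saturating_add(1))
}

fn main() {
    // A 5_000-lamport reward against roughly 2_000 cost units:
    assert_eq!(priority(5_000, 2_000), 2_498_750);
    // Doubling the reward roughly doubles the priority...
    assert_eq!(priority(10_000, 2_000), 4_997_501);
    // ...while doubling the cost roughly halves it.
    assert_eq!(priority(5_000, 4_000), 1_249_687);
}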
@@ -743,7 +786,7 @@ mod tests { } let transfer = system_instruction::transfer(&from_keypair.pubkey(), to_pubkey, lamports); - let prioritization = ComputeBudgetInstruction::set_compute_unit_price(priority); + let prioritization = ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price); let message = Message::new(&[transfer, prioritization], Some(&from_keypair.pubkey())); Transaction::new(&vec![from_keypair], message, recent_blockhash) } @@ -999,7 +1042,7 @@ mod tests { &Keypair::new(), &Pubkey::new_unique(), 1, - i, + i * 10, bank.last_blockhash(), ) }) diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index e8878e25c006f3..727140545ab656 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -1,8 +1,4 @@ -use { - solana_cost_model::transaction_cost::TransactionCost, - solana_runtime::compute_budget_details::ComputeBudgetDetails, - solana_sdk::{slot_history::Slot, transaction::SanitizedTransaction}, -}; +use solana_sdk::{clock::Slot, transaction::SanitizedTransaction}; /// Simple wrapper type to tie a sanitized transaction to max age slot. pub(crate) struct SanitizedTransactionTTL { @@ -34,77 +30,38 @@ pub(crate) enum TransactionState { /// The transaction is available for scheduling. Unprocessed { transaction_ttl: SanitizedTransactionTTL, - compute_budget_details: ComputeBudgetDetails, - transaction_cost: TransactionCost, - forwarded: bool, + priority: u64, + cost: u64, }, /// The transaction is currently scheduled or being processed. - Pending { - compute_budget_details: ComputeBudgetDetails, - transaction_cost: TransactionCost, - forwarded: bool, - }, + Pending { priority: u64, cost: u64 }, } impl TransactionState { /// Creates a new `TransactionState` in the `Unprocessed` state. - pub(crate) fn new( - transaction_ttl: SanitizedTransactionTTL, - compute_budget_details: ComputeBudgetDetails, - transaction_cost: TransactionCost, - ) -> Self { + pub(crate) fn new(transaction_ttl: SanitizedTransactionTTL, priority: u64, cost: u64) -> Self { Self::Unprocessed { transaction_ttl, - compute_budget_details, - transaction_cost, - forwarded: false, + priority, + cost, } } - /// Returns a reference to the compute budget details of the transaction. - pub(crate) fn compute_budget_details(&self) -> &ComputeBudgetDetails { + /// Return the priority of the transaction. + /// This is *not* the same as the `compute_unit_price` of the transaction. + /// The priority is used to order transactions for processing. + pub(crate) fn priority(&self) -> u64 { match self { - Self::Unprocessed { - compute_budget_details, - .. - } => compute_budget_details, - Self::Pending { - compute_budget_details, - .. - } => compute_budget_details, + Self::Unprocessed { priority, .. } => *priority, + Self::Pending { priority, .. } => *priority, } } - /// Returns a reference to the transaction cost of the transaction. - pub(crate) fn transaction_cost(&self) -> &TransactionCost { + /// Return the cost of the transaction. + pub(crate) fn cost(&self) -> u64 { match self { - Self::Unprocessed { - transaction_cost, .. - } => transaction_cost, - Self::Pending { - transaction_cost, .. - } => transaction_cost, - } - } - - /// Returns the compute unit price of the transaction. 
- pub(crate) fn compute_unit_price(&self) -> u64 { - self.compute_budget_details().compute_unit_price - } - - /// Returns whether or not the transaction has already been forwarded. - pub(crate) fn forwarded(&self) -> bool { - match self { - Self::Unprocessed { forwarded, .. } => *forwarded, - Self::Pending { forwarded, .. } => *forwarded, - } - } - - /// Sets the transaction as forwarded. - pub(crate) fn set_forwarded(&mut self) { - match self { - Self::Unprocessed { forwarded, .. } => *forwarded = true, - Self::Pending { forwarded, .. } => *forwarded = true, + Self::Unprocessed { cost, .. } => *cost, + Self::Pending { cost, .. } => *cost, } } @@ -119,15 +76,10 @@ impl TransactionState { match self.take() { TransactionState::Unprocessed { transaction_ttl, - compute_budget_details, - transaction_cost, - forwarded, + priority, + cost, } => { - *self = TransactionState::Pending { - compute_budget_details, - transaction_cost, - forwarded, - }; + *self = TransactionState::Pending { priority, cost }; transaction_ttl } TransactionState::Pending { .. } => { @@ -145,16 +97,11 @@ impl TransactionState { pub(crate) fn transition_to_unprocessed(&mut self, transaction_ttl: SanitizedTransactionTTL) { match self.take() { TransactionState::Unprocessed { .. } => panic!("already unprocessed"), - TransactionState::Pending { - compute_budget_details, - transaction_cost, - forwarded, - } => { + TransactionState::Pending { priority, cost } => { *self = Self::Unprocessed { transaction_ttl, - compute_budget_details, - transaction_cost, - forwarded, + priority, + cost, } } } @@ -179,14 +126,8 @@ impl TransactionState { core::mem::replace( self, Self::Pending { - compute_budget_details: ComputeBudgetDetails { - compute_unit_price: 0, - compute_unit_limit: 0, - }, - transaction_cost: TransactionCost::SimpleVote { - writable_accounts: vec![], - }, - forwarded: false, + priority: 0, + cost: 0, }, ) } @@ -196,7 +137,6 @@ impl TransactionState { mod tests { use { super::*, - solana_cost_model::transaction_cost::UsageCostDetails, solana_sdk::{ compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, @@ -215,24 +155,13 @@ mod tests { ]; let message = Message::new(&ixs, Some(&from_keypair.pubkey())); let tx = Transaction::new(&[&from_keypair], message, Hash::default()); - let transaction_cost = TransactionCost::Transaction(UsageCostDetails { - signature_cost: 5000, - ..UsageCostDetails::default() - }); let transaction_ttl = SanitizedTransactionTTL { transaction: SanitizedTransaction::from_transaction_for_tests(tx), max_age_slot: Slot::MAX, }; - - TransactionState::new( - transaction_ttl, - ComputeBudgetDetails { - compute_unit_price, - compute_unit_limit: 0, - }, - transaction_cost, - ) + const TEST_TRANSACTION_COST: u64 = 5000; + TransactionState::new(transaction_ttl, compute_unit_price, TEST_TRANSACTION_COST) } #[test] @@ -294,16 +223,16 @@ mod tests { } #[test] - fn test_compute_unit_price() { - let compute_unit_price = 15; - let mut transaction_state = create_transaction_state(compute_unit_price); - assert_eq!(transaction_state.compute_unit_price(), compute_unit_price); + fn test_priority() { + let priority = 15; + let mut transaction_state = create_transaction_state(priority); + assert_eq!(transaction_state.priority(), priority); // ensure compute unit price is not lost through state transitions let transaction_ttl = transaction_state.transition_to_pending(); - assert_eq!(transaction_state.compute_unit_price(), 
compute_unit_price); + assert_eq!(transaction_state.priority(), priority); transaction_state.transition_to_unprocessed(transaction_ttl); - assert_eq!(transaction_state.compute_unit_price(), compute_unit_price); + assert_eq!(transaction_state.priority(), priority); } #[test] diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index e314a3e49cda83..a627375a03e6ba 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -6,8 +6,6 @@ use { crate::banking_stage::scheduler_messages::TransactionId, itertools::MinMaxResult, min_max_heap::MinMaxHeap, - solana_cost_model::transaction_cost::TransactionCost, - solana_runtime::compute_budget_details::ComputeBudgetDetails, std::collections::HashMap, }; @@ -99,14 +97,13 @@ impl TransactionStateContainer { &mut self, transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, - compute_budget_details: ComputeBudgetDetails, - transaction_cost: TransactionCost, + priority: u64, + cost: u64, ) -> bool { - let priority_id = - TransactionPriorityId::new(compute_budget_details.compute_unit_price, transaction_id); + let priority_id = TransactionPriorityId::new(priority, transaction_id); self.id_to_transaction_state.insert( transaction_id, - TransactionState::new(transaction_ttl, compute_budget_details, transaction_cost), + TransactionState::new(transaction_ttl, priority, cost), ); self.push_id_into_queue(priority_id) } @@ -121,8 +118,7 @@ impl TransactionStateContainer { let transaction_state = self .get_mut_transaction_state(&transaction_id) .expect("transaction must exist"); - let priority_id = - TransactionPriorityId::new(transaction_state.compute_unit_price(), transaction_id); + let priority_id = TransactionPriorityId::new(transaction_state.priority(), transaction_id); transaction_state.transition_to_unprocessed(transaction_ttl); self.push_id_into_queue(priority_id); } @@ -148,7 +144,7 @@ impl TransactionStateContainer { .expect("transaction must exist"); } - pub(crate) fn get_min_max_prioritization_fees(&self) -> MinMaxResult { + pub(crate) fn get_min_max_priority(&self) -> MinMaxResult { match self.priority_queue.peek_min() { Some(min) => match self.priority_queue.peek_max() { Some(max) => MinMaxResult::MinMax(min.priority, max.priority), @@ -163,10 +159,8 @@ impl TransactionStateContainer { mod tests { use { super::*, - solana_cost_model::cost_model::CostModel, solana_sdk::{ compute_budget::ComputeBudgetInstruction, - feature_set::FeatureSet, hash::Hash, message::Message, signature::Keypair, @@ -177,13 +171,8 @@ mod tests { }, }; - fn test_transaction( - priority: u64, - ) -> ( - SanitizedTransactionTTL, - ComputeBudgetDetails, - TransactionCost, - ) { + /// Returns (transaction_ttl, priority, cost) + fn test_transaction(priority: u64) -> (SanitizedTransactionTTL, u64, u64) { let from_keypair = Keypair::new(); let ixs = vec![ system_instruction::transfer( @@ -199,31 +188,23 @@ mod tests { message, Hash::default(), )); - let transaction_cost = CostModel::calculate_cost(&tx, &FeatureSet::default()); let transaction_ttl = SanitizedTransactionTTL { transaction: tx, max_age_slot: Slot::MAX, }; - ( - transaction_ttl, - ComputeBudgetDetails { - compute_unit_price: priority, - compute_unit_limit: 0, - }, - transaction_cost, - ) + const TEST_TRANSACTION_COST: u64 = 5000; + (transaction_ttl, priority, TEST_TRANSACTION_COST) 
} fn push_to_container(container: &mut TransactionStateContainer, num: usize) { for id in 0..num as u64 { let priority = id; - let (transaction_ttl, compute_budget_details, transaction_cost) = - test_transaction(priority); + let (transaction_ttl, priority, cost) = test_transaction(priority); container.insert_new_transaction( TransactionId::new(id), transaction_ttl, - compute_budget_details, - transaction_cost, + priority, + cost, ); } } @@ -248,7 +229,7 @@ mod tests { container .id_to_transaction_state .iter() - .map(|ts| ts.1.compute_unit_price()) + .map(|ts| ts.1.priority()) .next() .unwrap(), 4 From 8f7fda8b9f4de353ca852d86611fc8ba52c621d3 Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:32:15 -0700 Subject: [PATCH 165/401] [docs] translation support (#35166) * chore: update crowdin and add serve command * feat: upload all desired files to crowdin * fix: absolute urls for pdfs * fix: do not import components with relative paths * feat: updated readme * fix: whitespace --- docs/README.md | 43 ++++++++++++++++++++++-- docs/crowdin.yml | 17 +++++++--- docs/package-lock.json | 53 ++++++++++-------------------- docs/package.json | 3 +- docs/src/index.mdx | 2 +- docs/src/runtime/zk-token-proof.md | 11 ++++--- 6 files changed, 79 insertions(+), 50 deletions(-) diff --git a/docs/README.md b/docs/README.md index ceff97a78db556..0e002b6ac75dcf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -63,8 +63,12 @@ npm run start ## Translations -Translations are sourced from [Crowdin](https://docusaurus.io/docs/i18n/crowdin) and generated when `master` is built. -For local development use the following two commands in this `docs` directory. +Translations are sourced from [Crowdin](https://docusaurus.io/docs/i18n/crowdin) +and generated when the branch noted as the `STABLE` channel is built via the +`build.sh` script. + +For local development, and with the `CROWDIN_PERSONAL_TOKEN` env variable set, +use the following two commands in this `docs` directory. To download the newest documentation translations run: @@ -72,12 +76,45 @@ To download the newest documentation translations run: npm run crowdin:download ``` -To upload changes from `src` & generate [explicit IDs](https://docusaurus.io/docs/markdown-features/headings#explicit-ids): +To upload changes from `src` & generate +[explicit IDs](https://docusaurus.io/docs/markdown-features/headings#explicit-ids): ```shell npm run crowdin:upload ``` +> Translations are only included when deploying the `STABLE` channel of the docs +> (via `build.sh`). Resulting in only the `docs.solanalabs.com` documentation +> site to include translated content. Therefore, the `edge` and `beta` docs +> sites are not expected to include translated content, even though the language +> selector will still be present. + +### Common issues + +#### `CROWDIN_PERSONAL_TOKEN` env variable + +The `crowdin.yml` file requires a `CROWDIN_PERSONAL_TOKEN` env variable to be +set with a valid Crowdin access token. + +For local development, you can store this in a `.env` file that the Crowdin CLI +will auto detect. + +For building and publishing via the GitHub actions, the `CROWDIN_PERSONAL_TOKEN` +secret must be set. + +#### Translation locale fails to build with `SyntaxError` + +Some translation locales may fail to build with a `SyntaxError` thrown by +Docusaurus due to how certain language symbols get parsed by Docusaurus while +generating the static version of the docs. 
+ +> Note: When any locale fails to build, the entire docs build will fail +> resulting in the docs not being able to be deployed at all. + +There are several known locales that fail to build the current documentation. +They are listed in the commented out `localesNotBuilding` attribute in the +[`docusaurus.config.js`](https://github.com/solana-labs/solana/blob/master/docs/docusaurus.config.js) + ## CI Build Flow The docs are built and published in Travis CI with the `./build.sh` script. On each PR, the docs are built, but not published. diff --git a/docs/crowdin.yml b/docs/crowdin.yml index 4a14b0899569a3..a8d31e9e7ec099 100644 --- a/docs/crowdin.yml +++ b/docs/crowdin.yml @@ -4,13 +4,22 @@ base_url: 'https://solana.crowdin.com' preserve_hierarchy: true files: [ # JSON translation files - # { - # source: '/i18n/en/**/*', - # translation: '/i18n/%two_letters_code%/**/%original_file_name%', - #}, + { + source: '/i18n/en/**/*', + translation: '/i18n/%two_letters_code%/**/%original_file_name%', + }, # Docs Markdown files { source: '/src/**/*.md', translation: '/i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%', }, + { + source: '/src/**/*.mdx', + translation: '/i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%', + }, + # Custom sidebar category files + { + source: '/src/**/*.json', + translation: '/i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%', + }, ] diff --git a/docs/package-lock.json b/docs/package-lock.json index 976f65828db4bf..13c82661487a82 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -8,7 +8,7 @@ "name": "solana-docs", "version": "0.0.0", "dependencies": { - "@crowdin/cli": "^3.6.1", + "@crowdin/cli": "^3.17.0", "@docusaurus/core": "^2.2.0", "@docusaurus/plugin-google-gtag": "^2.4.0", "@docusaurus/preset-classic": "^2.2.0", @@ -2029,12 +2029,15 @@ } }, "node_modules/@crowdin/cli": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.9.0.tgz", - "integrity": "sha512-4wQjqJZmU/mg3VYfRL6IYXw/pPAL9vdfW3QVSBovYA+bYaEt43ZuGsSrqeBGOhLehasWwRqklXWsl96gxQlLdw==", + "version": "3.17.0", + "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.17.0.tgz", + "integrity": "sha512-ipr5wyBvpVuJ/DtJgDqTJiECu7zsVn9DwyTdf+sa0ukksXyiX3+H6wPm4eefIfEVSEaM92Q572dJZ5OnIH/Sag==", "dependencies": { - "njre": "^0.2.0", - "shelljs": "^0.8.4" + "command-exists-promise": "^2.0.2", + "node-fetch": "2.6.7", + "shelljs": "^0.8.4", + "tar": "^4.4.8", + "yauzl": "^2.10.0" }, "bin": { "crowdin": "jdeploy-bundle/jdeploy.js" @@ -9570,20 +9573,6 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, - "node_modules/njre": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/njre/-/njre-0.2.0.tgz", - "integrity": "sha512-+Wq8R6VmjK+jI8a9NdzfU6Vh50r3tjsdvl5KJE1OyHeH8I/nx5Ptm12qpO3qNUbstXuZfBDgDL0qQZw9JyjhMw==", - "dependencies": { - "command-exists-promise": "^2.0.2", - "node-fetch": "^2.5.0", - "tar": "^4.4.8", - "yauzl": "^2.10.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/no-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", @@ -15762,12 +15751,15 @@ "optional": true }, "@crowdin/cli": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.9.0.tgz", - "integrity": 
"sha512-4wQjqJZmU/mg3VYfRL6IYXw/pPAL9vdfW3QVSBovYA+bYaEt43ZuGsSrqeBGOhLehasWwRqklXWsl96gxQlLdw==", + "version": "3.17.0", + "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.17.0.tgz", + "integrity": "sha512-ipr5wyBvpVuJ/DtJgDqTJiECu7zsVn9DwyTdf+sa0ukksXyiX3+H6wPm4eefIfEVSEaM92Q572dJZ5OnIH/Sag==", "requires": { - "njre": "^0.2.0", - "shelljs": "^0.8.4" + "command-exists-promise": "^2.0.2", + "node-fetch": "2.6.7", + "shelljs": "^0.8.4", + "tar": "^4.4.8", + "yauzl": "^2.10.0" } }, "@cspotcode/source-map-support": { @@ -21261,17 +21253,6 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, - "njre": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/njre/-/njre-0.2.0.tgz", - "integrity": "sha512-+Wq8R6VmjK+jI8a9NdzfU6Vh50r3tjsdvl5KJE1OyHeH8I/nx5Ptm12qpO3qNUbstXuZfBDgDL0qQZw9JyjhMw==", - "requires": { - "command-exists-promise": "^2.0.2", - "node-fetch": "^2.5.0", - "tar": "^4.4.8", - "yauzl": "^2.10.0" - } - }, "no-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", diff --git a/docs/package.json b/docs/package.json index 7279aa3e160586..c449ca7316fe8d 100644 --- a/docs/package.json +++ b/docs/package.json @@ -5,6 +5,7 @@ "scripts": { "start": "docusaurus start", "build": "docusaurus build", + "serve": "docusaurus serve", "clear": "docusaurus clear", "help": "docusaurus --help", "swizzle": "docusaurus swizzle", @@ -20,7 +21,7 @@ "crowdin:upload": "npm run write-i18n && crowdin upload" }, "dependencies": { - "@crowdin/cli": "^3.6.1", + "@crowdin/cli": "^3.17.0", "@docusaurus/core": "^2.2.0", "@docusaurus/plugin-google-gtag": "^2.4.0", "@docusaurus/preset-classic": "^2.2.0", diff --git a/docs/src/index.mdx b/docs/src/index.mdx index eff65e951be627..b7a098ea747132 100644 --- a/docs/src/index.mdx +++ b/docs/src/index.mdx @@ -55,6 +55,6 @@ Explore what it takes to operate a Solana validator and help secure the network. ## Learn more -import HomeCtaLinks from "../components/HomeCtaLinks"; +import HomeCtaLinks from "@site/components/HomeCtaLinks"; diff --git a/docs/src/runtime/zk-token-proof.md b/docs/src/runtime/zk-token-proof.md index 46fab4c7112f9a..35384f17c9396b 100644 --- a/docs/src/runtime/zk-token-proof.md +++ b/docs/src/runtime/zk-token-proof.md @@ -41,7 +41,8 @@ cannot change the original value that is contained in a commitment. Interested readers can refer to the following resources for a more in-depth treatment of Pedersen commitment and the (twisted) ElGamal encryption schemes. -- [Notes](./zk-docs/twisted_elgamal.pdf) on the twisted ElGamal encryption +- [Notes](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/twisted_elgamal.pdf) + on the twisted ElGamal encryption - A technical [overview](https://github.com/solana-labs/solana-program-library/blob/master/token/zk-token-protocol-paper/part1.pdf) of the SPL Token 2022 confidential extension @@ -98,14 +99,14 @@ The ZK Token proof program supports the following list of zero-knowledge proofs. - The ElGamal public-key validity proof instruction certifies that an ElGamal public-key is a properly formed public key. 
- Mathematical description and proof of security: - [[Notes]](./zk-docs/pubkey_proof.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/pubkey_proof.pdf) - `VerifyZeroBalance`: - The zero-balance proof certifies that an ElGamal ciphertext encrypts the number zero. - Mathematical description and proof of security: - [[Notes]](./zk-docs/zero_proof.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/zero_proof.pdf) #### Equality proofs @@ -114,11 +115,11 @@ The ZK Token proof program supports the following list of zero-knowledge proofs. - The ciphertext-commitment equality proof certifies that an ElGamal ciphertext and a Pedersen commitment encode the same message. - Mathematical description and proof of security: - [[Notes]](./zk-docs/ciphertext_commitment_equality.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/ciphertext_commitment_equality.pdf) - `VerifyCiphertextCiphertextEquality`: - The ciphertext-ciphertext equality proof certifies that two ElGamal ciphertexts encrypt the same message. - Mathematical description and proof of security: - [[Notes]](./zk-docs/ciphertext_ciphertext_equality.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/ciphertext_ciphertext_equality.pdf) From b5e903d1d75ed4baa3a5523ff34b0646c17d0c00 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 9 Feb 2024 18:26:51 -0500 Subject: [PATCH 166/401] Refactors AccountsIndex::get() (#35163) --- accounts-db/src/accounts_index.rs | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 493bb3130a9e2d..a3e7ff37b97d23 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1441,22 +1441,13 @@ impl + Into> AccountsIndex { ancestors: Option<&Ancestors>, max_root: Option, ) -> AccountIndexGetResult { - let read_lock = self.get_bin(pubkey); - let account = read_lock - .get(pubkey) - .map(ReadAccountMapEntry::from_account_map_entry); - - match account { - Some(locked_entry) => { + self.get_account_read_entry(pubkey) + .and_then(|locked_entry| { let slot_list = locked_entry.slot_list(); - let found_index = self.latest_slot(ancestors, slot_list, max_root); - match found_index { - Some(found_index) => AccountIndexGetResult::Found(locked_entry, found_index), - None => AccountIndexGetResult::NotFound, - } - } - None => AccountIndexGetResult::NotFound, - } + self.latest_slot(ancestors, slot_list, max_root) + .map(|found_index| AccountIndexGetResult::Found(locked_entry, found_index)) + }) + .unwrap_or(AccountIndexGetResult::NotFound) } // Get the maximum root <= `max_allowed_root` from the given `slice` From cb61ce435ee3accee75252462d48ffb28b31f0e7 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 9 Feb 2024 16:35:40 -0800 Subject: [PATCH 167/401] [TieredStorage] Put commonly used test functions into test_utils.rs (#35065) #### Problem There're some test functions that have been used in different mod in TieredStorage. It's better to have one same place for all tiere-storage related test functions. #### Summary of Changes Created test_utils.rs under /tiered_storage and move test-related functions into it. #### Test Plan Existing tests. 
--- accounts-db/src/tiered_storage.rs | 67 ++------------ accounts-db/src/tiered_storage/hot.rs | 92 +++----------------- accounts-db/src/tiered_storage/test_utils.rs | 76 ++++++++++++++++ 3 files changed, 95 insertions(+), 140 deletions(-) create mode 100644 accounts-db/src/tiered_storage/test_utils.rs diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index f0a23150e2fa70..335e93c72e9750 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -10,6 +10,7 @@ pub mod meta; pub mod mmap_utils; pub mod owners; pub mod readable; +mod test_utils; pub mod writer; use { @@ -160,17 +161,12 @@ impl TieredStorage { mod tests { use { super::*, - crate::account_storage::meta::{StoredAccountMeta, StoredMeta, StoredMetaWriteVersion}, + crate::account_storage::meta::StoredMetaWriteVersion, footer::{TieredStorageFooter, TieredStorageMagicNumber}, hot::HOT_FORMAT, index::IndexOffset, - owners::OWNER_NO_OWNER, solana_sdk::{ - account::{Account, AccountSharedData}, - clock::Slot, - hash::Hash, - pubkey::Pubkey, - rent_collector::RENT_EXEMPT_RENT_EPOCH, + account::AccountSharedData, clock::Slot, hash::Hash, pubkey::Pubkey, system_instruction::MAX_PERMITTED_DATA_LENGTH, }, std::{ @@ -178,6 +174,7 @@ mod tests { mem::ManuallyDrop, }, tempfile::tempdir, + test_utils::{create_test_account, verify_test_account}, }; impl TieredStorage { @@ -310,58 +307,6 @@ mod tests { assert!(!tiered_storage_path.try_exists().unwrap()); } - /// Create a test account based on the specified seed. - fn create_account(seed: u64) -> (StoredMeta, AccountSharedData) { - let data_byte = seed as u8; - let account = Account { - lamports: seed, - data: std::iter::repeat(data_byte).take(seed as usize).collect(), - owner: Pubkey::new_unique(), - executable: seed % 2 > 0, - rent_epoch: if seed % 3 > 0 { - seed - } else { - RENT_EXEMPT_RENT_EPOCH - }, - }; - - let stored_meta = StoredMeta { - write_version_obsolete: StoredMetaWriteVersion::default(), - pubkey: Pubkey::new_unique(), - data_len: seed, - }; - (stored_meta, AccountSharedData::from(account)) - } - - fn verify_account( - stored_meta: &StoredAccountMeta<'_>, - account: Option<&impl ReadableAccount>, - account_hash: &AccountHash, - ) { - let (lamports, owner, data, executable, account_hash) = account - .map(|acc| { - ( - acc.lamports(), - acc.owner(), - acc.data(), - acc.executable(), - // only persist rent_epoch for those rent-paying accounts - Some(*account_hash), - ) - }) - .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); - - assert_eq!(stored_meta.lamports(), lamports); - assert_eq!(stored_meta.data().len(), data.len()); - assert_eq!(stored_meta.data(), data); - assert_eq!(stored_meta.executable(), executable); - assert_eq!(stored_meta.owner(), owner); - assert_eq!( - *stored_meta.hash(), - account_hash.unwrap_or(AccountHash(Hash::default())) - ); - } - /// The helper function for all write_accounts tests. /// Currently only supports hot accounts. 
fn do_test_write_accounts( @@ -371,7 +316,7 @@ mod tests { ) { let accounts: Vec<_> = account_data_sizes .iter() - .map(|size| create_account(*size)) + .map(|size| create_test_account(*size)) .collect(); let account_refs: Vec<_> = accounts @@ -415,7 +360,7 @@ mod tests { let mut verified_accounts = HashSet::new(); while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() { if let Some((account, account_hash)) = expected_accounts_map.get(stored_meta.pubkey()) { - verify_account(&stored_meta, *account, account_hash); + verify_test_account(&stored_meta, *account, stored_meta.pubkey(), account_hash); verified_accounts.insert(stored_meta.pubkey()); } index_offset = next; diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 7db9e90d65d353..f662c2e062ee11 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -657,26 +657,21 @@ impl HotStorageWriter { pub mod tests { use { super::*, - crate::{ - account_storage::meta::StoredMeta, - tiered_storage::{ - byte_block::ByteBlockWriter, - file::TieredStorageFile, - footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE}, - hot::{HotAccountMeta, HotStorageReader}, - index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - owners::{OwnersBlockFormat, OwnersTable}, - }, + crate::tiered_storage::{ + byte_block::ByteBlockWriter, + file::TieredStorageFile, + footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE}, + hot::{HotAccountMeta, HotStorageReader}, + index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, + meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + owners::{OwnersBlockFormat, OwnersTable}, + test_utils::{create_test_account, verify_test_account}, }, assert_matches::assert_matches, memoffset::offset_of, rand::{seq::SliceRandom, Rng}, solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount}, - hash::Hash, - pubkey::Pubkey, - slot_history::Slot, + account::ReadableAccount, hash::Hash, pubkey::Pubkey, slot_history::Slot, stake_history::Epoch, }, tempfile::TempDir, @@ -1288,67 +1283,6 @@ pub mod tests { assert_matches!(HotStorageWriter::new(&path), Err(_)); } - /// Create a test account based on the specified seed. - /// The created test account might have default rent_epoch - /// and write_version. - /// - /// When the seed is zero, then a zero-lamport test account will be - /// created. - fn create_test_account(seed: u64) -> (StoredMeta, AccountSharedData) { - let data_byte = seed as u8; - let owner_byte = u8::MAX - data_byte; - let account = Account { - lamports: seed, - data: std::iter::repeat(data_byte).take(seed as usize).collect(), - // this will allow some test account sharing the same owner. 
- owner: [owner_byte; 32].into(), - executable: seed % 2 > 0, - rent_epoch: if seed % 3 > 0 { - seed - } else { - RENT_EXEMPT_RENT_EPOCH - }, - }; - - let stored_meta = StoredMeta { - write_version_obsolete: u64::MAX, - pubkey: Pubkey::new_unique(), - data_len: seed, - }; - (stored_meta, AccountSharedData::from(account)) - } - - fn verify_account( - stored_meta: &StoredAccountMeta<'_>, - account: Option<&impl ReadableAccount>, - address: &Pubkey, - account_hash: &AccountHash, - ) { - let (lamports, owner, data, executable, account_hash) = account - .map(|acc| { - ( - acc.lamports(), - acc.owner(), - acc.data(), - acc.executable(), - // only persist rent_epoch for those rent-paying accounts - Some(*account_hash), - ) - }) - .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); - - assert_eq!(stored_meta.lamports(), lamports); - assert_eq!(stored_meta.data().len(), data.len()); - assert_eq!(stored_meta.data(), data); - assert_eq!(stored_meta.executable(), executable); - assert_eq!(stored_meta.owner(), owner); - assert_eq!(stored_meta.pubkey(), address); - assert_eq!( - *stored_meta.hash(), - account_hash.unwrap_or(AccountHash(Hash::default())) - ); - } - #[test] fn test_write_account_and_index_blocks() { let account_data_sizes = &[ @@ -1401,7 +1335,7 @@ pub mod tests { .unwrap(); let (account, address, account_hash, _write_version) = storable_accounts.get(i); - verify_account(&stored_meta, account, address, account_hash); + verify_test_account(&stored_meta, account, address, account_hash); assert_eq!(i + 1, next.0 as usize); } @@ -1420,7 +1354,7 @@ pub mod tests { let (account, address, account_hash, _write_version) = storable_accounts.get(stored_info.offset); - verify_account(&stored_meta, account, address, account_hash); + verify_test_account(&stored_meta, account, address, account_hash); } // verify get_accounts @@ -1429,7 +1363,7 @@ pub mod tests { // first, we verify everything for (i, stored_meta) in accounts.iter().enumerate() { let (account, address, account_hash, _write_version) = storable_accounts.get(i); - verify_account(stored_meta, account, address, account_hash); + verify_test_account(stored_meta, account, address, account_hash); } // second, we verify various initial position diff --git a/accounts-db/src/tiered_storage/test_utils.rs b/accounts-db/src/tiered_storage/test_utils.rs new file mode 100644 index 00000000000000..2ed2399f30fbaa --- /dev/null +++ b/accounts-db/src/tiered_storage/test_utils.rs @@ -0,0 +1,76 @@ +#![cfg(test)] +//! Helper functions for TieredStorage tests +use { + crate::{ + account_storage::meta::{StoredAccountMeta, StoredMeta}, + accounts_hash::AccountHash, + tiered_storage::owners::OWNER_NO_OWNER, + }, + solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount}, + hash::Hash, + pubkey::Pubkey, + rent_collector::RENT_EXEMPT_RENT_EPOCH, + }, +}; + +/// Create a test account based on the specified seed. +/// The created test account might have default rent_epoch +/// and write_version. +/// +/// When the seed is zero, then a zero-lamport test account will be +/// created. +pub(super) fn create_test_account(seed: u64) -> (StoredMeta, AccountSharedData) { + let data_byte = seed as u8; + let owner_byte = u8::MAX - data_byte; + let account = Account { + lamports: seed, + data: std::iter::repeat(data_byte).take(seed as usize).collect(), + // this will allow some test account sharing the same owner. 
+ owner: [owner_byte; 32].into(), + executable: seed % 2 > 0, + rent_epoch: if seed % 3 > 0 { + seed + } else { + RENT_EXEMPT_RENT_EPOCH + }, + }; + + let stored_meta = StoredMeta { + write_version_obsolete: u64::MAX, + pubkey: Pubkey::new_unique(), + data_len: seed, + }; + (stored_meta, AccountSharedData::from(account)) +} + +pub(super) fn verify_test_account( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + address: &Pubkey, + account_hash: &AccountHash, +) { + let (lamports, owner, data, executable, account_hash) = account + .map(|acc| { + ( + acc.lamports(), + acc.owner(), + acc.data(), + acc.executable(), + // only persist rent_epoch for those rent-paying accounts + Some(*account_hash), + ) + }) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); + + assert_eq!(stored_meta.lamports(), lamports); + assert_eq!(stored_meta.data().len(), data.len()); + assert_eq!(stored_meta.data(), data); + assert_eq!(stored_meta.executable(), executable); + assert_eq!(stored_meta.owner(), owner); + assert_eq!(stored_meta.pubkey(), address); + assert_eq!( + *stored_meta.hash(), + account_hash.unwrap_or(AccountHash(Hash::default())) + ); +} From 027c654772f1d6c469b156cf18b43308190cecf9 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Fri, 9 Feb 2024 20:57:15 -0600 Subject: [PATCH 168/401] Update SPL dependency versions for 2.0 bump (#35156) * Update SPL dependency versions for 2.0 bump * Update Cargo.lock * Update token-2022 version to v2.0.1 * pin spl-instruction-padding 0.1.1 and spl-tlv-account-resolution 0.5.2 * pin programs/sbf spl-tlv-account-resolution 0.5.2 --- Cargo.lock | 175 +++++++++++++--------------------------- Cargo.toml | 14 ++-- programs/sbf/Cargo.lock | 111 +++++++++---------------- 3 files changed, 103 insertions(+), 197 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cbba36ccc8fff9..414ffee64c33a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3365,17 +3365,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "num-derive" version = "0.4.2" @@ -3440,55 +3429,13 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" -dependencies = [ - "num_enum_derive 0.5.11", -] - -[[package]] -name = "num_enum" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" -dependencies = [ - "num_enum_derive 0.6.1", -] - [[package]] name = "num_enum" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ - "num_enum_derive 0.7.2", -] - -[[package]] -name = "num_enum_derive" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" -dependencies = [ - "proc-macro-crate 1.1.0", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "num_enum_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" -dependencies = [ - "proc-macro-crate 1.1.0", - "proc-macro2", - "quote", - "syn 2.0.48", + "num_enum_derive", ] [[package]] @@ -4014,16 +3961,6 @@ dependencies = [ "toml 0.5.8", ] -[[package]] -name = "proc-macro-crate" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" -dependencies = [ - "thiserror", - "toml 0.5.8", -] - [[package]] name = "proc-macro-crate" version = "2.0.0" @@ -5284,10 +5221,10 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive 0.4.2", + "num-derive", "num-traits", "num_cpus", - "num_enum 0.7.2", + "num_enum", "ouroboros", "percentage", "qualifier_attr", @@ -5331,7 +5268,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.4.2", + "num-derive", "num-traits", "rustc_version 0.4.0", "serde", @@ -5533,7 +5470,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.7.2", + "num_enum", "rand 0.8.5", "rayon", "solana-logger", @@ -5873,7 +5810,7 @@ dependencies = [ "log", "lru", "min-max-heap", - "num_enum 0.7.2", + "num_enum", "prio-graph", "quinn", "rand 0.8.5", @@ -6290,7 +6227,7 @@ dependencies = [ "lru", "mockall", "num_cpus", - "num_enum 0.7.2", + "num_enum", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -6663,7 +6600,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.4.2", + "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -6699,7 +6636,7 @@ dependencies = [ "libc", "libsecp256k1", "log", - "num-derive 0.4.2", + "num-derive", "num-traits", "percentage", "rand 0.8.5", @@ -6813,7 +6750,7 @@ dependencies = [ "dialoguer", "hidapi", "log", - "num-derive 0.4.2", + "num-derive", "num-traits", "parking_lot 0.12.1", "qstring", @@ -7007,10 +6944,10 @@ dependencies = [ "memoffset 0.9.0", "mockall", "modular-bitfield", - "num-derive 0.4.2", + "num-derive", "num-traits", "num_cpus", - "num_enum 0.7.2", + "num_enum", "ouroboros", "percentage", "qualifier_attr", @@ -7101,9 +7038,9 @@ dependencies = [ "libsecp256k1", "log", "memmap2", - "num-derive 0.4.2", + "num-derive", "num-traits", - "num_enum 0.7.2", + "num_enum", "pbkdf2 0.11.0", "qstring", "qualifier_attr", @@ -7658,7 +7595,7 @@ dependencies = [ "assert_matches", "bincode", "log", - "num-derive 0.4.2", + "num-derive", "num-traits", "rustc_version 0.4.0", "serde", @@ -7741,7 +7678,7 @@ dependencies = [ "bytemuck", "criterion", "curve25519-dalek", - "num-derive 0.4.2", + "num-derive", "num-traits", "solana-program-runtime", "solana-sdk", @@ -7774,7 +7711,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.4.2", + "num-derive", "num-traits", "rand 0.7.3", "serde", @@ -7822,13 +7759,13 @@ checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" [[package]] name = "spl-associated-token-account" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" +checksum = "4414117bead33f2a5cf059cefac0685592bdd36f31f3caac49b89bff7f6bbf32" dependencies = [ "assert_matches", "borsh 0.10.3", - "num-derive 0.4.2", + "num-derive", "num-traits", "solana-program", "spl-token", @@ -7838,9 +7775,9 @@ dependencies = [ [[package]] name = "spl-discriminator" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cce5d563b58ef1bb2cdbbfe0dfb9ffdc24903b10ae6a4df2d8f425ece375033f" +checksum = "daa600f2fe56f32e923261719bae640d873edadbc5237681a39b8e37bfd4d263" dependencies = [ "bytemuck", "solana-program", @@ -7849,9 +7786,9 @@ dependencies = [ [[package]] name = "spl-discriminator-derive" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" +checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", @@ -7860,9 +7797,9 @@ dependencies = [ [[package]] name = "spl-discriminator-syn" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" +checksum = "18fea7be851bd98d10721782ea958097c03a0c2a07d8d4997041d0ece6319a63" dependencies = [ "proc-macro2", "quote", @@ -7873,28 +7810,28 @@ dependencies = [ [[package]] name = "spl-instruction-padding" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c5557ec281a34f7f9053feb6e0d795162ba0c6a52898b21c3d1e899481191d5" +checksum = "be3f0c53b6eb2dfccb77b5710bddb04548da338a3f56bed214177f6a577d1ca6" dependencies = [ - "num_enum 0.5.11", + "num_enum", "solana-program", ] [[package]] name = "spl-memo" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f180b03318c3dbab3ef4e1e4d46d5211ae3c780940dd0a28695aba4b59a75a" +checksum = "58e9bae02de3405079a057fe244c867a08f92d48327d231fc60da831f94caf0a" dependencies = [ "solana-program", ] [[package]] name = "spl-pod" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2881dddfca792737c0706fa0175345ab282b1b0879c7d877bad129645737c079" +checksum = "85a5db7e4efb1107b0b8e52a13f035437cdcb36ef99c58f6d467f089d9b2915a" dependencies = [ "borsh 0.10.3", "bytemuck", @@ -7905,11 +7842,11 @@ dependencies = [ [[package]] name = "spl-program-error" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" +checksum = "7e0657b6490196971d9e729520ba934911ff41fbb2cb9004463dbe23cf8b4b4f" dependencies = [ - "num-derive 0.4.2", + "num-derive", "num-traits", "solana-program", "spl-program-error-derive", @@ -7918,9 +7855,9 @@ dependencies = [ [[package]] name = "spl-program-error-derive" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" +checksum = "1845dfe71fd68f70382232742e758557afe973ae19e6c06807b2c30f5d5cb474" dependencies = [ "proc-macro2", "quote", @@ -7930,9 +7867,9 @@ dependencies = [ [[package]] name = "spl-tlv-account-resolution" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f7020347c07892c08560d230fbb8a980316c9e198e22b198b7b9d951ff96047" +checksum = "56f335787add7fa711819f9e7c573f8145a5358a709446fe2d24bf2a88117c90" dependencies = [ "bytemuck", "solana-program", @@ -7944,30 +7881,30 @@ dependencies = [ [[package]] name = "spl-token" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08459ba1b8f7c1020b4582c4edf0f5c7511a5e099a7a97570c9698d4f2337060" 
+checksum = "95ae123223633a389f95d1da9d49c2d0a50d499e7060b9624626a69e536ad2a4" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.3.3", + "num-derive", "num-traits", - "num_enum 0.6.1", + "num_enum", "solana-program", "thiserror", ] [[package]] name = "spl-token-2022" -version = "1.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" +checksum = "b9fec83597cf7be923c5c3bdfd2fcc08cdfacd2eeb6c4e413da06b6916f50827" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.4.2", + "num-derive", "num-traits", - "num_enum 0.7.2", + "num_enum", "solana-program", "solana-security-txt", "solana-zk-token-sdk", @@ -7983,9 +7920,9 @@ dependencies = [ [[package]] name = "spl-token-group-interface" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b889509d49fa74a4a033ca5dae6c2307e9e918122d97e58562f5c4ffa795c75d" +checksum = "7eb67fbacd587377a400aba81718abe4424d0e9d5ea510034d3b7f130d102153" dependencies = [ "bytemuck", "solana-program", @@ -7996,9 +7933,9 @@ dependencies = [ [[package]] name = "spl-token-metadata-interface" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c16ce3ba6979645fb7627aa1e435576172dd63088dc7848cb09aa331fa1fe4f" +checksum = "e16aa8f64b6e0eaab3f5034e84d867c8435d8216497b4543a4978a31f4b6e8a8" dependencies = [ "borsh 0.10.3", "solana-program", @@ -8010,9 +7947,9 @@ dependencies = [ [[package]] name = "spl-transfer-hook-interface" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aabdb7c471566f6ddcee724beb8618449ea24b399e58d464d6b5bc7db550259" +checksum = "5f6dfe329fcff44cbe2eea994bd8f737f0b0a69faed39e56f9b6ee03badf7e14" dependencies = [ "arrayref", "bytemuck", @@ -8026,9 +7963,9 @@ dependencies = [ [[package]] name = "spl-type-length-value" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a468e6f6371f9c69aae760186ea9f1a01c2908351b06a5e0026d21cfc4d7ecac" +checksum = "8f9ebd75d29c5f48de5f6a9c114e08531030b75b8ac2c557600ac7da0b73b1e8" dependencies = [ "bytemuck", "solana-program", diff --git a/Cargo.toml b/Cargo.toml index d4c48c23d59e54..7cc9c5aa84049e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -390,14 +390,14 @@ solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } solana_rbpf = "=0.8.0" -spl-associated-token-account = "=2.3.0" +spl-associated-token-account = "=2.3.1" spl-instruction-padding = "0.1" -spl-memo = "=4.0.0" -spl-pod = "=0.1.0" -spl-token = "=4.0.0" -spl-token-2022 = "=1.0.0" -spl-token-group-interface = "=0.1.0" -spl-token-metadata-interface = "=0.2.0" +spl-memo = "=4.0.1" +spl-pod = "=0.1.1" +spl-token = "=4.0.1" +spl-token-2022 = "=2.0.1" +spl-token-group-interface = "=0.1.1" +spl-token-metadata-interface = "=0.2.1" static_assertions = "1.1.0" stream-cancel = "0.8.2" strum = "0.24" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5af01021f6a1cf..5a3616e966082f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3071,34 +3071,13 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" -dependencies = [ - "num_enum_derive 0.6.1", -] - [[package]] name = "num_enum" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ - "num_enum_derive 0.7.2", -] - -[[package]] -name = "num_enum_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" -dependencies = [ - "proc-macro-crate 1.1.3", - "proc-macro2", - "quote", - "syn 2.0.48", + "num_enum_derive", ] [[package]] @@ -3583,16 +3562,6 @@ dependencies = [ "toml", ] -[[package]] -name = "proc-macro-crate" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" -dependencies = [ - "thiserror", - "toml", -] - [[package]] name = "proc-macro-crate" version = "2.0.1" @@ -4629,7 +4598,7 @@ dependencies = [ "num-derive 0.4.2", "num-traits", "num_cpus", - "num_enum 0.7.2", + "num_enum", "ouroboros", "percentage", "qualifier_attr", @@ -4776,7 +4745,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.7.2", + "num_enum", "rand 0.8.5", "solana-measure", "solana-sdk", @@ -4927,7 +4896,7 @@ dependencies = [ "log", "lru", "min-max-heap", - "num_enum 0.7.2", + "num_enum", "prio-graph", "quinn", "rand 0.8.5", @@ -5206,7 +5175,7 @@ dependencies = [ "lru", "mockall", "num_cpus", - "num_enum 0.7.2", + "num_enum", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5690,7 +5659,7 @@ dependencies = [ "num-derive 0.4.2", "num-traits", "num_cpus", - "num_enum 0.7.2", + "num_enum", "ouroboros", "percentage", "qualifier_attr", @@ -6184,7 +6153,7 @@ dependencies = [ "memmap2", "num-derive 0.4.2", "num-traits", - "num_enum 0.7.2", + "num_enum", "pbkdf2 0.11.0", "qstring", "qualifier_attr", @@ -6725,9 +6694,9 @@ checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" [[package]] name = "spl-associated-token-account" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" +checksum = "4414117bead33f2a5cf059cefac0685592bdd36f31f3caac49b89bff7f6bbf32" dependencies = [ "assert_matches", "borsh 0.10.3", @@ -6741,9 +6710,9 @@ dependencies = [ [[package]] name = "spl-discriminator" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce5d563b58ef1bb2cdbbfe0dfb9ffdc24903b10ae6a4df2d8f425ece375033f" +checksum = "daa600f2fe56f32e923261719bae640d873edadbc5237681a39b8e37bfd4d263" dependencies = [ "bytemuck", "solana-program", @@ -6752,9 +6721,9 @@ dependencies = [ [[package]] name = "spl-discriminator-derive" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" +checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", @@ -6763,9 +6732,9 @@ dependencies = [ [[package]] name = "spl-discriminator-syn" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" +checksum = 
"18fea7be851bd98d10721782ea958097c03a0c2a07d8d4997041d0ece6319a63" dependencies = [ "proc-macro2", "quote", @@ -6776,18 +6745,18 @@ dependencies = [ [[package]] name = "spl-memo" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f180b03318c3dbab3ef4e1e4d46d5211ae3c780940dd0a28695aba4b59a75a" +checksum = "58e9bae02de3405079a057fe244c867a08f92d48327d231fc60da831f94caf0a" dependencies = [ "solana-program", ] [[package]] name = "spl-pod" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2881dddfca792737c0706fa0175345ab282b1b0879c7d877bad129645737c079" +checksum = "85a5db7e4efb1107b0b8e52a13f035437cdcb36ef99c58f6d467f089d9b2915a" dependencies = [ "borsh 0.10.3", "bytemuck", @@ -6798,9 +6767,9 @@ dependencies = [ [[package]] name = "spl-program-error" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" +checksum = "7e0657b6490196971d9e729520ba934911ff41fbb2cb9004463dbe23cf8b4b4f" dependencies = [ "num-derive 0.4.2", "num-traits", @@ -6811,9 +6780,9 @@ dependencies = [ [[package]] name = "spl-program-error-derive" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" +checksum = "1845dfe71fd68f70382232742e758557afe973ae19e6c06807b2c30f5d5cb474" dependencies = [ "proc-macro2", "quote", @@ -6823,9 +6792,9 @@ dependencies = [ [[package]] name = "spl-tlv-account-resolution" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f7020347c07892c08560d230fbb8a980316c9e198e22b198b7b9d951ff96047" +checksum = "56f335787add7fa711819f9e7c573f8145a5358a709446fe2d24bf2a88117c90" dependencies = [ "bytemuck", "solana-program", @@ -6837,30 +6806,30 @@ dependencies = [ [[package]] name = "spl-token" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08459ba1b8f7c1020b4582c4edf0f5c7511a5e099a7a97570c9698d4f2337060" +checksum = "95ae123223633a389f95d1da9d49c2d0a50d499e7060b9624626a69e536ad2a4" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.3.0", + "num-derive 0.4.2", "num-traits", - "num_enum 0.6.1", + "num_enum", "solana-program", "thiserror", ] [[package]] name = "spl-token-2022" -version = "1.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" +checksum = "b9fec83597cf7be923c5c3bdfd2fcc08cdfacd2eeb6c4e413da06b6916f50827" dependencies = [ "arrayref", "bytemuck", "num-derive 0.4.2", "num-traits", - "num_enum 0.7.2", + "num_enum", "solana-program", "solana-security-txt", "solana-zk-token-sdk", @@ -6876,9 +6845,9 @@ dependencies = [ [[package]] name = "spl-token-group-interface" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b889509d49fa74a4a033ca5dae6c2307e9e918122d97e58562f5c4ffa795c75d" +checksum = "7eb67fbacd587377a400aba81718abe4424d0e9d5ea510034d3b7f130d102153" dependencies = [ "bytemuck", "solana-program", @@ -6889,9 +6858,9 @@ dependencies = [ [[package]] name = "spl-token-metadata-interface" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c16ce3ba6979645fb7627aa1e435576172dd63088dc7848cb09aa331fa1fe4f" +checksum = "e16aa8f64b6e0eaab3f5034e84d867c8435d8216497b4543a4978a31f4b6e8a8" dependencies = [ "borsh 0.10.3", "solana-program", @@ -6903,9 +6872,9 @@ dependencies = [ [[package]] name = "spl-transfer-hook-interface" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aabdb7c471566f6ddcee724beb8618449ea24b399e58d464d6b5bc7db550259" +checksum = "5f6dfe329fcff44cbe2eea994bd8f737f0b0a69faed39e56f9b6ee03badf7e14" dependencies = [ "arrayref", "bytemuck", @@ -6919,9 +6888,9 @@ dependencies = [ [[package]] name = "spl-type-length-value" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a468e6f6371f9c69aae760186ea9f1a01c2908351b06a5e0026d21cfc4d7ecac" +checksum = "8f9ebd75d29c5f48de5f6a9c114e08531030b75b8ac2c557600ac7da0b73b1e8" dependencies = [ "bytemuck", "solana-program", From f8d01df1db23b10ba27d96f902bb4f3184e15947 Mon Sep 17 00:00:00 2001 From: Bork Bork <107079055+BorkBorked@users.noreply.github.com> Date: Sun, 11 Feb 2024 01:46:07 +0100 Subject: [PATCH 169/401] chore(docs): proofreading (#35172) * proofread * proofread --- docs/README.md | 2 +- docs/src/proposals/comprehensive-compute-fees.md | 2 +- docs/src/proposals/simple-payment-and-state-verification.md | 2 +- metrics/README.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/README.md b/docs/README.md index 0e002b6ac75dcf..d537c5c8c489b7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,7 +8,7 @@ Static content delivery is handled using `vercel`. > documentation which is generalize to the Solana protocol as a whole, and apply > to all Solana validator implementations, are maintained within the > [`developer-content`](https://github.com/solana-foundation/developer-content/) -> repo. Those "common docs" are manged by the Solana Foundation within their +> repo. Those "common docs" are managed by the Solana Foundation within their > GitHub organization and are publicly accessible via > [solana.com/docs](https://solana.com/docs) diff --git a/docs/src/proposals/comprehensive-compute-fees.md b/docs/src/proposals/comprehensive-compute-fees.md index 79e92b7b9c9edb..e0194811e2d70f 100644 --- a/docs/src/proposals/comprehensive-compute-fees.md +++ b/docs/src/proposals/comprehensive-compute-fees.md @@ -35,7 +35,7 @@ A fee could be calculated based on: - Fixed rate per writable account 3. Data byte cost - Fixed rate per byte of the sum of the length all a transactions instruction - datas + data 4. Account sizes - Account sizes can't be known up-front but can account for a considerable amount of the load the transaction incurs on the network. 
The payer will diff --git a/docs/src/proposals/simple-payment-and-state-verification.md b/docs/src/proposals/simple-payment-and-state-verification.md index caa8a2d55e1f4c..6a561cd96a19a1 100644 --- a/docs/src/proposals/simple-payment-and-state-verification.md +++ b/docs/src/proposals/simple-payment-and-state-verification.md @@ -117,7 +117,7 @@ https://github.com/solana-labs/solana/blob/b6bfed64cb159ee67bb6bdbaefc7f833bbed3 let mut hash = hashv(&[ // bank hash of the parent block self.parent_hash.as_ref(), - // hash of all the modifed accounts + // hash of all the modified accounts accounts_delta_hash.hash.as_ref(), // Number of signatures processed in this block &signature_count_buf, diff --git a/metrics/README.md b/metrics/README.md index f1890aacc308fe..15c8b148a09478 100644 --- a/metrics/README.md +++ b/metrics/README.md @@ -41,8 +41,8 @@ The cluster telemetry dashboard shows the current state of the cluster: The fee market dashboard shows: -1. Total Priorization Fees -2. Block Min Priorization Fees +1. Total Prioritization Fees +2. Block Min Prioritization Fees 3. Cost Tracker Stats ### Ping Results From adb8c2ac86c62cf9b2c97a734bb2baba4fbd9bb6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 11 Feb 2024 23:45:05 +0800 Subject: [PATCH 170/401] build(deps): bump bytemuck from 1.14.2 to 1.14.3 (#35161) * build(deps): bump bytemuck from 1.14.2 to 1.14.3 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.14.2 to 1.14.3. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.14.2...v1.14.3) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 414ffee64c33a9..8db89b9ef01303 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -911,9 +911,9 @@ checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "bytemuck" -version = "1.14.2" +version = "1.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" +checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 7cc9c5aa84049e..366ef313e55e63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -164,7 +164,7 @@ bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.7" -bytemuck = "1.14.2" +bytemuck = "1.14.3" byteorder = "1.5.0" bytes = "1.5" bzip2 = "0.4.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5a3616e966082f..e97cdba8b750fa 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -834,9 +834,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.14.2" +version = "1.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" +checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f" dependencies = [ "bytemuck_derive", ] From 39b4aecc7d28b021f33e6959b5a1dfc4fc1fafbb Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 11 Feb 2024 23:50:22 +0800 Subject: [PATCH 171/401] chore: bump toml for programs/sbf from 0.5.8 to 0.5.11 (#35159) --- programs/sbf/Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e97cdba8b750fa..115e6ea235d646 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7418,9 +7418,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] From 07717c2043eb39a3a1a0dc53d5eede807fb07aed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 15:26:33 +0800 Subject: [PATCH 172/401] build(deps): bump chrono from 0.4.33 to 0.4.34 (#35176) * build(deps): bump chrono from 0.4.33 to 0.4.34 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.33 to 0.4.34. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.33...v0.4.34) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8db89b9ef01303..07dbe0854f242e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1064,9 +1064,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", diff --git a/Cargo.toml b/Cargo.toml index 366ef313e55e63..78400151de336e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -171,7 +171,7 @@ bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" cc = "1.0.83" -chrono = { version = "0.4.33", default-features = false } +chrono = { version = "0.4.34", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" console = "0.15.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 115e6ea235d646..3799d1a2fa7708 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -940,9 +940,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", From d1a0d3adb1a671068dc27ae881b3e2835a394ad8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 15:26:58 +0800 Subject: [PATCH 173/401] build(deps): bump thiserror from 1.0.56 to 1.0.57 (#35181) * build(deps): bump thiserror from 1.0.56 to 1.0.57 Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.56 to 1.0.57. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.56...1.0.57) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07dbe0854f242e..9146a3d2cf061b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8272,18 +8272,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 78400151de336e..678977d936edcd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -412,7 +412,7 @@ tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.10.0" test-case = "3.3.1" -thiserror = "1.0.56" +thiserror = "1.0.57" tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3799d1a2fa7708..1ace3220b86898 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7183,18 +7183,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", From 246a2c71be99ea73ecfb98dd2f3447908e76ef8e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 15:27:19 +0800 Subject: [PATCH 174/401] build(deps): bump indexmap from 2.2.2 to 2.2.3 (#35180) * build(deps): bump indexmap from 2.2.2 to 2.2.3 Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.2.2 to 2.2.3. - [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.2.2...2.2.3) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9146a3d2cf061b..ebc6f2ffe73446 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2298,7 +2298,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.2", + "indexmap 2.2.3", "slab", "tokio", "tokio-util 0.7.1", @@ -2674,9 +2674,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4868,7 +4868,7 @@ version = "0.9.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "itoa", "ryu", "serde", @@ -5212,7 +5212,7 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools", "lazy_static", "libsecp256k1", @@ -5692,7 +5692,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "log", "quinn", @@ -5773,7 +5773,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "log", "rand 0.8.5", @@ -6112,7 +6112,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools", "log", "lru", @@ -7204,7 +7204,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools", "libc", "log", @@ -7315,7 +7315,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "pickledb", "serde", @@ -7344,7 +7344,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "log", "rayon", @@ -8555,7 +8555,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "toml_datetime", "winnow", ] @@ -8566,7 +8566,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index 678977d936edcd..169ade4c9a3528 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -222,7 +222,7 @@ hyper = "0.14.28" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.11" -indexmap = "2.2.2" +indexmap = "2.2.3" indicatif = "0.17.7" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1ace3220b86898..43ea1700146e82 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1936,7 +1936,7 @@ dependencies = [ "futures-sink", "futures-util", 
"http", - "indexmap 2.2.2", + "indexmap 2.2.3", "slab", "tokio", "tokio-util 0.7.1", @@ -2287,9 +2287,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -4317,7 +4317,7 @@ version = "0.9.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "itoa", "ryu", "serde", @@ -4588,7 +4588,7 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools", "lazy_static", "log", @@ -4815,7 +4815,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "log", "quinn", @@ -4865,7 +4865,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.2", + "indexmap 2.2.3", "log", "rand 0.8.5", "rayon", @@ -5116,7 +5116,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools", "log", "lru", @@ -6279,7 +6279,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.2.2", + "indexmap 2.2.3", "itertools", "libc", "log", @@ -6381,7 +6381,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.2", + "indexmap 2.2.3", "indicatif", "log", "rayon", @@ -7437,7 +7437,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "toml_datetime", "winnow", ] From 9cc1fa8d00a3af96660bcc74b0cc89a2932af867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 15:28:09 +0800 Subject: [PATCH 175/401] build(deps): bump either from 1.9.0 to 1.10.0 (#35179) * build(deps): bump either from 1.9.0 to 1.10.0 Bumps [either](https://github.com/rayon-rs/either) from 1.9.0 to 1.10.0. - [Commits](https://github.com/rayon-rs/either/compare/1.9.0...1.10.0) --- updated-dependencies: - dependency-name: either dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ebc6f2ffe73446..82411b2c454712 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1802,9 +1802,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "encode_unicode" diff --git a/Cargo.toml b/Cargo.toml index 169ade4c9a3528..38fb07cf4ff8d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -196,7 +196,7 @@ dlopen2 = "0.5.0" eager = "0.1.0" ed25519-dalek = "=1.0.1" ed25519-dalek-bip32 = "0.2.0" -either = "1.9.0" +either = "1.10.0" enum-iterator = "1.5.0" env_logger = "0.9.3" etcd-client = "0.11.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 43ea1700146e82..eacdcb3d3cfd83 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1488,9 +1488,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elf" From 233f6d270d6dfcdc42df902ac2075ea9e51b7d24 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 08:32:38 +0000 Subject: [PATCH 176/401] build(deps): bump ahash from 0.8.7 to 0.8.8 (#35177) * build(deps): bump ahash from 0.8.7 to 0.8.8 Bumps [ahash](https://github.com/tkaitchuck/ahash) from 0.8.7 to 0.8.8. - [Release notes](https://github.com/tkaitchuck/ahash/releases) - [Commits](https://github.com/tkaitchuck/ahash/compare/0.8.7...v0.8.8) --- updated-dependencies: - dependency-name: ahash dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82411b2c454712..17bc6edc50917d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -2344,7 +2344,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", ] [[package]] @@ -6500,7 +6500,7 @@ dependencies = [ name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "assert_matches", "bincode", "bv", diff --git a/Cargo.toml b/Cargo.toml index 38fb07cf4ff8d7..ecdc4a1ca6b798 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,7 +138,7 @@ edition = "2021" Inflector = "0.11.4" aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" -ahash = "0.8.7" +ahash = "0.8.8" anyhow = "1.0.79" arbitrary = "1.3.2" ark-bn254 = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index eacdcb3d3cfd83..5bd1b633f2c45c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -1976,7 +1976,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", ] [[package]] @@ -5299,7 +5299,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "bincode", "bv", "caps", From bf1becb3a8421c84d8e215da3454d1a3ba186f59 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 08:55:24 +0000 Subject: [PATCH 177/401] build(deps): bump indicatif from 0.17.7 to 0.17.8 (#35178) * build(deps): bump indicatif from 0.17.7 to 0.17.8 Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.7 to 0.17.8. - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/commits) --- updated-dependencies: - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17bc6edc50917d..17f821a640e500 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2685,9 +2685,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", diff --git a/Cargo.toml b/Cargo.toml index ecdc4a1ca6b798..adc66eecffda3b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -223,7 +223,7 @@ hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.11" indexmap = "2.2.3" -indicatif = "0.17.7" +indicatif = "0.17.8" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5bd1b633f2c45c..76f44ab8c97949 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2298,9 +2298,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", From 897adb271196ba75edd752e0d21696cee8610017 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 13 Feb 2024 09:42:05 -0700 Subject: [PATCH 178/401] Update the directory naming for incorrect shred version backup (#35158) The directory is currently named with the expected_shred_version; however, the backup contains shreds that do NOT match the expected_shred_version. So, use the found (incorrect) shred version in the name instead. --- core/src/validator.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index a90044881ee458..f1432d67f397dc 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -2097,12 +2097,13 @@ fn maybe_warp_slot( Ok(()) } -/// Searches the blockstore for data shreds with the incorrect shred version. 
-fn blockstore_contains_bad_shred_version( +/// Searches the blockstore for data shreds with a shred version that differs +/// from the passed `expected_shred_version` +fn blockstore_contains_incorrect_shred_version( blockstore: &Blockstore, start_slot: Slot, expected_shred_version: u16, -) -> Result { +) -> Result, BlockstoreError> { const TIMEOUT: Duration = Duration::from_secs(60); let timer = Instant::now(); // Search for shreds with incompatible version in blockstore @@ -2113,7 +2114,7 @@ fn blockstore_contains_bad_shred_version( let shreds = blockstore.get_data_shreds_for_slot(slot, 0)?; for shred in &shreds { if shred.version() != expected_shred_version { - return Ok(true); + return Ok(Some(shred.version())); } } if timer.elapsed() > TIMEOUT { @@ -2121,7 +2122,7 @@ fn blockstore_contains_bad_shred_version( break; } } - Ok(false) + Ok(None) } /// If the blockstore contains any shreds with the incorrect shred version, @@ -2134,10 +2135,13 @@ fn backup_and_clear_blockstore( ) -> Result<(), BlockstoreError> { let blockstore = Blockstore::open_with_options(ledger_path, blockstore_options_from_config(config))?; - let do_copy_and_clear = - blockstore_contains_bad_shred_version(&blockstore, start_slot, expected_shred_version)?; + let incorrect_shred_version = blockstore_contains_incorrect_shred_version( + &blockstore, + start_slot, + expected_shred_version, + )?; - if do_copy_and_clear { + if let Some(incorrect_shred_version) = incorrect_shred_version { // .unwrap() safe because getting to this point implies blockstore has slots/shreds let end_slot = blockstore.highest_slot()?.unwrap(); @@ -2149,7 +2153,7 @@ fn backup_and_clear_blockstore( .ledger_column_options .shred_storage_type .blockstore_directory(), - expected_shred_version, + incorrect_shred_version, start_slot, end_slot ); From 2d09e4965e92f2ad624123163f821812a821b49e Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 14 Feb 2024 18:33:20 +0800 Subject: [PATCH 179/401] clean feature: enable_bpf_loader_extend_program_ix (#35194) --- programs/bpf_loader/src/lib.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 21a7b5fed77257..39b00d0bfc4d25 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -35,8 +35,8 @@ use { entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ bpf_account_data_direct_mapping, deprecate_executable_meta_update_in_bpf_loader, - disable_bpf_loader_instructions, enable_bpf_loader_extend_program_ix, - enable_bpf_loader_set_authority_checked_ix, FeatureSet, + disable_bpf_loader_instructions, enable_bpf_loader_set_authority_checked_ix, + FeatureSet, }, instruction::{AccountMeta, InstructionError}, loader_instruction::LoaderInstruction, @@ -1149,13 +1149,6 @@ fn process_loader_upgradeable_instruction( } } UpgradeableLoaderInstruction::ExtendProgram { additional_bytes } => { - if !invoke_context - .feature_set - .is_active(&enable_bpf_loader_extend_program_ix::ID) - { - return Err(InstructionError::InvalidInstructionData); - } - if additional_bytes == 0 { ic_logger_msg!(log_collector, "Additional bytes must be greater than 0"); return Err(InstructionError::InvalidInstructionData); From 716ad5441bb35151c934c5f131608b6767ff6e3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 14 Feb 2024 12:13:53 +0100 Subject: [PATCH 180/401] Refactor - `LoadedPrograms::replenish()` (#35145) * Replace LoadedPrograms::replenish() with 
LoadedPrograms::assign_program(). * Removes LoadedPrograms::replenish(). * Defines replacement by having the same loaded program type. * Implements a proper insertion sort with a two key comparison operator. --- program-runtime/src/loaded_programs.rs | 238 +++++++++++++------------ runtime/src/bank.rs | 4 +- 2 files changed, 123 insertions(+), 119 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 19f5f7486ea330..a760660b0ba334 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -696,24 +696,20 @@ impl LoadedPrograms { &self.environments } - /// Refill the cache with a single entry. It's typically called during transaction loading, + /// Insert a single entry. It's typically called during transaction loading, /// when the cache doesn't contain the entry corresponding to program `key`. - /// The function dedupes the cache, in case some other thread replenished the entry in parallel. - pub fn replenish( - &mut self, - key: Pubkey, - entry: Arc, - ) -> (bool, Arc) { + pub fn assign_program(&mut self, key: Pubkey, entry: Arc) -> bool { let slot_versions = &mut self.entries.entry(key).or_default().slot_versions; - let index = slot_versions - .iter() - .position(|at| at.effective_slot >= entry.effective_slot); - if let Some(existing) = index.and_then(|index| slot_versions.get_mut(index)) { - if existing.deployment_slot == entry.deployment_slot - && existing.effective_slot == entry.effective_slot - { - if matches!(existing.program, LoadedProgramType::Unloaded(_)) { - // The unloaded program is getting reloaded + match slot_versions.binary_search_by(|at| { + at.effective_slot + .cmp(&entry.effective_slot) + .then(at.deployment_slot.cmp(&entry.deployment_slot)) + }) { + Ok(index) => { + let existing = slot_versions.get_mut(index).unwrap(); + if std::mem::discriminant(&existing.program) + != std::mem::discriminant(&entry.program) + { // Copy over the usage counter to the new entry entry.tx_usage_counter.fetch_add( existing.tx_usage_counter.load(Ordering::Relaxed), @@ -723,34 +719,21 @@ impl LoadedPrograms { existing.ix_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); + *existing = entry.clone(); self.stats.reloads.fetch_add(1, Ordering::Relaxed); - } else if existing.is_tombstone() != entry.is_tombstone() { - // Either the old entry is tombstone and the new one is not. - // (Let's give the new entry a chance). - // Or, the old entry is not a tombstone and the new one is a tombstone. - // (Remove the old entry, as the tombstone makes it obsolete). - self.stats.insertions.fetch_add(1, Ordering::Relaxed); + false } else { + // Something is wrong, I can feel it ... self.stats.replacements.fetch_add(1, Ordering::Relaxed); - return (true, existing.clone()); + true } - *existing = entry.clone(); - return (false, entry); + } + Err(index) => { + self.stats.insertions.fetch_add(1, Ordering::Relaxed); + slot_versions.insert(index, entry.clone()); + false } } - self.stats.insertions.fetch_add(1, Ordering::Relaxed); - slot_versions.insert(index.unwrap_or(slot_versions.len()), entry.clone()); - (false, entry) - } - - /// Assign the program `entry` to the given `key` in the cache. - /// This is typically called when a deployed program is managed (un-/re-/deployed) via - /// loader instructions. Because of the cooldown, entires can not have the same - /// deployment_slot and effective_slot. 
- pub fn assign_program(&mut self, key: Pubkey, entry: Arc) -> Arc { - let (was_occupied, entry) = self.replenish(key, entry); - debug_assert!(!was_occupied); - entry } pub fn prune_by_deployment_slot(&mut self, slot: Slot) { @@ -986,7 +969,7 @@ impl LoadedPrograms { pub fn merge(&mut self, tx_batch_cache: &LoadedProgramsForTxBatch) { tx_batch_cache.entries.iter().for_each(|(key, entry)| { - self.replenish(*key, entry.clone()); + self.assign_program(*key, entry.clone()); }) } @@ -1233,7 +1216,9 @@ mod tests { slot: Slot, reason: LoadedProgramType, ) -> Arc { - cache.assign_program(key, Arc::new(LoadedProgram::new_tombstone(slot, reason))) + let program = Arc::new(LoadedProgram::new_tombstone(slot, reason)); + cache.assign_program(key, program.clone()); + program } fn insert_unloaded_program( @@ -1256,7 +1241,8 @@ mod tests { .to_unloaded() .expect("Failed to unload the program"), ); - cache.replenish(key, unloaded).1 + cache.assign_program(key, unloaded.clone()); + unloaded } fn num_matching_entries(cache: &LoadedPrograms, predicate: P) -> usize @@ -1323,7 +1309,7 @@ mod tests { .enumerate() .for_each(|(i, deployment_slot)| { let usage_counter = *program1_usage_counters.get(i).unwrap_or(&0); - cache.replenish( + cache.assign_program( program1, new_test_loaded_program_with_usage( *deployment_slot, @@ -1356,7 +1342,7 @@ mod tests { .enumerate() .for_each(|(i, deployment_slot)| { let usage_counter = *program2_usage_counters.get(i).unwrap_or(&0); - cache.replenish( + cache.assign_program( program2, new_test_loaded_program_with_usage( *deployment_slot, @@ -1388,7 +1374,7 @@ mod tests { .enumerate() .for_each(|(i, deployment_slot)| { let usage_counter = *program3_usage_counters.get(i).unwrap_or(&0); - cache.replenish( + cache.assign_program( program3, new_test_loaded_program_with_usage( *deployment_slot, @@ -1470,7 +1456,7 @@ mod tests { .enumerate() .for_each(|(i, deployment_slot)| { let usage_counter = *program1_usage_counters.get(i).unwrap_or(&0); - cache.replenish( + cache.assign_program( program1, new_test_loaded_program_with_usage( *deployment_slot, @@ -1503,7 +1489,7 @@ mod tests { .enumerate() .for_each(|(i, deployment_slot)| { let usage_counter = *program2_usage_counters.get(i).unwrap_or(&0); - cache.replenish( + cache.assign_program( program2, new_test_loaded_program_with_usage( *deployment_slot, @@ -1535,7 +1521,7 @@ mod tests { .enumerate() .for_each(|(i, deployment_slot)| { let usage_counter = *program3_usage_counters.get(i).unwrap_or(&0); - cache.replenish( + cache.assign_program( program3, new_test_loaded_program_with_usage( *deployment_slot, @@ -1628,7 +1614,7 @@ mod tests { let program = Pubkey::new_unique(); let num_total_programs = 6; (0..num_total_programs).for_each(|i| { - cache.replenish( + cache.assign_program( program, new_test_loaded_program_with_usage(i, i + 2, AtomicU64::new(i + 10)), ); @@ -1655,7 +1641,7 @@ mod tests { // Replenish the program that was just unloaded. Use 0 as the usage counter. This should be // updated with the usage counter from the unloaded program. 
- cache.replenish( + cache.assign_program( program, new_test_loaded_program_with_usage(0, 2, AtomicU64::new(0)), ); @@ -1674,21 +1660,63 @@ mod tests { } #[test] - fn test_replace_tombstones() { + fn test_fuzz_assign_program_order() { + use rand::prelude::SliceRandom; + const EXPECTED_ENTRIES: [(u64, u64); 7] = + [(1, 2), (5, 5), (5, 6), (5, 10), (9, 10), (10, 10), (3, 12)]; + let mut rng = rand::thread_rng(); + let program_id = Pubkey::new_unique(); + for _ in 0..1000 { + let mut entries = EXPECTED_ENTRIES.to_vec(); + entries.shuffle(&mut rng); + let mut cache = new_mock_cache::(); + for (deployment_slot, effective_slot) in entries { + assert!(!cache.assign_program( + program_id, + new_test_loaded_program(deployment_slot, effective_slot) + )); + } + for ((deployment_slot, effective_slot), entry) in EXPECTED_ENTRIES + .iter() + .zip(cache.entries.get(&program_id).unwrap().slot_versions.iter()) + { + assert_eq!(entry.deployment_slot, *deployment_slot); + assert_eq!(entry.effective_slot, *effective_slot); + } + } + } + + #[test] + fn test_assign_program_tombstones() { let mut cache = new_mock_cache::(); let program1 = Pubkey::new_unique(); - let env = Arc::new(BuiltinProgram::new_mock()); + let env = cache.environments.program_runtime_v1.clone(); + set_tombstone( &mut cache, program1, 10, - LoadedProgramType::FailedVerification(env), + LoadedProgramType::FailedVerification(env.clone()), ); + assert_eq!(cache.entries.get(&program1).unwrap().slot_versions.len(), 1); + set_tombstone(&mut cache, program1, 10, LoadedProgramType::Closed); + assert_eq!(cache.entries.get(&program1).unwrap().slot_versions.len(), 1); + set_tombstone( + &mut cache, + program1, + 10, + LoadedProgramType::FailedVerification(env.clone()), + ); + assert_eq!(cache.entries.get(&program1).unwrap().slot_versions.len(), 1); - let loaded_program = new_test_loaded_program(10, 10); - let (existing, program) = cache.replenish(program1, loaded_program.clone()); - assert!(!existing); - assert_eq!(program, loaded_program); + // Fail on exact replacement + assert!(cache.assign_program( + program1, + Arc::new(LoadedProgram::new_tombstone( + 10, + LoadedProgramType::FailedVerification(env) + )) + )); } #[test] @@ -1726,11 +1754,7 @@ mod tests { // Add a program at slot 50, and a tombstone for the program at slot 60 let program2 = Pubkey::new_unique(); - assert!( - !cache - .replenish(program2, new_test_builtin_program(50, 51)) - .0 - ); + cache.assign_program(program2, new_test_builtin_program(50, 51)); let second_level = &cache .entries .get(&program2) @@ -1830,10 +1854,7 @@ mod tests { cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); - let loaded_program = new_test_loaded_program(10, 10); - let (existing, program) = cache.replenish(program1, loaded_program.clone()); - assert!(!existing); - assert_eq!(program, loaded_program); + cache.assign_program(program1, new_test_loaded_program(10, 10)); let new_env = Arc::new(BuiltinProgram::new_mock()); cache.upcoming_environments = Some(ProgramRuntimeEnvironments { @@ -1849,9 +1870,7 @@ mod tests { ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), }); - let (existing, program) = cache.replenish(program1, updated_program.clone()); - assert!(!existing); - assert_eq!(program, updated_program); + cache.assign_program(program1, updated_program.clone()); // Test that there are 2 entries for the program assert_eq!( @@ -1986,38 +2005,27 @@ mod tests { cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); - 
assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(10, 11)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); - - // Test: inserting duplicate entry return pre existing entry from the cache - assert!(cache.replenish(program1, new_test_loaded_program(20, 21)).0); + cache.assign_program(program1, new_test_loaded_program(0, 1)); + cache.assign_program(program1, new_test_loaded_program(10, 11)); + cache.assign_program(program1, new_test_loaded_program(20, 21)); let program2 = Pubkey::new_unique(); - assert!(!cache.replenish(program2, new_test_loaded_program(5, 6)).0); - assert!( - !cache - .replenish( - program2, - new_test_loaded_program(11, 11 + DELAY_VISIBILITY_SLOT_OFFSET) - ) - .0 + cache.assign_program(program2, new_test_loaded_program(5, 6)); + cache.assign_program( + program2, + new_test_loaded_program(11, 11 + DELAY_VISIBILITY_SLOT_OFFSET), ); let program3 = Pubkey::new_unique(); - assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); + cache.assign_program(program3, new_test_loaded_program(25, 26)); let program4 = Pubkey::new_unique(); - assert!(!cache.replenish(program4, new_test_loaded_program(0, 1)).0); - assert!(!cache.replenish(program4, new_test_loaded_program(5, 6)).0); + cache.assign_program(program4, new_test_loaded_program(0, 1)); + cache.assign_program(program4, new_test_loaded_program(5, 6)); // The following is a special case, where effective slot is 3 slots in the future - assert!( - !cache - .replenish( - program4, - new_test_loaded_program(15, 15 + DELAY_VISIBILITY_SLOT_OFFSET) - ) - .0 + cache.assign_program( + program4, + new_test_loaded_program(15, 15 + DELAY_VISIBILITY_SLOT_OFFSET), ); // Current fork graph @@ -2243,15 +2251,15 @@ mod tests { cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); - assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); + cache.assign_program(program1, new_test_loaded_program(0, 1)); + cache.assign_program(program1, new_test_loaded_program(20, 21)); let program2 = Pubkey::new_unique(); - assert!(!cache.replenish(program2, new_test_loaded_program(5, 6)).0); - assert!(!cache.replenish(program2, new_test_loaded_program(11, 12)).0); + cache.assign_program(program2, new_test_loaded_program(5, 6)); + cache.assign_program(program2, new_test_loaded_program(11, 12)); let program3 = Pubkey::new_unique(); - assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); + cache.assign_program(program3, new_test_loaded_program(25, 26)); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 let mut missing = vec![ @@ -2316,12 +2324,12 @@ mod tests { cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); - assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); + cache.assign_program(program1, new_test_loaded_program(0, 1)); + cache.assign_program(program1, new_test_loaded_program(20, 21)); let program2 = Pubkey::new_unique(); - assert!(!cache.replenish(program2, new_test_loaded_program(5, 6)).0); - assert!(!cache.replenish(program2, new_test_loaded_program(11, 12)).0); + cache.assign_program(program2, new_test_loaded_program(5, 6)); + cache.assign_program(program2, new_test_loaded_program(11, 12)); let program3 = Pubkey::new_unique(); // Insert an unloaded program with correct/cache's 
environment at slot 25 @@ -2330,17 +2338,13 @@ mod tests { // Insert another unloaded program with a different environment at slot 20 // Since this entry's environment won't match cache's environment, looking up this // entry should return missing instead of unloaded entry. - assert!( - !cache - .replenish( - program3, - Arc::new( - new_test_loaded_program(20, 21) - .to_unloaded() - .expect("Failed to create unloaded program") - ) - ) - .0 + cache.assign_program( + program3, + Arc::new( + new_test_loaded_program(20, 21) + .to_unloaded() + .expect("Failed to create unloaded program"), + ), ); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 @@ -2407,8 +2411,8 @@ mod tests { cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); - assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(5, 6)).0); + cache.assign_program(program1, new_test_loaded_program(0, 1)); + cache.assign_program(program1, new_test_loaded_program(5, 6)); cache.prune(10, 0); @@ -2447,11 +2451,11 @@ mod tests { cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); - assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(5, 6)).0); + cache.assign_program(program1, new_test_loaded_program(0, 1)); + cache.assign_program(program1, new_test_loaded_program(5, 6)); let program2 = Pubkey::new_unique(); - assert!(!cache.replenish(program2, new_test_loaded_program(10, 11)).0); + cache.assign_program(program2, new_test_loaded_program(10, 11)); let mut missing = vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f2722983dcdbdf..1a9f1d8bba1abf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1363,7 +1363,7 @@ impl Bank { drop(loaded_programs_cache); let recompiled = new.load_program(&key, false, Some(program_to_recompile)); let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.replenish(key, recompiled); + loaded_programs_cache.assign_program(key, recompiled); } } else if new.epoch() != loaded_programs_cache.latest_root_epoch || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch @@ -7056,7 +7056,7 @@ impl Bank { self.loaded_programs_cache .write() .unwrap() - .replenish(program_id, Arc::new(builtin)); + .assign_program(program_id, Arc::new(builtin)); debug!("Added program {} under {:?}", name, program_id); } From 4b77ee5a1c8cad855f52babc30a12bac4c87f17b Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Wed, 14 Feb 2024 21:27:52 +0900 Subject: [PATCH 181/401] Report lost_insertions metrics correctly (#35191) --- program-runtime/src/loaded_programs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index a760660b0ba334..740eac7fc9a342 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -180,7 +180,7 @@ impl Stats { let evictions: u64 = self.evictions.values().sum(); let reloads = self.reloads.load(Ordering::Relaxed); let insertions = self.insertions.load(Ordering::Relaxed); - let lost_insertions = self.insertions.load(Ordering::Relaxed); + let lost_insertions = self.lost_insertions.load(Ordering::Relaxed); let replacements = self.replacements.load(Ordering::Relaxed); let one_hit_wonders = self.one_hit_wonders.load(Ordering::Relaxed); 
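        // Each counter above is loaded once and reported under its own field;
        // the fix in this hunk makes `lost_insertions` read its dedicated
        // atomic instead of re-loading `insertions`, which had reported the
        // insertion count under the lost_insertions field.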
let prunes_orphan = self.prunes_orphan.load(Ordering::Relaxed); From 1752202169d27675179f0ab3a4256f7d14b09b1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 14 Feb 2024 17:28:58 +0100 Subject: [PATCH 182/401] Refactor - Adds check that only loaded programs can be unloaded (#35146) * Adds check that only loaded programs can be unloaded. * Removes unused code. * Adds test_unloaded(). --- program-runtime/src/loaded_programs.rs | 85 ++++++++++++++++++-------- runtime/src/bank.rs | 7 --- 2 files changed, 60 insertions(+), 32 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 740eac7fc9a342..6da84b0d1f0692 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -382,6 +382,20 @@ impl LoadedProgram { } pub fn to_unloaded(&self) -> Option { + match &self.program { + LoadedProgramType::LegacyV0(_) + | LoadedProgramType::LegacyV1(_) + | LoadedProgramType::Typed(_) => {} + #[cfg(test)] + LoadedProgramType::TestLoaded(_) => {} + LoadedProgramType::FailedVerification(_) + | LoadedProgramType::Closed + | LoadedProgramType::DelayVisibility + | LoadedProgramType::Unloaded(_) + | LoadedProgramType::Builtin(_) => { + return None; + } + } Some(Self { program: LoadedProgramType::Unloaded(self.program.get_environment()?.clone()), account_size: self.account_size, @@ -1054,31 +1068,6 @@ impl LoadedPrograms { } } - fn unload_program(&mut self, id: &Pubkey) { - if let Some(second_level) = self.entries.get_mut(id) { - for entry in second_level.slot_versions.iter_mut() { - if let Some(unloaded) = entry.to_unloaded() { - *entry = Arc::new(unloaded); - self.stats - .evictions - .entry(*id) - .and_modify(|c| saturating_add_assign!(*c, 1)) - .or_insert(1); - } else { - error!( - "Failed to create an unloaded cache entry for a program type {:?}", - entry.program - ); - } - } - } - } - - pub fn unload_all_programs(&mut self) { - let keys = self.entries.keys().copied().collect::>(); - keys.iter().for_each(|key| self.unload_program(key)); - } - /// This function removes the given entry for the given program from the cache. /// The function expects that the program and entry exists in the cache. Otherwise it'll panic. 
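    /// A minimal usage sketch, mirroring the `test_unloaded()` test added in
    /// this patch (`cache`, `program_id`, and `entry` are placeholder test
    /// values, with `entry` previously inserted via `assign_program`):
    ///
    /// ```ignore
    /// let entry = new_test_loaded_program(10, 11);
    /// cache.assign_program(program_id, entry.clone());
    /// // Evicts the entry (it must already exist, or this panics) and
    /// // records the eviction in `cache.stats.evictions`.
    /// cache.unload_program_entry(&program_id, &entry);
    /// ```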
fn unload_program_entry(&mut self, program: &Pubkey, remove_entry: &Arc) { @@ -2390,6 +2379,52 @@ mod tests { assert!(match_missing(&missing, &program3, true)); } + #[test] + fn test_unloaded() { + let mut cache = new_mock_cache::(); + for loaded_program_type in [ + LoadedProgramType::FailedVerification(cache.environments.program_runtime_v1.clone()), + LoadedProgramType::Closed, + LoadedProgramType::DelayVisibility, // Never inserted in the global cache + LoadedProgramType::Unloaded(cache.environments.program_runtime_v1.clone()), + LoadedProgramType::Builtin(BuiltinProgram::new_mock()), + ] { + let entry = Arc::new(LoadedProgram { + program: loaded_program_type, + account_size: 0, + deployment_slot: 0, + effective_slot: 0, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), + }); + assert!(entry.to_unloaded().is_none()); + + // Check that unload_program_entry() does nothing for this entry + let program_id = Pubkey::new_unique(); + cache.assign_program(program_id, entry.clone()); + cache.unload_program_entry(&program_id, &entry); + assert_eq!( + cache.entries.get(&program_id).unwrap().slot_versions.len(), + 1 + ); + assert!(cache.stats.evictions.is_empty()); + } + + let entry = new_test_loaded_program_with_usage(1, 2, AtomicU64::new(3)); + let unloaded_entry = entry.to_unloaded().unwrap(); + assert_eq!(unloaded_entry.deployment_slot, 1); + assert_eq!(unloaded_entry.effective_slot, 2); + assert_eq!(unloaded_entry.latest_access_slot.load(Ordering::Relaxed), 1); + assert_eq!(unloaded_entry.tx_usage_counter.load(Ordering::Relaxed), 3); + + // Check that unload_program_entry() does its work + let program_id = Pubkey::new_unique(); + cache.assign_program(program_id, entry.clone()); + cache.unload_program_entry(&program_id, &entry); + assert!(cache.stats.evictions.get(&program_id).is_some()); + } + #[test] fn test_fork_prune_find_first_ancestor() { let mut cache = new_mock_cache::(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1a9f1d8bba1abf..972d89551909c2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4525,13 +4525,6 @@ impl Bank { balances } - pub fn clear_program_cache(&self) { - self.loaded_programs_cache - .write() - .unwrap() - .unload_all_programs(); - } - #[allow(clippy::type_complexity)] pub fn load_and_execute_transactions( &self, From 1c78ed6f3ed09edc2949b49b775afe3b2ca93b6b Mon Sep 17 00:00:00 2001 From: 0xF812 <67316259+f8122dac91@users.noreply.github.com> Date: Thu, 15 Feb 2024 09:39:39 +0900 Subject: [PATCH 183/401] Fix: add missing required attribute to LOOKUP_TABLE_ADDRESS arg for get subcommand (#35200) make LOOKUP_TABLE_ADDRESS arg required for address-lookup-table get subcommand --- cli/src/address_lookup_table.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs index 0a968e1b74444e..9a0943880f1a70 100644 --- a/cli/src/address_lookup_table.rs +++ b/cli/src/address_lookup_table.rs @@ -257,6 +257,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .index(1) .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) + .required(true) .help("Address of the lookup table to show"), ), ), From d472725a122cef15fbe4e6607b18dde481e87be7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 15 Feb 2024 13:46:00 +0100 Subject: [PATCH 184/401] Cleanup - `disable_bpf_loader_instructions` (#35164) * Cleans up disable_bpf_loader_instructions. 
* fix test_program_sbf_disguised_as_sbf_loader * remove bpf loader execute bench * Revert "remove bpf loader execute bench" This reverts commit f3042ee3e1d6e0208df7d7f80f61e14294f108a8. * move test utility functions out of test file * update bench to loader v3 * clippy * fix dev-context build * fix dev-context import * dev-context-util * move dev-context-util attr to module level for loader_utils --------- Co-authored-by: HaoranYi --- programs/bpf_loader/src/lib.rs | 212 +-------------------- programs/sbf/Cargo.toml | 1 + programs/sbf/benches/bpf_loader.rs | 26 +-- programs/sbf/tests/programs.rs | 289 +---------------------------- runtime/src/loader_utils.rs | 53 +++++- 5 files changed, 78 insertions(+), 503 deletions(-) diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 39b00d0bfc4d25..5ba8b26e086c69 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -35,11 +35,9 @@ use { entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ bpf_account_data_direct_mapping, deprecate_executable_meta_update_in_bpf_loader, - disable_bpf_loader_instructions, enable_bpf_loader_set_authority_checked_ix, - FeatureSet, + enable_bpf_loader_set_authority_checked_ix, FeatureSet, }, instruction::{AccountMeta, InstructionError}, - loader_instruction::LoaderInstruction, loader_upgradeable_instruction::UpgradeableLoaderInstruction, native_loader, program_utils::limited_deserialize, @@ -385,7 +383,11 @@ pub fn process_instruction_inner( process_loader_upgradeable_instruction(invoke_context) } else if bpf_loader::check_id(program_id) { invoke_context.consume_checked(DEFAULT_LOADER_COMPUTE_UNITS)?; - process_loader_instruction(invoke_context) + ic_logger_msg!( + log_collector, + "BPF loader management instructions are no longer supported", + ); + Err(InstructionError::UnsupportedProgramId) } else if bpf_loader_deprecated::check_id(program_id) { invoke_context.consume_checked(DEPRECATED_LOADER_COMPUTE_UNITS)?; ic_logger_msg!(log_collector, "Deprecated loader is no longer supported"); @@ -1349,72 +1351,6 @@ fn common_close_account( Ok(()) } -fn process_loader_instruction(invoke_context: &mut InvokeContext) -> Result<(), InstructionError> { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let instruction_data = instruction_context.get_instruction_data(); - let program_id = instruction_context.get_last_program_key(transaction_context)?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if program.get_owner() != program_id { - ic_msg!( - invoke_context, - "Executable account not owned by the BPF loader" - ); - return Err(InstructionError::IncorrectProgramId); - } - - // Return `UnsupportedProgramId` error for bpf_loader when - // `disable_bpf_loader_instruction` feature is activated. - if invoke_context - .feature_set - .is_active(&disable_bpf_loader_instructions::id()) - { - ic_msg!( - invoke_context, - "BPF loader management instructions are no longer supported" - ); - return Err(InstructionError::UnsupportedProgramId); - } - - let is_program_signer = program.is_signer(); - match limited_deserialize(instruction_data)? 
{ - LoaderInstruction::Write { offset, bytes } => { - if !is_program_signer { - ic_msg!(invoke_context, "Program account did not sign"); - return Err(InstructionError::MissingRequiredSignature); - } - drop(program); - write_program_data(offset as usize, &bytes, invoke_context)?; - } - LoaderInstruction::Finalize => { - if !is_program_signer { - ic_msg!(invoke_context, "key[0] did not sign the transaction"); - return Err(InstructionError::MissingRequiredSignature); - } - deploy_program!( - invoke_context, - *program.get_key(), - program.get_owner(), - program.get_data().len(), - invoke_context.programs_loaded_for_tx_batch.slot(), - {}, - program.get_data(), - ); - - // `deprecate_executable_meta_update_in_bpf_loader` feature doesn't - // apply to bpf_loader v2. Instead, the deployment by bpf_loader - // will be deprecated by its own feature - // `disable_bpf_loader_instructions`. Before we activate - // deprecate_executable_meta_update_in_bpf_loader, we should - // activate `disable_bpf_loader_instructions` first. - program.set_executable(true)?; - ic_msg!(invoke_context, "Finalized account {:?}", program.get_key()); - } - } - - Ok(()) -} - fn execute<'a, 'b: 'a>( executable: &'a Executable>, invoke_context: &'a mut InvokeContext<'b>, @@ -1694,7 +1630,6 @@ mod tests { Entrypoint::vm, |invoke_context| { let mut features = FeatureSet::all_enabled(); - features.deactivate(&disable_bpf_loader_instructions::id()); features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); invoke_context.feature_set = Arc::new(features); test_utils::load_all_invoked_programs(invoke_context); @@ -1715,137 +1650,6 @@ mod tests { program_account } - #[test] - fn test_bpf_loader_write() { - let loader_id = bpf_loader::id(); - let program_id = Pubkey::new_unique(); - let mut program_account = AccountSharedData::new(1, 0, &loader_id); - let instruction_data = bincode::serialize(&LoaderInstruction::Write { - offset: 3, - bytes: vec![1, 2, 3], - }) - .unwrap(); - - // Case: No program account - process_instruction( - &loader_id, - &[], - &instruction_data, - Vec::new(), - Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), - ); - - // Case: Not signed - process_instruction( - &loader_id, - &[], - &instruction_data, - vec![(program_id, program_account.clone())], - vec![AccountMeta { - pubkey: program_id, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::MissingRequiredSignature), - ); - - // Case: Write bytes to an offset - program_account.set_data(vec![0; 6]); - let accounts = process_instruction( - &loader_id, - &[], - &instruction_data, - vec![(program_id, program_account.clone())], - vec![AccountMeta { - pubkey: program_id, - is_signer: true, - is_writable: true, - }], - Ok(()), - ); - assert_eq!(&vec![0, 0, 0, 1, 2, 3], accounts.first().unwrap().data()); - - // Case: Overflow - program_account.set_data(vec![0; 5]); - process_instruction( - &loader_id, - &[], - &instruction_data, - vec![(program_id, program_account)], - vec![AccountMeta { - pubkey: program_id, - is_signer: true, - is_writable: true, - }], - Err(InstructionError::AccountDataTooSmall), - ); - } - - #[test] - fn test_bpf_loader_finalize() { - let loader_id = bpf_loader::id(); - let program_id = Pubkey::new_unique(); - let mut program_account = - load_program_account_from_elf(&loader_id, "test_elfs/out/noop_aligned.so"); - program_account.set_executable(false); - let instruction_data = bincode::serialize(&LoaderInstruction::Finalize).unwrap(); - - // Case: No program account - process_instruction( - &loader_id, 
- &[], - &instruction_data, - Vec::new(), - Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), - ); - - // Case: Not signed - process_instruction( - &loader_id, - &[], - &instruction_data, - vec![(program_id, program_account.clone())], - vec![AccountMeta { - pubkey: program_id, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::MissingRequiredSignature), - ); - - // Case: Finalize - let accounts = process_instruction( - &loader_id, - &[], - &instruction_data, - vec![(program_id, program_account.clone())], - vec![AccountMeta { - pubkey: program_id, - is_signer: true, - is_writable: true, - }], - Ok(()), - ); - assert!(accounts.first().unwrap().executable()); - - // Case: Finalize bad ELF - *program_account.data_as_mut_slice().get_mut(0).unwrap() = 0; - process_instruction( - &loader_id, - &[], - &instruction_data, - vec![(program_id, program_account)], - vec![AccountMeta { - pubkey: program_id, - is_signer: true, - is_writable: true, - }], - Err(InstructionError::InvalidAccountData), - ); - } - #[test] fn test_bpf_loader_invoke_main() { let loader_id = bpf_loader::id(); @@ -1867,7 +1671,7 @@ mod tests { &[], Vec::new(), Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::UnsupportedProgramId), ); // Case: Only a program account @@ -1917,7 +1721,6 @@ mod tests { Entrypoint::vm, |invoke_context| { let mut features = FeatureSet::all_enabled(); - features.deactivate(&disable_bpf_loader_instructions::id()); features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); invoke_context.feature_set = Arc::new(features); invoke_context.mock_set_remaining(0); @@ -2467,7 +2270,6 @@ mod tests { Entrypoint::vm, |invoke_context| { let mut features = FeatureSet::all_enabled(); - features.deactivate(&disable_bpf_loader_instructions::id()); features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); invoke_context.feature_set = Arc::new(features); }, diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index e61ad6e1aaf724..6477f12f56362c 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -101,6 +101,7 @@ solana_rbpf = { workspace = true } [dev-dependencies] solana-ledger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } [[bench]] diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index f433c8374d8e47..47c55245000df1 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -6,7 +6,8 @@ use { solana_rbpf::memory_region::MemoryState, - solana_sdk::feature_set::bpf_account_data_direct_mapping, std::slice, + solana_sdk::{feature_set::bpf_account_data_direct_mapping, signer::keypair::Keypair}, + std::slice, }; extern crate test; @@ -27,7 +28,7 @@ use { bank::Bank, bank_client::BankClient, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - loader_utils::{load_program, load_program_from_file}, + loader_utils::{load_program_from_file, load_upgradeable_program_and_advance_slot}, }, solana_sdk::{ account::AccountSharedData, @@ -190,12 +191,6 @@ fn bench_program_execute_noop(bencher: &mut Bencher) { .. } = create_genesis_config(50); - // deactivate `disable_bpf_loader_instructions` feature so that the program - // can be loaded, finalized and benched. 
- genesis_config - .accounts - .remove(&feature_set::disable_bpf_loader_instructions::id()); - genesis_config .accounts .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); @@ -204,12 +199,17 @@ fn bench_program_execute_noop(bencher: &mut Bencher) { let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); - let invoke_program_id = load_program(&bank_client, &bpf_loader::id(), &mint_keypair, "noop"); - let bank = bank_client - .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance the slot"); - + let authority_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); + + let (_, invoke_program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, + "noop", + ); + let account_metas = vec![AccountMeta::new(mint_pubkey, true)]; let instruction = diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 8f8f9d2ffec92d..d67b57641446b5 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -28,9 +28,9 @@ use { solana_runtime::{ bank::TransactionBalancesSet, loader_utils::{ - create_program, load_and_finalize_program, load_program, load_program_from_file, - load_upgradeable_buffer, load_upgradeable_program, set_upgrade_authority, - upgrade_program, + create_program, load_program_from_file, load_upgradeable_buffer, + load_upgradeable_program, load_upgradeable_program_and_advance_slot, + load_upgradeable_program_wrapper, set_upgrade_authority, upgrade_program, }, }, solana_sbf_rust_invoke::instructions::*, @@ -45,12 +45,11 @@ use { entrypoint::MAX_PERMITTED_DATA_INCREASE, feature_set::{self, FeatureSet}, fee::FeeStructure, - loader_instruction, message::{v0::LoadedAddresses, SanitizedMessage}, signature::keypair_from_seed, stake, system_instruction::{self, MAX_PERMITTED_DATA_LENGTH}, - sysvar::{self, clock, rent}, + sysvar::{self, clock}, transaction::VersionedTransaction, }, solana_transaction_status::{ @@ -64,7 +63,6 @@ use { solana_runtime::{ bank::Bank, bank_client::BankClient, - bank_forks::BankForks, genesis_utils::{ bootstrap_validator_stake_lamports, create_genesis_config, create_genesis_config_with_leader_ex, GenesisConfigInfo, @@ -86,12 +84,7 @@ use { system_program, transaction::{SanitizedTransaction, Transaction, TransactionError}, }, - std::{ - cell::RefCell, - str::FromStr, - sync::{Arc, RwLock}, - time::Duration, - }, + std::{cell::RefCell, str::FromStr, sync::Arc, time::Duration}, }; #[cfg(feature = "sbf_rust")] @@ -248,64 +241,6 @@ fn execute_transactions( .collect() } -fn load_program_and_advance_slot( - bank_client: &mut BankClient, - bank_forks: &RwLock, - loader_id: &Pubkey, - payer_keypair: &Keypair, - name: &str, -) -> (Arc, Pubkey) { - let pubkey = load_program(bank_client, loader_id, payer_keypair, name); - ( - bank_client - .advance_slot(1, bank_forks, &Pubkey::default()) - .expect("Failed to advance the slot"), - pubkey, - ) -} - -fn load_upgradeable_program_wrapper( - bank_client: &BankClient, - mint_keypair: &Keypair, - authority_keypair: &Keypair, - name: &str, -) -> Pubkey { - let buffer_keypair = Keypair::new(); - let program_keypair = Keypair::new(); - load_upgradeable_program( - bank_client, - mint_keypair, - &buffer_keypair, - &program_keypair, - authority_keypair, - name, - ); - program_keypair.pubkey() -} - -fn load_upgradeable_program_and_advance_slot( - bank_client: &mut BankClient, - bank_forks: 
&RwLock, - mint_keypair: &Keypair, - authority_keypair: &Keypair, - name: &str, -) -> (Arc, Pubkey) { - let program_id = - load_upgradeable_program_wrapper(bank_client, mint_keypair, authority_keypair, name); - - // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective - // after 2 slots. They need to be called individually to create the correct fork graph in between. - bank_client - .advance_slot(1, bank_forks, &Pubkey::default()) - .expect("Failed to advance the slot"); - - let bank = bank_client - .advance_slot(1, bank_forks, &Pubkey::default()) - .expect("Failed to advance the slot"); - - (bank, program_id) -} - #[test] #[cfg(any(feature = "sbf_c", feature = "sbf_rust"))] fn test_program_sbf_sanity() { @@ -438,66 +373,6 @@ fn test_program_sbf_loader_deprecated() { } } -/// This test is written with bpf_loader v2 specific instructions, which will be -/// deprecated when `disable_bpf_loader_instructions` feature is activated. -/// -/// The same test has been migrated to -/// `test_sol_alloc_free_no_longer_deployable_with_upgradeable_loader` with a new version -/// of bpf_upgradeable_loader! -#[test] -#[cfg(feature = "sbf_rust")] -fn test_sol_alloc_free_no_longer_deployable() { - solana_logger::setup(); - - let program_keypair = Keypair::new(); - let program_address = program_keypair.pubkey(); - - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config(50); - - // deactivate `disable_bpf_loader_instructions` feature so that the program - // can be loaded, finalized and tested. - genesis_config - .accounts - .remove(&feature_set::disable_bpf_loader_instructions::id()); - - genesis_config - .accounts - .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); - - let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - - let elf = load_program_from_file("solana_sbf_rust_deprecated_loader"); - let mut program_account = AccountSharedData::new(1, elf.len(), &bpf_loader::id()); - program_account - .data_as_mut_slice() - .get_mut(..) - .unwrap() - .copy_from_slice(&elf); - bank.store_account(&program_address, &program_account); - - let finalize_tx = Transaction::new( - &[&mint_keypair, &program_keypair], - Message::new( - &[loader_instruction::finalize( - &program_keypair.pubkey(), - &bpf_loader::id(), - )], - Some(&mint_keypair.pubkey()), - ), - bank.last_blockhash(), - ); - - // Try and deploy a program that depends on _sol_alloc_free - assert_eq!( - bank.process_transaction(&finalize_tx).unwrap_err(), - TransactionError::InstructionError(0, InstructionError::InvalidAccountData) - ); -} - #[test] #[cfg(feature = "sbf_rust")] #[should_panic( @@ -1628,97 +1503,6 @@ fn test_program_sbf_instruction_introspection() { assert!(bank.get_account(&sysvar::instructions::id()).is_none()); } -/// This test is to test bpf_loader v2 `Finalize` instruction with different -/// programs. It is going to be deprecated once we activate -/// `disable_bpf_loader_instructions`. -#[test] -#[cfg(feature = "sbf_rust")] -fn test_program_sbf_test_use_latest_executor() { - solana_logger::setup(); - - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config(50); - - // deactivate `disable_bpf_loader_instructions` feature so that the program - // can be loaded, finalized and tested. 
- genesis_config - .accounts - .remove(&feature_set::disable_bpf_loader_instructions::id()); - genesis_config - .accounts - .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); - - let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let mut bank_client = BankClient::new_shared(bank); - let panic_id = load_program( - &bank_client, - &bpf_loader::id(), - &mint_keypair, - "solana_sbf_rust_panic", - ); - - // Write the panic program into the program account - let (program_keypair, instruction) = load_and_finalize_program( - &bank_client, - &bpf_loader::id(), - None, - &mint_keypair, - "solana_sbf_rust_panic", - ); - - // Finalize the panic program, but fail the tx - let message = Message::new( - &[ - instruction, - Instruction::new_with_bytes(panic_id, &[0], vec![]), - ], - Some(&mint_keypair.pubkey()), - ); - - bank_client - .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance the slot"); - - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &program_keypair], message) - .is_err()); - - // Write the noop program into the same program account - let (program_keypair, instruction) = load_and_finalize_program( - &bank_client, - &bpf_loader::id(), - Some(program_keypair), - &mint_keypair, - "solana_sbf_rust_noop", - ); - bank_client - .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance the slot"); - let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); - bank_client - .send_and_confirm_message(&[&mint_keypair, &program_keypair], message) - .unwrap(); - - // Call the noop program, should get noop not panic - let message = Message::new( - &[Instruction::new_with_bytes( - program_keypair.pubkey(), - &[0], - vec![], - )], - Some(&mint_keypair.pubkey()), - ); - bank_client - .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance the slot"); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair], message) - .is_ok()); -} - #[test] #[cfg(feature = "sbf_rust")] fn test_program_sbf_upgrade() { @@ -2492,7 +2276,7 @@ fn test_program_sbf_disguised_as_sbf_loader() { let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert_eq!( result.unwrap_err().unwrap(), - TransactionError::InstructionError(0, InstructionError::IncorrectProgramId) + TransactionError::InstructionError(0, InstructionError::UnsupportedProgramId) ); } } @@ -2887,67 +2671,6 @@ fn test_program_upgradeable_locks() { assert_eq!(results2[1], Err(TransactionError::AccountInUse)); } -/// This test is to test bpf_loader v2 `Finalize` instruction. It is going to be -/// deprecated once we activate `disable_bpf_loader_instructions`. -#[test] -#[cfg(feature = "sbf_rust")] -fn test_program_sbf_finalize() { - solana_logger::setup(); - - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config(50); - - // deactivate `disable_bpf_loader_instructions` feature so that the program - // can be loaded, finalized and tested. 
- genesis_config - .accounts - .remove(&feature_set::disable_bpf_loader_instructions::id()); - - genesis_config - .accounts - .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); - - let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let mut bank_client = BankClient::new_shared(bank.clone()); - - let (_, program_pubkey) = load_program_and_advance_slot( - &mut bank_client, - bank_forks.as_ref(), - &bpf_loader::id(), - &mint_keypair, - "solana_sbf_rust_finalize", - ); - - // Write the noop program into the same program account - let (program_keypair, _instruction) = load_and_finalize_program( - &bank_client, - &bpf_loader::id(), - None, - &mint_keypair, - "solana_sbf_rust_noop", - ); - - bank_client - .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance the slot"); - - let account_metas = vec![ - AccountMeta::new(program_keypair.pubkey(), true), - AccountMeta::new_readonly(bpf_loader::id(), false), - AccountMeta::new(rent::id(), false), - ]; - let instruction = Instruction::new_with_bytes(program_pubkey, &[], account_metas.clone()); - let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); - let result = bank_client.send_and_confirm_message(&[&mint_keypair, &program_keypair], message); - assert_eq!( - result.unwrap_err().unwrap(), - TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) - ); -} - #[test] #[cfg(feature = "sbf_rust")] fn test_program_sbf_ro_account_modify() { diff --git a/runtime/src/loader_utils.rs b/runtime/src/loader_utils.rs index 7f4650ae561d3f..7265641e900bc1 100644 --- a/runtime/src/loader_utils.rs +++ b/runtime/src/loader_utils.rs @@ -1,5 +1,6 @@ +#![cfg(feature = "dev-context-only-utils")] use { - crate::{bank::Bank, bank_client::BankClient}, + crate::{bank::Bank, bank_client::BankClient, bank_forks::BankForks}, serde::Serialize, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -13,7 +14,13 @@ use { signature::{Keypair, Signer}, system_instruction, }, - std::{env, fs::File, io::Read, path::PathBuf}, + std::{ + env, + fs::File, + io::Read, + path::PathBuf, + sync::{Arc, RwLock}, + }, }; const CHUNK_SIZE: usize = 512; // Size of chunk just needs to fit into tx @@ -206,6 +213,48 @@ pub fn load_upgradeable_program( }); } +pub fn load_upgradeable_program_wrapper( + bank_client: &BankClient, + mint_keypair: &Keypair, + authority_keypair: &Keypair, + name: &str, +) -> Pubkey { + let buffer_keypair = Keypair::new(); + let program_keypair = Keypair::new(); + load_upgradeable_program( + bank_client, + mint_keypair, + &buffer_keypair, + &program_keypair, + authority_keypair, + name, + ); + program_keypair.pubkey() +} + +pub fn load_upgradeable_program_and_advance_slot( + bank_client: &mut BankClient, + bank_forks: &RwLock, + mint_keypair: &Keypair, + authority_keypair: &Keypair, + name: &str, +) -> (Arc, Pubkey) { + let program_id = + load_upgradeable_program_wrapper(bank_client, mint_keypair, authority_keypair, name); + + // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective + // after 2 slots. They need to be called individually to create the correct fork graph in between. 
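    // Illustrative timeline, spelled out from the comment above (no new
    // logic): the deploy is recorded at slot 1 and only takes effect two
    // slots later, so each advance_slot(1, ..) call below adds one bank to
    // the fork graph, and after the second call the program is visible.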
+ bank_client + .advance_slot(1, bank_forks, &Pubkey::default()) + .expect("Failed to advance the slot"); + + let bank = bank_client + .advance_slot(1, bank_forks, &Pubkey::default()) + .expect("Failed to advance the slot"); + + (bank, program_id) +} + pub fn upgrade_program( bank_client: &T, payer_keypair: &Keypair, From c5aaca43ab7e2a3d4d0b2494206a5e83f5d61749 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 15 Feb 2024 14:40:08 -0500 Subject: [PATCH 185/401] Bring up to date the concurrent accounts benches (#34815) --- runtime/benches/accounts.rs | 53 ++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index fb81ce4716553e..b99425b1507cab 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -19,7 +19,7 @@ use { }, solana_runtime::bank::*, solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, + account::{Account, AccountSharedData, ReadableAccount}, genesis_config::{create_genesis_config, ClusterType}, hash::Hash, lamports::LamportsError, @@ -203,23 +203,29 @@ fn store_accounts_with_possible_contention( let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); let num_keys = 1000; let slot = 0; + + let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) + .take(num_keys) + .collect(); + let accounts_data: Vec<_> = std::iter::repeat(Account { + lamports: 1, + ..Default::default() + }) + .take(num_keys) + .collect(); + let storable_accounts: Vec<_> = pubkeys.iter().zip(accounts_data.iter()).collect(); + accounts.store_accounts_cached((slot, storable_accounts.as_slice())); accounts.add_root(slot); - let pubkeys: Arc> = Arc::new( - (0..num_keys) - .map(|_| { - let pubkey = solana_sdk::pubkey::new_rand(); - let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); - accounts.store_slow_uncached(slot, &pubkey, &account); - pubkey - }) - .collect(), - ); + accounts + .accounts_db + .flush_accounts_cache_slot_for_tests(slot); - for _ in 0..num_readers { + let pubkeys = Arc::new(pubkeys); + for i in 0..num_readers { let accounts = accounts.clone(); let pubkeys = pubkeys.clone(); Builder::new() - .name("readers".to_string()) + .name(format!("reader{i:02}")) .spawn(move || { reader_f(&accounts, &pubkeys); }) @@ -227,21 +233,19 @@ fn store_accounts_with_possible_contention( } let num_new_keys = 1000; - let new_accounts: Vec<_> = (0..num_new_keys) - .map(|_| AccountSharedData::new(1, 0, AccountSharedData::default().owner())) - .collect(); bencher.iter(|| { - for account in &new_accounts { - // Write to a different slot than the one being read from. Because - // there's a new account pubkey being written to every time, will - // compete for the accounts index lock on every store - accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), account); - } - }) + let new_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) + .take(num_new_keys) + .collect(); + let new_storable_accounts: Vec<_> = new_pubkeys.iter().zip(accounts_data.iter()).collect(); + // Write to a different slot than the one being read from. 
Because + // there's a new account pubkey being written to every time, will + // compete for the accounts index lock on every store + accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice())); + }); } #[bench] -#[ignore] fn bench_concurrent_read_write(bencher: &mut Bencher) { store_accounts_with_possible_contention( "concurrent_read_write", @@ -261,7 +265,6 @@ fn bench_concurrent_read_write(bencher: &mut Bencher) { } #[bench] -#[ignore] fn bench_concurrent_scan_write(bencher: &mut Bencher) { store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop { test::black_box( From e21251090f65d6d056a6dee069e7bf4cf33b7c57 Mon Sep 17 00:00:00 2001 From: sakridge Date: Fri, 16 Feb 2024 18:29:42 +0100 Subject: [PATCH 186/401] Remove spammy banking-stage retryable tx metric which is not needed (#35207) Already covered by other metrics like the filtered retryable and the number filtered. --- core/src/banking_stage/consumer.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 938a5dd52a2549..660dc2ac977b0d 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -236,9 +236,6 @@ impl Consumer { slot_metrics_tracker.accumulate_process_transactions_summary(&process_transactions_summary); slot_metrics_tracker.accumulate_transaction_errors(error_counters); - let retryable_tx_count = retryable_transaction_indexes.len(); - inc_new_counter_info!("banking_stage-unprocessed_transactions", retryable_tx_count); - // Filter out the retryable transactions that are too old let (filtered_retryable_transaction_indexes, filter_retryable_packets_us) = measure_us!(Self::filter_pending_packets_from_pending_txs( From 9a69e3aa7ac61616650c4d62772005245c12a86b Mon Sep 17 00:00:00 2001 From: Tyera Date: Fri, 16 Feb 2024 11:33:15 -0700 Subject: [PATCH 187/401] ledger-tool: add warn log if capitalization changes during create-snapshot (#35155) * Add warn log if capitalization changes during create-snapshot * Add enable-capitalization-change flag * Print capitalization message at end --- ledger-tool/src/main.rs | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 500a64173a25c4..ef3f6515dfcd8f 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1295,6 +1295,12 @@ fn main() { .takes_value(true) .help("Snapshot archive format to use.") .conflicts_with("no_snapshot"), + ) + .arg( + Arg::with_name("enable_capitalization_change") + .long("enable-capitalization-change") + .takes_value(false) + .help("If snapshot creation should succeed with a capitalization delta."), ), ) .subcommand( @@ -1806,6 +1812,9 @@ fn main() { None }; + let enable_capitalization_change = + arg_matches.is_present("enable_capitalization_change"); + let snapshot_type_str = if is_incremental { "incremental " } else if is_minimized { @@ -2047,8 +2056,31 @@ fn main() { } } + let pre_capitalization = bank.capitalization(); + bank.set_capitalization(); + let post_capitalization = bank.capitalization(); + + let capitalization_message = if pre_capitalization != post_capitalization { + let amount = if pre_capitalization > post_capitalization { + format!("-{}", pre_capitalization - post_capitalization) + } else { + (post_capitalization - pre_capitalization).to_string() + }; + let msg = format!("Capitalization change: {amount} lamports"); + warn!("{msg}"); + if !enable_capitalization_change { + 
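                        // Hard-exit path: without the opt-in flag, any
                        // capitalization delta (reported in lamports above)
                        // aborts snapshot creation.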
eprintln!( + "{msg}\nBut `--enable-capitalization-change flag not provided" + ); + exit(1); + } + Some(msg) + } else { + None + }; + let bank = if let Some(warp_slot) = warp_slot { // need to flush the write cache in order to use Storages to calculate // the accounts hash, and need to root `bank` before flushing the cache @@ -2175,6 +2207,9 @@ fn main() { } } + if let Some(msg) = capitalization_message { + println!("{msg}"); + } println!( "Shred version: {}", compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())) From d268139a0e2fd18dcf16df63d280c34952eb8166 Mon Sep 17 00:00:00 2001 From: Sergey Timoshin Date: Fri, 16 Feb 2024 18:59:15 +0000 Subject: [PATCH 188/401] fix: correct typo in alt_bn128 function names. (#35210) The typo in the function names convert_edianness_64 and convert_edianness_128 has been corrected to convert_endianness_64 and convert_endianness_128 respectively. --- sdk/program/src/alt_bn128/mod.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/sdk/program/src/alt_bn128/mod.rs b/sdk/program/src/alt_bn128/mod.rs index f214157152c114..f8995e2a19c429 100644 --- a/sdk/program/src/alt_bn128/mod.rs +++ b/sdk/program/src/alt_bn128/mod.rs @@ -165,13 +165,13 @@ mod target_arch { input.resize(ALT_BN128_ADDITION_INPUT_LEN, 0); let p: G1 = PodG1( - convert_edianness_64(&input[..64]) + convert_endianness_64(&input[..64]) .try_into() .map_err(AltBn128Error::TryIntoVecError)?, ) .try_into()?; let q: G1 = PodG1( - convert_edianness_64(&input[64..ALT_BN128_ADDITION_INPUT_LEN]) + convert_endianness_64(&input[64..ALT_BN128_ADDITION_INPUT_LEN]) .try_into() .map_err(AltBn128Error::TryIntoVecError)?, ) @@ -191,7 +191,7 @@ mod target_arch { .serialize_with_mode(&mut result_point_data[32..], Compress::No) .map_err(|_| AltBn128Error::InvalidInputData)?; - Ok(convert_edianness_64(&result_point_data[..]).to_vec()) + Ok(convert_endianness_64(&result_point_data[..]).to_vec()) } pub fn alt_bn128_multiplication(input: &[u8]) -> Result, AltBn128Error> { @@ -203,13 +203,13 @@ mod target_arch { input.resize(ALT_BN128_MULTIPLICATION_INPUT_LEN, 0); let p: G1 = PodG1( - convert_edianness_64(&input[..64]) + convert_endianness_64(&input[..64]) .try_into() .map_err(AltBn128Error::TryIntoVecError)?, ) .try_into()?; let fr = BigInteger256::deserialize_uncompressed_unchecked( - &convert_edianness_64(&input[64..96])[..], + &convert_endianness_64(&input[64..96])[..], ) .map_err(|_| AltBn128Error::InvalidInputData)?; @@ -227,7 +227,7 @@ mod target_arch { .map_err(|_| AltBn128Error::InvalidInputData)?; Ok( - convert_edianness_64(&result_point_data[..ALT_BN128_MULTIPLICATION_OUTPUT_LEN]) + convert_endianness_64(&result_point_data[..ALT_BN128_MULTIPLICATION_OUTPUT_LEN]) .to_vec(), ) } @@ -247,7 +247,7 @@ mod target_arch { for i in 0..ele_len { vec_pairs.push(( PodG1( - convert_edianness_64( + convert_endianness_64( &input[i.saturating_mul(ALT_BN128_PAIRING_ELEMENT_LEN) ..i.saturating_mul(ALT_BN128_PAIRING_ELEMENT_LEN) .saturating_add(ALT_BN128_POINT_SIZE)], @@ -257,7 +257,7 @@ mod target_arch { ) .try_into()?, PodG2( - convert_edianness_128( + convert_endianness_128( &input[i .saturating_mul(ALT_BN128_PAIRING_ELEMENT_LEN) .saturating_add(ALT_BN128_POINT_SIZE) @@ -285,14 +285,14 @@ mod target_arch { Ok(output) } - fn convert_edianness_64(bytes: &[u8]) -> Vec { + fn convert_endianness_64(bytes: &[u8]) -> Vec { bytes .chunks(32) .flat_map(|b| b.iter().copied().rev().collect::>()) .collect::>() } - fn convert_edianness_128(bytes: &[u8]) -> Vec { + fn 
convert_endianness_128(bytes: &[u8]) -> Vec { bytes .chunks(64) .flat_map(|b| b.iter().copied().rev().collect::>()) From 2a9ed3ee925ba9aeb0a3e4e5a61cbaec2ae7758c Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 16 Feb 2024 11:10:15 -0800 Subject: [PATCH 189/401] Remove ability to submit metrics with no fields (#35133) --- metrics/src/datapoint.rs | 23 ------------------- .../src/send_transaction_service.rs | 2 -- 2 files changed, 25 deletions(-) diff --git a/metrics/src/datapoint.rs b/metrics/src/datapoint.rs index bbc3f5e693e085..e2740ce3aecc47 100644 --- a/metrics/src/datapoint.rs +++ b/metrics/src/datapoint.rs @@ -158,18 +158,10 @@ macro_rules! create_datapoint { point } }; - (@point $name:expr $(,)?) => { - $crate::datapoint::DataPoint::new(&$name) - }; } #[macro_export] macro_rules! datapoint { - ($level:expr, $name:expr $(,)?) => { - if log::log_enabled!($level) { - $crate::submit($crate::create_datapoint!(@point $name), $level); - } - }; ($level:expr, $name:expr, $($fields:tt)+) => { if log::log_enabled!($level) { $crate::submit($crate::create_datapoint!(@point $name, $($fields)+), $level); @@ -178,9 +170,6 @@ macro_rules! datapoint { } #[macro_export] macro_rules! datapoint_error { - ($name:expr $(,)?) => { - $crate::datapoint!(log::Level::Error, $name); - }; ($name:expr, $($fields:tt)+) => { $crate::datapoint!(log::Level::Error, $name, $($fields)+); }; @@ -188,9 +177,6 @@ macro_rules! datapoint_error { #[macro_export] macro_rules! datapoint_warn { - ($name:expr $(,)?) => { - $crate::datapoint!(log::Level::Warn, $name); - }; ($name:expr, $($fields:tt)+) => { $crate::datapoint!(log::Level::Warn, $name, $($fields)+); }; @@ -198,9 +184,6 @@ macro_rules! datapoint_warn { #[macro_export] macro_rules! datapoint_info { - ($name:expr) => { - $crate::datapoint!(log::Level::Info, $name); - }; ($name:expr, $($fields:tt)+) => { $crate::datapoint!(log::Level::Info, $name, $($fields)+); }; @@ -208,9 +191,6 @@ macro_rules! datapoint_info { #[macro_export] macro_rules! datapoint_debug { - ($name:expr) => { - $crate::datapoint!(log::Level::Debug, $name); - }; ($name:expr, $($fields:tt)+) => { $crate::datapoint!(log::Level::Debug, $name, $($fields)+); }; @@ -218,9 +198,6 @@ macro_rules! datapoint_debug { #[macro_export] macro_rules! 
datapoint_trace { - ($name:expr) => { - $crate::datapoint!(log::Level::Trace, $name); - }; ($name:expr, $($fields:tt)+) => { $crate::datapoint!(log::Level::Trace, $name, $($fields)+); }; diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 4e4ba9956f760f..dbdcda2f2ff905 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -7,7 +7,6 @@ use { tpu_connection::TpuConnection, }, solana_measure::measure::Measure, - solana_metrics::datapoint_warn, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ clock::Slot, hash::Hash, nonce_account, pubkey::Pubkey, saturating_add_assign, @@ -481,7 +480,6 @@ impl SendTransactionService { let entry = retry_transactions.entry(signature); if let Entry::Vacant(_) = entry { if retry_len >= config.retry_pool_max_size { - datapoint_warn!("send_transaction_service-queue-overflow"); break; } else { transaction_info.last_sent_time = Some(last_sent_time); From 78e187f220c6ba9ca7c76476a6b58d0cbbd8caf2 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 16 Feb 2024 13:00:50 -0700 Subject: [PATCH 190/401] bpf-loader-upgradeable: export `get_program_data_address` helper (#35131) --- sdk/program/src/bpf_loader_upgradeable.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/sdk/program/src/bpf_loader_upgradeable.rs b/sdk/program/src/bpf_loader_upgradeable.rs index 907a953d026706..40610f1c299637 100644 --- a/sdk/program/src/bpf_loader_upgradeable.rs +++ b/sdk/program/src/bpf_loader_upgradeable.rs @@ -119,6 +119,11 @@ impl UpgradeableLoaderState { } } +/// Returns the program data address for a program ID +pub fn get_program_data_address(program_address: &Pubkey) -> Pubkey { + Pubkey::find_program_address(&[program_address.as_ref()], &id()).0 +} + /// Returns the instructions required to initialize a Buffer account. 
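/// A buffer account is distinct from a program's program-data account; for
/// the latter, the helper exported by this patch applies. A hedged sanity
/// check (`program_address` stands in for any program ID):
///
/// ```ignore
/// assert_eq!(
///     get_program_data_address(&program_address),
///     Pubkey::find_program_address(&[program_address.as_ref()], &id()).0,
/// );
/// ```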
pub fn create_buffer( payer_address: &Pubkey, @@ -175,7 +180,7 @@ pub fn deploy_with_max_program_len( program_lamports: u64, max_data_len: usize, ) -> Result, InstructionError> { - let (programdata_address, _) = Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let programdata_address = get_program_data_address(program_address); Ok(vec![ system_instruction::create_account( payer_address, @@ -208,7 +213,7 @@ pub fn upgrade( authority_address: &Pubkey, spill_address: &Pubkey, ) -> Instruction { - let (programdata_address, _) = Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let programdata_address = get_program_data_address(program_address); Instruction::new_with_bincode( id(), &UpgradeableLoaderInstruction::Upgrade, @@ -281,7 +286,7 @@ pub fn set_upgrade_authority( current_authority_address: &Pubkey, new_authority_address: Option<&Pubkey>, ) -> Instruction { - let (programdata_address, _) = Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let programdata_address = get_program_data_address(program_address); let mut metas = vec![ AccountMeta::new(programdata_address, false), @@ -300,7 +305,7 @@ pub fn set_upgrade_authority_checked( current_authority_address: &Pubkey, new_authority_address: &Pubkey, ) -> Instruction { - let (programdata_address, _) = Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let programdata_address = get_program_data_address(program_address); let metas = vec![ AccountMeta::new(programdata_address, false), @@ -355,8 +360,7 @@ pub fn extend_program( payer_address: Option<&Pubkey>, additional_bytes: u32, ) -> Instruction { - let (program_data_address, _) = - Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let program_data_address = get_program_data_address(program_address); let mut metas = vec![ AccountMeta::new(program_data_address, false), AccountMeta::new(*program_address, false), From 6fc8a6135d7f600d980589016cf78c6d4eaf9a2c Mon Sep 17 00:00:00 2001 From: Outrider Date: Fri, 16 Feb 2024 20:19:14 +0000 Subject: [PATCH 191/401] Update README.md (#35202) * Update README.md grammar fixed * Update docs/README.md --------- Co-authored-by: Tyera --- docs/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README.md b/docs/README.md index d537c5c8c489b7..bdc8962cc1f652 100644 --- a/docs/README.md +++ b/docs/README.md @@ -5,8 +5,8 @@ Static content delivery is handled using `vercel`. > Note: The documentation within this repo is specifically focused on the > Solana validator client maintained by Solana Labs. The more "common" -> documentation which is generalize to the Solana protocol as a whole, and apply -> to all Solana validator implementations, are maintained within the +> documentation, which is generalized to the Solana protocol as a whole and applies +> to all Solana validator implementations, is maintained within the > [`developer-content`](https://github.com/solana-foundation/developer-content/) > repo. 
Those "common docs" are managed by the Solana Foundation within their > GitHub organization and are publicly accessible via From e4064023bf7936ced97b0d4de22137742324983d Mon Sep 17 00:00:00 2001 From: sakridge Date: Fri, 16 Feb 2024 21:58:06 +0100 Subject: [PATCH 192/401] Set COPYFILE_DISABLE for mac os so it doesn't generate ._ files (#35213) --- ledger/src/blockstore.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 45c1cbf49bdb2c..cda801bb296e45 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -4315,6 +4315,7 @@ pub fn create_new_ledger( blockstore_dir, ]; let output = std::process::Command::new("tar") + .env("COPYFILE_DISABLE", "1") .args(args) .output() .unwrap(); From 69345899f3b7449daac57ec1a28142bb410d7f06 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Sat, 17 Feb 2024 16:10:58 -0800 Subject: [PATCH 193/401] [TieredStorage] Make TieredStorage::write_accounts() thread-safe (#35143) #### Problem While accounts-db might not invoke appends_account twice for the same AccountsFile, TieredStorage::write_accounts() itself isn't thread-safe, and it depends on the above accounts-db assumption. #### Summary of Changes This PR makes TieredStorage::write_accounts() thread-safe. So only the first thread that successfully updates the already_written flag can proceed and write the input accounts. All subsequent calls to write_accounts() will be a no-op and return AttemptToUpdateReadOnly Error. --- accounts-db/src/tiered_storage.rs | 36 ++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 335e93c72e9750..a6f4ea89428bf9 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -30,7 +30,10 @@ use { borrow::Borrow, fs::{self, OpenOptions}, path::{Path, PathBuf}, - sync::OnceLock, + sync::{ + atomic::{AtomicBool, Ordering}, + OnceLock, + }, }, }; @@ -47,9 +50,14 @@ pub struct TieredStorageFormat { pub account_block_format: AccountBlockFormat, } +/// The implementation of AccountsFile for tiered-storage. #[derive(Debug)] pub struct TieredStorage { + /// The internal reader instance for its accounts file. reader: OnceLock, + /// A status flag indicating whether its file has been already written. + already_written: AtomicBool, + /// The path to the file that stores accounts. path: PathBuf, } @@ -73,6 +81,7 @@ impl TieredStorage { pub fn new_writable(path: impl Into) -> Self { Self { reader: OnceLock::::new(), + already_written: false.into(), path: path.into(), } } @@ -83,6 +92,7 @@ impl TieredStorage { let path = path.into(); Ok(Self { reader: TieredStorageReader::new_from_path(&path).map(OnceLock::from)?, + already_written: true.into(), path, }) } @@ -95,9 +105,7 @@ impl TieredStorage { /// Writes the specified accounts into this TieredStorage. /// /// Note that this function can only be called once per a TieredStorage - /// instance. TieredStorageError::AttemptToUpdateReadOnly will be returned - /// if this function is invoked more than once on the same TieredStorage - /// instance. + /// instance. Otherwise, it will trigger panic. 
pub fn write_accounts< 'a, 'b, @@ -110,10 +118,10 @@ impl TieredStorage { skip: usize, format: &TieredStorageFormat, ) -> TieredStorageResult> { - if self.is_read_only() { - return Err(TieredStorageError::AttemptToUpdateReadOnly( - self.path.to_path_buf(), - )); + let was_written = self.already_written.swap(true, Ordering::AcqRel); + + if was_written { + panic!("cannot write same tiered storage file more than once"); } if format == &HOT_FORMAT { @@ -123,16 +131,17 @@ impl TieredStorage { }; // panic here if self.reader.get() is not None as self.reader can only be - // None since we have passed `is_read_only()` check previously, indicating - // self.reader is not yet set. + // None since a false-value `was_written` indicates the accounts file has + // not been written previously, implying is_read_only() was also false. + debug_assert!(!self.is_read_only()); self.reader .set(TieredStorageReader::new_from_path(&self.path)?) .unwrap(); - return result; + result + } else { + Err(TieredStorageError::UnknownFormat(self.path.to_path_buf())) } - - Err(TieredStorageError::UnknownFormat(self.path.to_path_buf())) } /// Returns the underlying reader of the TieredStorage. None will be @@ -255,6 +264,7 @@ mod tests { } #[test] + #[should_panic(expected = "cannot write same tiered storage file more than once")] fn test_write_accounts_twice() { // Generate a new temp path that is guaranteed to NOT already have a file. let temp_dir = tempdir().unwrap(); From 4b65cc8eef6ef79cb9b9cbc534a99b4900e58cf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:26:43 +0800 Subject: [PATCH 194/401] build(deps): bump syn from 2.0.48 to 2.0.49 (#35212) * build(deps): bump syn from 2.0.48 to 2.0.49 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.48 to 2.0.49. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.48...2.0.49) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 66 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 62 +++++++++++++++++++------------------- 2 files changed, 64 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17f821a640e500..50b23018e48ce0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -459,7 +459,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -607,7 +607,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -775,7 +775,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "syn_derive", ] @@ -1535,7 +1535,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1546,7 +1546,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1608,7 +1608,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1732,7 +1732,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1838,7 +1838,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -2102,7 +2102,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3373,7 +3373,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3447,7 +3447,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3943,7 +3943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -4111,7 +4111,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -4793,7 +4793,7 @@ checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -4847,7 +4847,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -4897,7 +4897,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -6028,7 +6028,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7077,7 +7077,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7792,7 +7792,7 @@ 
checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7804,7 +7804,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.48", + "syn 2.0.49", "thiserror", ] @@ -7862,7 +7862,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8050,9 +8050,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" dependencies = [ "proc-macro2", "quote", @@ -8068,7 +8068,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8239,7 +8239,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8251,7 +8251,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "test-case-core", ] @@ -8287,7 +8287,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8424,7 +8424,7 @@ source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8670,7 +8670,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8973,7 +8973,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "wasm-bindgen-shared", ] @@ -9007,7 +9007,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9301,7 +9301,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -9321,7 +9321,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 76f44ab8c97949..b872eace459c6a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -425,7 +425,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -573,7 +573,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -726,7 +726,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "syn_derive", ] @@ -1238,7 +1238,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1249,7 +1249,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1424,7 +1424,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + 
"syn 2.0.49", ] [[package]] @@ -1533,7 +1533,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -1780,7 +1780,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3016,7 +3016,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3089,7 +3089,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3544,7 +3544,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -3685,7 +3685,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -4263,7 +4263,7 @@ checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -4308,7 +4308,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -5057,7 +5057,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -6187,7 +6187,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -6727,7 +6727,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -6739,7 +6739,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.48", + "syn 2.0.49", "thiserror", ] @@ -6787,7 +6787,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -6975,9 +6975,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" dependencies = [ "proc-macro2", "quote", @@ -6993,7 +6993,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7150,7 +7150,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7162,7 +7162,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "test-case-core", ] @@ -7198,7 +7198,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7321,7 +7321,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7539,7 +7539,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -7821,7 +7821,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", 
- "syn 2.0.48", + "syn 2.0.49", "wasm-bindgen-shared", ] @@ -7855,7 +7855,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8140,7 +8140,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] @@ -8160,7 +8160,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.49", ] [[package]] From 7f75cc3c3dbb35f0e1f2dd5c24429b7ede0aca19 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 21:46:42 +0800 Subject: [PATCH 195/401] build(deps): bump anyhow from 1.0.79 to 1.0.80 (#35230) * build(deps): bump anyhow from 1.0.79 to 1.0.80 Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.79 to 1.0.80. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.79...1.0.80) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50b23018e48ce0..c11d3d8bf723c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -163,9 +163,9 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "aquamarine" diff --git a/Cargo.toml b/Cargo.toml index adc66eecffda3b..8da8f2ad72f499 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,7 +139,7 @@ Inflector = "0.11.4" aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" ahash = "0.8.8" -anyhow = "1.0.79" +anyhow = "1.0.80" arbitrary = "1.3.2" ark-bn254 = "0.4.0" ark-ec = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b872eace459c6a..2d7b721985e383 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -152,9 +152,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "aquamarine" From 8397c5cd69283afdc9ea1a56bd63ec56dbb26aa4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 21:47:08 +0800 Subject: [PATCH 196/401] build(deps): bump semver from 1.0.21 to 1.0.22 (#35232) * build(deps): bump semver from 1.0.21 to 1.0.22 Bumps [semver](https://github.com/dtolnay/semver) from 1.0.21 to 1.0.22. 
- [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.21...1.0.22) --- updated-dependencies: - dependency-name: semver dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c11d3d8bf723c5..d273a1e607d9f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1004,7 +1004,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -4552,7 +4552,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.22", ] [[package]] @@ -4742,9 +4742,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] @@ -5500,7 +5500,7 @@ dependencies = [ "predicates", "regex", "reqwest", - "semver 1.0.21", + "semver 1.0.22", "serial_test", "solana-download-utils", "solana-logger", @@ -5606,7 +5606,7 @@ dependencies = [ "num-traits", "pretty-hex", "reqwest", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_derive", "serde_json", @@ -5669,7 +5669,7 @@ dependencies = [ "humantime", "indicatif", "pretty-hex", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "solana-account-decoder", @@ -6171,7 +6171,7 @@ dependencies = [ "nix 0.26.4", "reqwest", "scopeguard", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_yaml 0.8.26", "serde_yaml 0.9.31", @@ -6691,7 +6691,7 @@ dependencies = [ "futures-util", "log", "reqwest", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_derive", "serde_json", @@ -6754,7 +6754,7 @@ dependencies = [ "num-traits", "parking_lot 0.12.1", "qstring", - "semver 1.0.21", + "semver 1.0.22", "solana-sdk", "thiserror", "uriparse", @@ -6835,7 +6835,7 @@ dependencies = [ "jsonrpc-http-server", "log", "reqwest", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_derive", "serde_json", @@ -6856,7 +6856,7 @@ dependencies = [ "bs58", "jsonrpc-core", "reqwest", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_derive", "serde_json", @@ -7561,7 +7561,7 @@ version = "1.18.0" dependencies = [ "log", "rustc_version 0.4.0", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_derive", "solana-frozen-abi", diff --git a/Cargo.toml b/Cargo.toml index 8da8f2ad72f499..0aaa2a29b7ad64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -291,7 +291,7 @@ rustc_version = "0.4" rustls = { version = "0.21.10", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" -semver = "1.0.21" +semver = "1.0.22" seqlock = "0.2.0" serde = "1.0.196" serde_bytes = "0.11.14" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2d7b721985e383..e683734cb7d9ba 100644 --- 
a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4224,9 +4224,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "seqlock" From 6810068e112f8483f073fd84b087cb3221c0ed1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 21:47:43 +0800 Subject: [PATCH 197/401] build(deps): bump serde_yaml from 0.9.31 to 0.9.32 (#35231) * build(deps): bump serde_yaml from 0.9.31 to 0.9.32 Bumps [serde_yaml](https://github.com/dtolnay/serde-yaml) from 0.9.31 to 0.9.32. - [Release notes](https://github.com/dtolnay/serde-yaml/releases) - [Commits](https://github.com/dtolnay/serde-yaml/compare/0.9.31...0.9.32) --- updated-dependencies: - dependency-name: serde_yaml dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d273a1e607d9f7..d336e0c5f6648b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4864,9 +4864,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ "indexmap 2.2.3", "itoa", @@ -5380,7 +5380,7 @@ dependencies = [ "rand 0.8.5", "rayon", "serde_json", - "serde_yaml 0.9.31", + "serde_yaml 0.9.32", "serial_test", "solana-clap-utils", "solana-cli-config", @@ -5650,7 +5650,7 @@ dependencies = [ "lazy_static", "serde", "serde_derive", - "serde_yaml 0.9.31", + "serde_yaml 0.9.32", "solana-clap-utils", "solana-sdk", "url 2.5.0", @@ -6041,7 +6041,7 @@ dependencies = [ "itertools", "serde", "serde_json", - "serde_yaml 0.9.31", + "serde_yaml 0.9.32", "solana-accounts-db", "solana-clap-utils", "solana-cli-config", @@ -6174,7 +6174,7 @@ dependencies = [ "semver 1.0.22", "serde", "serde_yaml 0.8.26", - "serde_yaml 0.9.31", + "serde_yaml 0.9.32", "solana-clap-utils", "solana-config-program", "solana-logger", @@ -7516,7 +7516,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_yaml 0.9.31", + "serde_yaml 0.9.32", "signal-hook", "solana-account-decoder", "solana-accounts-db", diff --git a/Cargo.toml b/Cargo.toml index 0aaa2a29b7ad64..8c987d26902347 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -298,7 +298,7 @@ serde_bytes = "0.11.14" serde_derive = "1.0.103" serde_json = "1.0.113" serde_with = { version = "2.3.3", default-features = false } -serde_yaml = "0.9.31" +serde_yaml = "0.9.32" serial_test = "2.0.0" sha2 = "0.10.8" sha3 = "0.10.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e683734cb7d9ba..6914ebf6d1e158 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4313,9 +4313,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ "indexmap 2.2.3", "itoa", From ebf60359f48e2785b1fc159f6688a5122ba2b5ba Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 19 Feb 2024 07:56:27 -0600 Subject: [PATCH 198/401] clean up dev-context-only attribute (#35201) Co-authored-by: HaoranYi --- core/src/vote_simulator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 58d8a40d2eb4c6..32e1f3cd96fac6 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -65,7 +65,6 @@ impl VoteSimulator { } } - #[cfg(feature = "dev-context-only-utils")] pub fn fill_bank_forks( &mut self, forks: Tree, From 2ec136a1ea7d6f72f59ecc6b98d123fc0bc16a10 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 19 Feb 2024 10:22:00 -0500 Subject: [PATCH 199/401] Adds get_and_then() & family to AccountsIndex (#35218) --- accounts-db/src/accounts_db.rs | 61 ++++++++++--------------------- accounts-db/src/accounts_index.rs | 24 ++++++++++++ 2 files changed, 44 insertions(+), 41 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index c89cf45e320971..6fc5297efb30d3 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9924,11 +9924,11 @@ pub mod tests { .store(data.len(), Ordering::Relaxed); let genesis_config = GenesisConfig::default(); - assert!(db.accounts_index.get_account_read_entry(&pubkey).is_none()); + assert!(!db.accounts_index.contains(&pubkey)); let result = db.generate_index(None, false, &genesis_config); // index entry should only contain a single entry for the pubkey since index cannot hold more than 1 entry per slot - let entry = db.accounts_index.get_account_read_entry(&pubkey).unwrap(); - assert_eq!(entry.slot_list().len(), 1); + let entry = db.accounts_index.get_cloned(&pubkey).unwrap(); + assert_eq!(entry.slot_list.read().unwrap().len(), 1); assert_eq!(append_vec.alive_bytes(), expected_alive_bytes); // total # accounts in append vec assert_eq!(append_vec.approx_stored_count(), 2); @@ -11409,16 +11409,14 @@ pub mod tests { let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); let ancestors = vec![(unrooted_slot, 1)].into_iter().collect(); + assert!(!db.accounts_index.contains(&key)); if is_cached { db.store_cached((unrooted_slot, &[(&key, &account0)][..]), None); } else { db.store_for_tests(unrooted_slot, &[(&key, &account0)]); } assert!(db.get_bank_hash_stats(unrooted_slot).is_some()); - assert!(db - .accounts_index - .get(&key, Some(&ancestors), None) - .is_some()); + assert!(db.accounts_index.contains(&key)); db.assert_load_account(unrooted_slot, key, 1); // Purge the slot @@ -11427,11 +11425,7 @@ pub mod tests { assert!(db.get_bank_hash_stats(unrooted_slot).is_none()); assert!(db.accounts_cache.slot_cache(unrooted_slot).is_none()); assert!(db.storage.get_slot_storage_entry(unrooted_slot).is_none()); - assert!(db.accounts_index.get_account_read_entry(&key).is_none()); - assert!(db - .accounts_index - .get(&key, Some(&ancestors), None) - .is_none()); + assert!(!db.accounts_index.contains(&key)); // Test we can store for the same slot again and get the right information let account0 = AccountSharedData::new(2, 0, &key); @@ -12189,11 +12183,8 @@ pub mod tests { // The earlier entry for pubkey in the account index is purged, let (slot_list_len, 
index_slot) = { - let account_entry = accounts - .accounts_index - .get_account_read_entry(&pubkey) - .unwrap(); - let slot_list = account_entry.slot_list(); + let account_entry = accounts.accounts_index.get_cloned(&pubkey).unwrap(); + let slot_list = account_entry.slot_list.read().unwrap(); (slot_list.len(), slot_list[0].0) }; assert_eq!(slot_list_len, 1); @@ -12258,10 +12249,7 @@ accounts.print_accounts_stats("post_purge"); // Make sure the index is for pubkey cleared - assert!(accounts - .accounts_index - .get_account_read_entry(&pubkey) - .is_none()); + assert!(!accounts.accounts_index.contains(&pubkey)); // slot 1 & 2 should not have any stores assert_no_stores(&accounts, 1); @@ -13833,10 +13821,7 @@ assert!(db .load_without_fixed_root(&ancestors, &unrooted_key) .is_some()); - assert!(db - .accounts_index - .get_account_read_entry(&unrooted_key) - .is_some()); + assert!(db.accounts_index.contains(&unrooted_key)); assert_eq!(db.accounts_cache.num_slots(), 1); assert!(db.accounts_cache.slot_cache(unrooted_slot).is_some()); assert_eq!( @@ -14368,12 +14353,13 @@ let before_size = storage0.alive_bytes.load(Ordering::Acquire); let account_info = accounts_db .accounts_index - .get_account_read_entry(account.pubkey()) - .map(|locked_entry| { - // Should only be one entry per key, since every key was only stored to slot 0 - locked_entry.slot_list()[0] - }) - .unwrap(); + .get_cloned(account.pubkey()) + .unwrap() + .slot_list + .read() + .unwrap() + // Should only be one entry per key, since every key was only stored to slot 0 + [0]; assert_eq!(account_info.0, slot); let reclaims = [account_info]; accounts_db.remove_dead_accounts(reclaims.iter(), None, None, true); @@ -15335,8 +15321,7 @@ assert_no_storages_at_slot(&db, *slot); assert!(db.accounts_cache.slot_cache(*slot).is_none()); let account_in_slot = slot_to_pubkey_map[slot]; - let item = db.accounts_index.get_account_read_entry(&account_in_slot); - assert!(item.is_none(), "item: {item:?}"); + assert!(!db.accounts_index.contains(&account_in_slot)); } // Wait for flush to finish before starting next trial @@ -15722,20 +15707,14 @@ // The later rooted zero-lamport update to `shared_key` cannot be cleaned // because it is kept alive by the unrooted slot. accounts.clean_accounts_for_tests(); - assert!(accounts - .accounts_index - .get_account_read_entry(&shared_key) - .is_some()); + assert!(accounts.accounts_index.contains(&shared_key)); // Simulate purge_slot() all from AccountsBackgroundService accounts.purge_slot(slot0, 0, true); // Now clean should clean up the remaining key accounts.clean_accounts_for_tests(); - assert!(accounts - .accounts_index - .get_account_read_entry(&shared_key) - .is_none()); + assert!(!accounts.accounts_index.contains(&shared_key)); assert_no_storages_at_slot(&accounts, slot0); } diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index a3e7ff37b97d23..1be3ffd3a32bf5 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1138,6 +1138,30 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { .map(ReadAccountMapEntry::from_account_map_entry) } + /// Gets the index's entry for `pubkey` and applies `callback` to it + /// + /// If `callback`'s boolean return value is true, add this entry to the in-mem cache.
+    pub fn get_and_then<R>( + &self, + pubkey: &Pubkey, + callback: impl FnOnce(Option<&AccountMapEntry<T>>) -> (bool, R), + ) -> R { + self.get_bin(pubkey).get_internal(pubkey, callback) + } + + /// Gets the index's entry for `pubkey` and clones it + /// + /// Prefer `get_and_then()` whenever possible. + /// NOTE: The entry is *not* added to the in-mem cache. + pub fn get_cloned(&self, pubkey: &Pubkey) -> Option<AccountMapEntry<T>> { + self.get_and_then(pubkey, |entry| (false, entry.cloned())) + } + + /// Is `pubkey` in the index? + pub fn contains(&self, pubkey: &Pubkey) -> bool { + self.get_and_then(pubkey, |entry| (false, entry.is_some())) + } + fn slot_list_mut<RT>( &self, pubkey: &Pubkey, From d87e7bc8e5c7e5187b71a9dd054d3c3aa621078d Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 19 Feb 2024 16:51:13 +0100 Subject: [PATCH 200/401] Fix typos (#35234) * fix typos * fix typo * fix typos * fix typo --- cli/src/cluster_query.rs | 4 ++-- docs/src/proposals.md | 2 +- docs/src/validator/geyser.md | 2 +- geyser-plugin-manager/src/geyser_plugin_manager.rs | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index a5162e9498441a..87dc35810def95 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -161,7 +161,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { .takes_value(true) .value_name("BLOCKHASH") .validator(is_hash) - .help("Query fees for BLOCKHASH instead of the the most recent blockhash"), + .help("Query fees for BLOCKHASH instead of the most recent blockhash"), ), ) .subcommand( @@ -233,7 +233,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { Arg::with_name("print_accounts") .long("print-accounts") .takes_value(false) - .help("Print list of non-circualting account addresses"), + .help("Print list of non-circulating account addresses"), ), ) .subcommand( diff --git a/docs/src/proposals.md b/docs/src/proposals.md index 61bd657ff353e3..0835a338524a3b 100644 --- a/docs/src/proposals.md +++ b/docs/src/proposals.md @@ -37,7 +37,7 @@ To submit a new design proposal for Solana: Once a design proposal has been accepted, the PR will be merged into the `master` branch of the Solana repo. This also signifies the maintainers support your plan of attack. > **NOTE:** The merging of the PR will **automatically** create a link in the "Accepted Proposals" table of contents sidebar. -> Once approved, continue to submit PRs that implement the proposal. When the implementation reveals the need for tweaks to the proposal, be sure to update the "accepted proposal" document and have these change reviewed by the same approving maintainers. +> Once approved, continue to submit PRs that implement the proposal. When the implementation reveals the need for tweaks to the proposal, be sure to update the "accepted proposal" document and have these changes reviewed by the same approving maintainers. ### After Implemented diff --git a/docs/src/validator/geyser.md b/docs/src/validator/geyser.md index a8a29d10dd022a..769856303767d6 100644 --- a/docs/src/validator/geyser.md +++ b/docs/src/validator/geyser.md @@ -442,7 +442,7 @@ The following are the tables in the Postgres database When a validator lacks sufficient compute power, the overhead of saving the account data can cause it to fall behind the network especially when all accounts or a large number of accounts are selected.
The node hosting the -PostgreSQL database need to be powerful enough to handle the database loads +PostgreSQL database needs to be powerful enough to handle the database loads as well. It has been found using GCP n2-standard-64 machine type for the validator and n2-highmem-32 for the PostgreSQL node is adequate for handling transmitting all accounts while keeping up with the network. In addition, it is diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 02792525ad370c..a15f9e1318075d 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -304,10 +304,10 @@ pub enum GeyserPluginManagerRequest { #[derive(thiserror::Error, Debug)] pub enum GeyserPluginManagerError { - #[error("Cannot open the the plugin config file")] + #[error("Cannot open the plugin config file")] CannotOpenConfigFile(String), - #[error("Cannot read the the plugin config file")] + #[error("Cannot read the plugin config file")] CannotReadConfigFile(String), #[error("The config file is not in a valid Json format")] From 30adda4a71bf4a63103badbe34f596272b6d716f Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Mon, 19 Feb 2024 14:43:58 -0600 Subject: [PATCH 201/401] Update version to 1.19.0 (#35168) --- Cargo.lock | 228 ++++++++-------- Cargo.toml | 162 ++++++------ programs/sbf/Cargo.lock | 250 +++++++++--------- programs/sbf/Cargo.toml | 48 ++-- .../tests/crates/fail/Cargo.toml | 4 +- .../tests/crates/noop/Cargo.toml | 4 +- 6 files changed, 348 insertions(+), 348 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d336e0c5f6648b..46b790f6574470 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2162,7 +2162,7 @@ dependencies = [ [[package]] name = "gen-headers" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "regex", @@ -2170,7 +2170,7 @@ dependencies = [ [[package]] name = "gen-syscall-list" -version = "1.18.0" +version = "1.19.0" dependencies = [ "regex", ] @@ -4079,7 +4079,7 @@ dependencies = [ [[package]] name = "proto" -version = "1.18.0" +version = "1.19.0" dependencies = [ "protobuf-src", "tonic-build", @@ -4322,7 +4322,7 @@ dependencies = [ [[package]] name = "rbpf-cli" -version = "1.18.0" +version = "1.19.0" [[package]] name = "rdrand" @@ -5126,7 +5126,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.18.0" +version = "1.19.0" dependencies = [ "Inflector", "assert_matches", @@ -5151,7 +5151,7 @@ dependencies = [ [[package]] name = "solana-accounts-bench" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "log", @@ -5165,7 +5165,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "log", @@ -5195,7 +5195,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "1.18.0" +version = "1.19.0" dependencies = [ "arrayref", "assert_matches", @@ -5263,7 +5263,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "bytemuck", @@ -5282,7 +5282,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program-tests" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5293,7 +5293,7 @@ dependencies = [ [[package]] name = "solana-banking-bench" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -5317,7 +5317,7 @@ dependencies = [ 
[[package]] name = "solana-banks-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "borsh 1.2.1", "futures 0.3.30", @@ -5334,7 +5334,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.18.0" +version = "1.19.0" dependencies = [ "serde", "solana-sdk", @@ -5343,7 +5343,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "crossbeam-channel", @@ -5361,7 +5361,7 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -5372,7 +5372,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "crossbeam-channel", @@ -5413,7 +5413,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bv", "fnv", @@ -5430,7 +5430,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5451,7 +5451,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program-tests" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5462,7 +5462,7 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bv", "bytemuck", @@ -5481,7 +5481,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-bpf" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-logger", @@ -5489,7 +5489,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-sbf" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_cmd", "bzip2", @@ -5510,7 +5510,7 @@ dependencies = [ [[package]] name = "solana-cargo-registry" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "flate2", @@ -5539,11 +5539,11 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" -version = "1.18.0" +version = "1.19.0" [[package]] name = "solana-cargo-test-sbf" -version = "1.18.0" +version = "1.19.0" dependencies = [ "cargo_metadata", "clap 3.2.23", @@ -5554,7 +5554,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "chrono", @@ -5571,7 +5571,7 @@ dependencies = [ [[package]] name = "solana-clap-v3-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "chrono", @@ -5589,7 +5589,7 @@ dependencies = [ [[package]] name = "solana-cli" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5643,7 +5643,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.18.0" +version = "1.19.0" dependencies = [ "anyhow", "dirs-next", @@ -5658,7 +5658,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.18.0" +version = "1.19.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -5684,7 +5684,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "bincode", @@ -5716,7 +5716,7 @@ dependencies = [ [[package]] name = "solana-client-test" -version = "1.18.0" +version = "1.19.0" dependencies = [ "futures-util", "rand 0.8.5", @@ -5746,7 +5746,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -5754,7 +5754,7 @@ 
dependencies = [ [[package]] name = "solana-config-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "chrono", @@ -5767,7 +5767,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "bincode", @@ -5790,7 +5790,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "base64 0.21.7", @@ -5876,7 +5876,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "1.18.0" +version = "1.19.0" dependencies = [ "lazy_static", "log", @@ -5901,7 +5901,7 @@ dependencies = [ [[package]] name = "solana-dos" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "clap 3.2.23", @@ -5931,7 +5931,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "console", "indicatif", @@ -5943,7 +5943,7 @@ dependencies = [ [[package]] name = "solana-ed25519-program-tests" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "ed25519-dalek", @@ -5954,7 +5954,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5976,7 +5976,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "byteorder", @@ -5998,7 +5998,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bitflags 2.4.2", "block-buffer 0.10.4", @@ -6023,7 +6023,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.18.0" +version = "1.19.0" dependencies = [ "proc-macro2", "quote", @@ -6033,7 +6033,7 @@ dependencies = [ [[package]] name = "solana-genesis" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -6058,7 +6058,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-accounts-db", @@ -6069,7 +6069,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-interface" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-sdk", @@ -6079,7 +6079,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bs58", "crossbeam-channel", @@ -6104,7 +6104,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -6155,7 +6155,7 @@ dependencies = [ [[package]] name = "solana-install" -version = "1.18.0" +version = "1.19.0" dependencies = [ "atty", "bincode", @@ -6190,7 +6190,7 @@ dependencies = [ [[package]] name = "solana-keygen" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bs58", "clap 3.2.23", @@ -6207,7 +6207,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -6277,7 +6277,7 @@ dependencies = [ [[package]] name = "solana-ledger-tool" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_cmd", "bs58", @@ -6329,7 +6329,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -6341,7 +6341,7 @@ dependencies = [ [[package]] name = "solana-local-cluster" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", 
"crossbeam-channel", @@ -6380,7 +6380,7 @@ dependencies = [ [[package]] name = "solana-log-analyzer" -version = "1.18.0" +version = "1.19.0" dependencies = [ "byte-unit", "clap 3.2.23", @@ -6392,7 +6392,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.18.0" +version = "1.19.0" dependencies = [ "env_logger", "lazy_static", @@ -6401,7 +6401,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-sdk", @@ -6409,11 +6409,11 @@ dependencies = [ [[package]] name = "solana-memory-management" -version = "1.18.0" +version = "1.19.0" [[package]] name = "solana-merkle-root-bench" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "log", @@ -6426,7 +6426,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.18.0" +version = "1.19.0" dependencies = [ "fast-math", "hex", @@ -6435,7 +6435,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.18.0" +version = "1.19.0" dependencies = [ "crossbeam-channel", "env_logger", @@ -6451,7 +6451,7 @@ dependencies = [ [[package]] name = "solana-net-shaper" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 3.2.23", "rand 0.8.5", @@ -6462,7 +6462,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "clap 3.2.23", @@ -6488,7 +6488,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-notifier" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "reqwest", @@ -6498,7 +6498,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.18.0" +version = "1.19.0" dependencies = [ "ahash 0.8.8", "assert_matches", @@ -6529,7 +6529,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -6551,7 +6551,7 @@ dependencies = [ [[package]] name = "solana-poh-bench" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 3.2.23", "log", @@ -6566,7 +6566,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "anyhow", "arbitrary", @@ -6625,7 +6625,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "base64 0.21.7", @@ -6654,7 +6654,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "async-trait", @@ -6684,7 +6684,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "anyhow", "crossbeam-channel", @@ -6708,7 +6708,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-mutex", "async-trait", @@ -6735,7 +6735,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.18.0" +version = "1.19.0" dependencies = [ "lazy_static", "num_cpus", @@ -6743,7 +6743,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "console", @@ -6762,7 +6762,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -6821,7 +6821,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ 
"assert_matches", "async-trait", @@ -6850,7 +6850,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bs58", @@ -6870,7 +6870,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "anyhow", "clap 2.33.3", @@ -6887,7 +6887,7 @@ dependencies = [ [[package]] name = "solana-rpc-test" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "bs58", @@ -6914,7 +6914,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.18.0" +version = "1.19.0" dependencies = [ "aquamarine", "arrayref", @@ -6998,7 +6998,7 @@ dependencies = [ [[package]] name = "solana-runtime-transaction" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -7012,7 +7012,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.18.0" +version = "1.19.0" dependencies = [ "anyhow", "assert_matches", @@ -7071,7 +7071,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bs58", "proc-macro2", @@ -7088,7 +7088,7 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-send-transaction-service" -version = "1.18.0" +version = "1.19.0" dependencies = [ "crossbeam-channel", "log", @@ -7103,7 +7103,7 @@ dependencies = [ [[package]] name = "solana-stake-accounts" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -7119,7 +7119,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -7136,7 +7136,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.18.0" +version = "1.19.0" dependencies = [ "backoff", "bincode", @@ -7168,7 +7168,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "bs58", @@ -7184,7 +7184,7 @@ dependencies = [ [[package]] name = "solana-store-tool" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "log", @@ -7196,7 +7196,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "async-channel", @@ -7226,7 +7226,7 @@ dependencies = [ [[package]] name = "solana-svm" -version = "1.18.0" +version = "1.19.0" dependencies = [ "itertools", "log", @@ -7247,7 +7247,7 @@ dependencies = [ [[package]] name = "solana-system-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -7261,7 +7261,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -7292,7 +7292,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -7306,7 +7306,7 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -7339,7 +7339,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "bincode", @@ -7361,7 +7361,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "clap 2.33.3", @@ -7388,7 +7388,7 @@ dependencies = [ [[package]] 
name = "solana-transaction-status" -version = "1.18.0" +version = "1.19.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -7411,7 +7411,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -7447,7 +7447,7 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -7460,14 +7460,14 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-sdk", ] [[package]] name = "solana-unified-scheduler-pool" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "crossbeam-channel", @@ -7484,7 +7484,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "1.18.0" +version = "1.19.0" dependencies = [ "serde_json", "solana-metrics", @@ -7492,7 +7492,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.18.0" +version = "1.19.0" dependencies = [ "chrono", "clap 2.33.3", @@ -7557,7 +7557,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "rustc_version 0.4.0", @@ -7571,7 +7571,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "crossbeam-channel", @@ -7590,7 +7590,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -7613,7 +7613,7 @@ dependencies = [ [[package]] name = "solana-watchtower" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "humantime", @@ -7632,7 +7632,7 @@ dependencies = [ [[package]] name = "solana-wen-restart" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "prost", @@ -7654,7 +7654,7 @@ dependencies = [ [[package]] name = "solana-zk-keygen" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bs58", "clap 3.2.23", @@ -7673,7 +7673,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bytemuck", "criterion", @@ -7687,7 +7687,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program-tests" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bytemuck", "curve25519-dalek", @@ -7699,7 +7699,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.18.0" +version = "1.19.0" dependencies = [ "aes-gcm-siv", "base64 0.21.7", diff --git a/Cargo.toml b/Cargo.toml index 8c987d26902347..40ccdd6d25f646 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,7 +127,7 @@ exclude = ["programs/sbf"] resolver = "2" [workspace.package] -version = "1.18.0" +version = "1.19.0" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" homepage = "https://solanalabs.com/" @@ -308,87 +308,87 @@ smallvec = "1.13.1" smpl_jwt = "0.7.1" socket2 = "0.5.5" soketto = "0.7" -solana-account-decoder = { path = "account-decoder", version = "=1.18.0" } -solana-accounts-db = { path = "accounts-db", version = "=1.18.0" } -solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.18.0" } -solana-banks-client = { path = "banks-client", version = "=1.18.0" } -solana-banks-interface = { path = "banks-interface", version = "=1.18.0" } -solana-banks-server = { path = "banks-server", version = "=1.18.0" } -solana-bench-tps = { path = "bench-tps", 
version = "=1.18.0" } -solana-bloom = { path = "bloom", version = "=1.18.0" } -solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.18.0" } -solana-bucket-map = { path = "bucket_map", version = "=1.18.0" } -solana-cargo-registry = { path = "cargo-registry", version = "=1.18.0" } -solana-clap-utils = { path = "clap-utils", version = "=1.18.0" } -solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.18.0" } -solana-cli = { path = "cli", version = "=1.18.0" } -solana-cli-config = { path = "cli-config", version = "=1.18.0" } -solana-cli-output = { path = "cli-output", version = "=1.18.0" } -solana-client = { path = "client", version = "=1.18.0" } -solana-compute-budget-program = { path = "programs/compute-budget", version = "=1.18.0" } -solana-config-program = { path = "programs/config", version = "=1.18.0" } -solana-connection-cache = { path = "connection-cache", version = "=1.18.0", default-features = false } -solana-core = { path = "core", version = "=1.18.0" } -solana-cost-model = { path = "cost-model", version = "=1.18.0" } -solana-download-utils = { path = "download-utils", version = "=1.18.0" } -solana-entry = { path = "entry", version = "=1.18.0" } -solana-faucet = { path = "faucet", version = "=1.18.0" } -solana-frozen-abi = { path = "frozen-abi", version = "=1.18.0" } -solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=1.18.0" } -solana-genesis = { path = "genesis", version = "=1.18.0" } -solana-genesis-utils = { path = "genesis-utils", version = "=1.18.0" } -solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.18.0" } -solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.18.0" } -solana-gossip = { path = "gossip", version = "=1.18.0" } -solana-ledger = { path = "ledger", version = "=1.18.0" } -solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.18.0" } -solana-local-cluster = { path = "local-cluster", version = "=1.18.0" } -solana-logger = { path = "logger", version = "=1.18.0" } -solana-measure = { path = "measure", version = "=1.18.0" } -solana-merkle-tree = { path = "merkle-tree", version = "=1.18.0" } -solana-metrics = { path = "metrics", version = "=1.18.0" } -solana-net-utils = { path = "net-utils", version = "=1.18.0" } +solana-account-decoder = { path = "account-decoder", version = "=1.19.0" } +solana-accounts-db = { path = "accounts-db", version = "=1.19.0" } +solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.19.0" } +solana-banks-client = { path = "banks-client", version = "=1.19.0" } +solana-banks-interface = { path = "banks-interface", version = "=1.19.0" } +solana-banks-server = { path = "banks-server", version = "=1.19.0" } +solana-bench-tps = { path = "bench-tps", version = "=1.19.0" } +solana-bloom = { path = "bloom", version = "=1.19.0" } +solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.19.0" } +solana-bucket-map = { path = "bucket_map", version = "=1.19.0" } +solana-cargo-registry = { path = "cargo-registry", version = "=1.19.0" } +solana-clap-utils = { path = "clap-utils", version = "=1.19.0" } +solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.19.0" } +solana-cli = { path = "cli", version = "=1.19.0" } +solana-cli-config = { path = "cli-config", version = "=1.19.0" } +solana-cli-output = { path = "cli-output", version = "=1.19.0" } +solana-client = { path = "client", version = "=1.19.0" } +solana-compute-budget-program = { path = "programs/compute-budget", 
version = "=1.19.0" } +solana-config-program = { path = "programs/config", version = "=1.19.0" } +solana-connection-cache = { path = "connection-cache", version = "=1.19.0", default-features = false } +solana-core = { path = "core", version = "=1.19.0" } +solana-cost-model = { path = "cost-model", version = "=1.19.0" } +solana-download-utils = { path = "download-utils", version = "=1.19.0" } +solana-entry = { path = "entry", version = "=1.19.0" } +solana-faucet = { path = "faucet", version = "=1.19.0" } +solana-frozen-abi = { path = "frozen-abi", version = "=1.19.0" } +solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=1.19.0" } +solana-genesis = { path = "genesis", version = "=1.19.0" } +solana-genesis-utils = { path = "genesis-utils", version = "=1.19.0" } +solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.19.0" } +solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.19.0" } +solana-gossip = { path = "gossip", version = "=1.19.0" } +solana-ledger = { path = "ledger", version = "=1.19.0" } +solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.19.0" } +solana-local-cluster = { path = "local-cluster", version = "=1.19.0" } +solana-logger = { path = "logger", version = "=1.19.0" } +solana-measure = { path = "measure", version = "=1.19.0" } +solana-merkle-tree = { path = "merkle-tree", version = "=1.19.0" } +solana-metrics = { path = "metrics", version = "=1.19.0" } +solana-net-utils = { path = "net-utils", version = "=1.19.0" } solana-nohash-hasher = "0.2.1" -solana-notifier = { path = "notifier", version = "=1.18.0" } -solana-perf = { path = "perf", version = "=1.18.0" } -solana-poh = { path = "poh", version = "=1.18.0" } -solana-program = { path = "sdk/program", version = "=1.18.0" } -solana-program-runtime = { path = "program-runtime", version = "=1.18.0" } -solana-program-test = { path = "program-test", version = "=1.18.0" } -solana-pubsub-client = { path = "pubsub-client", version = "=1.18.0" } -solana-quic-client = { path = "quic-client", version = "=1.18.0" } -solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.18.0" } -solana-remote-wallet = { path = "remote-wallet", version = "=1.18.0", default-features = false } -solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=1.18.0" } -solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=1.18.0" } -solana-rpc = { path = "rpc", version = "=1.18.0" } -solana-rpc-client = { path = "rpc-client", version = "=1.18.0", default-features = false } -solana-rpc-client-api = { path = "rpc-client-api", version = "=1.18.0" } -solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.18.0" } -solana-runtime = { path = "runtime", version = "=1.18.0" } -solana-runtime-transaction = { path = "runtime-transaction", version = "=1.18.0" } -solana-sdk = { path = "sdk", version = "=1.18.0" } -solana-sdk-macro = { path = "sdk/macro", version = "=1.18.0" } -solana-send-transaction-service = { path = "send-transaction-service", version = "=1.18.0" } -solana-stake-program = { path = "programs/stake", version = "=1.18.0" } -solana-storage-bigtable = { path = "storage-bigtable", version = "=1.18.0" } -solana-storage-proto = { path = "storage-proto", version = "=1.18.0" } -solana-streamer = { path = "streamer", version = "=1.18.0" } -solana-svm = { path = "svm", version = "=1.18.0" } -solana-system-program = { path = "programs/system", version = "=1.18.0" } -solana-test-validator = { 
path = "test-validator", version = "=1.18.0" } -solana-thin-client = { path = "thin-client", version = "=1.18.0" } -solana-tpu-client = { path = "tpu-client", version = "=1.18.0", default-features = false } -solana-transaction-status = { path = "transaction-status", version = "=1.18.0" } -solana-turbine = { path = "turbine", version = "=1.18.0" } -solana-udp-client = { path = "udp-client", version = "=1.18.0" } -solana-version = { path = "version", version = "=1.18.0" } -solana-vote = { path = "vote", version = "=1.18.0" } -solana-vote-program = { path = "programs/vote", version = "=1.18.0" } -solana-wen-restart = { path = "wen-restart", version = "=1.18.0" } -solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } -solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } -solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } +solana-notifier = { path = "notifier", version = "=1.19.0" } +solana-perf = { path = "perf", version = "=1.19.0" } +solana-poh = { path = "poh", version = "=1.19.0" } +solana-program = { path = "sdk/program", version = "=1.19.0" } +solana-program-runtime = { path = "program-runtime", version = "=1.19.0" } +solana-program-test = { path = "program-test", version = "=1.19.0" } +solana-pubsub-client = { path = "pubsub-client", version = "=1.19.0" } +solana-quic-client = { path = "quic-client", version = "=1.19.0" } +solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.19.0" } +solana-remote-wallet = { path = "remote-wallet", version = "=1.19.0", default-features = false } +solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=1.19.0" } +solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=1.19.0" } +solana-rpc = { path = "rpc", version = "=1.19.0" } +solana-rpc-client = { path = "rpc-client", version = "=1.19.0", default-features = false } +solana-rpc-client-api = { path = "rpc-client-api", version = "=1.19.0" } +solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.19.0" } +solana-runtime = { path = "runtime", version = "=1.19.0" } +solana-runtime-transaction = { path = "runtime-transaction", version = "=1.19.0" } +solana-sdk = { path = "sdk", version = "=1.19.0" } +solana-sdk-macro = { path = "sdk/macro", version = "=1.19.0" } +solana-send-transaction-service = { path = "send-transaction-service", version = "=1.19.0" } +solana-stake-program = { path = "programs/stake", version = "=1.19.0" } +solana-storage-bigtable = { path = "storage-bigtable", version = "=1.19.0" } +solana-storage-proto = { path = "storage-proto", version = "=1.19.0" } +solana-streamer = { path = "streamer", version = "=1.19.0" } +solana-svm = { path = "svm", version = "=1.19.0" } +solana-system-program = { path = "programs/system", version = "=1.19.0" } +solana-test-validator = { path = "test-validator", version = "=1.19.0" } +solana-thin-client = { path = "thin-client", version = "=1.19.0" } +solana-tpu-client = { path = "tpu-client", version = "=1.19.0", default-features = false } +solana-transaction-status = { path = "transaction-status", version = "=1.19.0" } +solana-turbine = { path = "turbine", version = "=1.19.0" } +solana-udp-client = { path = "udp-client", version = "=1.19.0" } +solana-version = { path = "version", version = "=1.19.0" } +solana-vote = { path = "vote", version = "=1.19.0" } +solana-vote-program = { path = "programs/vote", version = "=1.19.0" } +solana-wen-restart = { path = "wen-restart", version = "=1.19.0" } 
+solana-zk-keygen = { path = "zk-keygen", version = "=1.19.0" } +solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.19.0" } +solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.19.0" } solana_rbpf = "=0.8.0" spl-associated-token-account = "=2.3.1" spl-instruction-padding = "0.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 6914ebf6d1e158..a9916da972072c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4550,7 +4550,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.18.0" +version = "1.19.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -4573,7 +4573,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "1.18.0" +version = "1.19.0" dependencies = [ "arrayref", "bincode", @@ -4633,7 +4633,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "bytemuck", @@ -4652,7 +4652,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "borsh 1.2.1", "futures 0.3.30", @@ -4667,7 +4667,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.18.0" +version = "1.19.0" dependencies = [ "serde", "solana-sdk", @@ -4676,7 +4676,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "crossbeam-channel", @@ -4694,7 +4694,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bv", "fnv", @@ -4711,7 +4711,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "byteorder 1.5.0", @@ -4728,7 +4728,7 @@ dependencies = [ [[package]] name = "solana-bpf-rust-big-mod-exp" -version = "1.18.0" +version = "1.19.0" dependencies = [ "array-bytes", "serde", @@ -4738,7 +4738,7 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bv", "bytemuck", @@ -4754,7 +4754,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "chrono", "clap 2.33.3", @@ -4769,7 +4769,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.18.0" +version = "1.19.0" dependencies = [ "dirs-next", "lazy_static", @@ -4783,7 +4783,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.18.0" +version = "1.19.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -4808,7 +4808,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "bincode", @@ -4839,7 +4839,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -4847,7 +4847,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "chrono", @@ -4859,7 +4859,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "bincode", @@ -4878,7 +4878,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -4953,7 +4953,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "1.18.0" +version 
= "1.19.0" dependencies = [ "lazy_static", "log", @@ -4975,7 +4975,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "console", "indicatif", @@ -4987,7 +4987,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "crossbeam-channel", @@ -5007,7 +5007,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "byteorder 1.5.0", @@ -5029,7 +5029,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.18.0" +version = "1.19.0" dependencies = [ "block-buffer 0.10.4", "bs58", @@ -5052,7 +5052,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.18.0" +version = "1.19.0" dependencies = [ "proc-macro2", "quote", @@ -5062,7 +5062,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-accounts-db", @@ -5073,7 +5073,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-interface" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-sdk", @@ -5083,7 +5083,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bs58", "crossbeam-channel", @@ -5108,7 +5108,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5156,7 +5156,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "bincode", @@ -5222,7 +5222,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-measure", @@ -5233,7 +5233,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.18.0" +version = "1.19.0" dependencies = [ "env_logger", "lazy_static", @@ -5242,7 +5242,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "solana-sdk", @@ -5250,7 +5250,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.18.0" +version = "1.19.0" dependencies = [ "fast-math", "solana-program", @@ -5258,7 +5258,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.18.0" +version = "1.19.0" dependencies = [ "crossbeam-channel", "gethostname", @@ -5271,7 +5271,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "clap 3.1.6", @@ -5297,7 +5297,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-perf" -version = "1.18.0" +version = "1.19.0" dependencies = [ "ahash 0.8.8", "bincode", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.18.0" +version = "1.19.0" dependencies = [ "core_affinity", "crossbeam-channel", @@ -5340,7 +5340,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "ark-bn254", "ark-ec", @@ -5393,7 +5393,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -5419,7 +5419,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "async-trait", @@ 
-5448,7 +5448,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "crossbeam-channel", "futures-util", @@ -5471,7 +5471,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-mutex", "async-trait", @@ -5495,7 +5495,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.18.0" +version = "1.19.0" dependencies = [ "lazy_static", "num_cpus", @@ -5503,7 +5503,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.18.0" +version = "1.19.0" dependencies = [ "console", "dialoguer", @@ -5520,7 +5520,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -5575,7 +5575,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "base64 0.21.7", @@ -5599,7 +5599,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bs58", @@ -5619,7 +5619,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.18.0" +version = "1.19.0" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -5630,7 +5630,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.18.0" +version = "1.19.0" dependencies = [ "aquamarine", "arrayref", @@ -5706,7 +5706,7 @@ dependencies = [ [[package]] name = "solana-sbf-programs" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "byteorder 1.5.0", @@ -5735,7 +5735,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-sbf-rust-128bit-dep", @@ -5743,21 +5743,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit-dep" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alloc" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alt-bn128" -version = "1.18.0" +version = "1.19.0" dependencies = [ "array-bytes", "solana-program", @@ -5765,7 +5765,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alt-bn128-compression" -version = "1.18.0" +version = "1.19.0" dependencies = [ "array-bytes", "solana-program", @@ -5773,21 +5773,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-call-depth" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-caller-access" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-curve25519" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-zk-token-sdk", @@ -5795,14 +5795,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-custom-heap" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dep-crate" -version = "1.18.0" +version = "1.19.0" dependencies = [ "byteorder 1.5.0", "solana-program", @@ -5810,21 +5810,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-deprecated-loader" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dup-accounts" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = 
"solana-sbf-rust-error-handling" -version = "1.18.0" +version = "1.19.0" dependencies = [ "num-derive 0.3.0", "num-traits", @@ -5834,42 +5834,42 @@ dependencies = [ [[package]] name = "solana-sbf-rust-external-spend" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-finalize" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-get-minimum-delegation" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-inner_instruction_alignment_check" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-instruction-introspection" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke" -version = "1.18.0" +version = "1.19.0" dependencies = [ "rustversion", "solana-program", @@ -5879,49 +5879,49 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-error" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-ok" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-return" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoked" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-iter" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-log-data" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-many-args" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-sbf-rust-many-args-dep", @@ -5929,14 +5929,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-many-args-dep" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-mem" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -5946,7 +5946,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-membuiltins" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-sbf-rust-mem", @@ -5954,21 +5954,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-noop" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-panic" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-param-passing" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-sbf-rust-param-passing-dep", @@ -5976,14 +5976,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-param-passing-dep" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-poseidon" -version = "1.18.0" +version = "1.19.0" dependencies = [ "array-bytes", "solana-program", @@ -5991,7 +5991,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-rand" -version = "1.18.0" +version = "1.19.0" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", @@ -6000,14 +6000,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = 
"solana-sbf-rust-realloc-invoke" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-sbf-rust-realloc", @@ -6015,7 +6015,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-remaining-compute-units" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -6025,21 +6025,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-ro-account_modify" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-ro-modify" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sanity" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -6049,7 +6049,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-secp256k1-recover" -version = "1.18.0" +version = "1.19.0" dependencies = [ "libsecp256k1 0.7.0", "solana-program", @@ -6057,7 +6057,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sha" -version = "1.18.0" +version = "1.19.0" dependencies = [ "blake3", "solana-program", @@ -6065,21 +6065,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sibling-instructions" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sibling_inner-instructions" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-simulation" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-logger", "solana-program", @@ -6090,21 +6090,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-spoof1" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-spoof1-system" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sysvar" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -6114,21 +6114,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-upgradeable" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-upgraded" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sdk" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "base64 0.21.7", @@ -6181,7 +6181,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bs58", "proc-macro2", @@ -6198,7 +6198,7 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-send-transaction-service" -version = "1.18.0" +version = "1.19.0" dependencies = [ "crossbeam-channel", "log", @@ -6212,7 +6212,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -6225,7 +6225,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.18.0" +version = "1.19.0" dependencies = [ "backoff", "bincode", @@ -6257,7 +6257,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "bs58", @@ -6272,7 +6272,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-channel", "bytes", @@ -6300,7 +6300,7 @@ dependencies = [ [[package]] name = "solana-svm" -version = 
"1.18.0" +version = "1.19.0" dependencies = [ "itertools", "log", @@ -6320,7 +6320,7 @@ dependencies = [ [[package]] name = "solana-system-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -6332,7 +6332,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.18.0" +version = "1.19.0" dependencies = [ "base64 0.21.7", "bincode", @@ -6363,7 +6363,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -6376,7 +6376,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "bincode", @@ -6398,7 +6398,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.18.0" +version = "1.19.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -6421,7 +6421,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "bytes", @@ -6455,7 +6455,7 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.18.0" +version = "1.19.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -6468,14 +6468,14 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" -version = "1.18.0" +version = "1.19.0" dependencies = [ "solana-sdk", ] [[package]] name = "solana-unified-scheduler-pool" -version = "1.18.0" +version = "1.19.0" dependencies = [ "assert_matches", "crossbeam-channel", @@ -6491,7 +6491,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.18.0" +version = "1.19.0" dependencies = [ "chrono", "clap 2.33.3", @@ -6554,7 +6554,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "rustc_version", @@ -6568,7 +6568,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "1.18.0" +version = "1.19.0" dependencies = [ "crossbeam-channel", "itertools", @@ -6585,7 +6585,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bincode", "log", @@ -6605,7 +6605,7 @@ dependencies = [ [[package]] name = "solana-wen-restart" -version = "1.18.0" +version = "1.19.0" dependencies = [ "log", "prost", @@ -6624,7 +6624,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "1.18.0" +version = "1.19.0" dependencies = [ "bytemuck", "num-derive 0.4.2", @@ -6636,7 +6636,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.18.0" +version = "1.19.0" dependencies = [ "aes-gcm-siv", "base64 0.21.7", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 6477f12f56362c..37b1877594c34c 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.18.0" +version = "1.19.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -25,29 +25,29 @@ rand = "0.8" rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" -solana-account-decoder = { path = "../../account-decoder", version = "=1.18.0" } -solana-accounts-db = { path = "../../accounts-db", version = "=1.18.0" } -solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.18.0" } -solana-cli-output = { path = "../../cli-output", version = "=1.18.0" } -solana-ledger = { path = "../../ledger", version = "=1.18.0" } -solana-logger = { path = "../../logger", 
version = "=1.18.0" } -solana-measure = { path = "../../measure", version = "=1.18.0" } -solana-program = { path = "../../sdk/program", version = "=1.18.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.18.0" } -solana-program-test = { path = "../../program-test", version = "=1.18.0" } -solana-runtime = { path = "../../runtime", version = "=1.18.0" } -solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=1.18.0" } -solana-sbf-rust-invoke = { path = "rust/invoke", version = "=1.18.0" } -solana-sbf-rust-invoked = { path = "rust/invoked", version = "=1.18.0", default-features = false } -solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=1.18.0" } -solana-sbf-rust-mem = { path = "rust/mem", version = "=1.18.0" } -solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=1.18.0" } -solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.18.0", default-features = false } -solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.18.0" } -solana-sdk = { path = "../../sdk", version = "=1.18.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.18.0" } -solana-validator = { path = "../../validator", version = "=1.18.0" } -solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.18.0" } +solana-account-decoder = { path = "../../account-decoder", version = "=1.19.0" } +solana-accounts-db = { path = "../../accounts-db", version = "=1.19.0" } +solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.19.0" } +solana-cli-output = { path = "../../cli-output", version = "=1.19.0" } +solana-ledger = { path = "../../ledger", version = "=1.19.0" } +solana-logger = { path = "../../logger", version = "=1.19.0" } +solana-measure = { path = "../../measure", version = "=1.19.0" } +solana-program = { path = "../../sdk/program", version = "=1.19.0" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.19.0" } +solana-program-test = { path = "../../program-test", version = "=1.19.0" } +solana-runtime = { path = "../../runtime", version = "=1.19.0" } +solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=1.19.0" } +solana-sbf-rust-invoke = { path = "rust/invoke", version = "=1.19.0" } +solana-sbf-rust-invoked = { path = "rust/invoked", version = "=1.19.0", default-features = false } +solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=1.19.0" } +solana-sbf-rust-mem = { path = "rust/mem", version = "=1.19.0" } +solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=1.19.0" } +solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.19.0", default-features = false } +solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.19.0" } +solana-sdk = { path = "../../sdk", version = "=1.19.0" } +solana-transaction-status = { path = "../../transaction-status", version = "=1.19.0" } +solana-validator = { path = "../../validator", version = "=1.19.0" } +solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.19.0" } solana_rbpf = "=0.8.0" static_assertions = "1.1.0" thiserror = "1.0" diff --git a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index 00fdb7e5330ea7..8e1b7f77206707 100644 --- a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fail" -version = "1.18.0" +version = "1.19.0" 
description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.18.0" } +solana-program = { path = "../../../../program", version = "=1.19.0" } [lib] crate-type = ["cdylib"] diff --git a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index c4fb1364393d35..2d48c1295424da 100644 --- a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noop" -version = "1.18.0" +version = "1.19.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.18.0" } +solana-program = { path = "../../../../program", version = "=1.19.0" } [lib] crate-type = ["cdylib"] From 13f232436af5238d9d5e6d3e355daf69421c70bf Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 20 Feb 2024 07:09:29 -0500 Subject: [PATCH 202/401] Renames BankSnapshotType -> BankSnapshotKind (#35246) --- runtime/src/snapshot_bank_utils.rs | 10 +++--- runtime/src/snapshot_utils.rs | 50 +++++++++++++++--------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index dfeda8e59e0fe1..42680fa1e9357e 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -18,7 +18,7 @@ use { get_snapshot_file_name, get_storages_to_serialize, hard_link_storages_to_snapshot, rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, verify_and_unarchive_snapshots, verify_unpacked_snapshots_dir_and_version, - AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotType, SnapshotError, + AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotKind, SnapshotError, SnapshotRootPaths, SnapshotVersion, StorageAndNextAppendVecId, UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, }, @@ -187,7 +187,7 @@ pub fn add_bank_snapshot( Ok(BankSnapshotInfo { slot, - snapshot_type: BankSnapshotType::Pre, + snapshot_kind: BankSnapshotKind::Pre, snapshot_dir: bank_snapshot_dir, snapshot_version, }) @@ -1236,7 +1236,7 @@ pub fn create_snapshot_dirs_for_tests( continue; // leave the snapshot dir at PRE stage } - // Reserialize the snapshot dir to convert it from PRE to POST, because only the POST type can be used + // Reserialize the snapshot dir to convert it from PRE to POST, because only the POST kind can be used // to construct a bank. 
assert!( crate::serde_snapshot::reserialize_bank_with_new_accounts_hash( @@ -2415,10 +2415,10 @@ mod tests { assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 10); - purge_old_bank_snapshots(&bank_snapshots_dir, 3, Some(BankSnapshotType::Pre)); + purge_old_bank_snapshots(&bank_snapshots_dir, 3, Some(BankSnapshotKind::Pre)); assert_eq!(get_bank_snapshots_pre(&bank_snapshots_dir).len(), 3); - purge_old_bank_snapshots(&bank_snapshots_dir, 2, Some(BankSnapshotType::Post)); + purge_old_bank_snapshots(&bank_snapshots_dir, 2, Some(BankSnapshotKind::Post)); assert_eq!(get_bank_snapshots_post(&bank_snapshots_dir).len(), 2); assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 5); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 1bd9c4d254958d..6dabb3d38e9669 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -121,13 +121,13 @@ impl SnapshotVersion { } /// Information about a bank snapshot. Namely the slot of the bank, the path to the snapshot, and -/// the type of the snapshot. +/// the kind of the snapshot. #[derive(PartialEq, Eq, Debug)] pub struct BankSnapshotInfo { /// Slot of the bank pub slot: Slot, - /// Type of the snapshot - pub snapshot_type: BankSnapshotType, + /// Snapshot kind + pub snapshot_kind: BankSnapshotKind, /// Path to the bank snapshot directory pub snapshot_dir: PathBuf, /// Snapshot version @@ -195,12 +195,12 @@ impl BankSnapshotInfo { // AccountsPackage for a snapshot/slot; if AHV is in the middle of reserializing the // bank snapshot file (writing the new "Post" file), and then the process dies, // there will be an incomplete "Post" file on disk. We do not want only the existence of - // this "Post" file to be sufficient for deciding the snapshot type as "Post". More so, + // this "Post" file to be sufficient for deciding the snapshot kind as "Post". More so, // "Post" *requires* the *absence* of a "Pre" file. - let snapshot_type = if bank_snapshot_pre_path.is_file() { - BankSnapshotType::Pre + let snapshot_kind = if bank_snapshot_pre_path.is_file() { + BankSnapshotKind::Pre } else if bank_snapshot_post_path.is_file() { - BankSnapshotType::Post + BankSnapshotKind::Post } else { return Err(SnapshotNewFromDirError::MissingSnapshotFile( bank_snapshot_dir, @@ -209,7 +209,7 @@ impl BankSnapshotInfo { Ok(BankSnapshotInfo { slot, - snapshot_type, + snapshot_kind, snapshot_dir: bank_snapshot_dir, snapshot_version, }) @@ -218,9 +218,9 @@ impl BankSnapshotInfo { pub fn snapshot_path(&self) -> PathBuf { let mut bank_snapshot_path = self.snapshot_dir.join(get_snapshot_file_name(self.slot)); - let ext = match self.snapshot_type { - BankSnapshotType::Pre => BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, - BankSnapshotType::Post => "", + let ext = match self.snapshot_kind { + BankSnapshotKind::Pre => BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, + BankSnapshotKind::Post => "", }; bank_snapshot_path.set_extension(ext); @@ -236,7 +236,7 @@ impl BankSnapshotInfo { /// that this bank snapshot is "pre" accounts hash. Later, when the accounts hash is calculated, /// the bank snapshot is re-serialized, and is now "post" accounts hash. 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum BankSnapshotType { +pub enum BankSnapshotKind { /// This bank snapshot has *not* yet had its accounts hash calculated Pre, /// This bank snapshot *has* had its accounts hash calculated @@ -894,25 +894,25 @@ pub fn get_bank_snapshots(bank_snapshots_dir: impl AsRef) -> Vec) -> Vec { let mut bank_snapshots = get_bank_snapshots(bank_snapshots_dir); - bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_type == BankSnapshotType::Pre); + bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_kind == BankSnapshotKind::Pre); bank_snapshots } /// Get the bank snapshots in a directory /// -/// This function retains only the bank snapshots of type BankSnapshotType::Post +/// This function retains only the bank snapshots of kind BankSnapshotKind::Post pub fn get_bank_snapshots_post(bank_snapshots_dir: impl AsRef) -> Vec { let mut bank_snapshots = get_bank_snapshots(bank_snapshots_dir); - bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_type == BankSnapshotType::Post); + bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_kind == BankSnapshotKind::Post); bank_snapshots } /// Get the bank snapshot with the highest slot in a directory /// -/// This function gets the highest bank snapshot of type BankSnapshotType::Pre +/// This function gets the highest bank snapshot of kind BankSnapshotKind::Pre pub fn get_highest_bank_snapshot_pre( bank_snapshots_dir: impl AsRef, ) -> Option { @@ -921,7 +921,7 @@ pub fn get_highest_bank_snapshot_pre( /// Get the bank snapshot with the highest slot in a directory /// -/// This function gets the highest bank snapshot of type BankSnapshotType::Post +/// This function gets the highest bank snapshot of kind BankSnapshotKind::Post pub fn get_highest_bank_snapshot_post( bank_snapshots_dir: impl AsRef, ) -> Option { @@ -930,7 +930,7 @@ pub fn get_highest_bank_snapshot_post( /// Get the bank snapshot with the highest slot in a directory /// -/// This function gets the highest bank snapshot of any type +/// This function gets the highest bank snapshot of any kind pub fn get_highest_bank_snapshot(bank_snapshots_dir: impl AsRef) -> Option { do_get_highest_bank_snapshot(get_bank_snapshots(&bank_snapshots_dir)) } @@ -2142,11 +2142,11 @@ pub fn verify_snapshot_archive( pub fn purge_old_bank_snapshots( bank_snapshots_dir: impl AsRef, num_bank_snapshots_to_retain: usize, - filter_by_type: Option, + filter_by_kind: Option, ) { - let mut bank_snapshots = match filter_by_type { - Some(BankSnapshotType::Pre) => get_bank_snapshots_pre(&bank_snapshots_dir), - Some(BankSnapshotType::Post) => get_bank_snapshots_post(&bank_snapshots_dir), + let mut bank_snapshots = match filter_by_kind { + Some(BankSnapshotKind::Pre) => get_bank_snapshots_pre(&bank_snapshots_dir), + Some(BankSnapshotKind::Post) => get_bank_snapshots_post(&bank_snapshots_dir), None => get_bank_snapshots(&bank_snapshots_dir), }; @@ -2164,8 +2164,8 @@ pub fn purge_old_bank_snapshots( /// Only a single bank snapshot could be needed at startup (when using fast boot), so /// retain the highest bank snapshot "post", and purge the rest. 
pub fn purge_old_bank_snapshots_at_startup(bank_snapshots_dir: impl AsRef) { - purge_old_bank_snapshots(&bank_snapshots_dir, 0, Some(BankSnapshotType::Pre)); - purge_old_bank_snapshots(&bank_snapshots_dir, 1, Some(BankSnapshotType::Post)); + purge_old_bank_snapshots(&bank_snapshots_dir, 0, Some(BankSnapshotKind::Pre)); + purge_old_bank_snapshots(&bank_snapshots_dir, 1, Some(BankSnapshotKind::Post)); let highest_bank_snapshot_post = get_highest_bank_snapshot_post(&bank_snapshots_dir); if let Some(highest_bank_snapshot_post) = highest_bank_snapshot_post { From 012f58848272c6e95d231fc74f809cb30225eb0c Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 20 Feb 2024 10:16:46 -0500 Subject: [PATCH 203/401] Removes copying owner when serializing Account (#35118) * Removes copying owner when serializing Account * Provide generic AbiExample impls for &T and &[T] --------- Co-authored-by: Ryo Onodera --- Cargo.lock | 1 - frozen-abi/Cargo.toml | 1 - frozen-abi/src/abi_example.rs | 34 ++++++++++++++-------------------- programs/sbf/Cargo.lock | 1 - sdk/src/account.rs | 7 +++---- 5 files changed, 17 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 46b790f6574470..a90b6f90e52274 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6007,7 +6007,6 @@ dependencies = [ "either", "generic-array 0.14.7", "im", - "lazy_static", "log", "memmap2", "rustc_version 0.4.0", diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 25272a04b80b7b..6b8a39bf0ba49a 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -12,7 +12,6 @@ edition = { workspace = true } [dependencies] bs58 = { workspace = true } bv = { workspace = true, features = ["serde"] } -lazy_static = { workspace = true } log = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["derive", "rc"] } serde_bytes = { workspace = true } diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index 50a17af715bd1d..7931b05b6a81b4 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -1,6 +1,5 @@ use { crate::abi_digester::{AbiDigester, DigestError, DigestResult}, - lazy_static::lazy_static, log::*, serde::Serialize, std::any::type_name, @@ -350,6 +349,20 @@ fn leak_and_inhibit_drop<'a, T>(t: T) -> &'a mut T { Box::leak(Box::new(t)) } +impl AbiExample for &T { + fn example() -> Self { + info!("AbiExample for (&T): {}", type_name::()); + leak_and_inhibit_drop(T::example()) + } +} + +impl AbiExample for &[T] { + fn example() -> Self { + info!("AbiExample for (&[T]): {}", type_name::()); + leak_and_inhibit_drop(vec![T::example()]) + } +} + impl AbiExample for std::sync::Weak { fn example() -> Self { info!("AbiExample for (Arc's Weak): {}", type_name::()); @@ -434,25 +447,6 @@ impl AbiExample for Vec { } } -lazy_static! { - /// we need &Vec, so we need something with a static lifetime - static ref VEC_U8: Vec = vec![u8::default()]; -} - -impl AbiExample for &Vec { - fn example() -> Self { - info!("AbiExample for (&Vec): {}", type_name::()); - &VEC_U8 - } -} - -impl AbiExample for &[u8] { - fn example() -> Self { - info!("AbiExample for (&[u8]): {}", type_name::()); - &VEC_U8[..] 
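// A minimal sketch, not part of the patch, of the Box::leak idiom behind
// leak_and_inhibit_drop above: boxing a value and leaking the box yields a
// reference of any requested lifetime, because the allocation is never
// freed and Drop never runs. This is what lets AbiExample hand out example
// values for &T and &[T] without the lazy_static statics removed here.
fn leak<'a, T>(value: T) -> &'a mut T {
    Box::leak(Box::new(value))
}

fn main() {
    // The Vec is intentionally leaked, so the slice remains valid forever.
    let bytes: &'static [u8] = leak(vec![0u8]).as_slice();
    assert_eq!(bytes, &[0]);
}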
- } -} - impl AbiExample for VecDeque { fn example() -> Self { info!("AbiExample for (Vec): {}", type_name::()); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a9916da972072c..d7c1f064cce047 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5037,7 +5037,6 @@ dependencies = [ "either", "generic-array 0.14.7", "im", - "lazy_static", "log", "memmap2", "rustc_version", diff --git a/sdk/src/account.rs b/sdk/src/account.rs index a67f49ded47f1d..227a56668038e9 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -63,15 +63,14 @@ mod account_serialize { #[serde(with = "serde_bytes")] // a slice so we don't have to make a copy just to serialize this data: &'a [u8], - // can't be &pubkey because abi example doesn't support it - owner: Pubkey, + owner: &'a Pubkey, executable: bool, rent_epoch: Epoch, } /// allows us to implement serialize on AccountSharedData that is equivalent to Account::serialize without making a copy of the Vec pub fn serialize_account( - account: &(impl ReadableAccount + Serialize), + account: &impl ReadableAccount, serializer: S, ) -> Result where @@ -80,7 +79,7 @@ mod account_serialize { let temp = Account { lamports: account.lamports(), data: account.data(), - owner: *account.owner(), + owner: account.owner(), executable: account.executable(), rent_epoch: account.rent_epoch(), }; From d7ae65fc46004bfa7c2c7b41542190e650ae69c4 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 20 Feb 2024 16:04:32 +0000 Subject: [PATCH 204/401] removes feature-gated code revising turbine epoch stakes for shreds propagation (#35226) --- sdk/src/feature_set.rs | 2 +- turbine/src/cluster_nodes.rs | 19 ++++--------------- 2 files changed, 5 insertions(+), 16 deletions(-) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 2201ed5c400247..82687673246293 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -676,7 +676,7 @@ pub mod reduce_stake_warmup_cooldown { solana_sdk::declare_id!("GwtDQBghCTBgmX2cpEGNPxTEBUTQRaDMGTr5qychdGMj"); } -pub mod revise_turbine_epoch_stakes { +mod revise_turbine_epoch_stakes { solana_sdk::declare_id!("BTWmtJC8U5ZLMbBUUA1k6As62sYjPEjAiNAT55xYGdJU"); } diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 8079178cf415b9..0c55cb41e56472 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -362,7 +362,8 @@ impl ClusterNodesCache { working_bank: &Bank, cluster_info: &ClusterInfo, ) -> Arc> { - let epoch = get_epoch(shred_slot, root_bank); + let epoch_schedule = root_bank.epoch_schedule(); + let epoch = epoch_schedule.get_epoch(shred_slot); let entry = self.get_cache_entry(epoch); if let Some((_, nodes)) = entry .read() @@ -382,8 +383,8 @@ impl ClusterNodesCache { .iter() .find_map(|bank| bank.epoch_staked_nodes(epoch)); if epoch_staked_nodes.is_none() { - inc_new_counter_debug!("cluster_nodes-unknown_epoch_staked_nodes", 1); - if epoch != get_epoch(root_bank.slot(), root_bank) { + inc_new_counter_info!("cluster_nodes-unknown_epoch_staked_nodes", 1); + if epoch != epoch_schedule.get_epoch(root_bank.slot()) { return self.get(root_bank.slot(), root_bank, working_bank, cluster_info); } inc_new_counter_info!("cluster_nodes-unknown_epoch_staked_nodes_root", 1); @@ -397,18 +398,6 @@ impl ClusterNodesCache { } } -fn get_epoch(shred_slot: Slot, root_bank: &Bank) -> Epoch { - if check_feature_activation( - &feature_set::revise_turbine_epoch_stakes::id(), - shred_slot, - root_bank, - ) { - root_bank.epoch_schedule().get_epoch(shred_slot) - } else { - 
root_bank.get_leader_schedule_epoch(shred_slot) - } -} - impl From for NodeId { fn from(node: ContactInfo) -> Self { NodeId::ContactInfo(node) From d48f277091df459fb92be51a38464fd9c1ffb15b Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 20 Feb 2024 08:09:27 -0800 Subject: [PATCH 205/401] SVM: Remove accounts-db deps in accounts_loader tests (#35223) --- svm/src/account_loader.rs | 43 +++++++++++++++------------------------ 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index cfe0b069f156ae..f83652eb7ad119 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -450,7 +450,6 @@ mod tests { use { super::*, nonce::state::Versions as NonceVersions, - solana_accounts_db::{accounts::Accounts, accounts_db::AccountsDb, ancestors::Ancestors}, solana_program_runtime::{ compute_budget_processor, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, @@ -476,8 +475,7 @@ mod tests { }; struct TestCallbacks { - accounts: Accounts, - ancestors: Ancestors, + accounts_map: HashMap, rent_collector: RentCollector, feature_set: Arc, } @@ -488,9 +486,7 @@ mod tests { } fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - self.accounts - .load_without_fixed_root(&self.ancestors, pubkey) - .map(|(acc, _slot)| acc) + self.accounts_map.get(pubkey).cloned() } fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { @@ -515,18 +511,14 @@ mod tests { feature_set: &mut FeatureSet, fee_structure: &FeeStructure, ) -> Vec { - let accounts_db = AccountsDb::new_single_for_tests(); - let accounts = Accounts::new(Arc::new(accounts_db)); - for ka in ka.iter() { - accounts.accounts_db.store_for_tests(0, &[(&ka.0, &ka.1)]); - } - - let ancestors = vec![(0, 0)].into_iter().collect(); feature_set.deactivate(&feature_set::disable_rent_fees_collection::id()); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); + let mut accounts_map = HashMap::new(); + for (pubkey, account) in ka { + accounts_map.insert(*pubkey, account.clone()); + } let callbacks = TestCallbacks { - accounts, - ancestors, + accounts_map, rent_collector: rent_collector.clone(), feature_set: Arc::new(feature_set.clone()), }; @@ -991,17 +983,19 @@ mod tests { } fn load_accounts_no_store( - accounts: Accounts, + ka: &[TransactionAccount], tx: Transaction, account_overrides: Option<&AccountOverrides>, ) -> Vec { let tx = SanitizedTransaction::from_transaction_for_tests(tx); - let ancestors = vec![(0, 0)].into_iter().collect(); let mut error_counters = TransactionErrorMetrics::default(); + let mut accounts_map = HashMap::new(); + for (pubkey, account) in ka { + accounts_map.insert(*pubkey, account.clone()); + } let callbacks = TestCallbacks { - accounts, - ancestors, + accounts_map, rent_collector: RentCollector::default(), feature_set: Arc::new(FeatureSet::all_enabled()), }; @@ -1020,9 +1014,6 @@ mod tests { #[test] fn test_instructions() { solana_logger::setup(); - let accounts_db = AccountsDb::new_single_for_tests(); - let accounts = Accounts::new(Arc::new(accounts_db)); - let instructions_key = solana_sdk::sysvar::instructions::id(); let keypair = Keypair::new(); let instructions = vec![CompiledInstruction::new(1, &(), vec![0, 1])]; @@ -1034,7 +1025,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(accounts, tx, None); + let loaded_accounts = load_accounts_no_store(&[], tx, None); assert_eq!(loaded_accounts.len(), 1); assert!(loaded_accounts[0].0.is_err()); } @@ -1042,8 +1033,6 
@@ mod tests { #[test] fn test_overrides() { solana_logger::setup(); - let accounts_db = AccountsDb::new_single_for_tests(); - let accounts = Accounts::new(Arc::new(accounts_db)); let mut account_overrides = AccountOverrides::default(); let slot_history_id = sysvar::slot_history::id(); let account = AccountSharedData::new(42, 0, &Pubkey::default()); @@ -1051,7 +1040,6 @@ mod tests { let keypair = Keypair::new(); let account = AccountSharedData::new(1_000_000, 0, &Pubkey::default()); - accounts.store_slow_uncached(0, &keypair.pubkey(), &account); let instructions = vec![CompiledInstruction::new(2, &(), vec![0])]; let tx = Transaction::new_with_compiled_instructions( @@ -1062,7 +1050,8 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(accounts, tx, Some(&account_overrides)); + let loaded_accounts = + load_accounts_no_store(&[(keypair.pubkey(), account)], tx, Some(&account_overrides)); assert_eq!(loaded_accounts.len(), 1); let loaded_transaction = loaded_accounts[0].0.as_ref().unwrap(); assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey()); From befe8b9d983f6c386241a68327b602bed3e158fc Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 20 Feb 2024 09:30:46 -0800 Subject: [PATCH 206/401] replay: reload tower if set-identity during startup (#35173) * replay: reload tower if set-identity during startup * pr feedback: add unit tests * pr feedback: use tower.node_pubkey, more descriptive names --- core/src/consensus.rs | 22 ++++++ core/src/replay_stage.rs | 126 ++++++++++++++++++++++++------ sdk/program/src/vote/state/mod.rs | 18 +++++ 3 files changed, 141 insertions(+), 25 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 4f129b18282218..3e24f33233863e 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -292,6 +292,28 @@ impl Tower { } } + #[cfg(test)] + pub fn new_random(node_pubkey: Pubkey) -> Self { + use rand::Rng; + + let mut rng = rand::thread_rng(); + let root_slot = rng.gen(); + let vote_state = VoteState::new_rand_for_tests(node_pubkey, root_slot); + let last_vote = VoteStateUpdate::from( + vote_state + .votes + .iter() + .map(|lv| (lv.slot(), lv.confirmation_count())) + .collect::>(), + ); + Self { + node_pubkey, + vote_state, + last_vote: VoteTransaction::CompactVoteStateUpdate(last_vote), + ..Tower::default() + } + } + pub fn new_from_bankforks( bank_forks: &BankForks, node_pubkey: &Pubkey, diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 27c30b9e52eb7b..1b7b4737f55fc2 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -578,6 +578,21 @@ impl ReplayStage { let _exit = Finalizer::new(exit.clone()); let mut identity_keypair = cluster_info.keypair().clone(); let mut my_pubkey = identity_keypair.pubkey(); + if my_pubkey != tower.node_pubkey { + // set-identity was called during the startup procedure, ensure the tower is consistent + // before starting the loop. 
further calls to set-identity will reload the tower in the loop + let my_old_pubkey = tower.node_pubkey; + tower = Self::load_tower( + tower_storage.as_ref(), + &my_pubkey, + &vote_account, + &bank_forks, + ); + warn!( + "Identity changed during startup from {} to {}", + my_old_pubkey, my_pubkey + ); + } let (mut progress, mut heaviest_subtree_fork_choice) = Self::initialize_progress_and_fork_choice_with_locked_bank_forks( &bank_forks, @@ -983,28 +998,12 @@ impl ReplayStage { my_pubkey = identity_keypair.pubkey(); // Load the new identity's tower - tower = Tower::restore(tower_storage.as_ref(), &my_pubkey) - .and_then(|restored_tower| { - let root_bank = bank_forks.read().unwrap().root_bank(); - let slot_history = root_bank.get_slot_history(); - restored_tower.adjust_lockouts_after_replay( - root_bank.slot(), - &slot_history, - ) - }) - .unwrap_or_else(|err| { - if err.is_file_missing() { - Tower::new_from_bankforks( - &bank_forks.read().unwrap(), - &my_pubkey, - &vote_account, - ) - } else { - error!("Failed to load tower for {}: {}", my_pubkey, err); - std::process::exit(1); - } - }); - + tower = Self::load_tower( + tower_storage.as_ref(), + &my_pubkey, + &vote_account, + &bank_forks, + ); // Ensure the validator can land votes with the new identity before // becoming leader has_new_vote_been_rooted = !wait_for_vote_to_start_leader; @@ -1154,6 +1153,32 @@ impl ReplayStage { }) } + fn load_tower( + tower_storage: &dyn TowerStorage, + node_pubkey: &Pubkey, + vote_account: &Pubkey, + bank_forks: &Arc>, + ) -> Tower { + Tower::restore(tower_storage, node_pubkey) + .and_then(|restored_tower| { + let root_bank = bank_forks.read().unwrap().root_bank(); + let slot_history = root_bank.get_slot_history(); + restored_tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history) + }) + .unwrap_or_else(|err| { + if err.is_file_missing() { + Tower::new_from_bankforks( + &bank_forks.read().unwrap(), + node_pubkey, + vote_account, + ) + } else { + error!("Failed to load tower for {}: {}", node_pubkey, err); + std::process::exit(1); + } + }) + } + fn check_for_vote_only_mode( heaviest_bank_slot: Slot, forks_root: Slot, @@ -4230,9 +4255,9 @@ pub(crate) mod tests { crate::{ consensus::{ progress_map::{ValidatorStakeInfo, RETRANSMIT_BASE_DELAY_MS}, - tower_storage::NullTowerStorage, + tower_storage::{FileTowerStorage, NullTowerStorage}, tree_diff::TreeDiff, - Tower, + Tower, VOTE_THRESHOLD_DEPTH, }, replay_stage::ReplayStage, vote_simulator::{self, VoteSimulator}, @@ -4254,7 +4279,7 @@ pub(crate) mod tests { }, solana_runtime::{ accounts_background_service::AbsRequestSender, - commitment::BlockCommitment, + commitment::{BlockCommitment, VOTE_THRESHOLD_SIZE}, genesis_utils::{GenesisConfigInfo, ValidatorVoteKeypairs}, }, solana_sdk::{ @@ -4278,6 +4303,7 @@ pub(crate) mod tests { iter, sync::{atomic::AtomicU64, Arc, RwLock}, }, + tempfile::tempdir, trees::{tr, Tree}, }; @@ -8598,4 +8624,54 @@ pub(crate) mod tests { assert_eq!(reset_fork, Some(4)); assert_eq!(failures, vec![HeaviestForkFailures::LockedOut(4),]); } + + #[test] + fn test_tower_load_missing() { + let tower_file = tempdir().unwrap().into_path(); + let tower_storage = FileTowerStorage::new(tower_file); + let node_pubkey = Pubkey::new_unique(); + let vote_account = Pubkey::new_unique(); + let tree = tr(0) / (tr(1) / (tr(3) / (tr(4))) / (tr(2) / (tr(5) / (tr(6))))); + let generate_votes = |pubkeys: Vec| { + pubkeys + .into_iter() + .zip(iter::once(vec![0, 1, 2, 5, 6]).chain(iter::repeat(vec![0, 1, 3, 4]).take(2))) + .collect() + }; + let 
(vote_simulator, _blockstore) = + setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes))); + let bank_forks = vote_simulator.bank_forks; + + let tower = + ReplayStage::load_tower(&tower_storage, &node_pubkey, &vote_account, &bank_forks); + let expected_tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, VOTE_THRESHOLD_SIZE); + assert_eq!(tower.vote_state, expected_tower.vote_state); + assert_eq!(tower.node_pubkey, node_pubkey); + } + + #[test] + fn test_tower_load() { + let tower_file = tempdir().unwrap().into_path(); + let tower_storage = FileTowerStorage::new(tower_file); + let node_keypair = Keypair::new(); + let node_pubkey = node_keypair.pubkey(); + let vote_account = Pubkey::new_unique(); + let tree = tr(0) / (tr(1) / (tr(3) / (tr(4))) / (tr(2) / (tr(5) / (tr(6))))); + let generate_votes = |pubkeys: Vec| { + pubkeys + .into_iter() + .zip(iter::once(vec![0, 1, 2, 5, 6]).chain(iter::repeat(vec![0, 1, 3, 4]).take(2))) + .collect() + }; + let (vote_simulator, _blockstore) = + setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes))); + let bank_forks = vote_simulator.bank_forks; + let expected_tower = Tower::new_random(node_pubkey); + expected_tower.save(&tower_storage, &node_keypair).unwrap(); + + let tower = + ReplayStage::load_tower(&tower_storage, &node_pubkey, &vote_account, &bank_forks); + assert_eq!(tower.vote_state, expected_tower.vote_state); + assert_eq!(tower.node_pubkey, expected_tower.node_pubkey); + } } diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 8cfcd0ef19d9e8..a6e765472750c6 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -352,6 +352,24 @@ impl VoteState { } } + pub fn new_rand_for_tests(node_pubkey: Pubkey, root_slot: Slot) -> Self { + let votes = (1..32) + .map(|x| LandedVote { + latency: 0, + lockout: Lockout::new_with_confirmation_count( + u64::from(x).saturating_add(root_slot), + 32_u32.saturating_sub(x), + ), + }) + .collect(); + Self { + node_pubkey, + root_slot: Some(root_slot), + votes, + ..VoteState::default() + } + } + pub fn get_authorized_voter(&self, epoch: Epoch) -> Option { self.authorized_voters.get_authorized_voter(epoch) } From e656e46b2443abe903d6b71fe138599051d84d3d Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 20 Feb 2024 13:32:06 -0500 Subject: [PATCH 207/401] Makes help's default for `--accounts-index-path` consistent (#35255) --- ledger-tool/src/main.rs | 5 +++-- validator/src/cli.rs | 8 +++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index ef3f6515dfcd8f..73bf4127ebfabd 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -616,8 +616,9 @@ fn main() { .takes_value(true) .multiple(true) .help( - "Persistent accounts-index location. May be specified multiple times. [default: \ - [ledger]/accounts_index]", + "Persistent accounts-index location. \ + May be specified multiple times. \ + [default: /accounts_index]", ); let accounts_db_test_hash_calculation_arg = Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 958cdc4ec947de..6c16e09d1b8a02 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1293,9 +1293,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("PATH") .takes_value(true) .multiple(true) - .help("Persistent accounts-index location. \ - May be specified multiple times. 
\
-                       [default: [ledger]/accounts_index]"),
+                .help(
+                    "Persistent accounts-index location. \
+                     May be specified multiple times. \
+                     [default: <LEDGER>/accounts_index]",
+                ),
         )
         .arg(
             Arg::with_name("accounts_db_test_hash_calculation")

From ce72c22b3ad9120644878202ed3c9bf66f19a16a Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 20 Feb 2024 13:51:54 -0500
Subject: [PATCH 208/401] Replaces ReadAccountMapEntry in accounts index (#35239)

---
 accounts-db/src/accounts_index.rs | 84 +++++++++++++------------------
 1 file changed, 35 insertions(+), 49 deletions(-)

diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs
index 1be3ffd3a32bf5..17e0d527746960 100644
--- a/accounts-db/src/accounts_index.rs
+++ b/accounts-db/src/accounts_index.rs
@@ -2526,9 +2526,12 @@ pub mod tests {
         index.set_startup(Startup::Normal);

         for (i, key) in [key0, key1].iter().enumerate() {
-            let entry = index.get_account_read_entry(key).unwrap();
-            assert_eq!(entry.ref_count(), 1);
-            assert_eq!(entry.slot_list().to_vec(), vec![(slot0, account_infos[i]),]);
+            let entry = index.get_cloned(key).unwrap();
+            assert_eq!(entry.ref_count.load(Ordering::Relaxed), 1);
+            assert_eq!(
+                entry.slot_list.read().unwrap().as_slice(),
+                &[(slot0, account_infos[i])],
+            );
         }
     }

@@ -2586,10 +2589,10 @@ pub mod tests {

         // verify the added entry matches expected
         {
-            let entry = index.get_account_read_entry(&key).unwrap();
+            let entry = index.get_cloned(&key).unwrap();
+            let slot_list = entry.slot_list.read().unwrap();
             assert_eq!(entry.ref_count(), u64::from(!is_cached));
-            let expected = vec![(slot0, account_infos[0])];
-            assert_eq!(entry.slot_list().to_vec(), expected);
+            assert_eq!(slot_list.as_slice(), &[(slot0, account_infos[0])]);
             let new_entry: AccountMapEntry<_> = PreAllocatedAccountMapEntry::new(
                 slot0,
                 account_infos[0],
@@ -2598,8 +2601,8 @@ pub mod tests {
             )
             .into_account_map_entry(&index.storage.storage);
             assert_eq!(
-                entry.slot_list().to_vec(),
-                new_entry.slot_list.read().unwrap().to_vec(),
+                slot_list.as_slice(),
+                new_entry.slot_list.read().unwrap().as_slice(),
             );
         }

@@ -2636,35 +2639,22 @@ pub mod tests {
         assert!(gc.is_empty());
         index.populate_and_retrieve_duplicate_keys_from_startup(|_slot_keys| {});

-        for lock in &[false, true] {
-            let read_lock = if *lock {
-                Some(index.get_bin(&key))
-            } else {
-                None
-            };
-
-            let entry = if *lock {
-                index
-                    .get_account_read_entry_with_lock(&key, read_lock.as_ref().unwrap())
-                    .unwrap()
-            } else {
-                index.get_account_read_entry(&key).unwrap()
-            };
+        let entry = index.get_cloned(&key).unwrap();
+        let slot_list = entry.slot_list.read().unwrap();

-            assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 });
-            assert_eq!(
-                entry.slot_list().to_vec(),
-                vec![(slot0, account_infos[0]), (slot1, account_infos[1])]
-            );
+        assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 });
+        assert_eq!(
+            slot_list.as_slice(),
+            &[(slot0, account_infos[0]), (slot1, account_infos[1])],
+        );

-            let new_entry = PreAllocatedAccountMapEntry::new(
-                slot1,
-                account_infos[1],
-                &index.storage.storage,
-                false,
-            );
-            assert_eq!(entry.slot_list()[1], new_entry.into());
-        }
+        let new_entry = PreAllocatedAccountMapEntry::new(
+            slot1,
+            account_infos[1],
+            &index.storage.storage,
+            false,
+        );
+        assert_eq!(slot_list[1], new_entry.into());
     }

     #[test]
@@ -4090,25 +4080,21 @@ pub mod tests {
         assert!(!index.clean_rooted_entries(&key, &mut gc, Some(slot2)));
         index.upsert_simple_test(&key, slot2, value);

-        assert_eq!(
-            2,
-            index
-                .get_account_read_entry(&key)
-                .unwrap()
-                .slot_list()
-                .len()
-        );
-        assert_eq!(
-            &vec![(slot1, value), (slot2, value)],
-            index.get_account_read_entry(&key).unwrap().slot_list()
-        );
+        {
+            let account_map_entry = index.get_cloned(&key).unwrap();
+            let slot_list = account_map_entry.slot_list.read().unwrap();
+            assert_eq!(2, slot_list.len());
+            assert_eq!(&[(slot1, value), (slot2, value)], slot_list.as_slice());
+        }
         assert!(!index.clean_rooted_entries(&key, &mut gc, Some(slot2)));
         assert_eq!(
             2,
             index
-                .get_account_read_entry(&key)
+                .get_cloned(&key)
+                .unwrap()
+                .slot_list
+                .read()
                 .unwrap()
-                .slot_list()
                 .len()
         );
         assert!(gc.is_empty());

From 0b5cc03aa36300e5d02364745bb4e262cebdde3d Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 20 Feb 2024 13:52:11 -0500
Subject: [PATCH 209/401] Replaces ReadAccountMapEntry in ancient append vecs (#35238)

---
 accounts-db/src/ancient_append_vecs.rs | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 4566a8923924d2..1ebcc77763ae27 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -1506,11 +1506,9 @@ pub mod tests {
         if two_refs {
             original_results.iter().for_each(|results| {
                 results.stored_accounts.iter().for_each(|account| {
-                    let entry = db
-                        .accounts_index
-                        .get_account_read_entry(account.pubkey())
-                        .unwrap();
-                    entry.addref();
+                    db.accounts_index.get_and_then(account.pubkey(), |entry| {
+                        (false, entry.unwrap().addref())
+                    });
                })
            });
        }
@@ -1854,11 +1852,8 @@ pub mod tests {
         );
         original_results.iter().for_each(|results| {
             results.stored_accounts.iter().for_each(|account| {
-                let entry = db
-                    .accounts_index
-                    .get_account_read_entry(account.pubkey())
-                    .unwrap();
-                entry.addref();
+                db.accounts_index
+                    .get_and_then(account.pubkey(), |entry| (true, entry.unwrap().addref()));
            })
        });

From 915faaba1c3d9cb32f8ff0e00c1471463e8bd272 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 20 Feb 2024 13:52:29 -0500
Subject: [PATCH 210/401] Replaces ReadAccountMapEntry in snapshot minimizer (#35237)

---
 runtime/src/snapshot_minimizer.rs | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs
index 4e7d576f0b6c95..556a854da0c41b 100644
--- a/runtime/src/snapshot_minimizer.rs
+++ b/runtime/src/snapshot_minimizer.rs
@@ -256,15 +256,23 @@ impl<'a> SnapshotMinimizer<'a> {
     fn get_minimized_slot_set(&self) -> DashSet<Slot> {
         let minimized_slot_set = DashSet::new();
         self.minimized_account_set.par_iter().for_each(|pubkey| {
-            if let Some(read_entry) = self
-                .accounts_db()
+            self.accounts_db()
                 .accounts_index
-                .get_account_read_entry(&pubkey)
-            {
-                if let Some(max_slot) = read_entry.slot_list().iter().map(|(slot, _)| *slot).max() {
-                    minimized_slot_set.insert(max_slot);
-                }
-            }
+                .get_and_then(&pubkey, |entry| {
+                    if let Some(entry) = entry {
+                        let max_slot = entry
+                            .slot_list
+                            .read()
+                            .unwrap()
+                            .iter()
+                            .map(|(slot, _)| *slot)
+                            .max();
+                        if let Some(max_slot) = max_slot {
+                            minimized_slot_set.insert(max_slot);
+                        }
+                    }
+                    (false, ())
+                });
         });
         minimized_slot_set
     }
@@ -321,12 +329,7 @@ impl<'a> SnapshotMinimizer<'a> {
                 if self.minimized_account_set.contains(account.pubkey()) {
                     chunk_bytes += account.stored_size();
                     keep_accounts.push(account);
-                } else if self
-                    .accounts_db()
-                    .accounts_index
-                    .get_account_read_entry(account.pubkey())
-                    .is_some()
-                {
+                } else if self.accounts_db().accounts_index.contains(account.pubkey()) {
                     purge_pubkeys.push(account.pubkey());
                 }
             });

From d88b7d95b185359186993ffa8b74636ad56e9af2 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 20 Feb 2024 13:53:02 -0500
Subject: [PATCH 211/401] Replaces ReadAccountMapEntry in slots_by_pubkey() (#35241)

---
 runtime/src/bank/tests.rs | 42 ++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs
index f6db68d3c0c0c1..f4e48ef3dfd6ae 100644
--- a/runtime/src/bank/tests.rs
+++ b/runtime/src/bank/tests.rs
@@ -1631,19 +1631,25 @@ fn test_rent_eager_under_fixed_cycle_for_development() {
 }

 impl Bank {
-    fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec<Slot> {
-        let (locked_entry, _) = self
-            .rc
+    fn slots_by_pubkey(&self, pubkey: &Pubkey) -> Vec<Slot> {
+        self.rc
             .accounts
             .accounts_db
             .accounts_index
-            .get(pubkey, Some(ancestors), None)
-            .unwrap();
-        locked_entry
-            .slot_list()
-            .iter()
-            .map(|(slot, _)| *slot)
-            .collect::<Vec<Slot>>()
+            .get_and_then(pubkey, |entry| {
+                let slots = entry
+                    .map(|entry| {
+                        entry
+                            .slot_list
+                            .read()
+                            .unwrap()
+                            .iter()
+                            .map(|(slot, _)| *slot)
+                            .collect()
+                    })
+                    .unwrap_or_default();
+                (false, slots)
+            })
     }
 }

@@ -1693,7 +1699,6 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) {
     );

     let genesis_slot = 0;
-    let ancestors = vec![(some_slot, 0), (0, 1)].into_iter().collect();
     let previous_epoch = bank.epoch();
     bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), some_slot));

@@ -1706,16 +1711,13 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) {
         little_lamports
     );
     assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch(), 0);
+    assert_eq!(bank.slots_by_pubkey(&rent_due_pubkey), vec![genesis_slot]);
     assert_eq!(
-        bank.slots_by_pubkey(&rent_due_pubkey, &ancestors),
-        vec![genesis_slot]
-    );
-    assert_eq!(
-        bank.slots_by_pubkey(&rent_exempt_pubkey, &ancestors),
+        bank.slots_by_pubkey(&rent_exempt_pubkey),
         vec![genesis_slot]
     );
     assert_eq!(
-        bank.slots_by_pubkey(&zero_lamport_pubkey, &ancestors),
+        bank.slots_by_pubkey(&zero_lamport_pubkey),
         vec![genesis_slot]
     );

@@ -1740,15 +1742,15 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) {
         RENT_EXEMPT_RENT_EPOCH
     );
     assert_eq!(
-        bank.slots_by_pubkey(&rent_due_pubkey, &ancestors),
+        bank.slots_by_pubkey(&rent_due_pubkey),
         vec![genesis_slot, some_slot]
     );
     assert_eq!(
-        bank.slots_by_pubkey(&rent_exempt_pubkey, &ancestors),
+        bank.slots_by_pubkey(&rent_exempt_pubkey),
         vec![genesis_slot, some_slot]
     );
     assert_eq!(
-        bank.slots_by_pubkey(&zero_lamport_pubkey, &ancestors),
+        bank.slots_by_pubkey(&zero_lamport_pubkey),
         vec![genesis_slot]
     );
 }

From a1c39a3c2229257c6c51ccbba94f980578a3c802 Mon Sep 17 00:00:00 2001
From: steviez
Date: Tue, 20 Feb 2024 13:12:41 -0600
Subject: [PATCH 212/401] List the default value for `--accounts` in CLI help (#35254)

---
 ledger-tool/src/main.rs | 6 +++++-
 validator/src/cli.rs    | 5 ++++-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 73bf4127ebfabd..d6218fad6915f0 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -604,7 +604,11 @@ fn main() {
         .long("accounts")
         .value_name("PATHS")
         .takes_value(true)
-        .help("Comma separated persistent accounts location");
+        .help(
+            "Persistent accounts location. \
+             May be specified multiple times. \
+             [default: <LEDGER>/accounts]",
+        );
     let accounts_hash_cache_path_arg = Arg::with_name("accounts_hash_cache_path")
         .long("accounts-hash-cache-path")
         .value_name("PATH")
diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index 6c16e09d1b8a02..b3e1a885b3d52b 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -276,7 +276,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 .value_name("PATHS")
                 .takes_value(true)
                 .multiple(true)
-                .help("Comma separated persistent accounts location"),
+                .help("Comma separated persistent accounts location. \
+                       May be specified multiple times. \
+                       [default: <LEDGER>/accounts]"
+                ),
         )
         .arg(
             Arg::with_name("account_shrink_path")

From 0acee67891513dabb5c3e18d5f0bb08b1f210948 Mon Sep 17 00:00:00 2001
From: Dmitri Makarov
Date: Tue, 20 Feb 2024 15:54:56 -0500
Subject: [PATCH 213/401] SVM: move transaction_results from accounts-db to SVM (#35183)

SVM: Remove accounts-db deps in accounts_loader tests
---
 Cargo.lock                            |  5 ++--
 accounts-db/Cargo.toml                |  1 +
 accounts-db/src/accounts.rs           | 26 +++++++------------
 accounts-db/src/lib.rs                |  1 -
 banks-server/Cargo.toml               |  2 +-
 banks-server/src/banks_server.rs      |  2 +-
 core/src/banking_stage/committer.rs   |  8 +++---
 ledger/src/blockstore_processor.rs    | 10 ++++---
 programs/sbf/Cargo.lock               |  6 +++--
 programs/sbf/Cargo.toml               |  2 ++
 programs/sbf/tests/programs.rs        |  8 +++---
 rpc/Cargo.toml                        |  1 +
 rpc/src/transaction_status_service.rs |  2 +-
 runtime/src/bank.rs                   | 10 +++----
 runtime/src/bank/tests.rs             |  3 +--
 runtime/src/bank_utils.rs             |  2 +-
 svm/Cargo.toml                        |  2 --
 svm/src/account_loader.rs             | 15 +++++++++--
 svm/src/lib.rs                        |  1 +
 svm/src/transaction_processor.rs      | 11 ++++----
 .../src/transaction_results.rs        |  0
 21 files changed, 63 insertions(+), 55 deletions(-)
 rename {accounts-db => svm}/src/transaction_results.rs (100%)

diff --git a/Cargo.lock b/Cargo.lock
index a90b6f90e52274..186044e9034d88 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5250,6 +5250,7 @@ dependencies = [
  "solana-rayon-threadlimit",
  "solana-sdk",
  "solana-stake-program",
+ "solana-svm",
  "solana-system-program",
  "solana-vote-program",
  "static_assertions",
@@ -5348,12 +5349,12 @@ dependencies = [
  "bincode",
  "crossbeam-channel",
  "futures 0.3.30",
- "solana-accounts-db",
  "solana-banks-interface",
  "solana-client",
  "solana-runtime",
  "solana-sdk",
  "solana-send-transaction-service",
+ "solana-svm",
  "tarpc",
  "tokio",
  "tokio-serde",
@@ -6803,6 +6804,7 @@ dependencies = [
  "solana-stake-program",
  "solana-storage-bigtable",
  "solana-streamer",
+ "solana-svm",
  "solana-tpu-client",
  "solana-transaction-status",
  "solana-version",
@@ -7231,7 +7233,6 @@ dependencies = [
  "log",
  "percentage",
  "rustc_version 0.4.0",
- "solana-accounts-db",
 "solana-bpf-loader-program",
 "solana-frozen-abi",
 "solana-frozen-abi-macro",
diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml
index 80559f5fb27821..22cad43217c33b 100644
--- a/accounts-db/Cargo.toml
+++ b/accounts-db/Cargo.toml
@@ -55,6 +55,7 @@ solana-program-runtime = { workspace = true }
 solana-rayon-threadlimit = { workspace = true }
 solana-sdk = { workspace = true }
 solana-stake-program = { workspace = true }
+solana-svm = { workspace = true }
 solana-system-program = { workspace = true }
 solana-vote-program = { workspace = true }
 static_assertions = { workspace = true }
diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index 9b65fc803d937e..371db9eb08c095 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -7,7 +7,6 @@ use {
         accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport},
         ancestors::Ancestors,
         storable_accounts::StorableAccounts,
-        transaction_results::TransactionExecutionResult,
     },
     dashmap::DashMap,
     log::*,
@@ -23,10 +22,12 @@ use {
         },
         nonce_info::{NonceFull, NonceInfo},
         pubkey::Pubkey,
-        rent_debits::RentDebits,
         slot_hashes::SlotHashes,
         transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError},
-        transaction_context::{IndexOfAccount, TransactionAccount},
+        transaction_context::TransactionAccount,
+    },
+    solana_svm::{
+        account_loader::TransactionLoadResult, transaction_results::TransactionExecutionResult,
     },
     std::{
         cmp::Reverse,
@@ -98,19 +99,6 @@ pub struct Accounts {
     pub(crate) account_locks: Mutex<AccountLocks>,
 }

-// for the load instructions
-pub type TransactionRent = u64;
-pub type TransactionProgramIndices = Vec<Vec<IndexOfAccount>>;
-#[derive(PartialEq, Eq, Debug, Clone)]
-pub struct LoadedTransaction {
-    pub accounts: Vec<TransactionAccount>,
-    pub program_indices: TransactionProgramIndices,
-    pub rent: TransactionRent,
-    pub rent_debits: RentDebits,
-}
-
-pub type TransactionLoadResult = (Result<LoadedTransaction>, Option<NonceFull>);
-
 pub enum AccountAddressFilter {
     Exclude, // exclude all addresses matching the filter
     Include, // only include addresses matching the filter
@@ -804,7 +792,6 @@ fn prepare_if_nonce_account(
 mod tests {
     use {
         super::*,
-        crate::transaction_results::{DurableNonceFee, TransactionExecutionDetails},
         assert_matches::assert_matches,
         solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch,
         solana_sdk::{
@@ -814,10 +801,15 @@ mod tests {
             instruction::{CompiledInstruction, InstructionError},
             message::{Message, MessageHeader},
             native_loader, nonce, nonce_account,
+            rent_debits::RentDebits,
            signature::{keypair_from_seed, signers::Signers, Keypair, Signer},
             system_instruction, system_program,
             transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS},
         },
+        solana_svm::{
+            account_loader::LoadedTransaction,
+            transaction_results::{DurableNonceFee, TransactionExecutionDetails},
+        },
         std::{
             borrow::Cow,
             sync::atomic::{AtomicBool, AtomicU64, Ordering},
diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs
index 3016c6252ac612..792de99c49b8d9 100644
--- a/accounts-db/src/lib.rs
+++ b/accounts-db/src/lib.rs
@@ -40,7 +40,6 @@ pub mod sorted_storages;
 pub mod stake_rewards;
 pub mod storable_accounts;
 pub mod tiered_storage;
-pub mod transaction_results;
 pub mod utils;
 mod verify_accounts_hash_in_background;
 pub mod waitable_condvar;
diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml
index 1404d88b5cde4e..6cf5f77f92548b 100644
--- a/banks-server/Cargo.toml
+++ b/banks-server/Cargo.toml
@@ -13,12 +13,12 @@ edition = { workspace = true }
 bincode = { workspace = true }
 crossbeam-channel = { workspace = true }
 futures = { workspace = true }
-solana-accounts-db = { workspace = true }
 solana-banks-interface = { workspace = true }
 solana-client = { workspace = true }
 solana-runtime = { workspace = true }
 solana-sdk = { workspace = true }
 solana-send-transaction-service = { workspace = true }
+solana-svm = { workspace = true }
 tarpc = { workspace = true, features = ["full"] }
 tokio = { workspace = true, features = ["full"] }
 tokio-serde = { workspace = true, features = ["bincode"] }
diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs
index 1fcdce1ad436c5..22f63e9f60a0d5 100644
--- a/banks-server/src/banks_server.rs
+++ b/banks-server/src/banks_server.rs
@@ -2,7 +2,6 @@ use {
     bincode::{deserialize, serialize},
     crossbeam_channel::{unbounded, Receiver, Sender},
     futures::{future, prelude::stream::StreamExt},
-
solana_accounts_db::transaction_results::TransactionExecutionResult, solana_banks_interface::{ Banks, BanksRequest, BanksResponse, BanksTransactionResultWithMetadata, BanksTransactionResultWithSimulation, TransactionConfirmationStatus, TransactionMetadata, @@ -30,6 +29,7 @@ use { send_transaction_service::{SendTransactionService, TransactionInfo}, tpu_info::NullTpuInfo, }, + solana_svm::transaction_results::TransactionExecutionResult, std::{ convert::TryFrom, io, diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index ab8f3a9ed57b5b..5a3b15f8c9a55a 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -1,10 +1,6 @@ use { super::leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, itertools::Itertools, - solana_accounts_db::{ - accounts::TransactionLoadResult, - transaction_results::{TransactionExecutionResult, TransactionResults}, - }, solana_ledger::{ blockstore_processor::TransactionStatusSender, token_balances::collect_token_balances, }, @@ -16,6 +12,10 @@ use { transaction_batch::TransactionBatch, }, solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign}, + solana_svm::{ + account_loader::TransactionLoadResult, + transaction_results::{TransactionExecutionResult, TransactionResults}, + }, solana_transaction_status::{ token_balances::TransactionTokenBalancesSet, TransactionTokenBalance, }, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index b450caaa54577b..63edb23e01cc18 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -20,9 +20,6 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, epoch_accounts_hash::EpochAccountsHash, - transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, - }, }, solana_cost_model::cost_model::CostModel, solana_entry::entry::{ @@ -57,7 +54,12 @@ use { VersionedTransaction, }, }, - solana_svm::runtime_config::RuntimeConfig, + solana_svm::{ + runtime_config::RuntimeConfig, + transaction_results::{ + TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, + }, + }, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::{vote_account::VoteAccountsHashMap, vote_sender_types::ReplayVoteSender}, std::{ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d7c1f064cce047..7db23180c782b2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4621,6 +4621,7 @@ dependencies = [ "solana-rayon-threadlimit", "solana-sdk", "solana-stake-program", + "solana-svm", "solana-system-program", "solana-vote-program", "static_assertions", @@ -4681,12 +4682,12 @@ dependencies = [ "bincode", "crossbeam-channel", "futures 0.3.30", - "solana-accounts-db", "solana-banks-interface", "solana-client", "solana-runtime", "solana-sdk", "solana-send-transaction-service", + "solana-svm", "tarpc", "tokio", "tokio-serde", @@ -5559,6 +5560,7 @@ dependencies = [ "solana-stake-program", "solana-storage-bigtable", "solana-streamer", + "solana-svm", "solana-tpu-client", "solana-transaction-status", "solana-version", @@ -5727,6 +5729,7 @@ dependencies = [ "solana-sbf-rust-realloc", "solana-sbf-rust-realloc-invoke", "solana-sdk", + "solana-svm", "solana-transaction-status", "solana_rbpf", "walkdir", @@ -6305,7 +6308,6 @@ dependencies = [ "log", "percentage", "rustc_version", - "solana-accounts-db", "solana-bpf-loader-program", "solana-frozen-abi", 
"solana-frozen-abi-macro", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 37b1877594c34c..8a99a0f005471a 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -48,6 +48,7 @@ solana-sdk = { path = "../../sdk", version = "=1.19.0" } solana-transaction-status = { path = "../../transaction-status", version = "=1.19.0" } solana-validator = { path = "../../validator", version = "=1.19.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.19.0" } +solana-svm = { path = "../../svm", version = "=1.19.0" } solana_rbpf = "=0.8.0" static_assertions = "1.1.0" thiserror = "1.0" @@ -96,6 +97,7 @@ solana-sbf-rust-invoke = { workspace = true } solana-sbf-rust-realloc = { workspace = true, features = ["default"] } solana-sbf-rust-realloc-invoke = { workspace = true } solana-sdk = { workspace = true } +solana-svm = { workspace = true } solana-transaction-status = { workspace = true } solana_rbpf = { workspace = true } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index d67b57641446b5..943713d7ad9bbd 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -15,10 +15,6 @@ use { solana_account_decoder::parse_bpf_loader::{ parse_bpf_upgradeable_loader, BpfUpgradeableLoaderAccountType, }, - solana_accounts_db::transaction_results::{ - DurableNonceFee, InnerInstruction, TransactionExecutionDetails, TransactionExecutionResult, - TransactionResults, - }, solana_ledger::token_balances::collect_token_balances, solana_program_runtime::{ compute_budget::ComputeBudget, @@ -52,6 +48,10 @@ use { sysvar::{self, clock}, transaction::VersionedTransaction, }, + solana_svm::transaction_results::{ + DurableNonceFee, InnerInstruction, TransactionExecutionDetails, TransactionExecutionResult, + TransactionResults, + }, solana_transaction_status::{ map_inner_instructions, ConfirmedTransactionWithStatusMeta, TransactionStatusMeta, TransactionWithStatusMeta, VersionedTransactionWithStatusMeta, diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 98d9ee572f6824..d4f2648b6b1078 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -48,6 +48,7 @@ solana-send-transaction-service = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 82c7d48f01f21e..8730fb2ed0f3d8 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -2,11 +2,11 @@ use { crate::transaction_notifier_interface::TransactionNotifierArc, crossbeam_channel::{Receiver, RecvTimeoutError}, itertools::izip, - solana_accounts_db::transaction_results::{DurableNonceFee, TransactionExecutionDetails}, solana_ledger::{ blockstore::Blockstore, blockstore_processor::{TransactionStatusBatch, TransactionStatusMessage}, }, + solana_svm::transaction_results::{DurableNonceFee, TransactionExecutionDetails}, solana_transaction_status::{ extract_and_fmt_memos, map_inner_instructions, Reward, TransactionStatusMeta, }, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 972d89551909c2..7e051019c99871 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -71,7 +71,7 @@ use { }, serde::Serialize, solana_accounts_db::{ - accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot, 
TransactionLoadResult}, + accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot}, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, @@ -89,9 +89,6 @@ use { sorted_storages::SortedStorages, stake_rewards::StakeReward, storable_accounts::StorableAccounts, - transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, - }, }, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_cost_model::cost_tracker::CostTracker, @@ -160,13 +157,16 @@ use { self, InflationPointCalculationEvent, PointValue, StakeStateV2, }, solana_svm::{ - account_loader::TransactionCheckResult, + account_loader::{TransactionCheckResult, TransactionLoadResult}, account_overrides::AccountOverrides, runtime_config::RuntimeConfig, transaction_error_metrics::TransactionErrorMetrics, transaction_processor::{ TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, }, + transaction_results::{ + TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, + }, }, solana_system_program::{get_system_account_kind, SystemAccountKind}, solana_vote::vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index f4e48ef3dfd6ae..a01e9c19de6a39 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -35,7 +35,6 @@ use { ancestors::Ancestors, inline_spl_token, partitioned_rewards::TestPartitionedEpochRewards, - transaction_results::DurableNonceFee, }, solana_logger, solana_program_runtime::{ @@ -110,7 +109,7 @@ use { solana_stake_program::stake_state::{self, StakeStateV2}, solana_svm::{ account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, - transaction_error_metrics::TransactionErrorMetrics, + transaction_error_metrics::TransactionErrorMetrics, transaction_results::DurableNonceFee, }, solana_vote_program::{ vote_instruction, diff --git a/runtime/src/bank_utils.rs b/runtime/src/bank_utils.rs index d8d6144d89d1d7..10835afb82dc49 100644 --- a/runtime/src/bank_utils.rs +++ b/runtime/src/bank_utils.rs @@ -7,8 +7,8 @@ use { solana_sdk::{pubkey::Pubkey, signature::Signer}, }; use { - solana_accounts_db::transaction_results::TransactionResults, solana_sdk::transaction::SanitizedTransaction, + solana_svm::transaction_results::TransactionResults, solana_vote::{vote_parser, vote_sender_types::ReplayVoteSender}, }; diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 4fdf7d9cb1a0b4..ac672613c9c4fc 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -13,7 +13,6 @@ edition = { workspace = true } itertools = { workspace = true } log = { workspace = true } percentage = { workspace = true } -solana-accounts-db = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } @@ -29,7 +28,6 @@ crate-type = ["lib"] name = "solana_svm" [dev-dependencies] -solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index f83652eb7ad119..854d59bac095cb 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -6,7 +6,6 @@ use { }, itertools::Itertools, log::warn, - solana_accounts_db::accounts::{LoadedTransaction, 
TransactionLoadResult, TransactionRent},
     solana_program_runtime::{
         compute_budget_processor::process_compute_budget_instructions,
         loaded_programs::LoadedProgramsForTxBatch,
@@ -29,12 +28,24 @@ use {
         saturating_add_assign,
         sysvar::{self, instructions::construct_instructions_data},
         transaction::{self, Result, SanitizedTransaction, TransactionError},
-        transaction_context::IndexOfAccount,
+        transaction_context::{IndexOfAccount, TransactionAccount},
     },
     solana_system_program::{get_system_account_kind, SystemAccountKind},
     std::{collections::HashMap, num::NonZeroUsize},
 };

+// for the load instructions
+pub type TransactionRent = u64;
+pub type TransactionProgramIndices = Vec<Vec<IndexOfAccount>>;
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub struct LoadedTransaction {
+    pub accounts: Vec<TransactionAccount>,
+    pub program_indices: TransactionProgramIndices,
+    pub rent: TransactionRent,
+    pub rent_debits: RentDebits,
+}
+
+pub type TransactionLoadResult = (Result<LoadedTransaction>, Option<NonceFull>);
 pub type TransactionCheckResult = (transaction::Result<()>, Option<NoncePartial>, Option<u64>);

 pub fn load_accounts(
diff --git a/svm/src/lib.rs b/svm/src/lib.rs
index ff28128edca36d..5505e34bea9d61 100644
--- a/svm/src/lib.rs
+++ b/svm/src/lib.rs
@@ -8,6 +8,7 @@ pub mod runtime_config;
 pub mod transaction_account_state_info;
 pub mod transaction_error_metrics;
 pub mod transaction_processor;
+pub mod transaction_results;

 #[macro_use]
 extern crate solana_metrics;
diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs
index 71fc4e8e8a46b2..b58d178df4b963 100644
--- a/svm/src/transaction_processor.rs
+++ b/svm/src/transaction_processor.rs
@@ -1,19 +1,18 @@
 use {
     crate::{
-        account_loader::{load_accounts, TransactionCheckResult},
+        account_loader::{
+            load_accounts, LoadedTransaction, TransactionCheckResult, TransactionLoadResult,
+        },
         account_overrides::AccountOverrides,
         runtime_config::RuntimeConfig,
         transaction_account_state_info::TransactionAccountStateInfo,
         transaction_error_metrics::TransactionErrorMetrics,
-    },
-    log::debug,
-    percentage::Percentage,
-    solana_accounts_db::{
-        accounts::{LoadedTransaction, TransactionLoadResult},
         transaction_results::{
             DurableNonceFee, TransactionExecutionDetails, TransactionExecutionResult,
         },
     },
+    log::debug,
+    percentage::Percentage,
     solana_measure::measure::Measure,
     solana_program_runtime::{
         compute_budget::ComputeBudget,
diff --git a/accounts-db/src/transaction_results.rs b/svm/src/transaction_results.rs
similarity index 100%
rename from accounts-db/src/transaction_results.rs
rename to svm/src/transaction_results.rs

From 40224345765cb6a379a4182ad395582284d6bdd9 Mon Sep 17 00:00:00 2001
From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com>
Date: Tue, 20 Feb 2024 18:21:16 -0300
Subject: [PATCH 214/401] Update build downstream projects script (#35262)

---
 scripts/build-downstream-anchor-projects.sh | 45 ++++++++++++---------
 scripts/patch-crates.sh                     |  8 ++++
 2 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh
index de2860573ee145..cdfa0bae10addb 100755
--- a/scripts/build-downstream-anchor-projects.sh
+++ b/scripts/build-downstream-anchor-projects.sh
@@ -68,25 +68,27 @@ anchor() {
   cd "$solana_dir"/target/downstream-projects-anchor
 }

+openbook() {
+  # Openbook-v2 is still using cargo 1.70.0, which is not compatible with the latest main
+  rm -rf openbook-v2
+  git clone https://github.com/openbook-dex/openbook-v2.git
+  cd openbook-v2
+  update_solana_dependencies .
"$solana_ver" + patch_crates_io_solana Cargo.toml "$solana_dir" + $cargo_build_sbf --features enable-gpl + cd programs/openbook-v2 + $cargo_test_sbf --features enable-gpl +} + mango() { ( set -x - rm -rf mango-v3 - git clone https://github.com/blockworks-foundation/mango-v3 - # copy toolchain file to use solana's rust version - cp "$solana_dir"/rust-toolchain.toml mango-v3/ - cd mango-v3 - + rm -rf mango-v4 + git clone https://github.com/blockworks-foundation/mango-v4.git + cd mango-v4 update_solana_dependencies . "$solana_ver" - update_anchor_dependencies . "$anchor_ver" - patch_crates_io_solana Cargo.toml "$solana_dir" - patch_crates_io_anchor Cargo.toml "$anchor_dir" - - cd program - $cargo build - $cargo test - $cargo_build_sbf - $cargo_test_sbf + patch_crates_io_solana_no_header Cargo.toml "$solana_dir" + $cargo_test_sbf --features enable-gpl ) } @@ -97,18 +99,21 @@ metaplex() { git clone https://github.com/metaplex-foundation/mpl-token-metadata # copy toolchain file to use solana's rust version cp "$solana_dir"/rust-toolchain.toml mpl-token-metadata/ - cd mpl-token-metadata/programs/token-metadata/program + cd mpl-token-metadata + ./configs/program-scripts/dump.sh ./programs/bin + ROOT_DIR=$(pwd) + cd programs/token-metadata update_solana_dependencies . "$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - $cargo build - $cargo test - $cargo_build_sbf - $cargo_test_sbf + OUT_DIR="$ROOT_DIR"/programs/bin + export SBF_OUT_DIR="$OUT_DIR" + $cargo_test_sbf --sbf-out-dir "${OUT_DIR}" ) } _ anchor #_ metaplex #_ mango +#_ openbook diff --git a/scripts/patch-crates.sh b/scripts/patch-crates.sh index 91a3010c8a0bd7..181c1e5a2375b6 100644 --- a/scripts/patch-crates.sh +++ b/scripts/patch-crates.sh @@ -31,6 +31,14 @@ patch_crates_io_solana() { declare solana_dir="$2" cat >> "$Cargo_toml" <> "$Cargo_toml" < Date: Tue, 20 Feb 2024 16:45:38 -0600 Subject: [PATCH 215/401] update comments for is_builtin (#35258) Co-authored-by: HaoranYi --- sdk/src/account.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 227a56668038e9..96cdd5b90ce99b 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -845,9 +845,12 @@ pub fn is_executable(account: &impl ReadableAccount, feature_set: &FeatureSet) - } } -/// Return true if the account program is a builtin program. Note that for -/// builtin program, even when its account data is empty, it is still be -/// executable, such as vote program etc. +/// Return true if the account program is a builtin program. +/// +/// This function also ensures that all valid builtin programs have non-empty +/// program data. Typically, the program data contains only the "name" for the +/// program. If, for some reason, the program account's data is empty, we should +/// exclude such a program from `builtins`. 
pub fn is_builtin(account: &impl ReadableAccount) -> bool {
     native_loader::check_id(account.owner()) && !account.data().is_empty()
 }

From f122e99c4e737b7cc1c2a697cf81e56eb39ccec7 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 20 Feb 2024 17:55:45 -0500
Subject: [PATCH 216/401] rpc: Faster sorting for get_token_largest_accounts() (#35263)

---
 rpc/src/rpc.rs | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 5cc5b82344e0d1..b66668317756a5 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -1875,16 +1875,26 @@ impl JsonRpcRequestProcessor {
                 }
             })
             .collect();
-        token_balances.sort_by(|a, b| {
+
+        let sort_largest = |a: &RpcTokenAccountBalance, b: &RpcTokenAccountBalance| {
             a.amount
                 .amount
                 .parse::<u64>()
                 .unwrap()
                 .cmp(&b.amount.amount.parse::<u64>().unwrap())
                 .reverse()
-        });
-        token_balances.truncate(NUM_LARGEST_ACCOUNTS);
-        Ok(new_response(&bank, token_balances))
+        };
+
+        let largest_token_balances = if token_balances.len() > NUM_LARGEST_ACCOUNTS {
+            token_balances
+                .select_nth_unstable_by(NUM_LARGEST_ACCOUNTS, sort_largest)
+                .0
+        } else {
+            token_balances.as_mut_slice()
+        };
+        largest_token_balances.sort_unstable_by(sort_largest);
+
+        Ok(new_response(&bank, largest_token_balances.to_vec()))
     }

     pub fn get_token_accounts_by_owner(

From b0134ab04da4a8774e4f6d681d5ecbb5dba9a455 Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Tue, 20 Feb 2024 16:13:57 -0800
Subject: [PATCH 217/401] validator: include waited_for_supermajority in startup metric (#35137)

---
 core/src/validator.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/core/src/validator.rs b/core/src/validator.rs
index f1432d67f397dc..b71c11cd967d34 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -1366,6 +1366,8 @@ impl Validator {
             ("id", id.to_string(), String),
             ("version", solana_version::version!(), String),
             ("cluster_type", genesis_config.cluster_type as u32, i64),
+            ("waited_for_supermajority", waited_for_supermajority, bool),
+            ("expected_shred_version", config.expected_shred_version, Option<i64>),
         );

         *start_progress.write().unwrap() = ValidatorStartProgress::Running;

From 6d154871830df1c87e1cfcac2682251e644d299a Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 20 Feb 2024 19:29:14 -0500
Subject: [PATCH 218/401] rpc: Parse largest token accounts after sorting & truncating (#35264)

---
 rpc/src/rpc.rs | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index b66668317756a5..82eda9489ef247 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -1861,29 +1861,18 @@ impl JsonRpcRequestProcessor {
                 "Invalid param: not a Token mint".to_string(),
             ));
         }
-        let mut token_balances: Vec<RpcTokenAccountBalance> = self
+        let mut token_balances: Vec<_> = self
             .get_filtered_spl_token_accounts_by_mint(&bank, &mint_owner, mint, vec![])?
             .into_iter()
             .map(|(address, account)| {
                 let amount = StateWithExtensions::<Account>::unpack(account.data())
                     .map(|account| account.base.amount)
                     .unwrap_or(0);
-                let amount = token_amount_to_ui_amount(amount, decimals);
-                RpcTokenAccountBalance {
-                    address: address.to_string(),
-                    amount,
-                }
+                (address, amount)
             })
             .collect();

-        let sort_largest = |a: &RpcTokenAccountBalance, b: &RpcTokenAccountBalance| {
-            a.amount
-                .amount
-                .parse::<u64>()
-                .unwrap()
-                .cmp(&b.amount.amount.parse::<u64>().unwrap())
-                .reverse()
-        };
+        let sort_largest = |a: &(_, u64), b: &(_, u64)| b.1.cmp(&a.1);

         let largest_token_balances = if token_balances.len() > NUM_LARGEST_ACCOUNTS {
             token_balances
@@ -1894,7 +1883,14 @@ impl JsonRpcRequestProcessor {
         };
         largest_token_balances.sort_unstable_by(sort_largest);

-        Ok(new_response(&bank, largest_token_balances.to_vec()))
+        let largest_token_balances = largest_token_balances
+            .iter()
+            .map(|(address, amount)| RpcTokenAccountBalance {
+                address: address.to_string(),
+                amount: token_amount_to_ui_amount(*amount, decimals),
+            })
+            .collect();
+        Ok(new_response(&bank, largest_token_balances))
     }

     pub fn get_token_accounts_by_owner(

From cd4cf814fc2ffb84d6165231d2578cd7c6a25dcb Mon Sep 17 00:00:00 2001
From: Andrew Fitzgerald
Date: Tue, 20 Feb 2024 19:39:00 -0800
Subject: [PATCH 219/401] Scheduler: Separate scheduler metrics module (#35216)

---
 .../transaction_scheduler/mod.rs              |   1 +
 .../scheduler_controller.rs                   | 230 +----------------
 .../scheduler_metrics.rs                      | 224 +++++++++++++++++
 3 files changed, 229 insertions(+), 226 deletions(-)
 create mode 100644 core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs

diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs
index 65ece5fee6a8a1..5a3ab0c06ded5d 100644
--- a/core/src/banking_stage/transaction_scheduler/mod.rs
+++ b/core/src/banking_stage/transaction_scheduler/mod.rs
@@ -4,6 +4,7 @@ mod in_flight_tracker;
 pub(crate) mod prio_graph_scheduler;
 pub(crate) mod scheduler_controller;
 pub(crate) mod scheduler_error;
+mod scheduler_metrics;
 mod thread_aware_account_locks;
 mod transaction_id_generator;
 mod transaction_priority_id;
diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs
index a5c0fa134f5369..7d9a70931b4410 100644
--- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs
+++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs
@@ -3,7 +3,9 @@

 use {
     super::{
-        prio_graph_scheduler::PrioGraphScheduler, scheduler_error::SchedulerError,
+        prio_graph_scheduler::PrioGraphScheduler,
+        scheduler_error::SchedulerError,
+        scheduler_metrics::{SchedulerCountMetrics, SchedulerTimingMetrics},
         transaction_id_generator::TransactionIdGenerator,
         transaction_state::SanitizedTransactionTTL,
         transaction_state_container::TransactionStateContainer,
@@ -17,7 +19,6 @@ use {
         TOTAL_BUFFERED_PACKETS,
     },
     crossbeam_channel::RecvTimeoutError,
-    itertools::MinMaxResult,
    solana_cost_model::cost_model::CostModel,
     solana_measure::measure_us,
     solana_program_runtime::compute_budget_processor::process_compute_budget_instructions,
@@ -25,7 +26,7 @@ use {
     solana_sdk::{
         clock::MAX_PROCESSING_AGE,
         feature_set::include_loaded_accounts_data_size_in_fee_calculation, fee::FeeBudgetLimits,
-        saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction,
+        saturating_add_assign, transaction::SanitizedTransaction,
     },
solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ @@ -437,229 +438,6 @@ impl SchedulerController { } } -#[derive(Default)] -struct SchedulerCountMetrics { - interval: AtomicInterval, - - /// Number of packets received. - num_received: usize, - /// Number of packets buffered. - num_buffered: usize, - - /// Number of transactions scheduled. - num_scheduled: usize, - /// Number of transactions that were unschedulable. - num_unschedulable: usize, - /// Number of transactions that were filtered out during scheduling. - num_schedule_filtered_out: usize, - /// Number of completed transactions received from workers. - num_finished: usize, - /// Number of transactions that were retryable. - num_retryable: usize, - - /// Number of transactions that were immediately dropped on receive. - num_dropped_on_receive: usize, - /// Number of transactions that were dropped due to sanitization failure. - num_dropped_on_sanitization: usize, - /// Number of transactions that were dropped due to failed lock validation. - num_dropped_on_validate_locks: usize, - /// Number of transactions that were dropped due to failed transaction - /// checks during receive. - num_dropped_on_receive_transaction_checks: usize, - /// Number of transactions that were dropped due to clearing. - num_dropped_on_clear: usize, - /// Number of transactions that were dropped due to age and status checks. - num_dropped_on_age_and_status: usize, - /// Number of transactions that were dropped due to exceeded capacity. - num_dropped_on_capacity: usize, - /// Min prioritization fees in the transaction container - min_prioritization_fees: u64, - /// Max prioritization fees in the transaction container - max_prioritization_fees: u64, -} - -impl SchedulerCountMetrics { - fn maybe_report_and_reset(&mut self, should_report: bool) { - const REPORT_INTERVAL_MS: u64 = 1000; - if self.interval.should_update(REPORT_INTERVAL_MS) { - if should_report { - self.report(); - } - self.reset(); - } - } - - fn report(&self) { - datapoint_info!( - "banking_stage_scheduler_counts", - ("num_received", self.num_received, i64), - ("num_buffered", self.num_buffered, i64), - ("num_scheduled", self.num_scheduled, i64), - ("num_unschedulable", self.num_unschedulable, i64), - ( - "num_schedule_filtered_out", - self.num_schedule_filtered_out, - i64 - ), - ("num_finished", self.num_finished, i64), - ("num_retryable", self.num_retryable, i64), - ("num_dropped_on_receive", self.num_dropped_on_receive, i64), - ( - "num_dropped_on_sanitization", - self.num_dropped_on_sanitization, - i64 - ), - ( - "num_dropped_on_validate_locks", - self.num_dropped_on_validate_locks, - i64 - ), - ( - "num_dropped_on_receive_transaction_checks", - self.num_dropped_on_receive_transaction_checks, - i64 - ), - ("num_dropped_on_clear", self.num_dropped_on_clear, i64), - ( - "num_dropped_on_age_and_status", - self.num_dropped_on_age_and_status, - i64 - ), - ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64), - ("min_priority", self.get_min_priority(), i64), - ("max_priority", self.get_max_priority(), i64) - ); - } - - fn has_data(&self) -> bool { - self.num_received != 0 - || self.num_buffered != 0 - || self.num_scheduled != 0 - || self.num_unschedulable != 0 - || self.num_schedule_filtered_out != 0 - || self.num_finished != 0 - || self.num_retryable != 0 - || self.num_dropped_on_receive != 0 - || self.num_dropped_on_sanitization != 0 - || self.num_dropped_on_validate_locks != 0 - || self.num_dropped_on_receive_transaction_checks != 0 - || self.num_dropped_on_clear != 
0
-            || self.num_dropped_on_age_and_status != 0
-            || self.num_dropped_on_capacity != 0
-    }
-
-    fn reset(&mut self) {
-        self.num_received = 0;
-        self.num_buffered = 0;
-        self.num_scheduled = 0;
-        self.num_unschedulable = 0;
-        self.num_schedule_filtered_out = 0;
-        self.num_finished = 0;
-        self.num_retryable = 0;
-        self.num_dropped_on_receive = 0;
-        self.num_dropped_on_sanitization = 0;
-        self.num_dropped_on_validate_locks = 0;
-        self.num_dropped_on_receive_transaction_checks = 0;
-        self.num_dropped_on_clear = 0;
-        self.num_dropped_on_age_and_status = 0;
-        self.num_dropped_on_capacity = 0;
-        self.min_prioritization_fees = u64::MAX;
-        self.max_prioritization_fees = 0;
-    }
-
-    pub fn update_priority_stats(&mut self, min_max_fees: MinMaxResult<u64>) {
-        // update min/max priority
-        match min_max_fees {
-            itertools::MinMaxResult::NoElements => {
-                // do nothing
-            }
-            itertools::MinMaxResult::OneElement(e) => {
-                self.min_prioritization_fees = e;
-                self.max_prioritization_fees = e;
-            }
-            itertools::MinMaxResult::MinMax(min, max) => {
-                self.min_prioritization_fees = min;
-                self.max_prioritization_fees = max;
-            }
-        }
-    }
-
-    pub fn get_min_priority(&self) -> u64 {
-        // to avoid getting u64::max recorded by metrics / in case of edge cases
-        if self.min_prioritization_fees != u64::MAX {
-            self.min_prioritization_fees
-        } else {
-            0
-        }
-    }
-
-    pub fn get_max_priority(&self) -> u64 {
-        self.max_prioritization_fees
-    }
-}
-
-#[derive(Default)]
-struct SchedulerTimingMetrics {
-    interval: AtomicInterval,
-    /// Time spent making processing decisions.
-    decision_time_us: u64,
-    /// Time spent receiving packets.
-    receive_time_us: u64,
-    /// Time spent buffering packets.
-    buffer_time_us: u64,
-    /// Time spent filtering transactions during scheduling.
-    schedule_filter_time_us: u64,
-    /// Time spent scheduling transactions.
-    schedule_time_us: u64,
-    /// Time spent clearing transactions from the container.
-    clear_time_us: u64,
-    /// Time spent cleaning expired or processed transactions from the container.
-    clean_time_us: u64,
-    /// Time spent receiving completed transactions.
- receive_completed_time_us: u64, -} - -impl SchedulerTimingMetrics { - fn maybe_report_and_reset(&mut self, should_report: bool) { - const REPORT_INTERVAL_MS: u64 = 1000; - if self.interval.should_update(REPORT_INTERVAL_MS) { - if should_report { - self.report(); - } - self.reset(); - } - } - - fn report(&self) { - datapoint_info!( - "banking_stage_scheduler_timing", - ("decision_time_us", self.decision_time_us, i64), - ("receive_time_us", self.receive_time_us, i64), - ("buffer_time_us", self.buffer_time_us, i64), - ("schedule_filter_time_us", self.schedule_filter_time_us, i64), - ("schedule_time_us", self.schedule_time_us, i64), - ("clear_time_us", self.clear_time_us, i64), - ("clean_time_us", self.clean_time_us, i64), - ( - "receive_completed_time_us", - self.receive_completed_time_us, - i64 - ) - ); - } - - fn reset(&mut self) { - self.decision_time_us = 0; - self.receive_time_us = 0; - self.buffer_time_us = 0; - self.schedule_filter_time_us = 0; - self.schedule_time_us = 0; - self.clear_time_us = 0; - self.clean_time_us = 0; - self.receive_completed_time_us = 0; - } -} - #[cfg(test)] mod tests { use { diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs new file mode 100644 index 00000000000000..2ab86bd684e4b4 --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -0,0 +1,224 @@ +use {itertools::MinMaxResult, solana_sdk::timing::AtomicInterval}; + +#[derive(Default)] +pub struct SchedulerCountMetrics { + interval: AtomicInterval, + + /// Number of packets received. + pub num_received: usize, + /// Number of packets buffered. + pub num_buffered: usize, + + /// Number of transactions scheduled. + pub num_scheduled: usize, + /// Number of transactions that were unschedulable. + pub num_unschedulable: usize, + /// Number of transactions that were filtered out during scheduling. + pub num_schedule_filtered_out: usize, + /// Number of completed transactions received from workers. + pub num_finished: usize, + /// Number of transactions that were retryable. + pub num_retryable: usize, + + /// Number of transactions that were immediately dropped on receive. + pub num_dropped_on_receive: usize, + /// Number of transactions that were dropped due to sanitization failure. + pub num_dropped_on_sanitization: usize, + /// Number of transactions that were dropped due to failed lock validation. + pub num_dropped_on_validate_locks: usize, + /// Number of transactions that were dropped due to failed transaction + /// checks during receive. + pub num_dropped_on_receive_transaction_checks: usize, + /// Number of transactions that were dropped due to clearing. + pub num_dropped_on_clear: usize, + /// Number of transactions that were dropped due to age and status checks. + pub num_dropped_on_age_and_status: usize, + /// Number of transactions that were dropped due to exceeded capacity. 
+    pub num_dropped_on_capacity: usize,
+    /// Min prioritization fees in the transaction container
+    pub min_prioritization_fees: u64,
+    /// Max prioritization fees in the transaction container
+    pub max_prioritization_fees: u64,
+}
+
+impl SchedulerCountMetrics {
+    pub fn maybe_report_and_reset(&mut self, should_report: bool) {
+        const REPORT_INTERVAL_MS: u64 = 1000;
+        if self.interval.should_update(REPORT_INTERVAL_MS) {
+            if should_report {
+                self.report();
+            }
+            self.reset();
+        }
+    }
+
+    fn report(&self) {
+        datapoint_info!(
+            "banking_stage_scheduler_counts",
+            ("num_received", self.num_received, i64),
+            ("num_buffered", self.num_buffered, i64),
+            ("num_scheduled", self.num_scheduled, i64),
+            ("num_unschedulable", self.num_unschedulable, i64),
+            (
+                "num_schedule_filtered_out",
+                self.num_schedule_filtered_out,
+                i64
+            ),
+            ("num_finished", self.num_finished, i64),
+            ("num_retryable", self.num_retryable, i64),
+            ("num_dropped_on_receive", self.num_dropped_on_receive, i64),
+            (
+                "num_dropped_on_sanitization",
+                self.num_dropped_on_sanitization,
+                i64
+            ),
+            (
+                "num_dropped_on_validate_locks",
+                self.num_dropped_on_validate_locks,
+                i64
+            ),
+            (
+                "num_dropped_on_receive_transaction_checks",
+                self.num_dropped_on_receive_transaction_checks,
+                i64
+            ),
+            ("num_dropped_on_clear", self.num_dropped_on_clear, i64),
+            (
+                "num_dropped_on_age_and_status",
+                self.num_dropped_on_age_and_status,
+                i64
+            ),
+            ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64),
+            ("min_priority", self.get_min_priority(), i64),
+            ("max_priority", self.get_max_priority(), i64)
+        );
+    }
+
+    pub fn has_data(&self) -> bool {
+        self.num_received != 0
+            || self.num_buffered != 0
+            || self.num_scheduled != 0
+            || self.num_unschedulable != 0
+            || self.num_schedule_filtered_out != 0
+            || self.num_finished != 0
+            || self.num_retryable != 0
+            || self.num_dropped_on_receive != 0
+            || self.num_dropped_on_sanitization != 0
+            || self.num_dropped_on_validate_locks != 0
+            || self.num_dropped_on_receive_transaction_checks != 0
+            || self.num_dropped_on_clear != 0
+            || self.num_dropped_on_age_and_status != 0
+            || self.num_dropped_on_capacity != 0
+    }
+
+    fn reset(&mut self) {
+        self.num_received = 0;
+        self.num_buffered = 0;
+        self.num_scheduled = 0;
+        self.num_unschedulable = 0;
+        self.num_schedule_filtered_out = 0;
+        self.num_finished = 0;
+        self.num_retryable = 0;
+        self.num_dropped_on_receive = 0;
+        self.num_dropped_on_sanitization = 0;
+        self.num_dropped_on_validate_locks = 0;
+        self.num_dropped_on_receive_transaction_checks = 0;
+        self.num_dropped_on_clear = 0;
+        self.num_dropped_on_age_and_status = 0;
+        self.num_dropped_on_capacity = 0;
+        self.min_prioritization_fees = u64::MAX;
+        self.max_prioritization_fees = 0;
+    }
+
+    pub fn update_priority_stats(&mut self, min_max_fees: MinMaxResult<u64>) {
+        // update min/max priority
+        match min_max_fees {
+            itertools::MinMaxResult::NoElements => {
+                // do nothing
+            }
+            itertools::MinMaxResult::OneElement(e) => {
+                self.min_prioritization_fees = e;
+                self.max_prioritization_fees = e;
+            }
+            itertools::MinMaxResult::MinMax(min, max) => {
+                self.min_prioritization_fees = min;
+                self.max_prioritization_fees = max;
+            }
+        }
+    }
+
+    pub fn get_min_priority(&self) -> u64 {
+        // to avoid getting u64::max recorded by metrics / in case of edge cases
+        if self.min_prioritization_fees != u64::MAX {
+            self.min_prioritization_fees
+        } else {
+            0
+        }
+    }
+
+    pub fn get_max_priority(&self) -> u64 {
+        self.max_prioritization_fees
+    }
+}
+
+#[derive(Default)]
+pub struct SchedulerTimingMetrics {
+
interval: AtomicInterval, + /// Time spent making processing decisions. + pub decision_time_us: u64, + /// Time spent receiving packets. + pub receive_time_us: u64, + /// Time spent buffering packets. + pub buffer_time_us: u64, + /// Time spent filtering transactions during scheduling. + pub schedule_filter_time_us: u64, + /// Time spent scheduling transactions. + pub schedule_time_us: u64, + /// Time spent clearing transactions from the container. + pub clear_time_us: u64, + /// Time spent cleaning expired or processed transactions from the container. + pub clean_time_us: u64, + /// Time spent receiving completed transactions. + pub receive_completed_time_us: u64, +} + +impl SchedulerTimingMetrics { + pub fn maybe_report_and_reset(&mut self, should_report: bool) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) { + if should_report { + self.report(); + } + self.reset(); + } + } + + fn report(&self) { + datapoint_info!( + "banking_stage_scheduler_timing", + ("decision_time_us", self.decision_time_us, i64), + ("receive_time_us", self.receive_time_us, i64), + ("buffer_time_us", self.buffer_time_us, i64), + ("schedule_filter_time_us", self.schedule_filter_time_us, i64), + ("schedule_time_us", self.schedule_time_us, i64), + ("clear_time_us", self.clear_time_us, i64), + ("clean_time_us", self.clean_time_us, i64), + ( + "receive_completed_time_us", + self.receive_completed_time_us, + i64 + ) + ); + } + + fn reset(&mut self) { + self.decision_time_us = 0; + self.receive_time_us = 0; + self.buffer_time_us = 0; + self.schedule_filter_time_us = 0; + self.schedule_time_us = 0; + self.clear_time_us = 0; + self.clean_time_us = 0; + self.receive_completed_time_us = 0; + } +} From 86b5f3cde784702f8a4ed07558f23f3330de0937 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 20 Feb 2024 23:33:04 -0600 Subject: [PATCH 220/401] Revert "Update build downstream projects script (#35262)" (#35272) This reverts commit 40224345765cb6a379a4182ad395582284d6bdd9. --- scripts/build-downstream-anchor-projects.sh | 45 +++++++++------------ scripts/patch-crates.sh | 8 ---- 2 files changed, 20 insertions(+), 33 deletions(-) diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index cdfa0bae10addb..de2860573ee145 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -68,27 +68,25 @@ anchor() { cd "$solana_dir"/target/downstream-projects-anchor } -openbook() { - # Openbook-v2 is still using cargo 1.70.0, which is not compatible with the latest main - rm -rf openbook-v2 - git clone https://github.com/openbook-dex/openbook-v2.git - cd openbook-v2 - update_solana_dependencies . "$solana_ver" - patch_crates_io_solana Cargo.toml "$solana_dir" - $cargo_build_sbf --features enable-gpl - cd programs/openbook-v2 - $cargo_test_sbf --features enable-gpl -} - mango() { ( set -x - rm -rf mango-v4 - git clone https://github.com/blockworks-foundation/mango-v4.git - cd mango-v4 + rm -rf mango-v3 + git clone https://github.com/blockworks-foundation/mango-v3 + # copy toolchain file to use solana's rust version + cp "$solana_dir"/rust-toolchain.toml mango-v3/ + cd mango-v3 + update_solana_dependencies . "$solana_ver" - patch_crates_io_solana_no_header Cargo.toml "$solana_dir" - $cargo_test_sbf --features enable-gpl + update_anchor_dependencies . 
"$anchor_ver" + patch_crates_io_solana Cargo.toml "$solana_dir" + patch_crates_io_anchor Cargo.toml "$anchor_dir" + + cd program + $cargo build + $cargo test + $cargo_build_sbf + $cargo_test_sbf ) } @@ -99,21 +97,18 @@ metaplex() { git clone https://github.com/metaplex-foundation/mpl-token-metadata # copy toolchain file to use solana's rust version cp "$solana_dir"/rust-toolchain.toml mpl-token-metadata/ - cd mpl-token-metadata - ./configs/program-scripts/dump.sh ./programs/bin - ROOT_DIR=$(pwd) - cd programs/token-metadata + cd mpl-token-metadata/programs/token-metadata/program update_solana_dependencies . "$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - OUT_DIR="$ROOT_DIR"/programs/bin - export SBF_OUT_DIR="$OUT_DIR" - $cargo_test_sbf --sbf-out-dir "${OUT_DIR}" + $cargo build + $cargo test + $cargo_build_sbf + $cargo_test_sbf ) } _ anchor #_ metaplex #_ mango -#_ openbook diff --git a/scripts/patch-crates.sh b/scripts/patch-crates.sh index 181c1e5a2375b6..91a3010c8a0bd7 100644 --- a/scripts/patch-crates.sh +++ b/scripts/patch-crates.sh @@ -31,14 +31,6 @@ patch_crates_io_solana() { declare solana_dir="$2" cat >> "$Cargo_toml" <> "$Cargo_toml" < Date: Wed, 21 Feb 2024 15:45:17 +0800 Subject: [PATCH 221/401] build(deps): bump ahash from 0.8.8 to 0.8.9 (#35247) * build(deps): bump ahash from 0.8.8 to 0.8.9 Bumps [ahash](https://github.com/tkaitchuck/ahash) from 0.8.8 to 0.8.9. - [Release notes](https://github.com/tkaitchuck/ahash/releases) - [Commits](https://github.com/tkaitchuck/ahash/compare/v0.8.8...v0.8.9) --- updated-dependencies: - dependency-name: ahash dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 186044e9034d88..21f9b5d5fc5c51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -2344,7 +2344,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.9", ] [[package]] @@ -6500,7 +6500,7 @@ dependencies = [ name = "solana-perf" version = "1.19.0" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.9", "assert_matches", "bincode", "bv", diff --git a/Cargo.toml b/Cargo.toml index 40ccdd6d25f646..6214e9f3608770 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,7 +138,7 @@ edition = "2021" Inflector = "0.11.4" aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" -ahash = "0.8.8" +ahash = "0.8.9" anyhow = "1.0.80" arbitrary = "1.3.2" ark-bn254 = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7db23180c782b2..9454d1647c66a3 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -1976,7 +1976,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.9", ] [[package]] @@ -5299,7 +5299,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" name = "solana-perf" version = "1.19.0" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.9", "bincode", "bv", "caps", From 770da4817836f813ccad7b859d006e8a42db1e6e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Feb 2024 15:45:52 +0800 Subject: [PATCH 222/401] build(deps): bump serde from 1.0.196 to 1.0.197 (#35248) * build(deps): bump serde from 1.0.196 to 1.0.197 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.196 to 1.0.197. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.196...v1.0.197) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21f9b5d5fc5c51..2e3f72069f30a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4769,9 +4769,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -4787,9 +4787,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 6214e9f3608770..4b46fed6accb02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.22" seqlock = "0.2.0" -serde = "1.0.196" +serde = "1.0.197" serde_bytes = "0.11.14" serde_derive = "1.0.103" serde_json = "1.0.113" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9454d1647c66a3..cf07b9d0ba0cc8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4239,9 +4239,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -4257,9 +4257,9 @@ dependencies = [ [[package]] name = "serde_derive" 
-version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", From 8233964e39a7bdb5ff1542c5554e5bddf2602dec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Feb 2024 15:46:40 +0800 Subject: [PATCH 223/401] build(deps): bump syn from 2.0.49 to 2.0.50 (#35249) * build(deps): bump syn from 2.0.49 to 2.0.50 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.49 to 2.0.50. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.49...2.0.50) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 66 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 62 +++++++++++++++++++------------------- 2 files changed, 64 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e3f72069f30a3..64365f6a7fc87e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -459,7 +459,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -607,7 +607,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -775,7 +775,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "syn_derive", ] @@ -1535,7 +1535,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1546,7 +1546,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1608,7 +1608,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1732,7 +1732,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1838,7 +1838,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -2102,7 +2102,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3373,7 +3373,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3447,7 +3447,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3943,7 +3943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4111,7 +4111,7 @@ 
checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4793,7 +4793,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4847,7 +4847,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4897,7 +4897,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -6028,7 +6028,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7078,7 +7078,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7792,7 +7792,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7804,7 +7804,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.49", + "syn 2.0.50", "thiserror", ] @@ -7862,7 +7862,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8050,9 +8050,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.49" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -8068,7 +8068,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8239,7 +8239,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8251,7 +8251,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "test-case-core", ] @@ -8287,7 +8287,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8424,7 +8424,7 @@ source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7 dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8670,7 +8670,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8973,7 +8973,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-shared", ] @@ -9007,7 +9007,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9301,7 +9301,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -9321,7 +9321,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cf07b9d0ba0cc8..f2ce6ea8134599 100644 --- a/programs/sbf/Cargo.lock 
+++ b/programs/sbf/Cargo.lock @@ -425,7 +425,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -573,7 +573,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -726,7 +726,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "syn_derive", ] @@ -1238,7 +1238,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1249,7 +1249,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1424,7 +1424,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1533,7 +1533,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1780,7 +1780,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3016,7 +3016,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3089,7 +3089,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3544,7 +3544,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3685,7 +3685,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4263,7 +4263,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4308,7 +4308,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5057,7 +5057,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -6189,7 +6189,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -6728,7 +6728,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -6740,7 +6740,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.49", + "syn 2.0.50", "thiserror", ] @@ -6788,7 +6788,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -6976,9 +6976,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.49" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -6994,7 +6994,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 
2.0.49", + "syn 2.0.50", ] [[package]] @@ -7151,7 +7151,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7163,7 +7163,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "test-case-core", ] @@ -7199,7 +7199,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7322,7 +7322,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7540,7 +7540,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7822,7 +7822,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-shared", ] @@ -7856,7 +7856,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8141,7 +8141,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8161,7 +8161,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] From 14453eb1b0634a630d4982085600cc2440ebc556 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Feb 2024 15:46:55 +0800 Subject: [PATCH 224/401] build(deps): bump serde_json from 1.0.113 to 1.0.114 (#35250) * build(deps): bump serde_json from 1.0.113 to 1.0.114 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.113 to 1.0.114. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.113...v1.0.114) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64365f6a7fc87e..a727fae2c8b0ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4798,9 +4798,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index 4b46fed6accb02..0ec4b780fe13e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -296,7 +296,7 @@ seqlock = "0.2.0" serde = "1.0.197" serde_bytes = "0.11.14" serde_derive = "1.0.103" -serde_json = "1.0.113" +serde_json = "1.0.114" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.32" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f2ce6ea8134599..6ab36567f1a744 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4268,9 +4268,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", From d223a430964e47a1f20e84ab991fec3514bec31b Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 21 Feb 2024 22:30:55 +0800 Subject: [PATCH 225/401] ci: update anchor downstream project (#35274) * Update build downstream projects script (#35262) * fix patch-crates.sh doesn't pass the real variables * fix downstream project anchor doesn't be triggered correctly --------- Co-authored-by: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> --- .../workflows/downstream-project-anchor.yml | 3 ++ scripts/build-downstream-anchor-projects.sh | 45 ++++++++++--------- scripts/patch-crates.sh | 8 ++++ 3 files changed, 36 insertions(+), 20 deletions(-) diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index 516a0fdc56ae76..487d8361ea38d5 100644 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -17,6 +17,9 @@ on: - "cargo-test-bpf" - "cargo-build-sbf" - "cargo-test-sbf" + - "scripts/build-downstream-anchor-projects.sh" + - ".github/scripts/purge-ubuntu-runner.sh" + - ".github/scripts/downstream-project-spl-install-deps.sh" - ".github/workflows/downstream-project-anchor.yml" workflow_call: inputs: diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index de2860573ee145..cdfa0bae10addb 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -68,25 +68,27 @@ anchor() { cd "$solana_dir"/target/downstream-projects-anchor } +openbook() { + # Openbook-v2 is still using cargo 1.70.0, which is not compatible with the latest main + rm -rf openbook-v2 + git clone https://github.com/openbook-dex/openbook-v2.git + cd openbook-v2 + update_solana_dependencies . 
"$solana_ver" + patch_crates_io_solana Cargo.toml "$solana_dir" + $cargo_build_sbf --features enable-gpl + cd programs/openbook-v2 + $cargo_test_sbf --features enable-gpl +} + mango() { ( set -x - rm -rf mango-v3 - git clone https://github.com/blockworks-foundation/mango-v3 - # copy toolchain file to use solana's rust version - cp "$solana_dir"/rust-toolchain.toml mango-v3/ - cd mango-v3 - + rm -rf mango-v4 + git clone https://github.com/blockworks-foundation/mango-v4.git + cd mango-v4 update_solana_dependencies . "$solana_ver" - update_anchor_dependencies . "$anchor_ver" - patch_crates_io_solana Cargo.toml "$solana_dir" - patch_crates_io_anchor Cargo.toml "$anchor_dir" - - cd program - $cargo build - $cargo test - $cargo_build_sbf - $cargo_test_sbf + patch_crates_io_solana_no_header Cargo.toml "$solana_dir" + $cargo_test_sbf --features enable-gpl ) } @@ -97,18 +99,21 @@ metaplex() { git clone https://github.com/metaplex-foundation/mpl-token-metadata # copy toolchain file to use solana's rust version cp "$solana_dir"/rust-toolchain.toml mpl-token-metadata/ - cd mpl-token-metadata/programs/token-metadata/program + cd mpl-token-metadata + ./configs/program-scripts/dump.sh ./programs/bin + ROOT_DIR=$(pwd) + cd programs/token-metadata update_solana_dependencies . "$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - $cargo build - $cargo test - $cargo_build_sbf - $cargo_test_sbf + OUT_DIR="$ROOT_DIR"/programs/bin + export SBF_OUT_DIR="$OUT_DIR" + $cargo_test_sbf --sbf-out-dir "${OUT_DIR}" ) } _ anchor #_ metaplex #_ mango +#_ openbook diff --git a/scripts/patch-crates.sh b/scripts/patch-crates.sh index 91a3010c8a0bd7..771dcddbd02fa4 100644 --- a/scripts/patch-crates.sh +++ b/scripts/patch-crates.sh @@ -31,6 +31,14 @@ patch_crates_io_solana() { declare solana_dir="$2" cat >> "$Cargo_toml" <> "$Cargo_toml" < Date: Wed, 21 Feb 2024 10:07:57 -0600 Subject: [PATCH 226/401] Obtain BankForks read lock once to get ancestors and descendants (#35273) No need to get the read lock twice; instead, hold it and get both items --- core/src/replay_stage.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 1b7b4737f55fc2..2dbe2fdbc2e932 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -659,8 +659,10 @@ impl ReplayStage { let mut tpu_has_bank = poh_recorder.read().unwrap().has_bank(); let mut replay_active_banks_time = Measure::start("replay_active_banks_time"); - let mut ancestors = bank_forks.read().unwrap().ancestors(); - let mut descendants = bank_forks.read().unwrap().descendants(); + let (mut ancestors, mut descendants) = { + let r_bank_forks = bank_forks.read().unwrap(); + (r_bank_forks.ancestors(), r_bank_forks.descendants()) + }; let did_complete_bank = Self::replay_active_banks( &blockstore, &bank_forks, From 4905076fb6722f4b88cf4e701ae8572d1a55c2bb Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 21 Feb 2024 10:16:16 -0600 Subject: [PATCH 227/401] Remove channel that sends roots to BlockstoreCleanupService (#35211) Currently, ReplayStage sends new roots to BlockstoreCleanupService, and BlockstoreCleanupService decides when to clean based on advancement of the latest root. This is totally unnecessary as the latest root is cached by the Blockstore, and this value can simply be fetched. This change removes the channel completely, and instead just fetches the latest root from Blockstore directly. 
Moreover, some logic is added to check the latest root less frequently, based on the set purge interval. All in all, we went from sending > 100 slots/min across a crossbeam channel to reading an atomic roughly 3 times/min, while also removing the need for an additional thread that read from the channel. --- core/src/replay_stage.rs | 9 -- core/src/tvu.rs | 9 +- ledger/src/blockstore_cleanup_service.rs | 192 +++++++++-------------- 3 files changed, 74 insertions(+), 136 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2dbe2fdbc2e932..31595e6b6504d9 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -281,7 +281,6 @@ pub struct ReplayStageConfig { pub exit: Arc, pub rpc_subscriptions: Arc, pub leader_schedule_cache: Arc, - pub latest_root_senders: Vec>, pub accounts_background_request_sender: AbsRequestSender, pub block_commitment_cache: Arc>, pub transaction_status_sender: Option, @@ -551,7 +550,6 @@ impl ReplayStage { exit, rpc_subscriptions, leader_schedule_cache, - latest_root_senders, accounts_background_request_sender, block_commitment_cache, transaction_status_sender, @@ -951,7 +949,6 @@ impl ReplayStage { &leader_schedule_cache, &lockouts_sender, &accounts_background_request_sender, - &latest_root_senders, &rpc_subscriptions, &block_commitment_cache, &mut heaviest_subtree_fork_choice, @@ -2230,7 +2227,6 @@ impl ReplayStage { leader_schedule_cache: &Arc, lockouts_sender: &Sender, accounts_background_request_sender: &AbsRequestSender, - latest_root_senders: &[Sender], rpc_subscriptions: &Arc, block_commitment_cache: &Arc>, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, @@ -2319,11 +2315,6 @@ impl ReplayStage { .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); } } - latest_root_senders.iter().for_each(|s| { - if let Err(e) = s.send(new_root) { - trace!("latest root send failed: {:?}", e); - } - }); info!("new root {}", new_root); } diff --git a/core/src/tvu.rs b/core/src/tvu.rs index d498ab405d39aa..b0fe93890761b4 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -246,14 +246,12 @@ impl Tvu { exit.clone(), ); - let (blockstore_cleanup_slot_sender, blockstore_cleanup_slot_receiver) = unbounded(); let replay_stage_config = ReplayStageConfig { vote_account: *vote_account, authorized_voter_keypairs, exit: exit.clone(), rpc_subscriptions: rpc_subscriptions.clone(), leader_schedule_cache: leader_schedule_cache.clone(), - latest_root_senders: vec![blockstore_cleanup_slot_sender], accounts_background_request_sender, block_commitment_cache, transaction_status_sender, @@ -322,12 +320,7 @@ impl Tvu { )?; let blockstore_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { - BlockstoreCleanupService::new( - blockstore_cleanup_slot_receiver, - blockstore.clone(), - max_ledger_shreds, - exit.clone(), - ) + BlockstoreCleanupService::new(blockstore.clone(), max_ledger_shreds, exit.clone()) }); let duplicate_shred_listener = DuplicateShredListener::new( diff --git a/ledger/src/blockstore_cleanup_service.rs b/ledger/src/blockstore_cleanup_service.rs index d9212bf6ddfb58..2f79be6694844d 100644 --- a/ledger/src/blockstore_cleanup_service.rs +++ b/ledger/src/blockstore_cleanup_service.rs @@ -9,9 +9,8 @@ use { blockstore::{Blockstore, PurgeType}, blockstore_db::{Result as BlockstoreResult, DATA_SHRED_CF}, }, - crossbeam_channel::{Receiver, RecvTimeoutError}, solana_measure::measure::Measure, - solana_sdk::clock::Slot, + solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT}, std::{ string::ToString,
sync::{ @@ -19,7 +18,7 @@ use { Arc, }, thread::{self, Builder, JoinHandle}, - time::Duration, + time::{Duration, Instant}, }, }; @@ -36,46 +35,53 @@ pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000; // Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000; -// Check for removing slots at this interval so we don't purge too often -// and starve other blockstore users. -pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512; +// Perform blockstore cleanup at this interval to limit the overhead of cleanup +// Cleanup will be considered after the latest root has advanced by this value +const DEFAULT_CLEANUP_SLOT_INTERVAL: u64 = 512; +// The above slot interval can be roughly equated to a time interval. So, scale +// how often we check for cleanup with the interval. Doing so will avoid wasted +// checks when we know that the latest root could not have advanced far enough +// +// Given that the timing of new slots/roots is not exact, divide by 10 to avoid +// a long wait incase a check occurs just before the interval has elapsed +const LOOP_LIMITER: Duration = + Duration::from_millis(DEFAULT_CLEANUP_SLOT_INTERVAL * DEFAULT_MS_PER_SLOT / 10); pub struct BlockstoreCleanupService { t_cleanup: JoinHandle<()>, } impl BlockstoreCleanupService { - pub fn new( - new_root_receiver: Receiver, - blockstore: Arc, - max_ledger_shreds: u64, - exit: Arc, - ) -> Self { + pub fn new(blockstore: Arc, max_ledger_shreds: u64, exit: Arc) -> Self { let mut last_purge_slot = 0; - - info!( - "BlockstoreCleanupService active. max ledger shreds={}", - max_ledger_shreds - ); + let mut last_check_time = Instant::now(); let t_cleanup = Builder::new() .name("solBstoreClean".to_string()) - .spawn(move || loop { - if exit.load(Ordering::Relaxed) { - break; - } - if let Err(e) = Self::cleanup_ledger( - &new_root_receiver, - &blockstore, - max_ledger_shreds, - &mut last_purge_slot, - DEFAULT_PURGE_SLOT_INTERVAL, - ) { - match e { - RecvTimeoutError::Disconnected => break, - RecvTimeoutError::Timeout => (), + .spawn(move || { + info!( + "BlockstoreCleanupService has started with max \ + ledger shreds={max_ledger_shreds}", + ); + loop { + if exit.load(Ordering::Relaxed) { + break; + } + if last_check_time.elapsed() > LOOP_LIMITER { + Self::cleanup_ledger( + &blockstore, + max_ledger_shreds, + &mut last_purge_slot, + DEFAULT_CLEANUP_SLOT_INTERVAL, + ); + + last_check_time = Instant::now(); } + // Only sleep for 1 second instead of LOOP_LIMITER so that this + // thread can respond to the exit flag in a timely manner + thread::sleep(Duration::from_secs(1)); } + info!("BlockstoreCleanupService has stopped"); }) .unwrap(); @@ -136,8 +142,8 @@ impl BlockstoreCleanupService { .unwrap_or(lowest_slot); if highest_slot < lowest_slot { error!( - "Skipping cleanup: Blockstore highest slot {} < lowest slot {}", - highest_slot, lowest_slot + "Skipping Blockstore cleanup: \ + highest slot {highest_slot} < lowest slot {lowest_slot}", ); return (false, 0, num_shreds); } @@ -146,8 +152,8 @@ impl BlockstoreCleanupService { let num_slots = highest_slot - lowest_slot + 1; let mean_shreds_per_slot = num_shreds / num_slots; info!( - "{} alive shreds in slots [{}, {}], mean of {} shreds per slot", - num_shreds, lowest_slot, highest_slot, mean_shreds_per_slot + "Blockstore has {num_shreds} alive shreds in slots [{lowest_slot}, {highest_slot}], \ + mean of {mean_shreds_per_slot} shreds per slot", ); if num_shreds <= max_ledger_shreds { @@ -164,17 +170,11 @@ impl 
BlockstoreCleanupService { let lowest_cleanup_slot = std::cmp::min(lowest_slot + num_slots_to_clean - 1, root); (true, lowest_cleanup_slot, num_shreds) } else { - error!("Skipping cleanup: calculated mean of 0 shreds per slot"); + error!("Skipping Blockstore cleanup: calculated mean of 0 shreds per slot"); (false, 0, num_shreds) } } - fn receive_new_roots(new_root_receiver: &Receiver) -> Result { - let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?; - // Get the newest root - Ok(new_root_receiver.try_iter().last().unwrap_or(root)) - } - /// Checks for new roots and initiates a cleanup if the last cleanup was at /// least `purge_interval` slots ago. A cleanup will no-op if the ledger /// already has fewer than `max_ledger_shreds`; otherwise, the cleanup will @@ -182,8 +182,6 @@ impl BlockstoreCleanupService { /// /// # Arguments /// - /// - `new_root_receiver`: signal receiver which contains the information - /// about what `Slot` is the current root. /// - `max_ledger_shreds`: the number of shreds to keep since the new root. /// - `last_purge_slot`: an both an input and output parameter indicating /// the id of the last purged slot. As an input parameter, it works @@ -191,85 +189,53 @@ impl BlockstoreCleanupService { /// ledger cleanup. As an output parameter, it will be updated if this /// function actually performs the ledger cleanup. /// - `purge_interval`: the minimum slot interval between two ledger - /// cleanup. When the root derived from `new_root_receiver` minus + /// cleanup. When the max root fetched from the Blockstore minus /// `last_purge_slot` is fewer than `purge_interval`, the function will /// simply return `Ok` without actually running the ledger cleanup. /// In this case, `purge_interval` will remain unchanged. /// /// Also see `blockstore::purge_slot`. pub fn cleanup_ledger( - new_root_receiver: &Receiver, blockstore: &Arc, max_ledger_shreds: u64, last_purge_slot: &mut u64, purge_interval: u64, - ) -> Result<(), RecvTimeoutError> { - let root = Self::receive_new_roots(new_root_receiver)?; + ) { + let root = blockstore.max_root(); if root - *last_purge_slot <= purge_interval { - return Ok(()); + return; } - - let disk_utilization_pre = blockstore.storage_size(); - info!( - "purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}", - root, last_purge_slot, purge_interval, disk_utilization_pre - ); - *last_purge_slot = root; + info!("Looking for Blockstore data to cleanup, latest root: {root}"); + let disk_utilization_pre = blockstore.storage_size(); let (slots_to_clean, lowest_cleanup_slot, total_shreds) = Self::find_slots_to_clean(blockstore, root, max_ledger_shreds); if slots_to_clean { - let purge_complete = Arc::new(AtomicBool::new(false)); - let blockstore = blockstore.clone(); - let purge_complete1 = purge_complete.clone(); - let _t_purge = Builder::new() - .name("solLedgerPurge".to_string()) - .spawn(move || { - let mut slot_update_time = Measure::start("slot_update"); - *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; - slot_update_time.stop(); - - info!("purging data older than {}", lowest_cleanup_slot); - - let mut purge_time = Measure::start("purge_slots"); - - // purge any slots older than lowest_cleanup_slot. - blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter); - // Update only after purge operation. - // Safety: This value can be used by compaction_filters shared via Arc. - // Compactions are async and run as a multi-threaded background job. 
However, this - // shouldn't cause consistency issues for iterators and getters because we have - // already expired all affected keys (older than or equal to lowest_cleanup_slot) - // by the above `purge_slots`. According to the general RocksDB design where SST - // files are immutable, even running iterators aren't affected; the database grabs - // a snapshot of the live set of sst files at iterator's creation. - // Also, we passed the PurgeType::CompactionFilter, meaning no delete_range for - // transaction_status and address_signatures CFs. These are fine because they - // don't require strong consistent view for their operation. - blockstore.set_max_expired_slot(lowest_cleanup_slot); - - purge_time.stop(); - info!("{}", purge_time); - - purge_complete1.store(true, Ordering::Relaxed); - }) - .unwrap(); - - // Keep pulling roots off `new_root_receiver` while purging to avoid channel buildup - while !purge_complete.load(Ordering::Relaxed) { - if let Err(err) = Self::receive_new_roots(new_root_receiver) { - debug!("receive_new_roots: {}", err); - } - thread::sleep(Duration::from_secs(1)); - } + *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; + + let mut purge_time = Measure::start("purge_slots()"); + // purge any slots older than lowest_cleanup_slot. + blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter); + // Update only after purge operation. + // Safety: This value can be used by compaction_filters shared via Arc. + // Compactions are async and run as a multi-threaded background job. However, this + // shouldn't cause consistency issues for iterators and getters because we have + // already expired all affected keys (older than or equal to lowest_cleanup_slot) + // by the above `purge_slots`. According to the general RocksDB design where SST + // files are immutable, even running iterators aren't affected; the database grabs + // a snapshot of the live set of sst files at iterator's creation. + // Also, we passed the PurgeType::CompactionFilter, meaning no delete_range for + // transaction_status and address_signatures CFs. These are fine because they + // don't require strong consistent view for their operation. + blockstore.set_max_expired_slot(lowest_cleanup_slot); + purge_time.stop(); + info!("Cleaned up Blockstore data older than slot {lowest_cleanup_slot}. 
{purge_time}"); } let disk_utilization_post = blockstore.storage_size(); Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds); - - Ok(()) } fn report_disk_metrics( @@ -297,7 +263,6 @@ mod tests { use { super::*, crate::{blockstore::make_many_slot_entries, get_tmp_ledger_path_auto_delete}, - crossbeam_channel::unbounded, }; fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore { @@ -388,7 +353,7 @@ mod tests { } #[test] - fn test_cleanup1() { + fn test_cleanup() { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -397,19 +362,11 @@ mod tests { // Initiate a flush so inserted shreds found by find_slots_to_clean() let blockstore = Arc::new(flush_blockstore_contents_to_disk(blockstore)); - let (sender, receiver) = unbounded(); - //send a signal to kill all but 5 shreds, which will be in the newest slots + // Mark 50 as a root to kill all but 5 shreds, which will be in the newest slots let mut last_purge_slot = 0; - sender.send(50).unwrap(); - BlockstoreCleanupService::cleanup_ledger( - &receiver, - &blockstore, - 5, - &mut last_purge_slot, - 10, - ) - .unwrap(); + blockstore.set_roots([50].iter()).unwrap(); + BlockstoreCleanupService::cleanup_ledger(&blockstore, 5, &mut last_purge_slot, 10); assert_eq!(last_purge_slot, 50); //check that 0-40 don't exist @@ -424,7 +381,6 @@ mod tests { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); - let (sender, receiver) = unbounded(); let mut first_insert = Measure::start("first_insert"); let initial_slots = 50; @@ -451,15 +407,13 @@ mod tests { insert_time.stop(); let mut time = Measure::start("purge time"); - sender.send(slot + num_slots).unwrap(); + blockstore.set_roots([slot + num_slots].iter()).unwrap(); BlockstoreCleanupService::cleanup_ledger( - &receiver, &blockstore, initial_slots, &mut last_purge_slot, 10, - ) - .unwrap(); + ); time.stop(); info!( "slot: {} size: {} {} {}", From db2071a2257f7f0f3421cef36bbf0a8d861ff84b Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 21 Feb 2024 11:25:38 -0500 Subject: [PATCH 228/401] Removes get_account_read_entry_with_lock() (#35240) --- accounts-db/src/accounts_index.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 17e0d527746960..51a04e3a4eb4b6 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1126,14 +1126,6 @@ impl + Into> AccountsIndex { pub fn get_account_read_entry(&self, pubkey: &Pubkey) -> Option> { let lock = self.get_bin(pubkey); - self.get_account_read_entry_with_lock(pubkey, &lock) - } - - pub fn get_account_read_entry_with_lock( - &self, - pubkey: &Pubkey, - lock: &AccountMaps<'_, T, U>, - ) -> Option> { lock.get(pubkey) .map(ReadAccountMapEntry::from_account_map_entry) } From a97f26e2a3510902c72250a71c97bc1d117ee97f Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Wed, 21 Feb 2024 14:21:12 -0300 Subject: [PATCH 229/401] Build `transfer-hook` for downstream project (#35245) * Build transfer hook for downstream project * Add transfer hook example to programs list --- ci/downstream-projects/func-spl.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/downstream-projects/func-spl.sh b/ci/downstream-projects/func-spl.sh index 00da118e14f06e..d2581b70db9f66 100755 --- 
a/ci/downstream-projects/func-spl.sh +++ b/ci/downstream-projects/func-spl.sh @@ -5,6 +5,7 @@ spl() { # Mind the order! PROGRAMS=( instruction-padding/program + token/transfer-hook/example token/program token/program-2022 token/program-2022-test From 531793b4be6d6c55f7c6a13f6da1982a7387f185 Mon Sep 17 00:00:00 2001 From: DimAn Date: Wed, 21 Feb 2024 22:23:23 +0400 Subject: [PATCH 230/401] validator: ignore too old tower error (#35229) * validator: ignore too old tower error * Update core/src/replay_stage.rs Co-authored-by: Ashwin Sekar * remove redundant references --------- Co-authored-by: Ashwin Sekar --- core/src/consensus.rs | 3 +++ core/src/replay_stage.rs | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 3e24f33233863e..ab316d7c7da612 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1533,6 +1533,9 @@ impl TowerError { false } } + pub fn is_too_old(&self) -> bool { + matches!(self, TowerError::TooOldTower(_, _)) + } } #[derive(Debug)] diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 31595e6b6504d9..485c58bdd57fa5 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1171,6 +1171,13 @@ impl ReplayStage { node_pubkey, vote_account, ) + } else if err.is_too_old() { + warn!("Failed to load tower, too old for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); + Tower::new_from_bankforks( + &bank_forks.read().unwrap(), + node_pubkey, + vote_account, + ) } else { error!("Failed to load tower for {}: {}", node_pubkey, err); std::process::exit(1); From 537c3d8e2ce3e50c1d98a564f7eb3dde289fc3dd Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 21 Feb 2024 16:12:23 -0600 Subject: [PATCH 231/401] Format the string literals in /validator directory (#35261) There are lots of string literals in the /validator directory, including many for CLI help and error messages. Any string literals that exceed 100 characters prevent rustfmt from running properly. This change temporarily set format_strings = true in rustfmt.toml, and then ran the linter on the validator directory. This was followed up by manually tweaking several strings that were already well crafted for readability (and within the 100 character limit) --- validator/src/admin_rpc_service.rs | 14 +- validator/src/bin/solana-test-validator.rs | 25 +- validator/src/bootstrap.rs | 62 +- validator/src/cli.rs | 846 ++++++++++++--------- validator/src/dashboard.rs | 7 +- validator/src/main.rs | 55 +- 6 files changed, 599 insertions(+), 410 deletions(-) diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index a9fe1c4e39f425..57be4cf488865d 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -615,10 +615,9 @@ impl AdminRpc for AdminRpcImpl { .tpu(Protocol::UDP) .map_err(|err| { error!( - "The public TPU address isn't being published. \ - The node is likely in repair mode. \ - See help for --restricted-repair-only-mode for more information. \ - {err}" + "The public TPU address isn't being published. The node is likely in \ + repair mode. See help for --restricted-repair-only-mode for more \ + information. {err}" ); jsonrpc_core::error::Error::internal_error() })?; @@ -653,10 +652,9 @@ impl AdminRpc for AdminRpcImpl { .tpu_forwards(Protocol::UDP) .map_err(|err| { error!( - "The public TPU Forwards address isn't being published. \ - The node is likely in repair mode. \ - See help for --restricted-repair-only-mode for more information. 
\ - {err}" + "The public TPU Forwards address isn't being published. The node is \ + likely in repair mode. See help for --restricted-repair-only-mode for \ + more information. {err}" ); jsonrpc_core::error::Error::internal_error() })?; diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index aee5fc039df410..42f5a0634c0cfa 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -285,17 +285,20 @@ fn main() { let warp_slot = if matches.is_present("warp_slot") { Some(match matches.value_of("warp_slot") { Some(_) => value_t_or_exit!(matches, "warp_slot", Slot), - None => { - cluster_rpc_client.as_ref().unwrap_or_else(|_| { - println!("The --url argument must be provided if --warp-slot/-w is used without an explicit slot"); - exit(1); - - }).get_slot() - .unwrap_or_else(|err| { - println!("Unable to get current cluster slot: {err}"); - exit(1); - }) - } + None => cluster_rpc_client + .as_ref() + .unwrap_or_else(|_| { + println!( + "The --url argument must be provided if --warp-slot/-w is used without an \ + explicit slot" + ); + exit(1); + }) + .get_slot() + .unwrap_or_else(|err| { + println!("Unable to get current cluster slot: {err}"); + exit(1); + }), }) } else { None diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 88a45fdad50635..8d5457744a23b8 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -237,7 +237,10 @@ fn get_rpc_peers( }) .count(); - info!("Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, {rpc_peers_blacklisted} blacklisted"); + info!( + "Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, \ + {rpc_peers_blacklisted} blacklisted" + ); if rpc_peers_blacklisted == rpc_peers_total { *retry_reason = if !blacklisted_rpc_nodes.is_empty() @@ -487,9 +490,9 @@ fn get_vetted_rpc_nodes( Ok(rpc_node_details) => rpc_node_details, Err(err) => { error!( - "Failed to get RPC nodes: {err}. Consider checking system \ - clock, removing `--no-port-check`, or adjusting \ - `--known-validator ...` arguments as applicable" + "Failed to get RPC nodes: {err}. Consider checking system clock, removing \ + `--no-port-check`, or adjusting `--known-validator ...` arguments as \ + applicable" ); exit(1); } @@ -905,9 +908,8 @@ fn get_snapshot_hashes_from_known_validators( get_snapshot_hashes_for_node, ) { debug!( - "Snapshot hashes have not been discovered from known validators. \ - This likely means the gossip tables are not fully populated. \ - We will sleep and retry..." + "Snapshot hashes have not been discovered from known validators. This likely means \ + the gossip tables are not fully populated. We will sleep and retry..." ); return KnownSnapshotHashes::default(); } @@ -981,8 +983,9 @@ fn build_known_snapshot_hashes<'a>( // hashes. So if it happens, keep the first and ignore the rest. 
if is_any_same_slot_and_different_hash(&full_snapshot_hash, known_snapshot_hashes.keys()) { warn!( - "Ignoring all snapshot hashes from node {node} since we've seen a different full snapshot hash with this slot.\ - \nfull snapshot hash: {full_snapshot_hash:?}" + "Ignoring all snapshot hashes from node {node} since we've seen a different full \ + snapshot hash with this slot.\ + \nfull snapshot hash: {full_snapshot_hash:?}" ); debug!( "known full snapshot hashes: {:#?}", @@ -1007,9 +1010,10 @@ fn build_known_snapshot_hashes<'a>( known_incremental_snapshot_hashes.iter(), ) { warn!( - "Ignoring incremental snapshot hash from node {node} since we've seen a different incremental snapshot hash with this slot.\ - \nfull snapshot hash: {full_snapshot_hash:?}\ - \nincremental snapshot hash: {incremental_snapshot_hash:?}" + "Ignoring incremental snapshot hash from node {node} since we've seen a \ + different incremental snapshot hash with this slot.\ + \nfull snapshot hash: {full_snapshot_hash:?}\ + \nincremental snapshot hash: {incremental_snapshot_hash:?}" ); debug!( "known incremental snapshot hashes based on this slot: {:#?}", @@ -1112,7 +1116,10 @@ fn retain_peer_snapshot_hashes_with_highest_incremental_snapshot_slot( peer_snapshot_hash.snapshot_hash.incr == highest_incremental_snapshot_hash }); - trace!("retain peer snapshot hashes with highest incremental snapshot slot: {peer_snapshot_hashes:?}"); + trace!( + "retain peer snapshot hashes with highest incremental snapshot slot: \ + {peer_snapshot_hashes:?}" + ); } /// Check to see if we can use our local snapshots, otherwise download newer ones. @@ -1192,7 +1199,8 @@ fn download_snapshots( }) { info!( - "Incremental snapshot archive already exists locally. Skipping download. slot: {}, hash: {}", + "Incremental snapshot archive already exists locally. Skipping download. \ + slot: {}, hash: {}", incremental_snapshot_hash.0, incremental_snapshot_hash.1 ); } else { @@ -1272,9 +1280,9 @@ fn download_snapshot( { warn!( "The snapshot download is too slow, throughput: {} < min speed {} \ - bytes/sec, but will NOT abort and try a different node as it is the \ - only known validator and the --only-known-rpc flag is set. \ - Abort count: {}, Progress detail: {:?}", + bytes/sec, but will NOT abort and try a different node as it is the \ + only known validator and the --only-known-rpc flag is set. Abort \ + count: {}, Progress detail: {:?}", download_progress.last_throughput, minimal_snapshot_download_speed, download_abort_count, @@ -1284,9 +1292,8 @@ fn download_snapshot( } } warn!( - "The snapshot download is too slow, throughput: {} < min speed {} \ - bytes/sec, will abort and try a different node. \ - Abort count: {}, Progress detail: {:?}", + "The snapshot download is too slow, throughput: {} < min speed {} bytes/sec, \ + will abort and try a different node. Abort count: {}, Progress detail: {:?}", download_progress.last_throughput, minimal_snapshot_download_speed, download_abort_count, @@ -1321,17 +1328,26 @@ fn should_use_local_snapshot( incremental_snapshot_fetch, ) { None => { - info!("Downloading a snapshot for slot {cluster_snapshot_slot} since there is not a local snapshot."); + info!( + "Downloading a snapshot for slot {cluster_snapshot_slot} since there is not a \ + local snapshot." 
+ ); false } Some((local_snapshot_slot, _)) => { if local_snapshot_slot >= cluster_snapshot_slot.saturating_sub(maximum_local_snapshot_age) { - info!("Reusing local snapshot at slot {local_snapshot_slot} instead of downloading a snapshot for slot {cluster_snapshot_slot}."); + info!( + "Reusing local snapshot at slot {local_snapshot_slot} instead of downloading \ + a snapshot for slot {cluster_snapshot_slot}." + ); true } else { - info!("Local snapshot from slot {local_snapshot_slot} is too old. Downloading a newer snapshot for slot {cluster_snapshot_slot}."); + info!( + "Local snapshot from slot {local_snapshot_slot} is too old. Downloading a \ + newer snapshot for slot {cluster_snapshot_slot}." + ); false } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index b3e1a885b3d52b..84f63d3503a3c2 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -61,7 +61,8 @@ const MAX_SNAPSHOT_DOWNLOAD_ABORT: u32 = 5; const MINIMUM_TICKS_PER_SLOT: u64 = 2; pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { - return App::new(crate_name!()).about(crate_description!()) + return App::new(crate_name!()) + .about(crate_description!()) .version(version) .setting(AppSettings::VersionlessSubcommands) .setting(AppSettings::InferSubcommands) @@ -87,9 +88,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .validator(is_keypair_or_ask_keyword) .requires("vote_account") .multiple(true) - .help("Include an additional authorized voter keypair. \ - May be specified multiple times. \ - [default: the --identity keypair]"), + .help( + "Include an additional authorized voter keypair. May be specified multiple \ + times. [default: the --identity keypair]", + ), ) .arg( Arg::with_name("vote_account") @@ -98,18 +100,21 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(is_pubkey_or_keypair) .requires("identity") - .help("Validator vote account public key. \ - If unspecified voting will be disabled. \ - The authorized voter for the account must either be the \ - --identity keypair or with the --authorized-voter argument") + .help( + "Validator vote account public key. If unspecified, voting will be disabled. \ + The authorized voter for the account must either be the --identity keypair \ + or set by the --authorized-voter argument", + ), ) .arg( Arg::with_name("init_complete_file") .long("init-complete-file") .value_name("FILE") .takes_value(true) - .help("Create this file if it doesn't already exist \ - once validator initialization is complete"), + .help( + "Create this file if it doesn't already exist once validator initialization \ + is complete", + ), ) .arg( Arg::with_name("ledger_path") @@ -135,8 +140,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { Arg::with_name("no_snapshot_fetch") .long("no-snapshot-fetch") .takes_value(false) - .help("Do not attempt to fetch a snapshot from the cluster, \ - start from a local snapshot if present"), + .help( + "Do not attempt to fetch a snapshot from the cluster, start from a local \ + snapshot if present", + ), ) .arg( Arg::with_name("no_genesis_fetch") @@ -157,18 +164,21 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("RPC_URL") .requires("entrypoint") .conflicts_with_all(&["no_check_vote_account", "no_voting"]) - .help("Sanity check vote account state at startup. 
The JSON RPC endpoint at RPC_URL must expose `--full-rpc-api`") + .help( + "Sanity check vote account state at startup. The JSON RPC endpoint at RPC_URL \ + must expose `--full-rpc-api`", + ), ) .arg( Arg::with_name("restricted_repair_only_mode") .long("restricted-repair-only-mode") .takes_value(false) - .help("Do not publish the Gossip, TPU, TVU or Repair Service ports causing \ - the validator to operate in a limited capacity that reduces its \ - exposure to the rest of the cluster. \ - \ - The --no-voting flag is implicit when this flag is enabled \ - "), + .help( + "Do not publish the Gossip, TPU, TVU or Repair Service ports. Doing so causes \ + the node to operate in a limited capacity that reduces its exposure to the \ + rest of the cluster. The --no-voting flag is implicit when this flag is \ + enabled", + ), ) .arg( Arg::with_name("dev_halt_at_slot") @@ -203,30 +213,33 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { Arg::with_name("private_rpc") .long("private-rpc") .takes_value(false) - .help("Do not publish the RPC port for use by others") + .help("Do not publish the RPC port for use by others"), ) .arg( Arg::with_name("no_port_check") .long("no-port-check") .takes_value(false) .hidden(hidden_unless_forced()) - .help("Do not perform TCP/UDP reachable port checks at start-up") + .help("Do not perform TCP/UDP reachable port checks at start-up"), ) .arg( Arg::with_name("enable_rpc_transaction_history") .long("enable-rpc-transaction-history") .takes_value(false) - .help("Enable historical transaction info over JSON RPC, \ - including the 'getConfirmedBlock' API. \ - This will cause an increase in disk usage and IOPS"), + .help( + "Enable historical transaction info over JSON RPC, including the \ + 'getConfirmedBlock' API. 
This will cause an increase in disk usage and IOPS", + ), ) .arg( Arg::with_name("enable_rpc_bigtable_ledger_storage") .long("enable-rpc-bigtable-ledger-storage") .requires("enable_rpc_transaction_history") .takes_value(false) - .help("Fetch historical transaction info from a BigTable instance \ - as a fallback to local ledger data"), + .help( + "Fetch historical transaction info from a BigTable instance as a fallback to \ + local ledger data", + ), ) .arg( Arg::with_name("enable_bigtable_ledger_upload") @@ -240,8 +253,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("enable-extended-tx-metadata-storage") .requires("enable_rpc_transaction_history") .takes_value(false) - .help("Include CPI inner instructions, logs, and return data in \ - the historical transaction info stored"), + .help( + "Include CPI inner instructions, logs, and return data in the historical \ + transaction info stored", + ), ) .arg( Arg::with_name("rpc_max_multiple_accounts") @@ -249,8 +264,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("MAX ACCOUNTS") .takes_value(true) .default_value(&default_args.rpc_max_multiple_accounts) - .help("Override the default maximum accounts accepted by \ - the getMultipleAccounts JSON RPC method") + .help( + "Override the default maximum accounts accepted by the getMultipleAccounts \ + JSON RPC method", + ), ) .arg( Arg::with_name("health_check_slot_distance") @@ -258,9 +275,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("SLOT_DISTANCE") .takes_value(true) .default_value(&default_args.health_check_slot_distance) - .help("Report this validator healthy if its latest optimistically confirmed slot \ - that has been replayed is no further behind than this number of slots from \ - the cluster latest optimistically confirmed slot") + .help( + "Report this validator as healthy if its latest replayed optimistically \ + confirmed slot is within the specified number of slots from the cluster's \ + latest optimistically confirmed slot", + ), ) .arg( Arg::with_name("rpc_faucet_addr") @@ -276,9 +295,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("PATHS") .takes_value(true) .multiple(true) - .help("Comma separated persistent accounts location. \ + .help( + "Comma separated persistent accounts location. \ May be specified multiple times. 
\ - [default: /accounts]" + [default: /accounts]", ), ) .arg( @@ -294,14 +314,17 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("accounts-hash-cache-path") .value_name("PATH") .takes_value(true) - .help("Use PATH as accounts hash cache location [default: /accounts_hash_cache]"), + .help( + "Use PATH as accounts hash cache location \ + [default: /accounts_hash_cache]", + ), ) .arg( Arg::with_name("snapshots") .long("snapshots") .value_name("DIR") .takes_value(true) - .help("Use DIR as snapshot location [default: --ledger value]"), + .help("Use DIR as snapshot location [default: /snapshots]"), ) .arg( Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) @@ -310,7 +333,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) .default_value(use_snapshot_archives_at_startup::cli::default_value()) .help(use_snapshot_archives_at_startup::cli::HELP) - .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP) + .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP), ) .arg( Arg::with_name("incremental_snapshot_archive_path") @@ -318,7 +341,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .conflicts_with("no-incremental-snapshots") .value_name("DIR") .takes_value(true) - .help("Use DIR as separate location for incremental snapshot archives [default: --snapshots value]"), + .help( + "Use DIR as separate location for incremental snapshot archives \ + [default: --snapshots value]", + ), ) .arg( Arg::with_name("tower") @@ -343,7 +369,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .multiple(true) .validator(solana_net_utils::is_host_port) - .help("etcd gRPC endpoint to connect with") + .help("etcd gRPC endpoint to connect with"), ) .arg( Arg::with_name("etcd_domain_name") @@ -352,7 +378,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("DOMAIN") .default_value(&default_args.etcd_domain_name) .takes_value(true) - .help("domain name against which to verify the etcd server’s TLS certificate") + .help("domain name against which to verify the etcd server’s TLS certificate"), ) .arg( Arg::with_name("etcd_cacert_file") @@ -360,7 +386,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required_if("tower_storage", "etcd") .value_name("FILE") .takes_value(true) - .help("verify the TLS certificate of the etcd endpoint using this CA bundle") + .help("verify the TLS certificate of the etcd endpoint using this CA bundle"), ) .arg( Arg::with_name("etcd_key_file") @@ -368,7 +394,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required_if("tower_storage", "etcd") .value_name("FILE") .takes_value(true) - .help("TLS key file to use when establishing a connection to the etcd endpoint") + .help("TLS key file to use when establishing a connection to the etcd endpoint"), ) .arg( Arg::with_name("etcd_cert_file") @@ -376,7 +402,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required_if("tower_storage", "etcd") .value_name("FILE") .takes_value(true) - .help("TLS certificate to use when establishing a connection to the etcd endpoint") + .help("TLS certificate to use when establishing a connection to the etcd endpoint"), ) .arg( Arg::with_name("gossip_port") @@ -391,8 +417,10 @@ pub fn app<'a>(version: &'a str, 
default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .value_name("HOST")
                .takes_value(true)
                .validator(solana_net_utils::is_host)
-                .help("Gossip DNS name or IP address for the validator to advertise in gossip \
-                    [default: ask --entrypoint, or 127.0.0.1 when --entrypoint is not provided]"),
+                .help(
+                    "Gossip DNS name or IP address for the validator to advertise in gossip \
+                     [default: ask --entrypoint, or 127.0.0.1 when --entrypoint is not provided]",
+                ),
        )
        .arg(
            Arg::with_name("public_tpu_addr")
                .long("public-tpu-address")
                .alias("tpu-host-addr")
                .value_name("HOST:PORT")
                .takes_value(true)
                .validator(solana_net_utils::is_host_port)
-                .help("Specify TPU address to advertise in gossip [default: ask --entrypoint or localhost\
-                    when --entrypoint is not provided]"),
+                .help(
+                    "Specify TPU address to advertise in gossip \
+                     [default: ask --entrypoint or localhost when --entrypoint is not provided]",
+                ),
        )
        .arg(
            Arg::with_name("public_tpu_forwards_addr")
                .long("public-tpu-forwards-address")
                .value_name("HOST:PORT")
                .takes_value(true)
                .validator(solana_net_utils::is_host_port)
-                .help("Specify TPU Forwards address to advertise in gossip [default: ask --entrypoint or localhost\
-                    when --entrypoint is not provided]"),
+                .help(
+                    "Specify TPU Forwards address to advertise in gossip [default: ask \
+                     --entrypoint or localhost when --entrypoint is not provided]",
+                ),
        )
        .arg(
            Arg::with_name("public_rpc_addr")
                .long("public-rpc-address")
                .value_name("HOST:PORT")
                .takes_value(true)
                .conflicts_with("private_rpc")
                .validator(solana_net_utils::is_host_port)
-                .help("RPC address for the validator to advertise publicly in gossip. \
-                    Useful for validators running behind a load balancer or proxy \
-                    [default: use --rpc-bind-address / --rpc-port]"),
+                .help(
+                    "RPC address for the validator to advertise publicly in gossip. Useful for \
+                     validators running behind a load balancer or proxy [default: use \
+                     --rpc-bind-address / --rpc-port]",
+                ),
        )
        .arg(
            Arg::with_name("dynamic_port_range")
@@ -439,19 +473,21 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .value_name("NUMBER_OF_SLOTS")
                .takes_value(true)
                .default_value(&default_args.maximum_local_snapshot_age)
-                .help("Reuse a local snapshot if it's less than this many \
-                    slots behind the highest snapshot available for \
-                    download from other validators"),
+                .help(
+                    "Reuse a local snapshot if it's less than this many slots behind the highest \
+                     snapshot available for download from other validators",
+                ),
        )
        .arg(
            Arg::with_name("no_incremental_snapshots")
                .long("no-incremental-snapshots")
                .takes_value(false)
                .help("Disable incremental snapshots")
-                .long_help("Disable incremental snapshots by setting this flag. \
-                    When enabled, --snapshot-interval-slots will set the \
-                    incremental snapshot interval. To set the full snapshot \
-                    interval, use --full-snapshot-interval-slots.")
+                .long_help(
+                    "Disable incremental snapshots by setting this flag. When enabled, \
+                     --snapshot-interval-slots will set the incremental snapshot interval.
To set \ + the full snapshot interval, use --full-snapshot-interval-slots.", + ), ) .arg( Arg::with_name("incremental_snapshot_interval_slots") @@ -460,8 +496,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .takes_value(true) .default_value(&default_args.incremental_snapshot_archive_interval_slots) - .help("Number of slots between generating snapshots, \ - 0 to disable snapshots"), + .help("Number of slots between generating snapshots, 0 to disable snapshots"), ) .arg( Arg::with_name("full_snapshot_interval_slots") @@ -469,8 +504,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .takes_value(true) .default_value(&default_args.full_snapshot_archive_interval_slots) - .help("Number of slots between generating full snapshots. \ - Must be a multiple of the incremental snapshot interval.") + .help( + "Number of slots between generating full snapshots. Must be a multiple of the \ + incremental snapshot interval.", + ), ) .arg( Arg::with_name("maximum_full_snapshots_to_retain") @@ -480,7 +517,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .default_value(&default_args.maximum_full_snapshot_archives_to_retain) .validator(validate_maximum_full_snapshot_archives_to_retain) - .help("The maximum number of full snapshot archives to hold on to when purging older snapshots.") + .help( + "The maximum number of full snapshot archives to hold on to when purging \ + older snapshots.", + ), ) .arg( Arg::with_name("maximum_incremental_snapshots_to_retain") @@ -489,7 +529,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .default_value(&default_args.maximum_incremental_snapshot_archives_to_retain) .validator(validate_maximum_incremental_snapshot_archives_to_retain) - .help("The maximum number of incremental snapshot archives to hold on to when purging older snapshots.") + .help( + "The maximum number of incremental snapshot archives to hold on to when \ + purging older snapshots.", + ), ) .arg( Arg::with_name("snapshot_packager_niceness_adj") @@ -498,8 +541,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(solana_perf::thread::is_niceness_adjustment_valid) .default_value(&default_args.snapshot_packager_niceness_adjustment) - .help("Add this value to niceness of snapshot packager thread. Negative value \ - increases priority, positive value decreases priority.") + .help( + "Add this value to niceness of snapshot packager thread. Negative value \ + increases priority, positive value decreases priority.", + ), ) .arg( Arg::with_name("minimal_snapshot_download_speed") @@ -507,9 +552,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("MINIMAL_SNAPSHOT_DOWNLOAD_SPEED") .takes_value(true) .default_value(&default_args.min_snapshot_download_speed) - .help("The minimal speed of snapshot downloads measured in bytes/second. \ - If the initial download speed falls below this threshold, the system will \ - retry the download against a different rpc node."), + .help( + "The minimal speed of snapshot downloads measured in bytes/second. 
If the \ + initial download speed falls below this threshold, the system will retry the \ + download against a different rpc node.", + ), ) .arg( Arg::with_name("maximum_snapshot_download_abort") @@ -517,8 +564,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("MAXIMUM_SNAPSHOT_DOWNLOAD_ABORT") .takes_value(true) .default_value(&default_args.max_snapshot_download_abort) - .help("The maximum number of times to abort and retry when encountering a \ - slow snapshot download."), + .help( + "The maximum number of times to abort and retry when encountering a slow \ + snapshot download.", + ), ) .arg( Arg::with_name("contact_debug_interval") @@ -538,31 +587,31 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { Arg::with_name("no_os_network_limits_test") .hidden(hidden_unless_forced()) .long("no-os-network-limits-test") - .help("Skip checks for OS network limits.") + .help("Skip checks for OS network limits."), ) .arg( Arg::with_name("no_os_memory_stats_reporting") .long("no-os-memory-stats-reporting") .hidden(hidden_unless_forced()) - .help("Disable reporting of OS memory statistics.") + .help("Disable reporting of OS memory statistics."), ) .arg( Arg::with_name("no_os_network_stats_reporting") .long("no-os-network-stats-reporting") .hidden(hidden_unless_forced()) - .help("Disable reporting of OS network statistics.") + .help("Disable reporting of OS network statistics."), ) .arg( Arg::with_name("no_os_cpu_stats_reporting") .long("no-os-cpu-stats-reporting") .hidden(hidden_unless_forced()) - .help("Disable reporting of OS CPU statistics.") + .help("Disable reporting of OS CPU statistics."), ) .arg( Arg::with_name("no_os_disk_stats_reporting") .long("no-os-disk-stats-reporting") .hidden(hidden_unless_forced()) - .help("Disable reporting of OS disk statistics.") + .help("Disable reporting of OS disk statistics."), ) .arg( Arg::with_name("snapshot_version") @@ -590,12 +639,13 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .possible_values(&["level", "fifo"]) .default_value(&default_args.rocksdb_shred_compaction) - .help("Controls how RocksDB compacts shreds. \ - *WARNING*: You will lose your ledger data when you switch between options. \ - Possible values are: \ - 'level': stores shreds using RocksDB's default (level) compaction. \ - 'fifo': stores shreds under RocksDB's FIFO compaction. \ - This option is more efficient on disk-write-bytes of the ledger store."), + .help( + "Controls how RocksDB compacts shreds. *WARNING*: You will lose your \ + Blockstore data when you switch between options. Possible values are: \ + 'level': stores shreds using RocksDB's default (level) compaction. \ + 'fifo': stores shreds under RocksDB's FIFO compaction. This option is more \ + efficient on disk-write-bytes of the Blockstore.", + ), ) .arg( Arg::with_name("rocksdb_fifo_shred_storage_size") @@ -603,13 +653,13 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("SHRED_STORAGE_SIZE_BYTES") .takes_value(true) .validator(is_parsable::) - .help("The shred storage size in bytes. \ - The suggested value is at least 50% of your ledger storage size. \ - If this argument is unspecified, we will assign a proper \ - value based on --limit-ledger-size. 
If --limit-ledger-size \ - is not presented, it means there is no limitation on the ledger \ - size and thus rocksdb_fifo_shred_storage_size will also be \ - unbounded."), + .help( + "The shred storage size in bytes. The suggested value is at least 50% of your \ + ledger storage size. If this argument is unspecified, we will assign a \ + proper value based on --limit-ledger-size. If --limit-ledger-size is not \ + presented, it means there is no limitation on the ledger size and thus \ + rocksdb_fifo_shred_storage_size will also be unbounded.", + ), ) .arg( Arg::with_name("rocksdb_ledger_compression") @@ -619,9 +669,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .possible_values(&["none", "lz4", "snappy", "zlib"]) .default_value(&default_args.rocksdb_ledger_compression) - .help("The compression algorithm that is used to compress \ - transaction status data. \ - Turning on compression can save ~10% of the ledger size."), + .help( + "The compression algorithm that is used to compress transaction status data. \ + Turning on compression can save ~10% of the ledger size.", + ), ) .arg( Arg::with_name("rocksdb_perf_sample_interval") @@ -631,8 +682,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(is_parsable::) .default_value(&default_args.rocksdb_perf_sample_interval) - .help("Controls how often RocksDB read/write performance sample is collected. \ - Reads/writes perf samples are collected in 1 / ROCKS_PERF_SAMPLE_INTERVAL sampling rate."), + .help( + "Controls how often RocksDB read/write performance samples are collected. \ + Perf samples are collected in 1 / ROCKS_PERF_SAMPLE_INTERVAL sampling rate.", + ), ) .arg( Arg::with_name("skip_startup_ledger_verification") @@ -682,9 +735,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("log") .value_name("FILE") .takes_value(true) - .help("Redirect logging to the specified file, '-' for standard error. \ - Sending the SIGUSR1 signal to the validator process will cause it \ - to re-open the log file"), + .help( + "Redirect logging to the specified file, '-' for standard error. Sending the \ + SIGUSR1 signal to the validator process will cause it to re-open the log file", + ), ) .arg( Arg::with_name("wait_for_supermajority") @@ -692,16 +746,20 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .requires("expected_bank_hash") .value_name("SLOT") .validator(is_slot) - .help("After processing the ledger and the next slot is SLOT, wait until a \ - supermajority of stake is visible on gossip before starting PoH"), + .help( + "After processing the ledger and the next slot is SLOT, wait until a \ + supermajority of stake is visible on gossip before starting PoH", + ), ) .arg( Arg::with_name("no_wait_for_vote_to_start_leader") .hidden(hidden_unless_forced()) .long("no-wait-for-vote-to-start-leader") - .help("If the validator starts up with no ledger, it will wait to start block + .help( + "If the validator starts up with no ledger, it will wait to start block production until it sees a vote land in a rooted slot. This prevents - double signing. Turn off to risk double signing a block."), + double signing. 
Turn off to risk double signing a block.",
+                ),
        )
        .arg(
            Arg::with_name("hard_forks")
                .long("hard-fork")
                .value_name("SLOT")
@@ -720,8 +778,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .value_name("VALIDATOR IDENTITY")
                .multiple(true)
                .takes_value(true)
-                .help("A snapshot hash must be published in gossip by this validator to be accepted. \
-                    May be specified multiple times. If unspecified any snapshot hash will be accepted"),
+                .help(
+                    "A snapshot hash must be published in gossip by this validator to be \
+                     accepted. May be specified multiple times. If unspecified any snapshot hash \
+                     will be accepted",
+                ),
        )
        .arg(
            Arg::with_name("debug_key")
@@ -738,7 +799,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .long("only-known-rpc")
                .takes_value(false)
                .requires("known_validators")
-                .help("Use the RPC service of known validators only")
+                .help("Use the RPC service of known validators only"),
        )
        .arg(
            Arg::with_name("repair_validators")
                .long("repair-validators")
                .validator(is_pubkey)
                .value_name("VALIDATOR IDENTITY")
                .multiple(true)
                .takes_value(true)
-                .help("A list of validators to request repairs from. If specified, repair will not \
-                    request from validators outside this set [default: all validators]")
+                .help(
+                    "A list of validators to request repairs from. If specified, repair will not \
+                     request from validators outside this set [default: all validators]",
+                ),
        )
        .arg(
            Arg::with_name("repair_whitelist")
                .hidden(hidden_unless_forced())
                .long("repair-whitelist")
                .validator(is_pubkey)
                .value_name("VALIDATOR IDENTITY")
                .multiple(true)
                .takes_value(true)
-                .help("A list of validators to prioritize repairs from. If specified, repair requests \
-                    from validators in the list will be prioritized over requests from other validators. \
-                    [default: all validators]")
+                .help(
+                    "A list of validators to prioritize repairs from. If specified, repair \
+                     requests from validators in the list will be prioritized over requests from \
+                     other validators. [default: all validators]",
+                ),
        )
        .arg(
            Arg::with_name("gossip_validators")
                .long("gossip-validators")
                .validator(is_pubkey)
                .value_name("VALIDATOR IDENTITY")
                .multiple(true)
                .takes_value(true)
-                .help("A list of validators to gossip with. If specified, gossip \
-                    will not push/pull from from validators outside this set. \
-                    [default: all validators]")
+                .help(
+                    "A list of validators to gossip with. If specified, gossip will not \
+                     push/pull from validators outside this set. [default: all validators]",
+                ),
        )
        .arg(
            Arg::with_name("tpu_coalesce_ms")
@@ -814,11 +880,13 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .long("staked-nodes-overrides")
                .value_name("PATH")
                .takes_value(true)
-                .help("Provide path to a yaml file with custom overrides for stakes of specific
-                    identities. Overriding the amount of stake this validator considers
-                    as valid for other peers in network. The stake amount is used for calculating
-                    number of QUIC streams permitted from the peer and vote packet sender stage.
-                    Format of the file: `staked_map_id: {<pubkey>: <SOL stake amount>}"),
+                .help(
+                    "Provide path to a yaml file with custom overrides for stakes of specific \
+                     identities. Overriding the amount of stake this validator considers as valid \
+                     for other peers in the network.
The stake amount is used for calculating the \
+                     number of QUIC streams permitted from the peer and vote packet sender stage. \
+                     Format of the file: `staked_map_id: {<pubkey>: <SOL stake amount>}",
+                ),
        )
        .arg(
            Arg::with_name("bind_address")
@@ -835,7 +903,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .value_name("HOST")
                .takes_value(true)
                .validator(solana_net_utils::is_host)
-                .help("IP address to bind the RPC port [default: 127.0.0.1 if --private-rpc is present, otherwise use --bind-address]"),
+                .help(
+                    "IP address to bind the RPC port [default: 127.0.0.1 if --private-rpc is \
+                     present, otherwise use --bind-address]",
+                ),
        )
        .arg(
            Arg::with_name("rpc_threads")
@@ -853,8 +924,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .takes_value(true)
                .validator(solana_perf::thread::is_niceness_adjustment_valid)
                .default_value(&default_args.rpc_niceness_adjustment)
-                .help("Add this value to niceness of RPC threads. Negative value \
-                    increases priority, positive value decreases priority.")
+                .help(
+                    "Add this value to niceness of RPC threads. Negative value increases \
+                     priority, positive value decreases priority.",
+                ),
        )
        .arg(
            Arg::with_name("rpc_bigtable_timeout")
@@ -871,7 +944,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .takes_value(true)
                .value_name("INSTANCE_NAME")
                .default_value(&default_args.rpc_bigtable_instance_name)
-                .help("Name of the Bigtable instance to upload to")
+                .help("Name of the Bigtable instance to upload to"),
        )
        .arg(
            Arg::with_name("rpc_bigtable_app_profile_id")
                .long("rpc-bigtable-app-profile-id")
                .takes_value(true)
                .value_name("APP_PROFILE_ID")
                .default_value(&default_args.rpc_bigtable_app_profile_id)
-                .help("Bigtable application profile id to use in requests")
+                .help("Bigtable application profile id to use in requests"),
        )
        .arg(
            Arg::with_name("rpc_bigtable_max_message_size")
@@ -919,9 +992,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .takes_value(true)
                .validator(is_parsable::)
                .hidden(hidden_unless_forced())
-                .help("The maximum number of connections that RPC PubSub will support. \
-                    This is a hard limit and no new connections beyond this limit can \
-                    be made until an old connection is dropped. (Obsolete)"),
+                .help(
+                    "The maximum number of connections that RPC PubSub will support. This is a \
+                     hard limit and no new connections beyond this limit can be made until an old \
+                     connection is dropped. (Obsolete)",
+                ),
        )
        .arg(
            Arg::with_name("rpc_pubsub_max_fragment_size")
                .long("rpc-pubsub-max-fragment-size")
                .takes_value(true)
                .validator(is_parsable::)
                .hidden(hidden_unless_forced())
-                .help("The maximum length in bytes of acceptable incoming frames. Messages longer \
-                    than this will be rejected. (Obsolete)"),
+                .help(
+                    "The maximum length in bytes of acceptable incoming frames. Messages longer \
+                     than this will be rejected. (Obsolete)",
+                ),
        )
        .arg(
            Arg::with_name("rpc_pubsub_max_in_buffer_capacity")
                .long("rpc-pubsub-max-in-buffer-capacity")
                .takes_value(true)
                .validator(is_parsable::)
                .hidden(hidden_unless_forced())
-                .help("The maximum size in bytes to which the incoming websocket buffer can grow.
\ + (Obsolete)", + ), ) .arg( Arg::with_name("rpc_pubsub_max_out_buffer_capacity") @@ -950,8 +1029,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(is_parsable::) .hidden(hidden_unless_forced()) - .help("The maximum size in bytes to which the outgoing websocket buffer can grow. \ - (Obsolete)"), + .help( + "The maximum size in bytes to which the outgoing websocket buffer can grow. \ + (Obsolete)", + ), ) .arg( Arg::with_name("rpc_pubsub_max_active_subscriptions") @@ -960,8 +1041,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .validator(is_parsable::) .default_value(&default_args.rpc_pubsub_max_active_subscriptions) - .help("The maximum number of active subscriptions that RPC PubSub will accept \ - across all connections."), + .help( + "The maximum number of active subscriptions that RPC PubSub will accept \ + across all connections.", + ), ) .arg( Arg::with_name("rpc_pubsub_queue_capacity_items") @@ -970,8 +1053,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .validator(is_parsable::) .default_value(&default_args.rpc_pubsub_queue_capacity_items) - .help("The maximum number of notifications that RPC PubSub will store \ - across all connections."), + .help( + "The maximum number of notifications that RPC PubSub will store across all \ + connections.", + ), ) .arg( Arg::with_name("rpc_pubsub_queue_capacity_bytes") @@ -980,8 +1065,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("BYTES") .validator(is_parsable::) .default_value(&default_args.rpc_pubsub_queue_capacity_bytes) - .help("The maximum total size of notifications that RPC PubSub will store \ - across all connections."), + .help( + "The maximum total size of notifications that RPC PubSub will store across \ + all connections.", + ), ) .arg( Arg::with_name("rpc_pubsub_notification_threads") @@ -990,8 +1077,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .value_name("NUM_THREADS") .validator(is_parsable::) - .help("The maximum number of threads that RPC PubSub will use \ - for generating notifications. 0 will disable RPC PubSub notifications"), + .help( + "The maximum number of threads that RPC PubSub will use for generating \ + notifications. 
0 will disable RPC PubSub notifications", + ), ) .arg( Arg::with_name("rpc_send_transaction_retry_ms") @@ -1019,7 +1108,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(is_parsable::) .default_value(&default_args.rpc_send_transaction_leader_forward_count) - .help("The number of upcoming leaders to which to forward transactions sent via rpc service."), + .help( + "The number of upcoming leaders to which to forward transactions sent via rpc \ + service.", + ), ) .arg( Arg::with_name("rpc_send_transaction_default_max_retries") @@ -1027,7 +1119,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .takes_value(true) .validator(is_parsable::) - .help("The maximum number of transaction broadcast retries when unspecified by the request, otherwise retried until expiration."), + .help( + "The maximum number of transaction broadcast retries when unspecified by the \ + request, otherwise retried until expiration.", + ), ) .arg( Arg::with_name("rpc_send_transaction_service_max_retries") @@ -1036,7 +1131,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(is_parsable::) .default_value(&default_args.rpc_send_transaction_service_max_retries) - .help("The maximum number of transaction broadcast retries, regardless of requested value."), + .help( + "The maximum number of transaction broadcast retries, regardless of requested \ + value.", + ), ) .arg( Arg::with_name("rpc_send_transaction_batch_size") @@ -1055,7 +1153,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(is_parsable::) .default_value(&default_args.rpc_send_transaction_retry_pool_max_size) - .help("The maximum size of transactions retry pool.") + .help("The maximum size of transactions retry pool."), ) .arg( Arg::with_name("rpc_scan_and_fix_roots") @@ -1087,7 +1185,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .validator(solana_net_utils::is_host) .hidden(hidden_unless_forced()) - .help("IP address to bind the AccountsDb Replication port [default: use --bind-address]"), + .help( + "IP address to bind the AccountsDb Replication port [default: use \ + --bind-address]", + ), ) .arg( Arg::with_name("accountsdb_repl_port") @@ -1133,9 +1234,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .takes_value(true) .default_value(&default_args.genesis_archive_unpacked_size) - .help( - "maximum total uncompressed file size of downloaded genesis archive", - ), + .help("maximum total uncompressed file size of downloaded genesis archive"), ) .arg( Arg::with_name("wal_recovery_mode") @@ -1146,10 +1245,9 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { "tolerate_corrupted_tail_records", "absolute_consistency", "point_in_time", - "skip_any_corrupted_record"]) - .help( - "Mode to recovery the ledger db write ahead log." 
-                ),
+                    "skip_any_corrupted_record",
+                ])
+                .help("Mode to recover the ledger db write ahead log."),
        )
        .arg(
            Arg::with_name("poh_pinned_cpu_core")
                .hidden(hidden_unless_forced())
                .long("experimental-poh-pinned-cpu-core")
                .takes_value(true)
                .value_name("CPU_CORE_INDEX")
                .validator(|s| {
                    let core_index = usize::from_str(&s).map_err(|e| e.to_string())?;
-                    let max_index = core_affinity::get_core_ids().map(|cids| cids.len() - 1).unwrap_or(0);
+                    let max_index = core_affinity::get_core_ids()
+                        .map(|cids| cids.len() - 1)
+                        .unwrap_or(0);
                    if core_index > max_index {
                        return Err(format!("core index must be in the range [0, {max_index}]"));
                    }
@@ -1179,7 +1279,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
            Arg::with_name("process_ledger_before_services")
                .long("process-ledger-before-services")
                .hidden(hidden_unless_forced())
-                .help("Process the local ledger fully before starting networking services")
+                .help("Process the local ledger fully before starting networking services"),
        )
        .arg(
            Arg::with_name("account_indexes")
@@ -1207,40 +1307,52 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                .conflicts_with("account_index_exclude_key")
                .multiple(true)
                .value_name("KEY")
-                .help("When account indexes are enabled, only include specific keys in the index. This overrides --account-index-exclude-key."),
+                .help(
+                    "When account indexes are enabled, only include specific keys in the index. \
+                     This overrides --account-index-exclude-key.",
+                ),
        )
        .arg(
            Arg::with_name("accounts_db_verify_refcounts")
                .long("accounts-db-verify-refcounts")
-                .help("Debug option to scan all append vecs and verify account index refcounts prior to clean")
-                .hidden(hidden_unless_forced())
+                .help(
+                    "Debug option to scan all append vecs and verify account index refcounts \
+                     prior to clean",
+                )
+                .hidden(hidden_unless_forced()),
        )
        .arg(
            Arg::with_name("accounts_db_test_skip_rewrites")
                .long("accounts-db-test-skip-rewrites")
-                .help("Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation")
-                .hidden(hidden_unless_forced())
+                .help(
+                    "Debug option to skip rewrites for rent-exempt accounts but still add them in \
+                     bank delta hash calculation",
+                )
+                .hidden(hidden_unless_forced()),
        )
        .arg(
            Arg::with_name("no_skip_initial_accounts_db_clean")
                .long("no-skip-initial-accounts-db-clean")
                .help("Do not skip the initial cleaning of accounts when verifying snapshot bank")
                .hidden(hidden_unless_forced())
-                .conflicts_with("accounts_db_skip_shrink")
+                .conflicts_with("accounts_db_skip_shrink"),
        )
        .arg(
            Arg::with_name("accounts_db_create_ancient_storage_packed")
                .long("accounts-db-create-ancient-storage-packed")
                .help("Create ancient storages in one shot instead of appending.")
                .hidden(hidden_unless_forced()),
-            )
+        )
        .arg(
            Arg::with_name("accounts_db_ancient_append_vecs")
                .long("accounts-db-ancient-append-vecs")
                .value_name("SLOT-OFFSET")
                .validator(is_parsable::)
                .takes_value(true)
-                .help("AppendVecs that are older than (slots_per_epoch - SLOT-OFFSET) are squashed together.")
+                .help(
+                    "AppendVecs that are older than (slots_per_epoch - SLOT-OFFSET) are squashed \
+                     together.",
+                )
                .hidden(hidden_unless_forced()),
        )
        .arg(
            Arg::with_name("accounts_db_cache_limit_mb")
                .long("accounts-db-cache-limit-mb")
                .value_name("MEGABYTES")
                .validator(is_parsable::)
                .takes_value(true)
-                .help("How large the write cache for account data can become.
If this is exceeded, the cache is flushed more aggressively."), + .help( + "How large the write cache for account data can become. If this is exceeded, \ + the cache is flushed more aggressively.", + ), ) .arg( Arg::with_name("accounts_index_scan_results_limit_mb") @@ -1257,7 +1372,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("MEGABYTES") .validator(is_parsable::) .takes_value(true) - .help("How large accumulated results from an accounts index scan can become. If this is exceeded, the scan aborts."), + .help( + "How large accumulated results from an accounts index scan can become. If \ + this is exceeded, the scan aborts.", + ), ) .arg( Arg::with_name("accounts_index_memory_limit_mb") @@ -1265,7 +1383,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("MEGABYTES") .validator(is_parsable::) .takes_value(true) - .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk."), + .help( + "How much memory the accounts index can consume. If this is exceeded, some \ + account index entries will be stored on disk.", + ), ) .arg( Arg::with_name("accounts_index_bins") @@ -1279,16 +1400,24 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { Arg::with_name("partitioned_epoch_rewards_compare_calculation") .long("partitioned-epoch-rewards-compare-calculation") .takes_value(false) - .help("Do normal epoch rewards distribution, but also calculate rewards using the partitioned rewards code path and compare the resulting vote and stake accounts") - .hidden(hidden_unless_forced()) + .help( + "Do normal epoch rewards distribution, but also calculate rewards using the \ + partitioned rewards code path and compare the resulting vote and stake \ + accounts", + ) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") .long("partitioned-epoch-rewards-force-enable-single-slot") .takes_value(false) - .help("Force the partitioned rewards distribution, but distribute all rewards in the first slot in the epoch. This should match consensus with the normal rewards distribution.") + .help( + "Force the partitioned rewards distribution, but distribute all rewards in \ + the first slot in the epoch. This should match consensus with the normal \ + rewards distribution.", + ) .conflicts_with("partitioned_epoch_rewards_compare_calculation") - .hidden(hidden_unless_forced()) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("accounts_index_path") @@ -1305,8 +1434,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .arg( Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") - .help("Enables testing of hash calculation using stores in \ - AccountsHashVerifier. This has a computational cost."), + .help( + "Enables testing of hash calculation using stores in AccountsHashVerifier. 
\
                     This has a computational cost.",
                ),
        )
        .arg(
            Arg::with_name("accounts_shrink_optimize_total_space")
                .long("accounts-shrink-optimize-total-space")
                .takes_value(true)
                .value_name("BOOLEAN")
                .default_value(&default_args.accounts_shrink_optimize_total_space)
-                .help("When this is set to true, the system will shrink the most \
-                    sparse accounts and when the overall shrink ratio is above \
-                    the specified accounts-shrink-ratio, the shrink will stop and \
-                    it will skip all other less sparse accounts."),
+                .help(
+                    "When this is set to true, the system will shrink the most sparse accounts \
+                     and when the overall shrink ratio is above the specified \
+                     accounts-shrink-ratio, the shrink will stop and it will skip all other less \
+                     sparse accounts.",
+                ),
        )
        .arg(
            Arg::with_name("accounts_shrink_ratio")
                .long("accounts-shrink-ratio")
                .takes_value(true)
                .value_name("RATIO")
                .default_value(&default_args.accounts_shrink_ratio)
-                .help("Specifies the shrink ratio for the accounts to be shrunk. \
-                    The shrink ratio is defined as the ratio of the bytes alive over the \
-                    total bytes used. If the account's shrink ratio is less than this ratio \
-                    it becomes a candidate for shrinking. The value must between 0. and 1.0 \
-                    inclusive."),
+                .help(
+                    "Specifies the shrink ratio for the accounts to be shrunk. The shrink ratio \
+                     is defined as the ratio of the bytes alive over the total bytes used. If \
+                     the account's shrink ratio is less than this ratio it becomes a candidate \
+                     for shrinking. The value must be between 0.0 and 1.0 inclusive.",
+                ),
        )
        .arg(
            Arg::with_name("allow_private_addr")
                .long("allow-private-addr")
                .takes_value(false)
                .help("Allow contacting private ip addresses")
                .hidden(hidden_unless_forced()),
        )
        .arg(
            Arg::with_name("log_messages_bytes_limit")
                .long("log-messages-bytes-limit")
                .takes_value(true)
                .validator(is_parsable::)
                .value_name("BYTES")
-                .help("Maximum number of bytes written to the program log before truncation")
+                .help("Maximum number of bytes written to the program log before truncation"),
        )
        .arg(
            Arg::with_name("replay_slots_concurrently")
                .long("replay-slots-concurrently")
-                .help("Allow concurrent replay of slots on different forks")
+                .help("Allow concurrent replay of slots on different forks"),
        )
        .arg(
            Arg::with_name("banking_trace_dir_byte_limit")
                // expose friendly alternative name to cli than internal
                // implementation-oriented one
                .long("enable-banking-trace")
                .value_name("BYTES")
                .validator(is_parsable::)
                .takes_value(true)
                // Firstly, zero limit value causes tracer to be disabled
                // altogether, intuitively. On the other hand, this non-zero
                // default doesn't enable banking tracing unless this flag is
                // explicitly given, similar to --limit-ledger-size.
                // see configure_banking_trace_dir_byte_limit() for this.
                .default_value(&default_args.banking_trace_dir_byte_limit)
-                .help("Enables the banking trace explicitly, which is enabled by default and \
-                    writes trace files for simulate-leader-blocks, retaining up to the default \
-                    or specified total bytes in the ledger. This flag can be used to override \
-                    its byte limit.")
+                .help(
+                    "Enables the banking trace explicitly, which is enabled by default and writes \
+                     trace files for simulate-leader-blocks, retaining up to the default or \
+                     specified total bytes in the ledger.
This flag can be used to override its \ + byte limit.", + ), ) .arg( Arg::with_name("disable_banking_trace") .long("disable-banking-trace") .conflicts_with("banking_trace_dir_byte_limit") .takes_value(false) - .help("Disables the banking trace") + .help("Disables the banking trace"), ) .arg( Arg::with_name("block_verification_method") @@ -1384,7 +1520,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("METHOD") .takes_value(true) .possible_values(BlockVerificationMethod::cli_names()) - .help(BlockVerificationMethod::cli_message()) + .help(BlockVerificationMethod::cli_message()), ) .arg( Arg::with_name("block_production_method") @@ -1392,7 +1528,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("METHOD") .takes_value(true) .possible_values(BlockProductionMethod::cli_names()) - .help(BlockProductionMethod::cli_message()) + .help(BlockProductionMethod::cli_message()), ) .arg( Arg::with_name("wen_restart") @@ -1421,7 +1557,8 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { If wen_restart fails, refer to the progress file (in proto3 format) for further debugging. - ") + ", + ), ) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") @@ -1433,14 +1570,17 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .short("f") .long("force") .takes_value(false) - .help("Request the validator exit immediately instead of waiting for a restart window") + .help( + "Request the validator exit immediately instead of waiting for a \ + restart window", + ), ) .arg( Arg::with_name("monitor") .short("m") .long("monitor") .takes_value(false) - .help("Monitor the validator after sending the exit request") + .help("Monitor the validator after sending the exit request"), ) .arg( Arg::with_name("min_idle_time") @@ -1449,7 +1589,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .validator(is_parsable::) .value_name("MINUTES") .default_value(&default_args.exit_min_idle_time) - .help("Minimum time that the validator should not be leader before restarting") + .help( + "Minimum time that the validator should not be leader before \ + restarting", + ), ) .arg( Arg::with_name("max_delinquent_stake") @@ -1458,18 +1601,18 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .validator(is_valid_percentage) .default_value(&default_args.exit_max_delinquent_stake) .value_name("PERCENT") - .help("The maximum delinquent stake % permitted for an exit") + .help("The maximum delinquent stake % permitted for an exit"), ) .arg( Arg::with_name("skip_new_snapshot_check") .long("skip-new-snapshot-check") - .help("Skip check for a new snapshot") + .help("Skip check for a new snapshot"), ) .arg( Arg::with_name("skip_health_check") .long("skip-health-check") - .help("Skip health check") - ) + .help("Skip health check"), + ), ) .subcommand( SubCommand::with_name("authorized-voter") @@ -1486,18 +1629,24 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required(false) .takes_value(true) .validator(is_keypair) - .help("Path to keypair of the authorized voter to add \ - [default: read JSON keypair from stdin]"), + .help( + "Path to keypair of the authorized voter to add [default: \ + read JSON keypair from stdin]", + ), ) - .after_help("Note: the new authorized voter only applies to the \ - currently running validator instance") + .after_help( + "Note: the new 
authorized voter only applies to the currently running \ + validator instance", + ), ) .subcommand( SubCommand::with_name("remove-all") .about("Remove all authorized voters") - .after_help("Note: the removal only applies to the \ - currently running validator instance") - ) + .after_help( + "Note: the removal only applies to the currently running validator \ + instance", + ), + ), ) .subcommand( SubCommand::with_name("contact-info") @@ -1508,10 +1657,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .value_name("MODE") .possible_values(&["json", "json-compact"]) - .help("Output display mode") - ) + .help("Output display mode"), + ), ) - .subcommand(SubCommand::with_name("repair-shred-from-peer") + .subcommand( + SubCommand::with_name("repair-shred-from-peer") .about("Request a repair from the specified validator") .arg( Arg::with_name("pubkey") @@ -1520,7 +1670,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required(false) .takes_value(true) .validator(is_pubkey) - .help("Identity pubkey of the validator to repair from") + .help("Identity pubkey of the validator to repair from"), ) .arg( Arg::with_name("slot") @@ -1528,7 +1678,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("SLOT") .takes_value(true) .validator(is_parsable::) - .help("Slot to repair") + .help("Slot to repair"), ) .arg( Arg::with_name("shred") @@ -1536,8 +1686,8 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("SHRED") .takes_value(true) .validator(is_parsable::) - .help("Shred to repair") - ) + .help("Shred to repair"), + ), ) .subcommand( SubCommand::with_name("repair-whitelist") @@ -1553,8 +1703,8 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .value_name("MODE") .possible_values(&["json", "json-compact"]) - .help("Output display mode") - ) + .help("Output display mode"), + ), ) .subcommand( SubCommand::with_name("set") @@ -1562,76 +1712,65 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .setting(AppSettings::ArgRequiredElseHelp) .arg( Arg::with_name("whitelist") - .long("whitelist") - .validator(is_pubkey) - .value_name("VALIDATOR IDENTITY") - .multiple(true) - .takes_value(true) - .help("Set the validator's repair protocol whitelist") + .long("whitelist") + .validator(is_pubkey) + .value_name("VALIDATOR IDENTITY") + .multiple(true) + .takes_value(true) + .help("Set the validator's repair protocol whitelist"), ) - .after_help("Note: repair protocol whitelist changes only apply to the currently \ - running validator instance") + .after_help( + "Note: repair protocol whitelist changes only apply to the currently \ + running validator instance", + ), ) .subcommand( SubCommand::with_name("remove-all") .about("Clear the validator's repair protocol whitelist") - .after_help("Note: repair protocol whitelist changes only apply to the currently \ - running validator instance") - ) - ) - .subcommand( - SubCommand::with_name("init") - .about("Initialize the ledger directory then exit") - ) - .subcommand( - SubCommand::with_name("monitor") - .about("Monitor the validator") + .after_help( + "Note: repair protocol whitelist changes only apply to the currently \ + running validator instance", + ), + ), ) .subcommand( - SubCommand::with_name("run") - .about("Run the validator") + SubCommand::with_name("init").about("Initialize the ledger directory then 
exit"), ) + .subcommand(SubCommand::with_name("monitor").about("Monitor the validator")) + .subcommand(SubCommand::with_name("run").about("Run the validator")) .subcommand( SubCommand::with_name("plugin") .about("Manage and view geyser plugins") .setting(AppSettings::SubcommandRequiredElseHelp) .setting(AppSettings::InferSubcommands) .subcommand( - SubCommand::with_name("list") - .about("List all current running gesyer plugins") + SubCommand::with_name("list").about("List all current running gesyer plugins"), ) .subcommand( SubCommand::with_name("unload") - .about("Unload a particular gesyer plugin. You must specify the gesyer plugin name") - .arg( - Arg::with_name("name") - .required(true) - .takes_value(true) + .about( + "Unload a particular gesyer plugin. You must specify the gesyer \ + plugin name", ) + .arg(Arg::with_name("name").required(true).takes_value(true)), ) .subcommand( SubCommand::with_name("reload") - .about("Reload a particular gesyer plugin. You must specify the gesyer plugin name and the new config path") - .arg( - Arg::with_name("name") - .required(true) - .takes_value(true) - ) - .arg( - Arg::with_name("config") - .required(true) - .takes_value(true) + .about( + "Reload a particular gesyer plugin. You must specify the gesyer \ + plugin name and the new config path", ) + .arg(Arg::with_name("name").required(true).takes_value(true)) + .arg(Arg::with_name("config").required(true).takes_value(true)), ) .subcommand( SubCommand::with_name("load") - .about("Load a new gesyer plugin. You must specify the config path. Fails if overwriting (use reload)") - .arg( - Arg::with_name("config") - .required(true) - .takes_value(true) + .about( + "Load a new gesyer plugin. You must specify the config path. Fails if \ + overwriting (use reload)", ) - ) + .arg(Arg::with_name("config").required(true).takes_value(true)), + ), ) .subcommand( SubCommand::with_name("set-identity") @@ -1643,28 +1782,36 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required(false) .takes_value(true) .validator(is_keypair) - .help("Path to validator identity keypair \ - [default: read JSON keypair from stdin]") + .help( + "Path to validator identity keypair [default: read JSON keypair from \ + stdin]", + ), ) .arg( clap::Arg::with_name("require_tower") .long("require-tower") .takes_value(false) - .help("Refuse to set the validator identity if saved tower state is not found"), + .help( + "Refuse to set the validator identity if saved tower state is not \ + found", + ), ) - .after_help("Note: the new identity only applies to the \ - currently running validator instance") + .after_help( + "Note: the new identity only applies to the currently running validator \ + instance", + ), ) .subcommand( SubCommand::with_name("set-log-filter") .about("Adjust the validator log filter") .arg( - Arg::with_name("filter") - .takes_value(true) - .index(1) - .help("New filter using the same format as the RUST_LOG environment variable") + Arg::with_name("filter").takes_value(true).index(1).help( + "New filter using the same format as the RUST_LOG environment variable", + ), ) - .after_help("Note: the new filter only applies to the currently running validator instance") + .after_help( + "Note: the new filter only applies to the currently running validator instance", + ), ) .subcommand( SubCommand::with_name("staked-nodes-overrides") @@ -1674,10 +1821,15 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("PATH") .takes_value(true) .required(true) - 
.help("Provide path to a file with custom overrides for stakes of specific validator identities."), + .help( + "Provide path to a file with custom overrides for stakes of specific \ + validator identities.", + ), ) - .after_help("Note: the new staked nodes overrides only applies to the \ - currently running validator instance") + .after_help( + "Note: the new staked nodes overrides only applies to the currently running \ + validator instance", + ), ) .subcommand( SubCommand::with_name("wait-for-restart-window") @@ -1689,7 +1841,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .validator(is_parsable::) .value_name("MINUTES") .default_value(&default_args.wait_for_restart_window_min_idle_time) - .help("Minimum time that the validator should not be leader before restarting") + .help( + "Minimum time that the validator should not be leader before \ + restarting", + ), ) .arg( Arg::with_name("identity") @@ -1697,7 +1852,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("ADDRESS") .takes_value(true) .validator(is_pubkey_or_keypair) - .help("Validator identity to monitor [default: your validator]") + .help("Validator identity to monitor [default: your validator]"), ) .arg( Arg::with_name("max_delinquent_stake") @@ -1706,22 +1861,24 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .validator(is_valid_percentage) .default_value(&default_args.wait_for_restart_window_max_delinquent_stake) .value_name("PERCENT") - .help("The maximum delinquent stake % permitted for a restart") + .help("The maximum delinquent stake % permitted for a restart"), ) .arg( Arg::with_name("skip_new_snapshot_check") .long("skip-new-snapshot-check") - .help("Skip check for a new snapshot") + .help("Skip check for a new snapshot"), ) .arg( Arg::with_name("skip_health_check") .long("skip-health-check") - .help("Skip health check") + .help("Skip health check"), ) - .after_help("Note: If this command exits with a non-zero status \ - then this not a good time for a restart") - ). - subcommand( + .after_help( + "Note: If this command exits with a non-zero status then this not a good time \ + for a restart", + ), + ) + .subcommand( SubCommand::with_name("set-public-address") .about("Specify addresses to advertise in gossip") .arg( @@ -1730,7 +1887,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("HOST:PORT") .takes_value(true) .validator(solana_net_utils::is_host_port) - .help("TPU address to advertise in gossip") + .help("TPU address to advertise in gossip"), ) .arg( Arg::with_name("tpu_forwards_addr") @@ -1738,13 +1895,13 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("HOST:PORT") .takes_value(true) .validator(solana_net_utils::is_host_port) - .help("TPU Forwards address to advertise in gossip") + .help("TPU Forwards address to advertise in gossip"), ) .group( ArgGroup::with_name("set_public_address_details") .args(&["tpu_addr", "tpu_forwards_addr"]) .required(true) - .multiple(true) + .multiple(true), ) .after_help("Note: At least one arg must be used. 
Using multiple is ok"), ); @@ -1854,7 +2011,10 @@ fn deprecated_arguments() -> Vec { .long("halt-on-known-validators-accounts-hash-mismatch") .requires("known_validators") .takes_value(false) - .help("Abort the validator if a bank hash mismatch is detected within known validator set"), + .help( + "Abort the validator if a bank hash mismatch is detected within known validator \ + set" + ), ); add_arg!(Arg::with_name("incremental_snapshots") .long("incremental-snapshots") @@ -1863,7 +2023,7 @@ fn deprecated_arguments() -> Vec { .help("Enable incremental snapshots") .long_help( "Enable incremental snapshots by setting this flag. When enabled, \ - --snapshot-interval-slots will set the incremental snapshot interval. To set the + --snapshot-interval-slots will set the incremental snapshot interval. To set the full snapshot interval, use --full-snapshot-interval-slots.", )); add_arg!(Arg::with_name("minimal_rpc_api") @@ -2166,7 +2326,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .validator(is_url_or_moniker) .help( "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + [mainnet-beta, testnet, devnet, localhost]", ), ) .arg( @@ -2176,9 +2336,9 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .validator(is_pubkey) .takes_value(true) .help( - "Address of the mint account that will receive tokens \ - created at genesis. If the ledger already exists then \ - this parameter is silently ignored [default: client keypair]", + "Address of the mint account that will receive tokens created at genesis. If \ + the ledger already exists then this parameter is silently ignored \ + [default: client keypair]", ), ) .arg( @@ -2197,8 +2357,8 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .long("reset") .takes_value(false) .help( - "Reset the ledger to genesis if it exists. \ - By default the validator will resume an existing ledger (if present)", + "Reset the ledger to genesis if it exists. By default the validator will \ + resume an existing ledger (if present)", ), ) .arg( @@ -2248,8 +2408,10 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .long("enable-rpc-bigtable-ledger-storage") .takes_value(false) .hidden(hidden_unless_forced()) - .help("Fetch historical transaction info from a BigTable instance \ - as a fallback to local ledger data"), + .help( + "Fetch historical transaction info from a BigTable instance as a fallback to \ + local ledger data", + ), ) .arg( Arg::with_name("rpc_bigtable_instance") @@ -2267,7 +2429,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(true) .hidden(hidden_unless_forced()) .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Application profile id to use in Bigtable requests") + .help("Application profile id to use in Bigtable requests"), ) .arg( Arg::with_name("rpc_pubsub_enable_vote_subscription") @@ -2289,9 +2451,9 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .number_of_values(2) .multiple(true) .help( - "Add a SBF program to the genesis configuration with upgrades disabled. \ - If the ledger already exists then this parameter is silently ignored. \ - First argument can be a pubkey string or path to a keypair", + "Add a SBF program to the genesis configuration with upgrades disabled. If \ + the ledger already exists then this parameter is silently ignored. 
The first \
+                     argument can be a pubkey string or path to a keypair",
                ),
        )
        .arg(
            Arg::with_name("upgradeable_program")
                .long("upgradeable-program")
                .value_name("ADDRESS_OR_KEYPAIR BPF_PROGRAM_PATH UPGRADE_AUTHORITY")
                .takes_value(true)
                .number_of_values(3)
                .multiple(true)
                .help(
-                    "Add an upgradeable SBF program to the genesis configuration. \
-                    If the ledger already exists then this parameter is silently ignored. \
-                    First and third arguments can be a pubkey string or path to a keypair. \
-                    Upgrade authority set to \"none\" disables upgrades",
+                    "Add an upgradeable SBF program to the genesis configuration. If the ledger \
+                     already exists then this parameter is silently ignored. First and third \
+                     arguments can be a pubkey string or path to a keypair. Upgrade authority set \
+                     to \"none\" disables upgrades",
                ),
        )
        .arg(
            Arg::with_name("account")
                .long("account")
                .value_name("ADDRESS FILENAME.JSON")
                .takes_value(true)
                .allow_hyphen_values(true)
                .multiple(true)
                .help(
                    "Load an account from the provided JSON file (see `solana account --help` on \
                     how to dump an account to file). Files are searched for relative to CWD \
                     and tests/fixtures. If ADDRESS is omitted via the `-` placeholder, the one \
                     in the file will be used. If the ledger already exists then this parameter \
                     is silently ignored",
                ),
        )
        .arg(
            Arg::with_name("account_dir")
                .long("account-dir")
                .value_name("DIRECTORY")
                .validator(|value| {
                    value
                        .split(',')
                        .map(|path| {
                            let path = PathBuf::from(path);
                            if path.exists() && path.is_dir() {
                                Ok(())
                            } else {
-                                Err(format!("path does not exist or is not a directory: {value}"))
+                                Err(format!(
+                                    "path does not exist or is not a directory: {value}"
+                                ))
                            }
                        })
                })
                .takes_value(true)
                .multiple(true)
                .help(
                    "Load all the accounts from the JSON files found in the specified DIRECTORY \
                     (see also the `--account` flag). If the ledger already exists then this \
                     parameter is silently ignored",
                ),
        )
        .arg(
@@ -2384,8 +2549,8 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App<
                })
                .takes_value(true)
                .help(
-                    "Override the number of slots in an epoch. \
-                    If the ledger already exists then this parameter is silently ignored",
+                    "Override the number of slots in an epoch.
If the ledger already exists then \
+                     this parameter is silently ignored",
                ),
        )
        .arg(
            Arg::with_name("gossip_host")
                .long("gossip-host")
                .value_name("HOST")
                .takes_value(true)
                .validator(solana_net_utils::is_host)
                .help(
                    "Gossip DNS name or IP address for the validator to advertise in gossip \
                     [default: 127.0.0.1]",
                ),
        )
        .arg(
            Arg::with_name("dynamic_port_range")
                .long("dynamic-port-range")
                .value_name("MIN_PORT-MAX_PORT")
                .takes_value(true)
                .validator(port_range_validator)
-                .help(
-                    "Range to use for dynamically assigned ports \
-                    [default: 1024-65535]",
-                ),
+                .help("Range to use for dynamically assigned ports [default: 1024-65535]"),
        )
        .arg(
            Arg::with_name("bind_address")
                .long("bind-address")
                .value_name("HOST")
                .takes_value(true)
                .validator(solana_net_utils::is_host)
                .default_value("0.0.0.0")
                .help("IP address to bind the validator ports [default: 0.0.0.0]"),
        )
        .arg(
            Arg::with_name("clone_account")
                .long("clone")
                .short("c")
                .value_name("ADDRESS")
                .takes_value(true)
                .validator(is_pubkey_or_keypair)
                .multiple(true)
                .requires("json_rpc_url")
                .help(
                    "Copy an account from the cluster referenced by the --url argument to the \
                     genesis configuration. If the ledger already exists then this parameter is \
                     silently ignored",
                ),
        )
        .arg(
            Arg::with_name("maybe_clone_account")
                .long("maybe-clone")
                .value_name("ADDRESS")
                .takes_value(true)
                .validator(is_pubkey_or_keypair)
                .multiple(true)
                .requires("json_rpc_url")
                .help(
                    "Copy an account from the cluster referenced by the --url argument, skipping \
                     it if it doesn't exist. If the ledger already exists then this parameter is \
                     silently ignored",
                ),
        )
        .arg(
            Arg::with_name("clone_upgradeable_program")
                .long("clone-upgradeable-program")
                .value_name("ADDRESS")
                .takes_value(true)
                .validator(is_pubkey_or_keypair)
                .multiple(true)
                .requires("json_rpc_url")
                .help(
                    "Copy an upgradeable program and its executable data from the cluster \
                     referenced by the --url argument to the genesis configuration. If the ledger \
                     already exists then this parameter is silently ignored",
                ),
        )
        .arg(
            Arg::with_name("warp_slot")
                .required(false)
                .long("warp-slot")
                .short("w")
                .takes_value(true)
                .value_name("WARP_SLOT")
                .validator(is_slot)
                .min_values(0)
                .max_values(1)
                .help(
                    "Warp the ledger to WARP_SLOT after starting the validator. If no slot is \
                     provided then the current slot of the cluster referenced by the --url \
                     argument will be used",
                ),
        )
        .arg(
            Arg::with_name("limit_ledger_size")
@@ -2500,8 +2662,8 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App<
                .value_name("SOL")
                .default_value(default_args.faucet_sol.as_str())
                .help(
-                    "Give the faucet address this much SOL in genesis. \
-                    If the ledger already exists then this parameter is silently ignored",
+                    "Give the faucet address this much SOL in genesis.
If the ledger already \ + exists then this parameter is silently ignored", ), ) .arg( @@ -2510,9 +2672,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(true) .value_name("SECS") .default_value(default_args.faucet_time_slice_secs.as_str()) - .help( - "Time slice (in secs) over which to limit faucet requests", - ), + .help("Time slice (in secs) over which to limit faucet requests"), ) .arg( Arg::with_name("faucet_per_time_sol_cap") @@ -2521,9 +2681,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .value_name("SOL") .min_values(0) .max_values(1) - .help( - "Per-time slice limit for faucet requests, in SOL", - ), + .help("Per-time slice limit for faucet requests, in SOL"), ) .arg( Arg::with_name("faucet_per_request_sol_cap") @@ -2532,9 +2690,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .value_name("SOL") .min_values(0) .max_values(1) - .help( - "Per-request limit for faucet requests, in SOL", - ), + .help("Per-request limit for faucet requests, in SOL"), ) .arg( Arg::with_name("geyser_plugin_config") @@ -2552,7 +2708,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .value_name("FEATURE_PUBKEY") .validator(is_pubkey) .multiple(true) - .help("deactivate this feature in genesis.") + .help("deactivate this feature in genesis."), ) .arg( Arg::with_name("compute_unit_limit") @@ -2561,7 +2717,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .value_name("COMPUTE_UNITS") .validator(is_parsable::) .takes_value(true) - .help("Override the runtime's compute unit limit per transaction") + .help("Override the runtime's compute unit limit per transaction"), ) .arg( Arg::with_name("log_messages_bytes_limit") @@ -2569,7 +2725,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .value_name("BYTES") .validator(is_parsable::) .takes_value(true) - .help("Maximum number of bytes written to the program log before truncation") + .help("Maximum number of bytes written to the program log before truncation"), ) .arg( Arg::with_name("transaction_account_lock_limit") @@ -2577,7 +2733,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .value_name("NUM_ACCOUNTS") .validator(is_parsable::) .takes_value(true) - .help("Override the runtime's account lock limit per transaction") + .help("Override the runtime's account lock limit per transaction"), ); } @@ -2625,8 +2781,8 @@ mod test { assert!( curr_name != next_name, - "Arguments in `deprecated_arguments()` should be distinct.\n\ - Arguments {} and {} use the same name: {}", + "Arguments in `deprecated_arguments()` should be distinct.\nArguments {} and {} \ + use the same name: {}", i, i + 1, curr_name, @@ -2635,10 +2791,8 @@ mod test { assert!( curr_name < next_name, "To generate better diffs and for readability purposes, `deprecated_arguments()` \ - should list arguments in alphabetical order.\n\ - Arguments {} and {} are not.\n\ - Argument {} name: {}\n\ - Argument {} name: {}", + should list arguments in alphabetical order.\nArguments {} and {} are \ + not.\nArgument {} name: {}\nArgument {} name: {}", i, i + 1, i, diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs index 365f02065ebc96..6b22898e0201d3 100644 --- a/validator/src/dashboard.rs +++ b/validator/src/dashboard.rs @@ -154,10 +154,9 @@ impl Dashboard { }; progress_bar.set_message(format!( - "{}{}| \ - Processed Slot: {} | Confirmed 
Slot: {} | Finalized Slot: {} | \ - Full Snapshot Slot: {} | Incremental Snapshot Slot: {} | \ - Transactions: {} | {}", + "{}{}| Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \ + Full Snapshot Slot: {} | Incremental Snapshot Slot: {} | \ + Transactions: {} | {}", uptime, if health == "ok" { "".to_string() diff --git a/validator/src/main.rs b/validator/src/main.rs index 56b17e5d29c32e..3c27fec0199bcb 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -218,7 +218,8 @@ fn wait_for_restart_window( } if !leader_schedule.is_empty() && upcoming_idle_windows.is_empty() { return Err(format!( - "Validator has no idle window of at least {} slots. Largest idle window for epoch {} is {} slots", + "Validator has no idle window of at least {} slots. Largest idle window \ + for epoch {} is {} slots", min_idle_slots, epoch_info.epoch, max_idle_window ) .into()); @@ -272,7 +273,8 @@ fn wait_for_restart_window( ) } None => format!( - "Validator will be leader soon. Next leader slot is {next_leader_slot}" + "Validator will be leader soon. Next leader slot is \ + {next_leader_slot}" ), }) } @@ -865,11 +867,14 @@ pub fn main() { ("set-public-address", Some(subcommand_matches)) => { let parse_arg_addr = |arg_name: &str, arg_long: &str| -> Option { subcommand_matches.value_of(arg_name).map(|host_port| { - solana_net_utils::parse_host_port(host_port).unwrap_or_else(|err| { - eprintln!("Failed to parse --{arg_long} address. It must be in the HOST:PORT format. {err}"); - exit(1); - }) + solana_net_utils::parse_host_port(host_port).unwrap_or_else(|err| { + eprintln!( + "Failed to parse --{arg_long} address. It must be in the HOST:PORT \ + format. {err}" + ); + exit(1); }) + }) }; let tpu_addr = parse_arg_addr("tpu_addr", "tpu"); let tpu_forwards_addr = parse_arg_addr("tpu_forwards_addr", "tpu-forwards"); @@ -1081,7 +1086,8 @@ pub fn main() { let shrink_ratio = value_t_or_exit!(matches, "accounts_shrink_ratio", f64); if !(0.0..=1.0).contains(&shrink_ratio) { eprintln!( - "The specified account-shrink-ratio is invalid, it must be between 0. and 1.0 inclusive: {shrink_ratio}" + "The specified account-shrink-ratio is invalid, it must be between 0. and 1.0 \ + inclusive: {shrink_ratio}" ); exit(1); } @@ -1285,7 +1291,8 @@ pub fn main() { if rpc_send_batch_send_rate_ms > rpc_send_retry_rate_ms { eprintln!( - "The specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})" + "The specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must \ + be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})" ); exit(1); } @@ -1294,7 +1301,7 @@ pub fn main() { if tps > send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND { eprintln!( "Either the specified rpc-send-batch-size ({}) or rpc-send-batch-ms ({}) is invalid, \ - 'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .", + 'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .", rpc_send_batch_size, rpc_send_batch_send_rate_ms, send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND @@ -1613,14 +1620,25 @@ pub fn main() { &validator_config.snapshot_config, validator_config.accounts_hash_interval_slots, ) { - eprintln!("Invalid snapshot configuration provided: snapshot intervals are incompatible. 
\ - \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if enabled) \ - \n\t- full snapshot interval MUST be larger than incremental snapshot interval (if enabled) \ - \nSnapshot configuration values: \ - \n\tfull snapshot interval: {} \ - \n\tincremental snapshot interval: {}", - if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { full_snapshot_archive_interval_slots.to_string() }, - if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { incremental_snapshot_archive_interval_slots.to_string() }, + eprintln!( + "Invalid snapshot configuration provided: snapshot intervals are incompatible. \ + \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if \ + enabled)\ + \n\t- full snapshot interval MUST be larger than incremental snapshot \ + interval (if enabled)\ + \nSnapshot configuration values:\ + \n\tfull snapshot interval: {}\ + \n\tincremental snapshot interval: {}", + if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { + "disabled".to_string() + } else { + full_snapshot_archive_interval_slots.to_string() + }, + if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { + "disabled".to_string() + } else { + incremental_snapshot_archive_interval_slots.to_string() + }, ); exit(1); } @@ -1632,7 +1650,8 @@ pub fn main() { }; if limit_ledger_size < DEFAULT_MIN_MAX_LEDGER_SHREDS { eprintln!( - "The provided --limit-ledger-size value was too small, the minimum value is {DEFAULT_MIN_MAX_LEDGER_SHREDS}" + "The provided --limit-ledger-size value was too small, the minimum value is \ + {DEFAULT_MIN_MAX_LEDGER_SHREDS}" ); exit(1); } From 024d6ecc4fa51e901879abd92176d02fa19f21a2 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 22 Feb 2024 09:05:17 +0900 Subject: [PATCH 232/401] Add --unified-scheduler-handler-threads (#35195) * Add --unified-scheduler-handler-threads * Adjust value name * Warn if the flag was ignored * Tweak message a bit --- Cargo.lock | 1 + core/src/validator.rs | 9 +++ ledger-tool/src/ledger_utils.rs | 9 +++ ledger-tool/src/main.rs | 13 +++- local-cluster/src/validator_configs.rs | 1 + programs/sbf/Cargo.lock | 1 + unified-scheduler-pool/src/lib.rs | 94 ++++++++++++++++++++------ validator/Cargo.toml | 1 + validator/src/cli.rs | 10 +++ validator/src/main.rs | 2 + 10 files changed, 120 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a727fae2c8b0ee..9a61e27a12c5e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7547,6 +7547,7 @@ dependencies = [ "solana-svm", "solana-test-validator", "solana-tpu-client", + "solana-unified-scheduler-pool", "solana-version", "solana-vote-program", "spl-token-2022", diff --git a/core/src/validator.rs b/core/src/validator.rs index b71c11cd967d34..97ef0a01ef87ad 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -262,6 +262,7 @@ pub struct ValidatorConfig { pub generator_config: Option, pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, pub wen_restart_proto_path: Option, + pub unified_scheduler_handler_threads: Option, } impl Default for ValidatorConfig { @@ -329,6 +330,7 @@ impl Default for ValidatorConfig { generator_config: None, use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, + unified_scheduler_handler_threads: None, } } } @@ -813,9 +815,16 @@ impl Validator { match &config.block_verification_method 
{ BlockVerificationMethod::BlockstoreProcessor => { info!("no scheduler pool is installed for block verification..."); + if let Some(count) = config.unified_scheduler_handler_threads { + warn!( + "--unified-scheduler-handler-threads={count} is ignored because unified \ + scheduler isn't enabled" + ); + } } BlockVerificationMethod::UnifiedScheduler => { let scheduler_pool = DefaultSchedulerPool::new_dyn( + config.unified_scheduler_handler_threads, config.runtime_config.log_messages_bytes_limit, transaction_status_sender.clone(), Some(replay_vote_sender.clone()), diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 2663a205fb5f37..116b21527ae4d8 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -291,9 +291,17 @@ pub fn load_and_process_ledger( "Using: block-verification-method: {}", block_verification_method, ); + let unified_scheduler_handler_threads = + value_t!(arg_matches, "unified_scheduler_handler_threads", usize).ok(); match block_verification_method { BlockVerificationMethod::BlockstoreProcessor => { info!("no scheduler pool is installed for block verification..."); + if let Some(count) = unified_scheduler_handler_threads { + warn!( + "--unified-scheduler-handler-threads={count} is ignored because unified \ + scheduler isn't enabled" + ); + } } BlockVerificationMethod::UnifiedScheduler => { let no_transaction_status_sender = None; @@ -303,6 +311,7 @@ pub fn load_and_process_ledger( .write() .unwrap() .install_scheduler_pool(DefaultSchedulerPool::new_dyn( + unified_scheduler_handler_threads, process_options.runtime_config.log_messages_bytes_limit, no_transaction_status_sender, no_replay_vote_sender, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index d6218fad6915f0..9b299cfadcbcf2 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -28,7 +28,7 @@ use { input_parsers::{cluster_type_of, pubkey_of, pubkeys_of}, input_validators::{ is_parsable, is_pow2, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage, - validate_maximum_full_snapshot_archives_to_retain, + is_within_range, validate_maximum_full_snapshot_archives_to_retain, validate_maximum_incremental_snapshot_archives_to_retain, }, }, @@ -72,6 +72,7 @@ use { transaction::{MessageHash, SanitizedTransaction, SimpleAddressLoader}, }, solana_stake_program::stake_state::{self, PointValue}, + solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::{ self, vote_state::{self, VoteState}, @@ -852,6 +853,16 @@ fn main() { .hidden(hidden_unless_forced()) .help(BlockVerificationMethod::cli_message()), ) + .arg( + Arg::with_name("unified_scheduler_handler_threads") + .long("unified-scheduler-handler-threads") + .value_name("COUNT") + .takes_value(true) + .validator(|s| is_within_range(s, 1..)) + .global(true) + .hidden(hidden_unless_forced()) + .help(DefaultSchedulerPool::cli_message()), + ) .arg( Arg::with_name("output_format") .long("output") diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 537dd6495f32e1..33883bb02c1d77 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -68,6 +68,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { generator_config: config.generator_config.clone(), use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, wen_restart_proto_path: config.wen_restart_proto_path.clone(), + unified_scheduler_handler_threads: config.unified_scheduler_handler_threads, 
} } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 6ab36567f1a744..1b8d422d42ba7c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6546,6 +6546,7 @@ dependencies = [ "solana-svm", "solana-test-validator", "solana-tpu-client", + "solana-unified-scheduler-pool", "solana-version", "solana-vote-program", "symlink", diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index deae3697807705..09ded82ee88e7d 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -34,7 +34,7 @@ use { marker::PhantomData, sync::{ atomic::{AtomicU64, Ordering::Relaxed}, - Arc, Mutex, Weak, + Arc, Mutex, OnceLock, Weak, }, thread::{self, JoinHandle}, }, @@ -48,6 +48,7 @@ type AtomicSchedulerId = AtomicU64; #[derive(Debug)] pub struct SchedulerPool, TH: TaskHandler> { scheduler_inners: Mutex>, + handler_count: usize, handler_context: HandlerContext, // weak_self could be elided by changing InstalledScheduler::take_scheduler()'s receiver to // Arc from &Self, because SchedulerPool is used as in the form of Arc @@ -83,13 +84,20 @@ where // Some internal impl and test code want an actual concrete type, NOT the // `dyn InstalledSchedulerPool`. So don't merge this into `Self::new_dyn()`. fn new( + handler_count: Option, log_messages_bytes_limit: Option, transaction_status_sender: Option, replay_vote_sender: Option, prioritization_fee_cache: Arc, ) -> Arc { + let handler_count = handler_count.unwrap_or(1); + // we're hard-coding the number of handler thread to 1, meaning this impl is currently + // single-threaded still. + assert_eq!(handler_count, 1); // replace this with assert!(handler_count >= 1) later + Arc::new_cyclic(|weak_self| Self { scheduler_inners: Mutex::default(), + handler_count, handler_context: HandlerContext { log_messages_bytes_limit, transaction_status_sender, @@ -105,12 +113,14 @@ where // This apparently-meaningless wrapper is handy, because some callers explicitly want // `dyn InstalledSchedulerPool` to be returned for type inference convenience. pub fn new_dyn( + handler_count: Option, log_messages_bytes_limit: Option, transaction_status_sender: Option, replay_vote_sender: Option, prioritization_fee_cache: Arc, ) -> InstalledSchedulerPoolArc { Self::new( + handler_count, log_messages_bytes_limit, transaction_status_sender, replay_vote_sender, @@ -145,6 +155,37 @@ where S::spawn(self.self_arc(), context) } } + + pub fn default_handler_count() -> usize { + Self::calculate_default_handler_count( + thread::available_parallelism() + .ok() + .map(|non_zero| non_zero.get()), + ) + } + + pub fn calculate_default_handler_count(detected_cpu_core_count: Option) -> usize { + // Divide by 4 just not to consume all available CPUs just with handler threads, sparing for + // other active forks and other subsystems. + // Also, if available_parallelism fails (which should be very rare), use 4 threads, + // as a relatively conservatism assumption of modern multi-core systems ranging from + // engineers' laptops to production servers. 
+ detected_cpu_core_count + .map(|core_count| (core_count / 4).max(1)) + .unwrap_or(4) + } + + pub fn cli_message() -> &'static str { + static MESSAGE: OnceLock = OnceLock::new(); + + MESSAGE.get_or_init(|| { + format!( + "Change the number of the unified scheduler's transaction execution threads \ + dedicated to each block, otherwise calculated as cpu_cores/4 [default: {}]", + Self::default_handler_count() + ) + }) + } } impl InstalledSchedulerPool for SchedulerPool @@ -372,7 +413,6 @@ pub struct PooledSchedulerInner, TH: TaskHandler> { struct ThreadManager, TH: TaskHandler> { scheduler_id: SchedulerId, pool: Arc>, - handler_count: usize, new_task_sender: Sender, new_task_receiver: Receiver, session_result_sender: Sender>, @@ -384,13 +424,9 @@ struct ThreadManager, TH: TaskHandler> { impl PooledScheduler { fn do_spawn(pool: Arc>, initial_context: SchedulingContext) -> Self { - // we're hard-coding the number of handler thread to 1, meaning this impl is currently - // single-threaded still. - let handler_count = 1; - Self::from_inner( PooledSchedulerInner:: { - thread_manager: ThreadManager::new(pool, handler_count), + thread_manager: ThreadManager::new(pool), }, initial_context, ) @@ -398,14 +434,14 @@ impl PooledScheduler { } impl, TH: TaskHandler> ThreadManager { - fn new(pool: Arc>, handler_count: usize) -> Self { + fn new(pool: Arc>) -> Self { let (new_task_sender, new_task_receiver) = unbounded(); let (session_result_sender, session_result_receiver) = unbounded(); + let handler_count = pool.handler_count; Self { scheduler_id: pool.new_scheduler_id(), pool, - handler_count, new_task_sender, new_task_receiver, session_result_sender, @@ -477,7 +513,7 @@ impl, TH: TaskHandler> ThreadManager { // 5. the handler thread reply back to the scheduler thread as an executed task. // 6. the scheduler thread post-processes the executed task. 
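
The six comment steps above describe one full round trip of a task between the scheduler thread and the handler threads, carried by the same pair of `unbounded()` crossbeam channels constructed in `ThreadManager::new`. A minimal, self-contained sketch of that shape — hypothetical `Task`/`ExecutedTask` payload types and function name standing in for the real ones in this patch, not code from it:

fn channel_round_trip_sketch(handler_count: usize) {
    use crossbeam_channel::unbounded;
    use std::thread;

    struct Task(u64);
    struct ExecutedTask(u64);

    let (task_sender, task_receiver) = unbounded::<Task>();
    let (result_sender, result_receiver) = unbounded::<ExecutedTask>();
    // Steps 3.-5.: each handler pulls a task off the shared channel,
    // executes it, and replies to the scheduler with an executed task.
    let handlers: Vec<_> = (0..handler_count)
        .map(|_| {
            let task_receiver = task_receiver.clone();
            let result_sender = result_sender.clone();
            thread::spawn(move || {
                while let Ok(Task(id)) = task_receiver.recv() {
                    result_sender.send(ExecutedTask(id)).unwrap();
                }
            })
        })
        .collect();
    // Steps 1.-2.: the scheduler side pushes new tasks into the channel.
    for id in 0..10 {
        task_sender.send(Task(id)).unwrap();
    }
    // Step 6.: post-process executed tasks as they come back.
    for _ in 0..10 {
        let ExecutedTask(_id) = result_receiver.recv().unwrap();
    }
    drop(task_sender); // closing the task channel lets the handlers exit
    for handler in handlers {
        handler.join().unwrap();
    }
}
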
let scheduler_main_loop = || { - let handler_count = self.handler_count; + let handler_count = self.pool.handler_count; let session_result_sender = self.session_result_sender.clone(); let new_task_receiver = self.new_task_receiver.clone(); @@ -613,7 +649,7 @@ impl, TH: TaskHandler> ThreadManager { .unwrap(), ); - self.handler_threads = (0..self.handler_count) + self.handler_threads = (0..self.pool.handler_count) .map({ |thx| { thread::Builder::new() @@ -760,7 +796,7 @@ mod tests { let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = - DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); // this indirectly proves that there should be circular link because there's only one Arc // at this moment now @@ -775,7 +811,7 @@ mod tests { let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = - DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); let bank = Arc::new(Bank::default_for_tests()); let context = SchedulingContext::new(bank); let scheduler = pool.take_scheduler(context); @@ -789,7 +825,8 @@ mod tests { solana_logger::setup(); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); - let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let pool = + DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache); let bank = Arc::new(Bank::default_for_tests()); let context = &SchedulingContext::new(bank); @@ -817,7 +854,8 @@ mod tests { solana_logger::setup(); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); - let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let pool = + DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache); let bank = Arc::new(Bank::default_for_tests()); let context = &SchedulingContext::new(bank); let mut scheduler = pool.do_take_scheduler(context.clone()); @@ -835,7 +873,8 @@ mod tests { solana_logger::setup(); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); - let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let pool = + DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache); let old_bank = &Arc::new(Bank::default_for_tests()); let new_bank = &Arc::new(Bank::default_for_tests()); assert!(!Arc::ptr_eq(old_bank, new_bank)); @@ -861,7 +900,7 @@ mod tests { let mut bank_forks = bank_forks.write().unwrap(); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = - DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); bank_forks.install_scheduler_pool(pool); } @@ -875,7 +914,7 @@ mod tests { let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = - DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); let bank = Bank::default_for_tests(); let bank_forks = BankForks::new_rw_arc(bank); @@ -928,7 +967,7 @@ mod tests { let bank = 
setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = - DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); let context = SchedulingContext::new(bank.clone()); assert_eq!(bank.transaction_count(), 0); @@ -953,7 +992,7 @@ mod tests { let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = - DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); let context = SchedulingContext::new(bank.clone()); let mut scheduler = pool.take_scheduler(context); @@ -1159,6 +1198,7 @@ mod tests { None, None, None, + None, ignored_prioritization_fee_cache, ); let scheduler = pool.take_scheduler(context); @@ -1193,4 +1233,18 @@ mod tests { fn test_scheduler_schedule_execution_recent_blockhash_edge_case_without_race() { do_test_scheduler_schedule_execution_recent_blockhash_edge_case::(); } + + #[test] + fn test_default_handler_count() { + for (detected, expected) in [(32, 8), (4, 1), (2, 1)] { + assert_eq!( + DefaultSchedulerPool::calculate_default_handler_count(Some(detected)), + expected + ); + } + assert_eq!( + DefaultSchedulerPool::calculate_default_handler_count(None), + 4 + ); + } } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 4028221cd7ce68..5cc76a810116b3 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -61,6 +61,7 @@ solana-streamer = { workspace = true } solana-svm = { workspace = true } solana-test-validator = { workspace = true } solana-tpu-client = { workspace = true } +solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } symlink = { workspace = true } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 84f63d3503a3c2..8424d7973f0705 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -47,6 +47,7 @@ use { self, MAX_BATCH_SEND_RATE_MS, MAX_TRANSACTION_BATCH_SIZE, }, solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE, + solana_unified_scheduler_pool::DefaultSchedulerPool, std::{path::PathBuf, str::FromStr}, }; @@ -1530,6 +1531,15 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .possible_values(BlockProductionMethod::cli_names()) .help(BlockProductionMethod::cli_message()), ) + .arg( + Arg::with_name("unified_scheduler_handler_threads") + .long("unified-scheduler-handler-threads") + .hidden(hidden_unless_forced()) + .value_name("COUNT") + .takes_value(true) + .validator(|s| is_within_range(s, 1..)) + .help(DefaultSchedulerPool::cli_message()), + ) .arg( Arg::with_name("wen_restart") .long("wen-restart") diff --git a/validator/src/main.rs b/validator/src/main.rs index 3c27fec0199bcb..ec70796130e7d2 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1671,6 +1671,8 @@ pub fn main() { BlockProductionMethod ) .unwrap_or_default(); + validator_config.unified_scheduler_handler_threads = + value_t!(matches, "unified_scheduler_handler_threads", usize).ok(); validator_config.ledger_column_options = LedgerColumnOptions { compression_type: match matches.value_of("rocksdb_ledger_compression") { From 60ccdb3fb4833ca73ae72acdeecab57ac97fc9b1 Mon Sep 17 00:00:00 2001 From: nathan haim Date: Thu, 22 Feb 2024 02:07:00 +0100 Subject: [PATCH 233/401] cli: program 
set-upgrade-authority sign-only (#35203) * cli: program set-upgrade-authority Add --sign-only flag * Apply suggestions from code review This fixes nits. It modifies tests: it will sign offline with `newAuthority` keypair instead of fee payer. Then it reuses the signature in the next command. Co-authored-by: Jon C * cli/tests/program: fix tests * cli/src/program: delete unsed import * fix to pass ci cargo-clippy-nightly --------- Co-authored-by: Jon C --- cli/src/program.rs | 169 ++++++++++++++++++++++++++++++++----------- cli/tests/program.rs | 32 ++++++-- 2 files changed, 155 insertions(+), 46 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 0d59f6a2564264..7605daf5912721 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -118,11 +118,17 @@ pub enum ProgramCliCommand { program_pubkey: Pubkey, upgrade_authority_index: Option, new_upgrade_authority: Option, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, }, SetUpgradeAuthorityChecked { program_pubkey: Pubkey, upgrade_authority_index: SignerIndex, new_upgrade_authority_index: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, }, Show { account_pubkey: Option, @@ -384,7 +390,8 @@ impl ProgramSubCommands for App<'_, '_> { "Set this flag if you don't want the new authority to sign \ the set-upgrade-authority transaction.", ), - ), + ) + .offline_args(), ) .subcommand( SubCommand::with_name("show") @@ -617,7 +624,6 @@ pub fn parse_program_subcommand( let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); - let buffer_pubkey = pubkey_of_signer(matches, "buffer", wallet_manager) .unwrap() .unwrap(); @@ -723,6 +729,9 @@ pub fn parse_program_subcommand( } } ("set-upgrade-authority", Some(matches)) => { + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); let (upgrade_authority_signer, upgrade_authority_pubkey) = signer_of(matches, "upgrade_authority", wallet_manager)?; let program_pubkey = pubkey_of(matches, "program_id").unwrap(); @@ -753,6 +762,9 @@ pub fn parse_program_subcommand( program_pubkey, upgrade_authority_index: signer_info.index_of(upgrade_authority_pubkey), new_upgrade_authority, + sign_only, + dump_transaction_message, + blockhash_query, }), signers: signer_info.signers, } @@ -766,6 +778,9 @@ pub fn parse_program_subcommand( new_upgrade_authority_index: signer_info .index_of(new_upgrade_authority) .expect("new upgrade authority is missing from signers"), + sign_only, + dump_transaction_message, + blockhash_query, }), signers: signer_info.signers, } @@ -948,11 +963,17 @@ pub fn process_program_subcommand( Some(*buffer_pubkey), *buffer_authority_index, Some(*new_buffer_authority), + false, + false, + &BlockhashQuery::default(), ), ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index, new_upgrade_authority, + sign_only, + dump_transaction_message, + blockhash_query, } => process_set_authority( &rpc_client, config, @@ -960,17 +981,26 @@ pub fn process_program_subcommand( None, *upgrade_authority_index, *new_upgrade_authority, + *sign_only, + *dump_transaction_message, + blockhash_query, ), ProgramCliCommand::SetUpgradeAuthorityChecked { program_pubkey, upgrade_authority_index, 
new_upgrade_authority_index, + sign_only, + dump_transaction_message, + blockhash_query, } => process_set_authority_checked( &rpc_client, config, *program_pubkey, *upgrade_authority_index, *new_upgrade_authority_index, + *sign_only, + *dump_transaction_message, + blockhash_query, ), ProgramCliCommand::Show { account_pubkey, @@ -1224,6 +1254,9 @@ fn process_program_deploy( None, Some(upgrade_authority_signer_index), None, + false, + false, + &BlockhashQuery::default(), )?; } if result.is_err() && !buffer_provided { @@ -1437,6 +1470,9 @@ fn process_set_authority( buffer_pubkey: Option, authority: Option, new_authority: Option, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, ) -> ProcessResult { let authority_signer = if let Some(index) = authority { config.signers[index] @@ -1445,7 +1481,7 @@ fn process_set_authority( }; trace!("Set a new authority"); - let blockhash = rpc_client.get_latest_blockhash()?; + let blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; let mut tx = if let Some(ref pubkey) = program_pubkey { Transaction::new_unsigned(Message::new( @@ -1473,29 +1509,42 @@ fn process_set_authority( return Err("Program or Buffer not provided".into()); }; - tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; - rpc_client - .send_and_confirm_transaction_with_spinner_and_config( + let signers = &[config.signers[0], authority_signer]; + + if sign_only { + tx.try_partial_sign(signers, blockhash)?; + return_signers_with_config( &tx, - config.commitment, - RpcSendTransactionConfig { - preflight_commitment: Some(config.commitment.commitment), - ..RpcSendTransactionConfig::default() + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, }, ) - .map_err(|e| format!("Setting authority failed: {e}"))?; - - let authority = CliProgramAuthority { - authority: new_authority - .map(|pubkey| pubkey.to_string()) - .unwrap_or_else(|| "none".to_string()), - account_type: if program_pubkey.is_some() { - CliProgramAccountType::Program - } else { - CliProgramAccountType::Buffer - }, - }; - Ok(config.output_format.formatted_string(&authority)) + } else { + tx.try_sign(signers, blockhash)?; + rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ) + .map_err(|e| format!("Setting authority failed: {e}"))?; + + let authority = CliProgramAuthority { + authority: new_authority + .map(|pubkey| pubkey.to_string()) + .unwrap_or_else(|| "none".to_string()), + account_type: if program_pubkey.is_some() { + CliProgramAccountType::Program + } else { + CliProgramAccountType::Buffer + }, + }; + Ok(config.output_format.formatted_string(&authority)) + } } fn process_set_authority_checked( @@ -1504,12 +1553,15 @@ fn process_set_authority_checked( program_pubkey: Pubkey, authority_index: SignerIndex, new_authority_index: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, ) -> ProcessResult { let authority_signer = config.signers[authority_index]; let new_authority_signer = config.signers[new_authority_index]; trace!("Set a new (checked) authority"); - let blockhash = rpc_client.get_latest_blockhash()?; + let blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; let mut tx = Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::set_upgrade_authority_checked( @@ -1520,26 
+1572,35 @@ fn process_set_authority_checked( Some(&config.signers[0].pubkey()), )); - tx.try_sign( - &[config.signers[0], authority_signer, new_authority_signer], - blockhash, - )?; - rpc_client - .send_and_confirm_transaction_with_spinner_and_config( + let signers = &[config.signers[0], authority_signer, new_authority_signer]; + if sign_only { + tx.try_partial_sign(signers, blockhash)?; + return_signers_with_config( &tx, - config.commitment, - RpcSendTransactionConfig { - preflight_commitment: Some(config.commitment.commitment), - ..RpcSendTransactionConfig::default() + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, }, ) - .map_err(|e| format!("Setting authority failed: {e}"))?; + } else { + tx.try_sign(signers, blockhash)?; + rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ) + .map_err(|e| format!("Setting authority failed: {e}"))?; - let authority = CliProgramAuthority { - authority: new_authority_signer.pubkey().to_string(), - account_type: CliProgramAccountType::Program, - }; - Ok(config.output_format.formatted_string(&authority)) + let authority = CliProgramAuthority { + authority: new_authority_signer.pubkey().to_string(), + account_type: CliProgramAccountType::Program, + }; + Ok(config.output_format.formatted_string(&authority)) + } } const ACCOUNT_TYPE_SIZE: usize = 4; @@ -2679,7 +2740,7 @@ mod tests { }, serde_json::Value, solana_cli_output::OutputFormat, - solana_sdk::signature::write_keypair_file, + solana_sdk::{hash::Hash, signature::write_keypair_file}, }; fn make_tmp_path(name: &str) -> String { @@ -3088,6 +3149,8 @@ mod tests { let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); + let blockhash = Hash::new_unique(); + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", @@ -3096,6 +3159,10 @@ mod tests { "--new-upgrade-authority", &new_authority_pubkey.to_string(), "--skip-new-upgrade-authority-signer-check", + "--sign-only", + "--dump-transaction-message", + "--blockhash", + blockhash.to_string().as_str(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), @@ -3104,6 +3171,9 @@ mod tests { program_pubkey, upgrade_authority_index: Some(0), new_upgrade_authority: Some(new_authority_pubkey), + sign_only: true, + dump_transaction_message: true, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } @@ -3129,11 +3199,15 @@ mod tests { program_pubkey, upgrade_authority_index: Some(0), new_upgrade_authority: Some(new_authority_pubkey.pubkey()), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); + let blockhash = Hash::new_unique(); let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Keypair::new(); let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); @@ -3145,6 +3219,10 @@ mod tests { &program_pubkey.to_string(), "--new-upgrade-authority", &new_authority_pubkey_file, + "--sign-only", + "--dump-transaction-message", + "--blockhash", + blockhash.to_string().as_str(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), @@ -3153,6 +3231,9 @@ mod tests { program_pubkey, upgrade_authority_index: 0, 
new_upgrade_authority_index: 1, + sign_only: true, + dump_transaction_message: true, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), @@ -3181,6 +3262,9 @@ mod tests { program_pubkey, upgrade_authority_index: Some(0), new_upgrade_authority: None, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } @@ -3206,6 +3290,9 @@ mod tests { program_pubkey, upgrade_authority_index: Some(1), new_upgrade_authority: None, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 894303b3ab8240..039df1d64b8ae8 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -488,13 +488,32 @@ fn test_cli_program_deploy_with_authority() { program_data[..] ); - // Set a new authority + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + // Set a new authority sign offline first let new_upgrade_authority = Keypair::new(); - config.signers = vec![&keypair, &upgrade_authority]; - config.command = CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { + config.signers = vec![&keypair, &upgrade_authority, &new_upgrade_authority]; + config.command = CliCommand::Program(ProgramCliCommand::SetUpgradeAuthorityChecked { program_pubkey, - upgrade_authority_index: Some(1), - new_upgrade_authority: Some(new_upgrade_authority.pubkey()), + upgrade_authority_index: 1, + new_upgrade_authority_index: 2, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + }); + let sig_response = process_command(&config).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let offline_pre_signer = sign_only + .presigner_of(&new_upgrade_authority.pubkey()) + .unwrap(); + + config.signers = vec![&keypair, &upgrade_authority, &offline_pre_signer]; + config.command = CliCommand::Program(ProgramCliCommand::SetUpgradeAuthorityChecked { + program_pubkey, + upgrade_authority_index: 1, + new_upgrade_authority_index: 2, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), false, None), }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -577,6 +596,9 @@ fn test_cli_program_deploy_with_authority() { program_pubkey, upgrade_authority_index: Some(1), new_upgrade_authority: None, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::default(), }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); From 07955e79ad70f0e1cb7ea2a23d349ebe9a7e033b Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 21 Feb 2024 18:51:30 -0800 Subject: [PATCH 234/401] replay: gracefully exit if tower load fails (#35269) --- core/src/replay_stage.rs | 97 ++++++++++++++++++++++++++-------------- 1 file changed, 64 insertions(+), 33 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 485c58bdd57fa5..a80a04d47c1573 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -16,7 +16,7 @@ use { progress_map::{ForkProgress, ProgressMap, PropagatedStats, ReplaySlotStats}, tower_storage::{SavedTower, SavedTowerVersions, TowerStorage}, 
BlockhashStatus, ComputedBankState, Stake, SwitchForkDecision, ThresholdDecision, - Tower, VotedStakes, SWITCH_FORK_THRESHOLD, + Tower, TowerError, VotedStakes, SWITCH_FORK_THRESHOLD, }, cost_update_service::CostUpdate, repair::{ @@ -580,12 +580,25 @@ impl ReplayStage { // set-identity was called during the startup procedure, ensure the tower is consistent // before starting the loop. further calls to set-identity will reload the tower in the loop let my_old_pubkey = tower.node_pubkey; - tower = Self::load_tower( + tower = match Self::load_tower( tower_storage.as_ref(), &my_pubkey, &vote_account, &bank_forks, - ); + ) { + Ok(tower) => tower, + Err(err) => { + error!( + "Unable to load new tower when attempting to change identity from {} to {} on + ReplayStage startup, Exiting: {}", + my_old_pubkey, + my_pubkey, + err + ); + // drop(_exit) will set the exit flag, eventually tearing down the entire process + return; + } + }; warn!( "Identity changed during startup from {} to {}", my_old_pubkey, my_pubkey @@ -997,12 +1010,25 @@ impl ReplayStage { my_pubkey = identity_keypair.pubkey(); // Load the new identity's tower - tower = Self::load_tower( + tower = match Self::load_tower( tower_storage.as_ref(), &my_pubkey, &vote_account, &bank_forks, - ); + ) { + Ok(tower) => tower, + Err(err) => { + error!( + "Unable to load new tower when attempting to change identity + from {} to {} on set-identity, Exiting: {}", + my_old_pubkey, + my_pubkey, + err + ); + // drop(_exit) will set the exit flag, eventually tearing down the entire process + return; + } + }; // Ensure the validator can land votes with the new identity before // becoming leader has_new_vote_been_rooted = !wait_for_vote_to_start_leader; @@ -1152,37 +1178,40 @@ impl ReplayStage { }) } + /// Loads the tower from `tower_storage` with identity `node_pubkey`. + /// + /// If the tower is missing or too old, a tower is constructed from bank forks. fn load_tower( tower_storage: &dyn TowerStorage, node_pubkey: &Pubkey, vote_account: &Pubkey, bank_forks: &Arc>, - ) -> Tower { - Tower::restore(tower_storage, node_pubkey) - .and_then(|restored_tower| { - let root_bank = bank_forks.read().unwrap().root_bank(); - let slot_history = root_bank.get_slot_history(); - restored_tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history) - }) - .unwrap_or_else(|err| { - if err.is_file_missing() { - Tower::new_from_bankforks( - &bank_forks.read().unwrap(), - node_pubkey, - vote_account, - ) - } else if err.is_too_old() { - warn!("Failed to load tower, too old for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); - Tower::new_from_bankforks( - &bank_forks.read().unwrap(), - node_pubkey, - vote_account, - ) - } else { - error!("Failed to load tower for {}: {}", node_pubkey, err); - std::process::exit(1); - } - }) + ) -> Result { + let tower = Tower::restore(tower_storage, node_pubkey).and_then(|restored_tower| { + let root_bank = bank_forks.read().unwrap().root_bank(); + let slot_history = root_bank.get_slot_history(); + restored_tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history) + }); + match tower { + Ok(tower) => Ok(tower), + Err(err) if err.is_file_missing() => { + warn!("Failed to load tower, file missing for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); + Ok(Tower::new_from_bankforks( + &bank_forks.read().unwrap(), + node_pubkey, + vote_account, + )) + } + Err(err) if err.is_too_old() => { + warn!("Failed to load tower, too old for {}: {}. 
Creating a new tower from bankforks.", node_pubkey, err); + Ok(Tower::new_from_bankforks( + &bank_forks.read().unwrap(), + node_pubkey, + vote_account, + )) + } + Err(err) => Err(err), + } } fn check_for_vote_only_mode( @@ -8643,7 +8672,8 @@ pub(crate) mod tests { let bank_forks = vote_simulator.bank_forks; let tower = - ReplayStage::load_tower(&tower_storage, &node_pubkey, &vote_account, &bank_forks); + ReplayStage::load_tower(&tower_storage, &node_pubkey, &vote_account, &bank_forks) + .unwrap(); let expected_tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, VOTE_THRESHOLD_SIZE); assert_eq!(tower.vote_state, expected_tower.vote_state); assert_eq!(tower.node_pubkey, node_pubkey); @@ -8670,7 +8700,8 @@ pub(crate) mod tests { expected_tower.save(&tower_storage, &node_keypair).unwrap(); let tower = - ReplayStage::load_tower(&tower_storage, &node_pubkey, &vote_account, &bank_forks); + ReplayStage::load_tower(&tower_storage, &node_pubkey, &vote_account, &bank_forks) + .unwrap(); assert_eq!(tower.vote_state, expected_tower.vote_state); assert_eq!(tower.node_pubkey, expected_tower.node_pubkey); } From bfcd4c8656190412cd5d4ac23fb950dde3bbfc99 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 22 Feb 2024 00:19:28 -0600 Subject: [PATCH 235/401] ledger-tool: Use error handling in blockstore command code (#35157) There are lots of operations that could fail, including lots of the Blockstore calls. The old code matched on Ok(_) or did unwrap()'s which clutter the code and increase indentation. This change wraps the entire command in a function that returns a Result. The wrapper then does a single unwrap_or_else() and prints any error message. Everywhere else is now free to use the ? operator --- ledger-tool/src/blockstore.rs | 303 ++++++++++++++++------------------ ledger-tool/src/error.rs | 18 ++ ledger-tool/src/main.rs | 1 + 3 files changed, 158 insertions(+), 164 deletions(-) create mode 100644 ledger-tool/src/error.rs diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs index c7653cf5643a55..453a801702f864 100644 --- a/ledger-tool/src/blockstore.rs +++ b/ledger-tool/src/blockstore.rs @@ -2,6 +2,7 @@ use { crate::{ + error::{LedgerToolError, Result}, ledger_path::canonicalize_ledger_path, ledger_utils::{get_program_ids, get_shred_storage_type}, output::{output_ledger, output_slot, SlotBounds, SlotInfo}, @@ -42,13 +43,13 @@ fn analyze_column< >( db: &Database, name: &str, -) { +) -> Result<()> { let mut key_len: u64 = 0; let mut key_tot: u64 = 0; let mut val_hist = histogram::Histogram::new(); let mut val_tot: u64 = 0; let mut row_hist = histogram::Histogram::new(); - for (key, val) in db.iter::(blockstore_db::IteratorMode::Start).unwrap() { + for (key, val) in db.iter::(blockstore_db::IteratorMode::Start)? 
{ // Key length is fixed, only need to calculate it once if key_len == 0 { key_len = C::key(key).len() as u64; @@ -108,31 +109,32 @@ fn analyze_column< }) }; - println!("{}", serde_json::to_string_pretty(&json_result).unwrap()); + println!("{}", serde_json::to_string_pretty(&json_result)?); + Ok(()) } -fn analyze_storage(database: &Database) { +fn analyze_storage(database: &Database) -> Result<()> { use solana_ledger::blockstore_db::columns::*; - analyze_column::(database, "SlotMeta"); - analyze_column::(database, "Orphans"); - analyze_column::(database, "DeadSlots"); - analyze_column::(database, "DuplicateSlots"); - analyze_column::(database, "ErasureMeta"); - analyze_column::(database, "BankHash"); - analyze_column::(database, "Root"); - analyze_column::(database, "Index"); - analyze_column::(database, "ShredData"); - analyze_column::(database, "ShredCode"); - analyze_column::(database, "TransactionStatus"); - analyze_column::(database, "AddressSignatures"); - analyze_column::(database, "TransactionMemos"); - analyze_column::(database, "TransactionStatusIndex"); - analyze_column::(database, "Rewards"); - analyze_column::(database, "Blocktime"); - analyze_column::(database, "PerfSamples"); - analyze_column::(database, "BlockHeight"); - analyze_column::(database, "ProgramCosts"); - analyze_column::(database, "OptimisticSlots"); + analyze_column::(database, "SlotMeta")?; + analyze_column::(database, "Orphans")?; + analyze_column::(database, "DeadSlots")?; + analyze_column::(database, "DuplicateSlots")?; + analyze_column::(database, "ErasureMeta")?; + analyze_column::(database, "BankHash")?; + analyze_column::(database, "Root")?; + analyze_column::(database, "Index")?; + analyze_column::(database, "ShredData")?; + analyze_column::(database, "ShredCode")?; + analyze_column::(database, "TransactionStatus")?; + analyze_column::(database, "AddressSignatures")?; + analyze_column::(database, "TransactionMemos")?; + analyze_column::(database, "TransactionStatusIndex")?; + analyze_column::(database, "Rewards")?; + analyze_column::(database, "Blocktime")?; + analyze_column::(database, "PerfSamples")?; + analyze_column::(database, "BlockHeight")?; + analyze_column::(database, "ProgramCosts")?; + analyze_column::(database, "OptimisticSlots") } fn raw_key_to_slot(key: &[u8], column_name: &str) -> Option { @@ -235,13 +237,8 @@ fn get_latest_optimistic_slots( } } -fn print_blockstore_file_metadata( - blockstore: &Blockstore, - file_name: &Option<&str>, -) -> Result<(), String> { - let live_files = blockstore - .live_files_metadata() - .map_err(|err| format!("{err:?}"))?; +fn print_blockstore_file_metadata(blockstore: &Blockstore, file_name: &Option<&str>) -> Result<()> { + let live_files = blockstore.live_files_metadata()?; // All files under live_files_metadata are prefixed with "/". 
let sst_file_name = file_name.as_ref().map(|name| format!("/{name}")); @@ -264,9 +261,9 @@ fn print_blockstore_file_metadata( } } if sst_file_name.is_some() { - return Err(format!( - "Failed to find or load the metadata of the specified file {file_name:?}" - )); + return Err(LedgerToolError::BadArgument(format!( + "failed to find or load the metadata of the specified file {file_name:?}" + ))); } Ok(()) } @@ -600,81 +597,77 @@ pub fn blockstore_subcommands<'a, 'b>(hidden: bool) -> Vec> { } pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { + do_blockstore_process_command(ledger_path, matches).unwrap_or_else(|err| { + eprintln!("Failed to complete command: {err:?}"); + std::process::exit(1); + }); +} + +fn do_blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) -> Result<()> { let ledger_path = canonicalize_ledger_path(ledger_path); let verbose_level = matches.occurrences_of("verbose"); match matches.subcommand() { - ("analyze-storage", Some(arg_matches)) => { - analyze_storage( - &crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary).db(), - ); - } + ("analyze-storage", Some(arg_matches)) => analyze_storage( + &crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary).db(), + )?, ("bounds", Some(arg_matches)) => { + let output_format = OutputFormat::from_matches(arg_matches, "output_format", false); + let all = arg_matches.is_present("all"); + let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let slot_meta_iterator = blockstore.slot_meta_iterator(0)?; + let slots: Vec<_> = slot_meta_iterator.map(|(slot, _)| slot).collect(); - match blockstore.slot_meta_iterator(0) { - Ok(metas) => { - let output_format = - OutputFormat::from_matches(arg_matches, "output_format", false); - let all = arg_matches.is_present("all"); - - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); - - let slot_bounds = if slots.is_empty() { - SlotBounds::default() - } else { - // Collect info about slot bounds - let mut bounds = SlotBounds { - slots: SlotInfo { - total: slots.len(), - first: Some(*slots.first().unwrap()), - last: Some(*slots.last().unwrap()), - ..SlotInfo::default() - }, - ..SlotBounds::default() - }; - if all { - bounds.all_slots = Some(&slots); - } + let slot_bounds = if slots.is_empty() { + SlotBounds::default() + } else { + // Collect info about slot bounds + let mut bounds = SlotBounds { + slots: SlotInfo { + total: slots.len(), + first: Some(*slots.first().unwrap()), + last: Some(*slots.last().unwrap()), + ..SlotInfo::default() + }, + ..SlotBounds::default() + }; + if all { + bounds.all_slots = Some(&slots); + } - // Consider also rooted slots, if present - if let Ok(rooted) = blockstore.rooted_slot_iterator(0) { - let mut first_rooted = None; - let mut last_rooted = None; - let mut total_rooted = 0; - for (i, slot) in rooted.into_iter().enumerate() { - if i == 0 { - first_rooted = Some(slot); - } - last_rooted = Some(slot); - total_rooted += 1; - } - let last_root_for_comparison = last_rooted.unwrap_or_default(); - let count_past_root = slots - .iter() - .rev() - .take_while(|slot| *slot > &last_root_for_comparison) - .count(); + // Consider also rooted slots, if present + let rooted_slot_iterator = blockstore.rooted_slot_iterator(0)?; + let mut first_rooted = None; + let mut last_rooted = None; + let mut total_rooted = 0; + for (i, slot) in rooted_slot_iterator.into_iter().enumerate() { + if i == 0 { + first_rooted = Some(slot); + } + last_rooted = Some(slot); + 
total_rooted += 1; + } + let last_root_for_comparison = last_rooted.unwrap_or_default(); + let count_past_root = slots + .iter() + .rev() + .take_while(|slot| *slot > &last_root_for_comparison) + .count(); - bounds.roots = SlotInfo { - total: total_rooted, - first: first_rooted, - last: last_rooted, - num_after_last_root: Some(count_past_root), - }; - } - bounds - }; + bounds.roots = SlotInfo { + total: total_rooted, + first: first_rooted, + last: last_rooted, + num_after_last_root: Some(count_past_root), + }; - // Print collected data - println!("{}", output_format.formatted_string(&slot_bounds)); - } - Err(err) => { - eprintln!("Unable to read the Ledger: {err:?}"); - std::process::exit(1); - } + bounds }; + + // Print collected data + println!("{}", output_format.formatted_string(&slot_bounds)); } ("copy", Some(arg_matches)) => { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); @@ -698,14 +691,13 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) ), ); let target = crate::open_blockstore(&target_db, arg_matches, AccessType::Primary); - for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { + for (slot, _meta) in source.slot_meta_iterator(starting_slot)? { if slot > ending_slot { break; } - if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { - if target.insert_shreds(shreds, None, true).is_err() { - warn!("error inserting shreds for slot {}", slot); - } + let shreds = source.get_data_shreds_for_slot(slot, 0)?; + if target.insert_shreds(shreds, None, true).is_err() { + warn!("error inserting shreds for slot {}", slot); } } } @@ -713,7 +705,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { + for slot in blockstore.dead_slots_iterator(starting_slot)? { println!("{slot}"); } } @@ -721,7 +713,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { + for slot in blockstore.duplicate_slots_iterator(starting_slot)? 
{ println!("{slot}"); } } @@ -778,9 +770,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let start_root = value_t!(arg_matches, "start_root", Slot).unwrap_or(0); let num_roots = value_t_or_exit!(arg_matches, "num_roots", usize); - let iter = blockstore - .rooted_slot_iterator(start_root) - .expect("Failed to get rooted slot"); + let iter = blockstore.rooted_slot_iterator(start_root)?; let mut output: Box = if let Some(path) = arg_matches.value_of("slot_list") { match File::create(path) { @@ -791,21 +781,16 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) Box::new(stdout()) }; - iter.take(num_roots) + for slot in iter + .take(num_roots) .take_while(|slot| *slot <= max_height as u64) .collect::>() .into_iter() .rev() - .for_each(|slot| { - let blockhash = blockstore - .get_slot_entries(slot, 0) - .unwrap() - .last() - .unwrap() - .hash; - - writeln!(output, "{slot}: {blockhash:?}").expect("failed to write"); - }); + { + let blockhash = blockstore.get_slot_entries(slot, 0)?.last().unwrap().hash; + writeln!(output, "{slot}: {blockhash:?}").expect("failed to write"); + } } ("parse_full_frozen", Some(arg_matches)) => { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); @@ -814,7 +799,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); let mut ancestors = BTreeSet::new(); assert!( - blockstore.meta(ending_slot).unwrap().is_some(), + blockstore.meta(ending_slot)?.is_some(), "Ending slot doesn't exist" ); for a in AncestorIterator::new(ending_slot, &blockstore) { @@ -831,9 +816,9 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); - let f = BufReader::new(File::open(log_file).unwrap()); + let f = BufReader::new(File::open(log_file)?); println!("Reading log file"); - for line in f.lines().map_while(Result::ok) { + for line in f.lines().map_while(std::io::Result::ok) { let parse_results = { if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { Some((slot_string, &mut frozen)) @@ -888,9 +873,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); let sst_file_name = arg_matches.value_of("file_name"); - if let Err(err) = print_blockstore_file_metadata(&blockstore, &sst_file_name) { - eprintln!("{err}"); - } + print_blockstore_file_metadata(&blockstore, &sst_file_name)?; } ("purge", Some(arg_matches)) => { let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot); @@ -910,25 +893,23 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let end_slot = match end_slot { Some(end_slot) => end_slot, - None => match blockstore.slot_meta_iterator(start_slot) { - Ok(metas) => { - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); - if slots.is_empty() { - eprintln!("Purge range is empty"); - std::process::exit(1); - } - *slots.last().unwrap() - } - Err(err) => { - eprintln!("Unable to read the Ledger: {err:?}"); - std::process::exit(1); + None => { + let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot)?; + let slots: Vec<_> = slot_meta_iterator.map(|(slot, _)| slot).collect(); + if slots.is_empty() { + return Err(LedgerToolError::BadArgument(format!( + 
"blockstore is empty beyond purge start slot {start_slot}" + ))); } - }, + *slots.last().unwrap() + } }; if end_slot < start_slot { - eprintln!("end slot {end_slot} is less than start slot {start_slot}"); - std::process::exit(1); + return Err(LedgerToolError::BadArgument(format!( + "starting slot {start_slot} should be less than or equal to \ + ending slot {end_slot}" + ))); } info!( "Purging data from slots {} to {} ({} slots) (do compaction: {}) \ @@ -965,8 +946,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) } } else { let dead_slots_iter = blockstore - .dead_slots_iterator(start_slot) - .unwrap() + .dead_slots_iterator(start_slot)? .take_while(|s| *s <= end_slot); for dead_slot in dead_slots_iter { info!("Purging dead slot {}", dead_slot); @@ -978,12 +958,9 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let slots = values_t_or_exit!(arg_matches, "slots", Slot); let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Primary); for slot in slots { - match blockstore.remove_dead_slot(slot) { - Ok(_) => println!("Slot {slot} not longer marked dead"), - Err(err) => { - eprintln!("Failed to remove dead flag for slot {slot}, {err:?}") - } - } + blockstore + .remove_dead_slot(slot) + .map(|_| println!("Slot {slot} not longer marked dead"))?; } } ("repair-roots", Some(arg_matches)) => { @@ -995,31 +972,29 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let end_root = value_t!(arg_matches, "end_root", Slot) .unwrap_or_else(|_| start_root.saturating_sub(max_slots)); assert!(start_root > end_root); - let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked + // Adjust by one since start_root need not be checked + let num_slots = start_root - end_root - 1; if arg_matches.is_present("end_root") && num_slots > max_slots { - eprintln!( + return Err(LedgerToolError::BadArgument(format!( "Requested range {num_slots} too large, max {max_slots}. Either adjust \ - `--until` value, or pass a larger `--repair-limit` to override the limit", - ); - std::process::exit(1); + `--until` value, or pass a larger `--repair-limit` to override the limit", + ))); } - let num_repaired_roots = blockstore - .scan_and_fix_roots(Some(start_root), Some(end_root), &AtomicBool::new(false)) - .unwrap_or_else(|err| { - eprintln!("Unable to repair roots: {err}"); - std::process::exit(1); - }); + let num_repaired_roots = blockstore.scan_and_fix_roots( + Some(start_root), + Some(end_root), + &AtomicBool::new(false), + )?; println!("Successfully repaired {num_repaired_roots} roots"); } ("set-dead-slot", Some(arg_matches)) => { let slots = values_t_or_exit!(arg_matches, "slots", Slot); let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Primary); for slot in slots { - match blockstore.set_dead_slot(slot) { - Ok(_) => println!("Slot {slot} dead"), - Err(err) => eprintln!("Failed to set slot {slot} dead slot: {err:?}"), - } + blockstore + .set_dead_slot(slot) + .map(|_| println!("Slot {slot} marked dead"))?; } } ("shred-meta", Some(arg_matches)) => { @@ -1039,8 +1014,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); let ledger = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); for (slot, _meta) in ledger - .slot_meta_iterator(starting_slot) - .unwrap() + .slot_meta_iterator(starting_slot)? 
.take_while(|(slot, _)| *slot <= ending_slot) { let full_slot = ledger.is_full(slot); @@ -1084,6 +1058,7 @@ pub fn blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) } _ => unreachable!(), } + Ok(()) } #[cfg(test)] diff --git a/ledger-tool/src/error.rs b/ledger-tool/src/error.rs new file mode 100644 index 00000000000000..9b32da4b8ff1de --- /dev/null +++ b/ledger-tool/src/error.rs @@ -0,0 +1,18 @@ +use {solana_ledger::blockstore::BlockstoreError, thiserror::Error}; + +pub type Result = std::result::Result; + +#[derive(Error, Debug)] +pub enum LedgerToolError { + #[error("{0}")] + Blockstore(#[from] BlockstoreError), + + #[error("{0}")] + SerdeJson(#[from] serde_json::Error), + + #[error("{0}")] + Io(#[from] std::io::Error), + + #[error("{0}")] + BadArgument(String), +} diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 9b299cfadcbcf2..778b1a3201ab0b 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -96,6 +96,7 @@ use { mod args; mod bigtable; mod blockstore; +mod error; mod ledger_path; mod ledger_utils; mod output; From 3284d34e041f737ddb0b946d0695d9c6ada0fc71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Feb 2024 20:12:59 +0800 Subject: [PATCH 236/401] build(deps): bump openssl from 0.10.63 to 0.10.64 (#35251) * build(deps): bump openssl from 0.10.63 to 0.10.64 Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.63 to 0.10.64. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.63...openssl-v0.10.64) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- programs/sbf/Cargo.lock | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a61e27a12c5e9..79e9adfaaf15e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3509,9 +3509,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.2", "cfg-if 1.0.0", @@ -3550,9 +3550,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1b8d422d42ba7c..ba4ea3f309d352 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3145,9 +3145,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.2", "cfg-if 1.0.0", @@ -3186,9 +3186,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", From c98ff7a6da989ffb93c9b5a273af4830a88fe332 Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Thu, 22 Feb 2024 12:11:00 -0800 Subject: [PATCH 237/401] loaded-programs: Remove unnecessary `allow(unused_mut)` (#35284) Only the `executable` might need to be mutable. This `allow(unused_mut)` is probably a leftover from a previous version. 
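For context, a minimal sketch of the binding pattern this change settles on. The enum and function below are hypothetical stand-ins, not the real `LoadedProgramType` or loader code: when every branch of the `if`/`else` chain yields the final value directly, the binding never needs `mut`, so the lint suppression becomes dead weight.

    // Hypothetical stand-ins for illustration; not the real loader types.
    enum ProgramType {
        LegacyV0(Vec<u8>),
        LegacyV1(Vec<u8>),
    }

    fn classify(deprecated_loader: bool, executable: Vec<u8>) -> ProgramType {
        // Each arm produces the final value, so `program` can stay immutable
        // and no #[allow(unused_mut)] is required on the binding.
        let program = if deprecated_loader {
            ProgramType::LegacyV0(executable)
        } else {
            ProgramType::LegacyV1(executable)
        };
        program
    }

    fn main() {
        let _ = classify(false, vec![0x7f, b'E', b'L', b'F']);
    }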
--- program-runtime/src/loaded_programs.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 6da84b0d1f0692..2739d44c36f4cd 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -358,9 +358,7 @@ impl LoadedProgram { metrics.jit_compile_us = jit_compile_time.as_us(); } - // Allowing mut here, since it may be needed for jit compile, which is under a config flag - #[allow(unused_mut)] - let mut program = if bpf_loader_deprecated::check_id(loader_key) { + let program = if bpf_loader_deprecated::check_id(loader_key) { LoadedProgramType::LegacyV0(executable) } else if bpf_loader::check_id(loader_key) || bpf_loader_upgradeable::check_id(loader_key) { LoadedProgramType::LegacyV1(executable) From d9802027cb3738f84df6624ea79f6989393517b5 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 22 Feb 2024 14:42:35 -0600 Subject: [PATCH 238/401] genesis: Skip inserting genesis accounts for Development clusters (#35266) solana-genesis currently includes a list of accounts that exist in MainnetBeta genesis. These accounts are added for all cluster types, including Development clusters. There is no need for these accounts to get added to dev clusters so skip adding them for ClusterType::Development case --- genesis/src/genesis_accounts.rs | 39 +++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/genesis/src/genesis_accounts.rs b/genesis/src/genesis_accounts.rs index 0704985913838f..55cb43ebb073b5 100644 --- a/genesis/src/genesis_accounts.rs +++ b/genesis/src/genesis_accounts.rs @@ -3,7 +3,10 @@ use { stakes::{create_and_add_stakes, StakerInfo}, unlocks::UnlockInfo, }, - solana_sdk::{genesis_config::GenesisConfig, native_token::LAMPORTS_PER_SOL}, + solana_sdk::{ + genesis_config::{ClusterType, GenesisConfig}, + native_token::LAMPORTS_PER_SOL, + }, }; // 9 month schedule is 100% after 9 months @@ -227,10 +230,14 @@ fn add_stakes( .sum::() } +/// Add acounts that should be present in genesis; skip for development clusters pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lamports: u64) { + if genesis_config.cluster_type == ClusterType::Development { + return; + } + // add_stakes() and add_validators() award tokens for rent exemption and // to cover an initial transfer-free period of the network - issued_lamports += add_stakes( genesis_config, CREATOR_STAKER_INFOS, @@ -270,16 +277,26 @@ mod tests { #[test] fn test_add_genesis_accounts() { - let mut genesis_config = GenesisConfig::default(); - - add_genesis_accounts(&mut genesis_config, 0); + let clusters_and_expected_lamports = [ + (ClusterType::MainnetBeta, 500_000_000 * LAMPORTS_PER_SOL), + (ClusterType::Testnet, 500_000_000 * LAMPORTS_PER_SOL), + (ClusterType::Devnet, 500_000_000 * LAMPORTS_PER_SOL), + (ClusterType::Development, 0), + ]; - let lamports = genesis_config - .accounts - .values() - .map(|account| account.lamports) - .sum::(); + for (cluster_type, expected_lamports) in clusters_and_expected_lamports.iter() { + let mut genesis_config = GenesisConfig { + cluster_type: *cluster_type, + ..GenesisConfig::default() + }; + add_genesis_accounts(&mut genesis_config, 0); - assert_eq!(500_000_000 * LAMPORTS_PER_SOL, lamports); + let lamports = genesis_config + .accounts + .values() + .map(|account| account.lamports) + .sum::(); + assert_eq!(*expected_lamports, lamports); + } } } From 31a73ab73177301af9d70ac0b57b94009dfdf678 Mon Sep 17 00:00:00 
2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Thu, 22 Feb 2024 18:37:09 -0300 Subject: [PATCH 239/401] Move `test_rent_state_list_len` to SVM (#35287) --- runtime/src/bank/tests.rs | 61 +------------------ svm/tests/mod.rs | 1 + svm/tests/rent_state.rs | 120 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 123 insertions(+), 59 deletions(-) create mode 100644 svm/tests/mod.rs create mode 100644 svm/tests/rent_state.rs diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index a01e9c19de6a39..9d3b518e9855af 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -104,13 +104,10 @@ use { Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, }, - transaction_context::{TransactionAccount, TransactionContext}, + transaction_context::TransactionAccount, }, solana_stake_program::stake_state::{self, StakeStateV2}, - solana_svm::{ - account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, - transaction_error_metrics::TransactionErrorMetrics, transaction_results::DurableNonceFee, - }, + solana_svm::transaction_results::DurableNonceFee, solana_vote_program::{ vote_instruction, vote_state::{ @@ -10969,60 +10966,6 @@ fn test_rent_state_incinerator() { } } -#[test] -fn test_rent_state_list_len() { - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); - genesis_config.rent = Rent::default(); - - let bank = Bank::new_for_tests(&genesis_config); - let recipient = Pubkey::new_unique(); - let tx = system_transaction::transfer( - &mint_keypair, - &recipient, - sol_to_lamports(1.), - bank.last_blockhash(), - ); - let num_accounts = tx.message().account_keys.len(); - let sanitized_tx = SanitizedTransaction::try_from_legacy_transaction(tx).unwrap(); - let mut error_counters = TransactionErrorMetrics::default(); - let loaded_txs = load_accounts( - &bank, - &[sanitized_tx.clone()], - &[(Ok(()), None, Some(0))], - &mut error_counters, - &FeeStructure::default(), - None, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ); - - let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| { - ComputeBudget::new(u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - )) - }); - let transaction_context = TransactionContext::new( - loaded_txs[0].0.as_ref().unwrap().accounts.clone(), - Rent::default(), - compute_budget.max_invoke_stack_height, - compute_budget.max_instruction_trace_length, - ); - - assert_eq!( - TransactionAccountStateInfo::new( - &bank.rent_collector.rent, - &transaction_context, - sanitized_tx.message() - ) - .len(), - num_accounts, - ); -} - #[test] fn test_update_accounts_data_size() { // Test: Subtraction saturates at 0 diff --git a/svm/tests/mod.rs b/svm/tests/mod.rs new file mode 100644 index 00000000000000..d1932e1253fe58 --- /dev/null +++ b/svm/tests/mod.rs @@ -0,0 +1 @@ +mod rent_state; diff --git a/svm/tests/rent_state.rs b/svm/tests/rent_state.rs new file mode 100644 index 00000000000000..a97ee64ab98a2b --- /dev/null +++ b/svm/tests/rent_state.rs @@ -0,0 +1,120 @@ +#![cfg(test)] + +use { + solana_program_runtime::{ + compute_budget::ComputeBudget, compute_budget_processor, + loaded_programs::LoadedProgramsForTxBatch, + }, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + feature_set::FeatureSet, + fee::FeeStructure, + hash::Hash, + native_loader, + native_token::sol_to_lamports, + 
pubkey::Pubkey, + rent::Rent, + rent_collector::RentCollector, + signature::{Keypair, Signer}, + system_transaction, + transaction::SanitizedTransaction, + transaction_context::TransactionContext, + }, + solana_svm::{ + account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, + transaction_error_metrics::TransactionErrorMetrics, + transaction_processor::TransactionProcessingCallback, + }, + std::{collections::HashMap, sync::Arc}, +}; + +#[derive(Default)] +struct MockBankCallback { + rent_collector: RentCollector, + feature_set: Arc, + account_shared_data: HashMap, +} + +impl TransactionProcessingCallback for MockBankCallback { + fn account_matches_owners(&self, _account: &Pubkey, _owners: &[Pubkey]) -> Option { + todo!() + } + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + self.account_shared_data.get(pubkey).cloned() + } + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { + todo!() + } + + fn get_rent_collector(&self) -> &RentCollector { + &self.rent_collector + } + + fn get_feature_set(&self) -> Arc { + self.feature_set.clone() + } +} + +#[test] +fn test_rent_state_list_len() { + let mint_keypair = Keypair::new(); + let mut bank = MockBankCallback::default(); + let recipient = Pubkey::new_unique(); + let last_block_hash = Hash::new_unique(); + + let mut system_data = AccountSharedData::default(); + system_data.set_executable(true); + system_data.set_owner(native_loader::id()); + bank.account_shared_data + .insert(Pubkey::new_from_array([0u8; 32]), system_data); + + let mut mint_data = AccountSharedData::default(); + mint_data.set_lamports(2); + bank.account_shared_data + .insert(mint_keypair.pubkey(), mint_data); + + bank.account_shared_data + .insert(recipient, AccountSharedData::default()); + + let tx = system_transaction::transfer( + &mint_keypair, + &recipient, + sol_to_lamports(1.), + last_block_hash, + ); + let num_accounts = tx.message().account_keys.len(); + let sanitized_tx = SanitizedTransaction::try_from_legacy_transaction(tx).unwrap(); + let mut error_counters = TransactionErrorMetrics::default(); + let loaded_txs = load_accounts( + &bank, + &[sanitized_tx.clone()], + &[(Ok(()), None, Some(0))], + &mut error_counters, + &FeeStructure::default(), + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + let compute_budget = ComputeBudget::new(u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + )); + let transaction_context = TransactionContext::new( + loaded_txs[0].0.as_ref().unwrap().accounts.clone(), + Rent::default(), + compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_trace_length, + ); + + assert_eq!( + TransactionAccountStateInfo::new( + &Rent::default(), + &transaction_context, + sanitized_tx.message() + ) + .len(), + num_accounts, + ); +} From 367f489f632d6be0fd93e95cc2c5b7202515fe6e Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 22 Feb 2024 15:01:08 -0800 Subject: [PATCH 240/401] scheduler inner metrics (#35271) --- .../scheduler_metrics.rs | 57 ++++++++++++++++--- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index 2ab86bd684e4b4..a3891fdf245ea5 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -1,9 +1,17 @@ -use {itertools::MinMaxResult, 
solana_sdk::timing::AtomicInterval}; +use { + itertools::MinMaxResult, + solana_sdk::timing::AtomicInterval, + std::ops::{Deref, DerefMut}, +}; #[derive(Default)] pub struct SchedulerCountMetrics { interval: AtomicInterval, + metrics: SchedulerCountMetricsInner, +} +#[derive(Default)] +pub struct SchedulerCountMetricsInner { /// Number of packets received. pub num_received: usize, /// Number of packets buffered. @@ -41,20 +49,35 @@ pub struct SchedulerCountMetrics { pub max_prioritization_fees: u64, } +impl Deref for SchedulerCountMetrics { + type Target = SchedulerCountMetricsInner; + fn deref(&self) -> &Self::Target { + &self.metrics + } +} + +impl DerefMut for SchedulerCountMetrics { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.metrics + } +} + impl SchedulerCountMetrics { pub fn maybe_report_and_reset(&mut self, should_report: bool) { const REPORT_INTERVAL_MS: u64 = 1000; if self.interval.should_update(REPORT_INTERVAL_MS) { if should_report { - self.report(); + self.report("banking_stage_scheduler_counts"); } self.reset(); } } +} - fn report(&self) { +impl SchedulerCountMetricsInner { + fn report(&self, name: &'static str) { datapoint_info!( - "banking_stage_scheduler_counts", + name, ("num_received", self.num_received, i64), ("num_buffered", self.num_buffered, i64), ("num_scheduled", self.num_scheduled, i64), @@ -164,6 +187,11 @@ impl SchedulerCountMetrics { #[derive(Default)] pub struct SchedulerTimingMetrics { interval: AtomicInterval, + metrics: SchedulerTimingMetricsInner, +} + +#[derive(Default)] +pub struct SchedulerTimingMetricsInner { /// Time spent making processing decisions. pub decision_time_us: u64, /// Time spent receiving packets. @@ -182,20 +210,35 @@ pub struct SchedulerTimingMetrics { pub receive_completed_time_us: u64, } +impl Deref for SchedulerTimingMetrics { + type Target = SchedulerTimingMetricsInner; + fn deref(&self) -> &Self::Target { + &self.metrics + } +} + +impl DerefMut for SchedulerTimingMetrics { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.metrics + } +} + impl SchedulerTimingMetrics { pub fn maybe_report_and_reset(&mut self, should_report: bool) { const REPORT_INTERVAL_MS: u64 = 1000; if self.interval.should_update(REPORT_INTERVAL_MS) { if should_report { - self.report(); + self.report("banking_stage_scheduler_timing"); } self.reset(); } } +} - fn report(&self) { +impl SchedulerTimingMetricsInner { + fn report(&self, name: &'static str) { datapoint_info!( - "banking_stage_scheduler_timing", + name, ("decision_time_us", self.decision_time_us, i64), ("receive_time_us", self.receive_time_us, i64), ("buffer_time_us", self.buffer_time_us, i64), From 2891ce886bece8d09777959405faf694e2b21873 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 23 Feb 2024 15:15:28 +0100 Subject: [PATCH 241/401] Fix - program loading with effective slot at epoch boundary (#35283) * Always limit effective slot to the begin of the current epoch. * Adds comments. * Optimizes to avoid having two entries if there is no relevant feature activation. * Adds test_feature_activation_loaded_programs_epoch_transition(). 
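Before the diff, a simplified sketch of the clamping rule this fix applies. Fixed-length epochs are assumed here purely for illustration; the real code goes through `EpochSchedule`, which also has a warmup period. The rule: an entry compiled for the next epoch's environment must not become effective before that epoch begins.

    // Illustration only: a fixed epoch length keeps the arithmetic obvious.
    const SLOTS_PER_EPOCH: u64 = 432_000;

    fn first_slot_in_epoch(epoch: u64) -> u64 {
        epoch.saturating_mul(SLOTS_PER_EPOCH)
    }

    // Push the effective slot forward to the epoch boundary when the program
    // was compiled for the environment of `effective_epoch`.
    fn clamp_effective_slot(effective_slot: u64, effective_epoch: u64) -> u64 {
        effective_slot.max(first_slot_in_epoch(effective_epoch))
    }

    fn main() {
        // Deployed at slot 10 but targeting epoch 1's new environment:
        // it only takes effect at the first slot of epoch 1.
        assert_eq!(clamp_effective_slot(10, 1), 432_000);
        // Entries already effective inside the epoch are left alone.
        assert_eq!(clamp_effective_slot(432_005, 1), 432_005);
    }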
--- runtime/src/bank/tests.rs | 54 ++++++++++++++++++++++++++++++++ svm/src/transaction_processor.rs | 14 ++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 9d3b518e9855af..15df308ae8d28a 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11961,6 +11961,60 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { ); } +#[test] +fn test_feature_activation_loaded_programs_epoch_transition() { + solana_logger::setup(); + + // Bank Setup + let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config + .accounts + .remove(&feature_set::reject_callx_r10::id()); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + + // Program Setup + let program_keypair = Keypair::new(); + let program_data = include_bytes!("../../../programs/bpf_loader/test_elfs/out/noop_aligned.so"); + let program_account = AccountSharedData::from(Account { + lamports: Rent::default().minimum_balance(program_data.len()).min(1), + data: program_data.to_vec(), + owner: bpf_loader::id(), + executable: true, + rent_epoch: 0, + }); + root_bank.store_account(&program_keypair.pubkey(), &program_account); + + // Compose message using the desired program. + let instruction = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let binding = mint_keypair.insecure_clone(); + let signers = vec![&binding]; + + // Advance the bank so that the program becomes effective. + goto_end_of_slot(root_bank.clone()); + let bank = new_from_parent_with_fork_next_slot(root_bank, bank_forks.as_ref()); + + // Load the program with the old environment. + let transaction = Transaction::new(&signers, message.clone(), bank.last_blockhash()); + assert!(bank.process_transaction(&transaction).is_ok()); + + // Schedule feature activation to trigger a change of environment at the epoch boundary. + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::reject_callx_r10::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + + // Advance the bank to cross the epoch boundary and activate the feature. + goto_end_of_slot(bank.clone()); + let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 33); + + // Load the program with the new environment. + let transaction = Transaction::new(&signers, message, bank.last_blockhash()); + assert!(bank.process_transaction(&transaction).is_ok()); +} + #[test] fn test_bank_verify_accounts_hash_with_base() { let GenesisConfigInfo { diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index b58d178df4b963..dc3e59389cc295 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -739,10 +739,22 @@ impl TransactionBatchProcessor { let mut timings = ExecuteDetailsTimings::default(); load_program_metrics.submit_datapoint(&mut timings); - if let Some(recompile) = recompile { + if !Arc::ptr_eq( + &environments.program_runtime_v1, + &loaded_programs_cache.environments.program_runtime_v1, + ) || !Arc::ptr_eq( + &environments.program_runtime_v2, + &loaded_programs_cache.environments.program_runtime_v2, + ) { + // There can be two entries per program when the environment changes. 
+ // One for the old environment before the epoch boundary and one for the new environment after the epoch boundary. + // These two entries have the same deployment slot, so they must differ in their effective slot instead. + // This is done by setting the effective slot of the entry for the new environment to the epoch boundary. loaded_program.effective_slot = loaded_program .effective_slot .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); + } + if let Some(recompile) = recompile { loaded_program.tx_usage_counter = AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); loaded_program.ix_usage_counter = From 139b9c8c2583a12f84f4c49a9f4bfe57a430bcaa Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Fri, 23 Feb 2024 08:58:48 -0600 Subject: [PATCH 242/401] Add fee_details to fee calculation (#35021) * add fee_details to fee calculation * fix - no need to round after summing u64 * feature gate on removing unwanted rounding --- core/src/banking_stage/consumer.rs | 2 + .../scheduler_controller.rs | 11 +++- programs/sbf/tests/programs.rs | 2 + runtime/src/bank.rs | 7 +- runtime/src/bank/tests.rs | 3 +- sdk/src/feature_set.rs | 5 ++ sdk/src/fee.rs | 64 +++++++++++++++++-- svm/src/account_loader.rs | 8 ++- 8 files changed, 89 insertions(+), 13 deletions(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 660dc2ac977b0d..81de74022432d9 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -747,6 +747,8 @@ impl Consumer { bank.feature_set.is_active( &feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), ), + bank.feature_set + .is_active(&feature_set::remove_rounding_in_fee_calculation::id()), ); let (mut fee_payer_account, _slot) = bank .rc diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 7d9a70931b4410..b0c5e0f6ab3265 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -25,8 +25,13 @@ use { solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ clock::MAX_PROCESSING_AGE, - feature_set::include_loaded_accounts_data_size_in_fee_calculation, fee::FeeBudgetLimits, - saturating_add_assign, transaction::SanitizedTransaction, + feature_set::{ + include_loaded_accounts_data_size_in_fee_calculation, + remove_rounding_in_fee_calculation, + }, + fee::FeeBudgetLimits, + saturating_add_assign, + transaction::SanitizedTransaction, }, solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ @@ -422,6 +427,8 @@ impl SchedulerController { fee_budget_limits, bank.feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + bank.feature_set + .is_active(&remove_rounding_in_fee_calculation::id()), ); // We need a multiplier here to avoid rounding down too aggressively. 
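Between the scheduler and SBF-test diffs, a short worked example of the precision loss the new feature gate removes, mirroring the `test_total_fee_rounding` case added later in this patch (plain std Rust, no Solana types): u64 totals above 2^53 are not exactly representable as f64, so the legacy round-trip can silently alter the fee.

    fn main() {
        // Same magnitudes as the patch's rounding test.
        let transaction_fee: u64 = u64::MAX - 11;
        let prioritization_fee: u64 = 1;

        // New path: pure integer arithmetic, exact.
        let exact = transaction_fee.saturating_add(prioritization_fee);

        // Legacy path: rounding through f64 loses the low bits, because
        // f64 carries only 53 bits of mantissa.
        let legacy = (exact as f64).round() as u64;

        assert_eq!(exact, u64::MAX - 10);
        assert_ne!(legacy, exact); // the f64 round-trip lands on a nearby value
        println!("exact: {exact}, after f64 round-trip: {legacy}");
    }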
diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 943713d7ad9bbd..1635850bb2a9c5 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -3713,6 +3713,7 @@ fn test_program_fees() { .unwrap_or_default() .into(), false, + true, ); bank_client .send_and_confirm_message(&[&mint_keypair], message) @@ -3736,6 +3737,7 @@ fn test_program_fees() { .unwrap_or_default() .into(), false, + true, ); assert!(expected_normal_fee < expected_prioritized_fee); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7e051019c99871..29dde36ac20116 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -117,7 +117,10 @@ use { epoch_info::EpochInfo, epoch_schedule::EpochSchedule, feature, - feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{ + self, include_loaded_accounts_data_size_in_fee_calculation, + remove_rounding_in_fee_calculation, FeatureSet, + }, fee::FeeStructure, fee_calculator::{FeeCalculator, FeeRateGovernor}, genesis_config::{ClusterType, GenesisConfig}, @@ -4016,6 +4019,8 @@ impl Bank { .into(), self.feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + self.feature_set + .is_active(&remove_rounding_in_fee_calculation::id()), ) } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 15df308ae8d28a..5f5d0884ac13ec 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -3333,7 +3333,6 @@ fn test_bank_parent_account_spend() { let key2 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); - println!("==== amount {}", amount); let tx = system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash()); @@ -10029,7 +10028,7 @@ fn calculate_test_fee( .unwrap_or_default() .into(); - fee_structure.calculate_fee(message, lamports_per_signature, &budget_limits, false) + fee_structure.calculate_fee(message, lamports_per_signature, &budget_limits, false, true) } #[test] diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 82687673246293..abecf4fafb6b1d 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -780,6 +780,10 @@ pub mod enable_chained_merkle_shreds { solana_sdk::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); } +pub mod remove_rounding_in_fee_calculation { + solana_sdk::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -970,6 +974,7 @@ lazy_static! 
{ (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"), (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"), + (remove_rounding_in_fee_calculation::id(), "Removing unwanted rounding in fee calculation #34982"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index f3377b5254f0a6..b325a23ac08d9d 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -31,6 +31,24 @@ pub struct FeeStructure { pub compute_fee_bins: Vec, } +#[derive(Debug, Default, Clone, Eq, PartialEq)] +pub struct FeeDetails { + transaction_fee: u64, + prioritization_fee: u64, +} + +impl FeeDetails { + pub fn total_fee(&self, remove_rounding_in_fee_calculation: bool) -> u64 { + let total_fee = self.transaction_fee.saturating_add(self.prioritization_fee); + if remove_rounding_in_fee_calculation { + total_fee + } else { + // backward compatible behavior + (total_fee as f64).round() as u64 + } + } +} + pub const ACCOUNT_DATA_COST_PAGE_SIZE: u64 = 32_u64.saturating_mul(1024); impl FeeStructure { @@ -83,6 +101,7 @@ impl FeeStructure { lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, include_loaded_account_data_size_in_fee: bool, + remove_rounding_in_fee_calculation: bool, ) -> u64 { // Fee based on compute units and signatures let congestion_multiplier = if lamports_per_signature == 0 { @@ -91,6 +110,23 @@ impl FeeStructure { 1.0 // multiplier that has no effect }; + self.calculate_fee_details( + message, + budget_limits, + include_loaded_account_data_size_in_fee, + ) + .total_fee(remove_rounding_in_fee_calculation) + .saturating_mul(congestion_multiplier as u64) + } + + /// Calculate fee details for `SanitizedMessage` + #[cfg(not(target_os = "solana"))] + pub fn calculate_fee_details( + &self, + message: &SanitizedMessage, + budget_limits: &FeeBudgetLimits, + include_loaded_account_data_size_in_fee: bool, + ) -> FeeDetails { let signature_fee = message .num_signatures() .saturating_mul(self.lamports_per_signature); @@ -122,13 +158,12 @@ impl FeeStructure { .unwrap_or_default() }); - ((budget_limits - .prioritization_fee - .saturating_add(signature_fee) - .saturating_add(write_lock_fee) - .saturating_add(compute_fee) as f64) - * congestion_multiplier) - .round() as u64 + FeeDetails { + transaction_fee: signature_fee + .saturating_add(write_lock_fee) + .saturating_add(compute_fee), + prioritization_fee: budget_limits.prioritization_fee, + } } } @@ -180,4 +215,19 @@ mod tests { FeeStructure::calculate_memory_usage_cost(64 * K, heap_cost) ); } + + #[test] + fn test_total_fee_rounding() { + // round large `f64` can lost precision, see feature gate: + // "Removing unwanted rounding in fee calculation #34982" + + let large_fee_details = FeeDetails { + transaction_fee: u64::MAX - 11, + prioritization_fee: 1, + }; + let expected_large_fee = u64::MAX - 10; + + assert_eq!(large_fee_details.total_fee(true), expected_large_fee); + assert_ne!(large_fee_details.total_fee(false), expected_large_fee); + } } diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 854d59bac095cb..334ad7679561ee 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -15,7 +15,10 @@ use { create_executable_meta, is_builtin, is_executable, Account, AccountSharedData, ReadableAccount, WritableAccount, }, - feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation}, + 
feature_set::{ + self, include_loaded_accounts_data_size_in_fee_calculation, + remove_rounding_in_fee_calculation, + }, fee::FeeStructure, message::SanitizedMessage, native_loader, @@ -74,6 +77,7 @@ pub fn load_accounts( .into(), feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + feature_set.is_active(&remove_rounding_in_fee_calculation::id()), ) } else { return (Err(TransactionError::BlockhashNotFound), None); @@ -682,6 +686,7 @@ mod tests { .unwrap_or_default() .into(), false, + true, ); assert_eq!(fee, lamports_per_signature); @@ -1210,6 +1215,7 @@ mod tests { .unwrap_or_default() .into(), false, + true, ); assert_eq!(fee, lamports_per_signature + prioritization_fee); From b5ffc24a387a7e9eece6d0036c342e8b49b2051f Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Feb 2024 11:12:24 -0500 Subject: [PATCH 243/401] Replaces ReadAccountMapEntry in retry_to_get_account_accessor() (#35244) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 6fc5297efb30d3..8f4738d3f6630e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5344,7 +5344,7 @@ impl AccountsDb { storage_location, load_hint, new_storage_location, - self.accounts_index.get_account_read_entry(pubkey) + self.accounts_index.get_cloned(pubkey) ); // Considering that we've failed to get accessor above and further that // the index still returned the same (slot, store_id) tuple, offset must be same From 923cac8d7f31fb04512315cd26bc00a01526a7c5 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Feb 2024 11:13:57 -0500 Subject: [PATCH 244/401] Replaces ReadAccountMapEntry in exhaustively_verify_refcounts() (#35243) --- accounts-db/src/accounts_db.rs | 41 +++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 8f4738d3f6630e..ad96a305e632be 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3110,23 +3110,32 @@ impl AccountsDb { if failed.load(Ordering::Relaxed) { return; } - if let Some(idx) = self.accounts_index.get_account_read_entry(entry.key()) { - match (idx.ref_count() as usize).cmp(&entry.value().len()) { - std::cmp::Ordering::Greater => { - let list = idx.slot_list(); - let too_new = list.iter().filter_map(|(slot, _)| (slot > &max_slot_inclusive).then_some(())).count(); - - if ((idx.ref_count() as usize) - too_new) > entry.value().len() { - failed.store(true, Ordering::Relaxed); - error!("exhaustively_verify_refcounts: {} refcount too large: {}, should be: {}, {:?}, {:?}, original: {:?}, too_new: {too_new}", entry.key(), idx.ref_count(), entry.value().len(), *entry.value(), list, idx.slot_list()); + + self.accounts_index.get_and_then(entry.key(), |index_entry| { + if let Some(index_entry) = index_entry { + match (index_entry.ref_count() as usize).cmp(&entry.value().len()) { + std::cmp::Ordering::Equal => { + // ref counts match, nothing to do here + } + std::cmp::Ordering::Greater => { + let slot_list = index_entry.slot_list.read().unwrap(); + let num_too_new = slot_list + .iter() + .filter(|(slot, _)| slot > &max_slot_inclusive) + .count(); + + if ((index_entry.ref_count() as usize) - num_too_new) > entry.value().len() { + failed.store(true, Ordering::Relaxed); + error!("exhaustively_verify_refcounts: {} refcount too large: {}, should be: {}, {:?}, {:?}, too_new: {num_too_new}", entry.key(), 
index_entry.ref_count(), entry.value().len(), *entry.value(), slot_list); + } + } + std::cmp::Ordering::Less => { + error!("exhaustively_verify_refcounts: {} refcount too small: {}, should be: {}, {:?}, {:?}", entry.key(), index_entry.ref_count(), entry.value().len(), *entry.value(), index_entry.slot_list.read().unwrap()); + } } - } - std::cmp::Ordering::Less => { - error!("exhaustively_verify_refcounts: {} refcount too small: {}, should be: {}, {:?}, {:?}", entry.key(), idx.ref_count(), entry.value().len(), *entry.value(), idx.slot_list()); - } - _ => {} - } - } + }; + (false, ()) + }); }); }); if failed.load(Ordering::Relaxed) { From 7da8d82aa144202c51768ddbb5fe753303315483 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Feb 2024 11:15:10 -0500 Subject: [PATCH 245/401] Adds snapshot_utils::purge_all_bank_snapshots() (#35291) --- ledger/src/bank_forks_utils.rs | 2 +- runtime/src/snapshot_bank_utils.rs | 15 ++++++++++++++- runtime/src/snapshot_utils.rs | 6 ++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 48c03e1e6cc8e5..b14d9facdd4c19 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -262,7 +262,7 @@ fn bank_forks_from_snapshot( // be released. They will be released by the account_background_service anyway. But in the case of the account_paths // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from // the archives, causing the out-of-memory problem. So, purge the snapshot dirs upfront before loading from the archive. - snapshot_utils::purge_old_bank_snapshots(&snapshot_config.bank_snapshots_dir, 0, None); + snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); let (bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( &account_paths, diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 42680fa1e9357e..721021142f9258 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1261,7 +1261,7 @@ mod tests { snapshot_utils::{ clean_orphaned_account_snapshot_dirs, create_tmp_accounts_dir_for_tests, get_bank_snapshots, get_bank_snapshots_post, get_bank_snapshots_pre, - get_highest_bank_snapshot, purge_bank_snapshot, + get_highest_bank_snapshot, purge_all_bank_snapshots, purge_bank_snapshot, purge_bank_snapshots_older_than_slot, purge_incomplete_bank_snapshots, purge_old_bank_snapshots, purge_old_bank_snapshots_at_startup, snapshot_storage_rebuilder::get_slot_and_append_vec_id, ArchiveFormat, @@ -2405,6 +2405,19 @@ mod tests { ); } + #[test] + fn test_purge_all_bank_snapshots() { + let genesis_config = GenesisConfig::default(); + let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); + let _bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 10, 5); + // Keep bank in this scope so that its account_paths tmp dirs are not released, and purge_all_bank_snapshots + // can clear the account hardlinks correctly. 
+ + assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 10); + purge_all_bank_snapshots(&bank_snapshots_dir); + assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 0); + } + #[test] fn test_purge_old_bank_snapshots() { let genesis_config = GenesisConfig::default(); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 6dabb3d38e9669..77aab8f0fee1c2 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2138,6 +2138,12 @@ pub fn verify_snapshot_archive( assert!(!dir_diff::is_different(&storages_to_verify, unpack_account_dir).unwrap()); } +/// Purges all bank snapshots +pub fn purge_all_bank_snapshots(bank_snapshots_dir: impl AsRef) { + let bank_snapshots = get_bank_snapshots(&bank_snapshots_dir); + purge_bank_snapshots(&bank_snapshots); +} + /// Purges bank snapshots, retaining the newest `num_bank_snapshots_to_retain` pub fn purge_old_bank_snapshots( bank_snapshots_dir: impl AsRef, From 74758d9fbfa811f7c868ad2ecca05ce5db1add25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 23 Feb 2024 17:25:32 +0100 Subject: [PATCH 246/401] Refactor - Move recompilation out of program loading (#35297) Moves recompilation specifics out of load_program(). --- ledger-tool/src/program.rs | 2 +- runtime/src/bank.rs | 13 ++++++++++--- runtime/src/bank/tests.rs | 4 ++-- svm/src/transaction_processor.rs | 20 +++----------------- 4 files changed, 16 insertions(+), 23 deletions(-) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index b56affd4c905c2..af50d59bca0255 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -523,7 +523,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .clone(), ); for key in cached_account_keys { - loaded_programs.replenish(key, bank.load_program(&key, false, None)); + loaded_programs.replenish(key, bank.load_program(&key, false, bank.epoch())); debug!("Loaded program {}", key); } invoke_context.programs_loaded_for_tx_batch = &loaded_programs; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 29dde36ac20116..ccd3f7c522737f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1363,8 +1363,15 @@ impl Bank { if let Some((key, program_to_recompile)) = loaded_programs_cache.programs_to_recompile.pop() { + let effective_epoch = loaded_programs_cache.latest_root_epoch.saturating_add(1); drop(loaded_programs_cache); - let recompiled = new.load_program(&key, false, Some(program_to_recompile)); + let recompiled = new.load_program(&key, false, effective_epoch); + recompiled + .tx_usage_counter + .fetch_add(program_to_recompile.tx_usage_counter.load(Relaxed), Relaxed); + recompiled + .ix_usage_counter + .fetch_add(program_to_recompile.ix_usage_counter.load(Relaxed), Relaxed); let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); loaded_programs_cache.assign_program(key, recompiled); } @@ -7485,10 +7492,10 @@ impl Bank { &self, pubkey: &Pubkey, reload: bool, - recompile: Option>, + effective_epoch: Epoch, ) -> Arc { self.transaction_processor - .load_program(self, pubkey, reload, recompile) + .load_program(self, pubkey, reload, effective_epoch) } } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 5f5d0884ac13ec..00523353142fa6 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -7170,7 +7170,7 @@ fn test_bank_load_program() { programdata_account.set_rent_epoch(1); bank.store_account_and_update_capitalization(&key1, &program_account); 
bank.store_account_and_update_capitalization(&programdata_key, &programdata_account); - let program = bank.load_program(&key1, false, None); + let program = bank.load_program(&key1, false, bank.epoch()); assert_matches!(program.program, LoadedProgramType::LegacyV1(_)); assert_eq!( program.account_size, @@ -7325,7 +7325,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { assert_eq!(*elf.get(i).unwrap(), *byte); } - let loaded_program = bank.load_program(&program_keypair.pubkey(), false, None); + let loaded_program = bank.load_program(&program_keypair.pubkey(), false, bank.epoch()); // Invoke deployed program mock_process_instruction( diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index dc3e59389cc295..0c456c918d68ff 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -51,10 +51,7 @@ use { collections::{hash_map::Entry, HashMap}, fmt::{Debug, Formatter}, rc::Rc, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, RwLock, - }, + sync::{atomic::Ordering, Arc, RwLock}, }, }; @@ -419,7 +416,7 @@ impl TransactionBatchProcessor { if let Some((key, count)) = program_to_load { // Load, verify and compile one program. - let program = self.load_program(callback, &key, false, None); + let program = self.load_program(callback, &key, false, self.epoch); program.tx_usage_counter.store(count, Ordering::Relaxed); program_to_store = Some((key, program)); } else if missing_programs.is_empty() { @@ -654,14 +651,9 @@ impl TransactionBatchProcessor { callbacks: &CB, pubkey: &Pubkey, reload: bool, - recompile: Option>, + effective_epoch: Epoch, ) -> Arc { let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let effective_epoch = if recompile.is_some() { - loaded_programs_cache.latest_root_epoch.saturating_add(1) - } else { - self.epoch - }; let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); let mut load_program_metrics = LoadProgramMetrics { program_id: pubkey.to_string(), @@ -754,12 +746,6 @@ impl TransactionBatchProcessor { .effective_slot .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); } - if let Some(recompile) = recompile { - loaded_program.tx_usage_counter = - AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); - loaded_program.ix_usage_counter = - AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); - } loaded_program.update_access_slot(self.slot); Arc::new(loaded_program) } From 72734a9539284b36e9b796d9f88a2447ff750cab Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 23 Feb 2024 11:13:23 -0800 Subject: [PATCH 247/401] move changelog note of default central-scheduler (#35217) --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dadc45594b80e1..779a1301802391 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ Release channels have their own copy of this changelog: ## [2.0.0] - Unreleased +* Changes + * `central-scheduler` as default option for `--block-production-method` (#34891) ## [1.18.0] * Changes @@ -21,7 +23,6 @@ Release channels have their own copy of this changelog: * The default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) * The default for `solana-ledger-tool`, however, remains `always` (#34228) * Added `central-scheduler` option for `--block-production-method` (#33890) - * `central-scheduler` as default option for `--block-production-method` (#34891) * Updated to Borsh v1 * Added 
allow_commission_decrease_at_any_time feature which will allow commission on a vote account to be decreased even in the second half of epochs when the commission_updates_only_allowed_in_first_half_of_epoch From fe571bbab339e4dc0f710a90b3f5a6926941af93 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Fri, 23 Feb 2024 11:50:41 -0800 Subject: [PATCH 248/401] Plumb `CommitmentConfig` through `bench-tps` client types (#35282) * use --commitment-config for setting blockhash commitment level for sending transactions with rpc-client * clarify default * leave get_balance_with_commitment at processed() * rm unused variable * refactor commitment_config flag read in * update cli and change send_batch's get_latest_blockhash() to get_latest_blockhash_with_client_commitment() and use client's internal commitment level * change fix some nits based on PR comments * rm unused import --- bench-tps/src/bench_tps_client/rpc_client.rs | 1 + bench-tps/src/cli.rs | 15 ++++++++++++++- bench-tps/src/main.rs | 11 ++++++++--- bench-tps/src/send_batch.rs | 4 ++-- 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/bench-tps/src/bench_tps_client/rpc_client.rs b/bench-tps/src/bench_tps_client/rpc_client.rs index 57e97120d0b4af..2535099b464351 100644 --- a/bench-tps/src/bench_tps_client/rpc_client.rs +++ b/bench-tps/src/bench_tps_client/rpc_client.rs @@ -11,6 +11,7 @@ impl BenchTpsClient for RpcClient { fn send_transaction(&self, transaction: Transaction) -> Result { RpcClient::send_transaction(self, &transaction).map_err(|err| err.into()) } + fn send_batch(&self, transactions: Vec) -> Result<()> { for transaction in transactions { BenchTpsClient::send_transaction(self, transaction)?; diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index 39de034730692e..d172329bed8662 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -1,11 +1,12 @@ use { - clap::{crate_description, crate_name, App, Arg, ArgMatches}, + clap::{crate_description, crate_name, value_t_or_exit, App, Arg, ArgMatches}, solana_clap_utils::{ hidden_unless_forced, input_validators::{is_keypair, is_url, is_url_or_moniker, is_within_range}, }, solana_cli_config::{ConfigInput, CONFIG_FILE}, solana_sdk::{ + commitment_config::CommitmentConfig, fee_calculator::FeeRateGovernor, pubkey::Pubkey, signature::{read_keypair_file, Keypair}, @@ -80,6 +81,7 @@ pub struct Config { pub num_conflict_groups: Option, pub bind_address: IpAddr, pub client_node_id: Option, + pub commitment_config: CommitmentConfig, } impl Eq for Config {} @@ -115,6 +117,7 @@ impl Default for Config { num_conflict_groups: None, bind_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client_node_id: None, + commitment_config: CommitmentConfig::confirmed(), } } } @@ -396,6 +399,14 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .validator(is_keypair) .help("File containing the node identity (keypair) of a validator with active stake. 
This allows communicating with network using staked connection"), ) + .arg( + Arg::with_name("commitment_config") + .long("commitment-config") + .takes_value(true) + .possible_values(&["processed", "confirmed", "finalized"]) + .default_value("confirmed") + .help("Block commitment config for getting latest blockhash"), + ) } /// Parses a clap `ArgMatches` structure into a `Config` @@ -577,6 +588,8 @@ pub fn parse_args(matches: &ArgMatches) -> Result { args.client_node_id = Some(client_node_id); } + args.commitment_config = value_t_or_exit!(matches, "commitment_config", CommitmentConfig); + Ok(args) } diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 519612bd4237a7..7c8244584e8ec0 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -81,6 +81,7 @@ fn create_connection_cache( use_quic: bool, bind_address: IpAddr, client_node_id: Option<&Keypair>, + commitment_config: CommitmentConfig, ) -> ConnectionCache { if !use_quic { return ConnectionCache::with_udp( @@ -97,7 +98,7 @@ fn create_connection_cache( let rpc_client = Arc::new(RpcClient::new_with_commitment( json_rpc_url.to_string(), - CommitmentConfig::confirmed(), + commitment_config, )); let client_node_id = client_node_id.unwrap(); @@ -132,11 +133,12 @@ fn create_client( num_nodes: usize, target_node: Option, connection_cache: ConnectionCache, + commitment_config: CommitmentConfig, ) -> Arc { match external_client_type { ExternalClientType::RpcClient => Arc::new(RpcClient::new_with_commitment( json_rpc_url.to_string(), - CommitmentConfig::confirmed(), + commitment_config, )), ExternalClientType::ThinClient => { let connection_cache = Arc::new(connection_cache); @@ -188,7 +190,7 @@ fn create_client( ExternalClientType::TpuClient => { let rpc_client = Arc::new(RpcClient::new_with_commitment( json_rpc_url.to_string(), - CommitmentConfig::confirmed(), + commitment_config, )); match connection_cache { ConnectionCache::Udp(cache) => Arc::new( @@ -256,6 +258,7 @@ fn main() { instruction_padding_config, bind_address, client_node_id, + commitment_config, .. 
} = &cli_config; @@ -317,6 +320,7 @@ fn main() { *use_quic, *bind_address, client_node_id.as_ref(), + *commitment_config, ); let client = create_client( external_client_type, @@ -328,6 +332,7 @@ fn main() { *num_nodes, *target_node, connection_cache, + *commitment_config, ); if let Some(instruction_padding_config) = instruction_padding_config { info!( diff --git a/bench-tps/src/send_batch.rs b/bench-tps/src/send_batch.rs index 5ea916530ca23d..75079c72ab020a 100644 --- a/bench-tps/src/send_batch.rs +++ b/bench-tps/src/send_batch.rs @@ -30,8 +30,8 @@ use { pub fn get_latest_blockhash(client: &T) -> Hash { loop { - match client.get_latest_blockhash_with_commitment(CommitmentConfig::processed()) { - Ok((blockhash, _)) => return blockhash, + match client.get_latest_blockhash() { + Ok(blockhash) => return blockhash, Err(err) => { info!("Couldn't get last blockhash: {:?}", err); sleep(Duration::from_secs(1)); From c02f47a6fb0e9f2d76e2e4ea992dff22402d500c Mon Sep 17 00:00:00 2001 From: enjoyoor <147568088+bholuhacks@users.noreply.github.com> Date: Fri, 23 Feb 2024 14:59:52 -0500 Subject: [PATCH 249/401] fix: cleanup (#35298) --- core/src/banking_stage/forwarder.rs | 8 ++++---- core/src/banking_stage/qos_service.rs | 20 +++++++++---------- .../unprocessed_transaction_storage.rs | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 1092e5c57b07cb..e1c2bdc3049621 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -100,7 +100,7 @@ impl Forwarder { slot_metrics_tracker.increment_forwardable_batches_count(1); let batched_forwardable_packets_count = forward_batch.len(); - let (_forward_result, sucessful_forwarded_packets_count, leader_pubkey) = self + let (_forward_result, successful_forwarded_packets_count, leader_pubkey) = self .forward_buffered_packets( &forward_option, forward_batch.get_forwardable_packets(), @@ -114,7 +114,7 @@ impl Forwarder { ); } let failed_forwarded_packets_count = batched_forwardable_packets_count - .saturating_sub(sucessful_forwarded_packets_count); + .saturating_sub(successful_forwarded_packets_count); if failed_forwarded_packets_count > 0 { slot_metrics_tracker.increment_failed_forwarded_packets_count( @@ -123,9 +123,9 @@ impl Forwarder { slot_metrics_tracker.increment_packet_batch_forward_failure_count(1); } - if sucessful_forwarded_packets_count > 0 { + if successful_forwarded_packets_count > 0 { slot_metrics_tracker.increment_successful_forwarded_packets_count( - sucessful_forwarded_packets_count as u64, + successful_forwarded_packets_count as u64, ); } }); diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index abac9c70f854f1..77f05c73a3bc12 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -407,11 +407,11 @@ struct QosServiceMetricsStats { /// overhead introduced by cost_model compute_cost_time: AtomicU64, - /// total nummber of transactions in the reporting period to be computed for theit cost. It is + /// total number of transactions in the reporting period to be computed for their cost. It is /// usually the number of sanitized transactions leader receives. compute_cost_count: AtomicU64, - /// acumulated time in micro-sec spent in tracking each bank's cost. It is the second part of + /// accumulated time in micro-sec spent in tracking each bank's cost. 
It is the second part of /// overhead introduced cost_tracking_time: AtomicU64, @@ -424,7 +424,7 @@ struct QosServiceMetricsStats { /// accumulated estimated write locks Compute Units to be packed into block estimated_write_lock_cu: AtomicU64, - /// accumulated estimated instructino data Compute Units to be packed into block + /// accumulated estimated instruction data Compute Units to be packed into block estimated_data_bytes_cu: AtomicU64, /// accumulated estimated builtin programs Compute Units to be packed into block @@ -732,7 +732,7 @@ mod tests { bank.read_cost_tracker().unwrap().block_cost() ); // all transactions are committed with actual units more than estimated - let commited_status: Vec = qos_cost_results + let committed_status: Vec = qos_cost_results .iter() .map(|tx_cost| CommitTransactionDetails::Committed { compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost() @@ -742,7 +742,7 @@ mod tests { let final_txs_cost = total_txs_cost + execute_units_adjustment * transaction_count; // All transactions are committed, no costs should be removed - QosService::remove_costs(qos_cost_results.iter(), Some(&commited_status), &bank); + QosService::remove_costs(qos_cost_results.iter(), Some(&committed_status), &bank); assert_eq!( total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -752,7 +752,7 @@ mod tests { bank.read_cost_tracker().unwrap().transaction_count() ); - QosService::update_costs(qos_cost_results.iter(), Some(&commited_status), &bank); + QosService::update_costs(qos_cost_results.iter(), Some(&committed_status), &bank); assert_eq!( final_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -835,7 +835,7 @@ mod tests { .collect(); let execute_units_adjustment = 10u64; - // assert only commited tx_costs are applied cost_tracker + // assert only committed tx_costs are applied cost_tracker { let qos_service = QosService::new(1); let txs_costs = qos_service.compute_transaction_costs( @@ -854,7 +854,7 @@ mod tests { bank.read_cost_tracker().unwrap().block_cost() ); // Half of transactions are not committed, the rest with cost adjustment - let commited_status: Vec = qos_cost_results + let committed_status: Vec = qos_cost_results .iter() .enumerate() .map(|(n, tx_cost)| { @@ -869,8 +869,8 @@ mod tests { }) .collect(); - QosService::remove_costs(qos_cost_results.iter(), Some(&commited_status), &bank); - QosService::update_costs(qos_cost_results.iter(), Some(&commited_status), &bank); + QosService::remove_costs(qos_cost_results.iter(), Some(&committed_status), &bank); + QosService::update_costs(qos_cost_results.iter(), Some(&committed_status), &bank); // assert the final block cost let mut expected_final_txs_count = 0u64; diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index adfb11f0b28fc2..fcc68050b72d4c 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -199,7 +199,7 @@ fn consume_scan_should_process_packet( // sanitized_transactions vector. Otherwise, a transaction could // be blocked by a transaction that did not take batch locks. This // will lead to some transactions never being processed, and a - // mismatch in the priorty-queue and hash map sizes. + // mismatch in the priority-queue and hash map sizes. // // Always take locks during batch creation. 
// This prevents lower-priority transactions from taking locks From ec63b813f58e0564920a5ab81e81eb0a048a5b07 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Fri, 23 Feb 2024 17:44:26 -0300 Subject: [PATCH 250/401] Move account filter test to SVM (#35304) --- runtime/src/bank/tests.rs | 183 ------------------------- svm/tests/mock_bank.rs | 48 +++++++ svm/tests/mod.rs | 1 - svm/tests/rent_state.rs | 36 +---- svm/tests/transaction_processor.rs | 206 +++++++++++++++++++++++++++++ 5 files changed, 257 insertions(+), 217 deletions(-) create mode 100644 svm/tests/mock_bank.rs delete mode 100644 svm/tests/mod.rs create mode 100644 svm/tests/transaction_processor.rs diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 00523353142fa6..02bb7f5c08a0de 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13585,189 +13585,6 @@ fn test_last_restart_slot() { assert_eq!(get_last_restart_slot(&bank7), Some(6)); } -#[test] -fn test_filter_executable_program_accounts() { - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - let (genesis_config, _mint_keypair) = create_genesis_config(10); - let bank = Bank::new_for_tests(&genesis_config); - bank.store_account( - &non_program_pubkey1, - &AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.store_account( - &non_program_pubkey2, - &AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.store_account( - &program1_pubkey, - &AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.store_account( - &program2_pubkey, - &AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.store_account( - &account1_pubkey, - &AccountSharedData::new(1, 10, &non_program_pubkey1), - ); - bank.store_account( - &account2_pubkey, - &AccountSharedData::new(1, 10, &non_program_pubkey2), - ); - bank.store_account( - &account3_pubkey, - &AccountSharedData::new(40, 1, &program1_pubkey), - ); - bank.store_account( - &account4_pubkey, - &AccountSharedData::new(40, 1, &program2_pubkey), - ); - - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - - let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let owners = &[program1_pubkey, program2_pubkey]; - let programs = TransactionBatchProcessor::::filter_executable_program_accounts( - &bank, - &[sanitized_tx1, sanitized_tx2], - &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], - owners, - ); - - // The result should contain only account3_pubkey, and account4_pubkey as the program accounts - assert_eq!(programs.len(), 2); - assert_eq!( - programs - .get(&account3_pubkey) - 
.expect("failed to find the program account"), - &(&program1_pubkey, 2) - ); - assert_eq!( - programs - .get(&account4_pubkey) - .expect("failed to find the program account"), - &(&program2_pubkey, 1) - ); -} - -#[test] -fn test_filter_executable_program_accounts_invalid_blockhash() { - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - let (genesis_config, _mint_keypair) = create_genesis_config(10); - let bank = Bank::new_for_tests(&genesis_config); - bank.store_account( - &non_program_pubkey1, - &AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.store_account( - &non_program_pubkey2, - &AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.store_account( - &program1_pubkey, - &AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.store_account( - &program2_pubkey, - &AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.store_account( - &account1_pubkey, - &AccountSharedData::new(1, 10, &non_program_pubkey1), - ); - bank.store_account( - &account2_pubkey, - &AccountSharedData::new(1, 10, &non_program_pubkey2), - ); - bank.store_account( - &account3_pubkey, - &AccountSharedData::new(40, 1, &program1_pubkey), - ); - bank.store_account( - &account4_pubkey, - &AccountSharedData::new(40, 1, &program2_pubkey), - ); - - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - - let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - // Let's not register blockhash from tx2. 
This should cause the tx2 to fail - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let owners = &[program1_pubkey, program2_pubkey]; - let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; - let programs = TransactionBatchProcessor::::filter_executable_program_accounts( - &bank, - &[sanitized_tx1, sanitized_tx2], - &mut lock_results, - owners, - ); - - // The result should contain only account3_pubkey as the program accounts - assert_eq!(programs.len(), 1); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 1) - ); - assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); -} - /// Test that rehashing works with skipped rewrites /// /// Since `bank_to_xxx_snapshot_archive()` calls `Bank::rehash()`, we must ensure that rehashing diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs new file mode 100644 index 00000000000000..3548b5fbac32da --- /dev/null +++ b/svm/tests/mock_bank.rs @@ -0,0 +1,48 @@ +use { + solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + feature_set::FeatureSet, + hash::Hash, + pubkey::Pubkey, + rent_collector::RentCollector, + }, + solana_svm::transaction_processor::TransactionProcessingCallback, + std::{collections::HashMap, sync::Arc}, +}; + +#[derive(Default)] +pub struct MockBankCallback { + rent_collector: RentCollector, + feature_set: Arc, + pub account_shared_data: HashMap, +} + +impl TransactionProcessingCallback for MockBankCallback { + fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { + if let Some(data) = self.account_shared_data.get(account) { + if data.lamports() == 0 { + None + } else { + owners.iter().position(|entry| data.owner() == entry) + } + } else { + None + } + } + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + self.account_shared_data.get(pubkey).cloned() + } + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { + todo!() + } + + fn get_rent_collector(&self) -> &RentCollector { + &self.rent_collector + } + + fn get_feature_set(&self) -> Arc { + self.feature_set.clone() + } +} diff --git a/svm/tests/mod.rs b/svm/tests/mod.rs deleted file mode 100644 index d1932e1253fe58..00000000000000 --- a/svm/tests/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod rent_state; diff --git a/svm/tests/rent_state.rs b/svm/tests/rent_state.rs index a97ee64ab98a2b..d24a32ac352fbf 100644 --- a/svm/tests/rent_state.rs +++ b/svm/tests/rent_state.rs @@ -7,14 +7,12 @@ use { }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, - feature_set::FeatureSet, fee::FeeStructure, hash::Hash, native_loader, native_token::sol_to_lamports, pubkey::Pubkey, rent::Rent, - rent_collector::RentCollector, signature::{Keypair, Signer}, system_transaction, transaction::SanitizedTransaction, @@ -23,44 +21,16 @@ use { solana_svm::{ account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, - transaction_processor::TransactionProcessingCallback, }, - std::{collections::HashMap, sync::Arc}, + std::collections::HashMap, }; -#[derive(Default)] -struct MockBankCallback { - rent_collector: RentCollector, - feature_set: Arc, - account_shared_data: HashMap, -} - -impl TransactionProcessingCallback for MockBankCallback { - fn account_matches_owners(&self, _account: &Pubkey, _owners: &[Pubkey]) -> Option { - todo!() - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - 
self.account_shared_data.get(pubkey).cloned() - } - - fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { - todo!() - } - - fn get_rent_collector(&self) -> &RentCollector { - &self.rent_collector - } - - fn get_feature_set(&self) -> Arc { - self.feature_set.clone() - } -} +mod mock_bank; #[test] fn test_rent_state_list_len() { let mint_keypair = Keypair::new(); - let mut bank = MockBankCallback::default(); + let mut bank = mock_bank::MockBankCallback::default(); let recipient = Pubkey::new_unique(); let last_block_hash = Hash::new_unique(); diff --git a/svm/tests/transaction_processor.rs b/svm/tests/transaction_processor.rs new file mode 100644 index 00000000000000..1704054246748d --- /dev/null +++ b/svm/tests/transaction_processor.rs @@ -0,0 +1,206 @@ +#![cfg(test)] + +use { + solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, + solana_sdk::{ + account::AccountSharedData, + clock::Slot, + hash::Hash, + instruction::CompiledInstruction, + pubkey::Pubkey, + signature::Keypair, + transaction::{SanitizedTransaction, Transaction, TransactionError}, + }, + solana_svm::transaction_processor::TransactionBatchProcessor, +}; + +mod mock_bank; + +struct MockForkGraph {} + +impl ForkGraph for MockForkGraph { + fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation { + todo!() + } +} + +#[test] +fn test_filter_executable_program_accounts() { + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + + let non_program_pubkey1 = Pubkey::new_unique(); + let non_program_pubkey2 = Pubkey::new_unique(); + let program1_pubkey = Pubkey::new_unique(); + let program2_pubkey = Pubkey::new_unique(); + let account1_pubkey = Pubkey::new_unique(); + let account2_pubkey = Pubkey::new_unique(); + let account3_pubkey = Pubkey::new_unique(); + let account4_pubkey = Pubkey::new_unique(); + + let account5_pubkey = Pubkey::new_unique(); + + let mut bank = mock_bank::MockBankCallback::default(); + bank.account_shared_data.insert( + non_program_pubkey1, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + non_program_pubkey2, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + program1_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + program2_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + account1_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey1), + ); + bank.account_shared_data.insert( + account2_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey2), + ); + bank.account_shared_data.insert( + account3_pubkey, + AccountSharedData::new(40, 1, &program1_pubkey), + ); + bank.account_shared_data.insert( + account4_pubkey, + AccountSharedData::new(40, 1, &program2_pubkey), + ); + + let tx1 = Transaction::new_with_compiled_instructions( + &[&keypair1], + &[non_program_pubkey1], + Hash::new_unique(), + vec![account1_pubkey, account2_pubkey, account3_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); + + let tx2 = Transaction::new_with_compiled_instructions( + &[&keypair2], + &[non_program_pubkey2], + Hash::new_unique(), + vec![account4_pubkey, account3_pubkey, account2_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); + + let owners = &[program1_pubkey, program2_pubkey]; + let programs = 
TransactionBatchProcessor::::filter_executable_program_accounts( + &bank, + &[sanitized_tx1, sanitized_tx2], + &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], + owners, + ); + + // The result should contain only account3_pubkey, and account4_pubkey as the program accounts + assert_eq!(programs.len(), 2); + assert_eq!( + programs + .get(&account3_pubkey) + .expect("failed to find the program account"), + &(&program1_pubkey, 2) + ); + assert_eq!( + programs + .get(&account4_pubkey) + .expect("failed to find the program account"), + &(&program2_pubkey, 1) + ); +} + +#[test] +fn test_filter_executable_program_accounts_invalid_blockhash() { + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + + let non_program_pubkey1 = Pubkey::new_unique(); + let non_program_pubkey2 = Pubkey::new_unique(); + let program1_pubkey = Pubkey::new_unique(); + let program2_pubkey = Pubkey::new_unique(); + let account1_pubkey = Pubkey::new_unique(); + let account2_pubkey = Pubkey::new_unique(); + let account3_pubkey = Pubkey::new_unique(); + let account4_pubkey = Pubkey::new_unique(); + + let account5_pubkey = Pubkey::new_unique(); + + let mut bank = mock_bank::MockBankCallback::default(); + bank.account_shared_data.insert( + non_program_pubkey1, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + non_program_pubkey2, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + program1_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + program2_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + account1_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey1), + ); + bank.account_shared_data.insert( + account2_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey2), + ); + bank.account_shared_data.insert( + account3_pubkey, + AccountSharedData::new(40, 1, &program1_pubkey), + ); + bank.account_shared_data.insert( + account4_pubkey, + AccountSharedData::new(40, 1, &program2_pubkey), + ); + + let tx1 = Transaction::new_with_compiled_instructions( + &[&keypair1], + &[non_program_pubkey1], + Hash::new_unique(), + vec![account1_pubkey, account2_pubkey, account3_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); + + let tx2 = Transaction::new_with_compiled_instructions( + &[&keypair2], + &[non_program_pubkey2], + Hash::new_unique(), + vec![account4_pubkey, account3_pubkey, account2_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + // Let's not register blockhash from tx2. 
This should cause the tx2 to fail + let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); + + let owners = &[program1_pubkey, program2_pubkey]; + let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; + let programs = TransactionBatchProcessor::::filter_executable_program_accounts( + &bank, + &[sanitized_tx1, sanitized_tx2], + &mut lock_results, + owners, + ); + + // The result should contain only account3_pubkey as the program accounts + assert_eq!(programs.len(), 1); + assert_eq!( + programs + .get(&account3_pubkey) + .expect("failed to find the program account"), + &(&program1_pubkey, 1) + ); + assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); +} From 58c1b7aefb9c365232716b36c210811feeb628b0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Feb 2024 16:20:42 -0500 Subject: [PATCH 251/401] Replaces ReadAccountMapEntry in more tests (#35308) --- accounts-db/src/accounts_db.rs | 40 +++++++------------------------ accounts-db/src/accounts_index.rs | 23 ++++-------------- 2 files changed, 14 insertions(+), 49 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ad96a305e632be..2ff852d1e39037 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9771,9 +9771,7 @@ pub mod tests { account_info::StoredSize, account_storage::meta::{AccountMeta, StoredMeta}, accounts_hash::MERKLE_FANOUT, - accounts_index::{ - tests::*, AccountSecondaryIndexesIncludeExclude, ReadAccountMapEntry, RefCount, - }, + accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, ancient_append_vecs, append_vec::{test_utils::TempFile, AppendVecStoredAccountMeta}, cache_hash_data::CacheHashDataFile, @@ -13366,22 +13364,10 @@ pub mod tests { const UPSERT_POPULATE_RECLAIMS: UpsertReclaim = UpsertReclaim::PopulateReclaims; - // returns the rooted entries and the storage ref count - fn roots_and_ref_count( - index: &AccountsIndex, - locked_account_entry: &ReadAccountMapEntry, - max_inclusive: Option, - ) -> (SlotList, RefCount) { - ( - index.get_rooted_entries(locked_account_entry.slot_list(), max_inclusive), - locked_account_entry.ref_count(), - ) - } - #[test] fn test_delete_dependencies() { solana_logger::setup(); - let accounts_index = AccountsIndex::default_for_tests(); + let accounts_index = AccountsIndex::::default_for_tests(); let key0 = Pubkey::new_from_array([0u8; 32]); let key1 = Pubkey::new_from_array([1u8; 32]); let key2 = Pubkey::new_from_array([2u8; 32]); @@ -13455,21 +13441,13 @@ pub mod tests { accounts_index.add_root(2); accounts_index.add_root(3); let mut purges = HashMap::new(); - let (key0_entry, _) = accounts_index.get_for_tests(&key0, None, None).unwrap(); - purges.insert( - key0, - roots_and_ref_count(&accounts_index, &key0_entry, None), - ); - let (key1_entry, _) = accounts_index.get_for_tests(&key1, None, None).unwrap(); - purges.insert( - key1, - roots_and_ref_count(&accounts_index, &key1_entry, None), - ); - let (key2_entry, _) = accounts_index.get_for_tests(&key2, None, None).unwrap(); - purges.insert( - key2, - roots_and_ref_count(&accounts_index, &key2_entry, None), - ); + for key in [&key0, &key1, &key2] { + let index_entry = accounts_index.get_cloned(key).unwrap(); + let rooted_entries = accounts_index + .get_rooted_entries(index_entry.slot_list.read().unwrap().as_slice(), None); + let ref_count = index_entry.ref_count(); + purges.insert(*key, (rooted_entries, ref_count)); + } for (key, (list, ref_count)) in &purges { info!(" purge {} ref_count {} 
=>", key, ref_count); for x in list { diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 51a04e3a4eb4b6..266e9d74ef44e3 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -2333,24 +2333,11 @@ pub mod tests { index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); assert_eq!(result.count, 1); index.set_startup(Startup::Normal); - if let AccountIndexGetResult::Found(entry, index) = - // the entry for - index.get_for_tests(pubkey, Some(&ancestors), None) - { - // make sure the one with the correct info is added - assert_eq!(entry.slot_list()[index], (slot, account_info2)); - // make sure it wasn't inserted twice - assert_eq!( - entry - .slot_list() - .iter() - .filter_map(|(entry_slot, _)| (entry_slot == &slot).then_some(true)) - .count(), - 1 - ); - } else { - panic!("failed"); - } + let index_entry = index.get_cloned(pubkey).unwrap(); + let slot_list = index_entry.slot_list.read().unwrap(); + // make sure the one with the correct info is added, and wasn't inserted twice + assert_eq!(slot_list.len(), 1); + assert_eq!(slot_list[0], (slot, account_info2)); } #[test] From 1e47aacd0d73bd3ee9b3caca232af6a3df79969e Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Feb 2024 17:25:30 -0500 Subject: [PATCH 252/401] Removes get_account_read_entry() (#35309) --- accounts-db/src/accounts_index.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 266e9d74ef44e3..a515dff54f8dda 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1124,12 +1124,6 @@ impl + Into> AccountsIndex { } } - pub fn get_account_read_entry(&self, pubkey: &Pubkey) -> Option> { - let lock = self.get_bin(pubkey); - lock.get(pubkey) - .map(ReadAccountMapEntry::from_account_map_entry) - } - /// Gets the index's entry for `pubkey` and applies `callback` to it /// /// If `callback`'s boolean return value is true, add this entry to the in-mem cache. 
@@ -1457,7 +1451,12 @@ impl + Into> AccountsIndex { ancestors: Option<&Ancestors>, max_root: Option, ) -> AccountIndexGetResult { - self.get_account_read_entry(pubkey) + let read_account_map_entry = self + .get_bin(pubkey) + .get(pubkey) + .map(ReadAccountMapEntry::from_account_map_entry); + + read_account_map_entry .and_then(|locked_entry| { let slot_list = locked_entry.slot_list(); self.latest_slot(ancestors, slot_list, max_root) From 54706a885b0025bccb36f6e10ccbef31ca05112c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Feb 2024 18:48:36 -0500 Subject: [PATCH 253/401] Adds get_with_and_then() to AccountsIndex (#35307) --- accounts-db/src/accounts_db.rs | 65 +++++++------ accounts-db/src/accounts_index.rs | 156 +++++++++++++++++++++++------- 2 files changed, 153 insertions(+), 68 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2ff852d1e39037..00648c99b7d6ad 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -11640,13 +11640,16 @@ pub mod tests { accounts.add_root_and_flush_write_cache(0); let ancestors = vec![(0, 0)].into_iter().collect(); - let id = { - let (lock, idx) = accounts - .accounts_index - .get_for_tests(&pubkey, Some(&ancestors), None) - .unwrap(); - lock.slot_list()[idx].1.store_id() - }; + let id = accounts + .accounts_index + .get_with_and_then( + &pubkey, + Some(&ancestors), + None, + false, + |(_slot, account_info)| account_info.store_id(), + ) + .unwrap(); accounts.calculate_accounts_delta_hash(0); //slot is still there, since gc is lazy @@ -11701,13 +11704,23 @@ pub mod tests { let ancestors = vec![(0, 1)].into_iter().collect(); let (slot1, account_info1) = accounts .accounts_index - .get_for_tests(&pubkey1, Some(&ancestors), None) - .map(|(account_list1, index1)| account_list1.slot_list()[index1]) + .get_with_and_then( + &pubkey1, + Some(&ancestors), + None, + false, + |(slot, account_info)| (slot, account_info), + ) .unwrap(); let (slot2, account_info2) = accounts .accounts_index - .get_for_tests(&pubkey2, Some(&ancestors), None) - .map(|(account_list2, index2)| account_list2.slot_list()[index2]) + .get_with_and_then( + &pubkey2, + Some(&ancestors), + None, + false, + |(slot, account_info)| (slot, account_info), + ) .unwrap(); assert_eq!(slot1, 0); assert_eq!(slot1, slot2); @@ -11831,10 +11844,7 @@ pub mod tests { // zero lamport account, should no longer exist in accounts index // because it has been removed - assert!(accounts - .accounts_index - .get_for_tests(&pubkey, None, None) - .is_none()); + assert!(!accounts.accounts_index.contains_with(&pubkey, None, None)); } #[test] @@ -12020,10 +12030,7 @@ pub mod tests { // `pubkey1`, a zero lamport account, should no longer exist in accounts index // because it has been removed by the clean - assert!(accounts - .accounts_index - .get_for_tests(&pubkey1, None, None) - .is_none()); + assert!(!accounts.accounts_index.contains_with(&pubkey1, None, None)); // Secondary index should have purged `pubkey1` as well let mut found_accounts = vec![]; @@ -12067,10 +12074,7 @@ pub mod tests { accounts.clean_accounts(Some(0), false, None, &EpochSchedule::default()); assert_eq!(accounts.alive_account_count_in_slot(0), 1); assert_eq!(accounts.alive_account_count_in_slot(1), 1); - assert!(accounts - .accounts_index - .get_for_tests(&pubkey, None, None) - .is_some()); + assert!(accounts.accounts_index.contains_with(&pubkey, None, None)); // Now the account can be cleaned up accounts.clean_accounts(Some(1), false, None, &EpochSchedule::default()); @@ 
-12079,10 +12083,7 @@ pub mod tests { // The zero lamport account, should no longer exist in accounts index // because it has been removed - assert!(accounts - .accounts_index - .get_for_tests(&pubkey, None, None) - .is_none()); + assert!(!accounts.accounts_index.contains_with(&pubkey, None, None)); } #[test] @@ -12157,13 +12158,15 @@ pub mod tests { accounts.add_root_and_flush_write_cache(current_slot); let (slot1, account_info1) = accounts .accounts_index - .get_for_tests(&pubkey, None, None) - .map(|(account_list1, index1)| account_list1.slot_list()[index1]) + .get_with_and_then(&pubkey, None, None, false, |(slot, account_info)| { + (slot, account_info) + }) .unwrap(); let (slot2, account_info2) = accounts .accounts_index - .get_for_tests(&pubkey2, None, None) - .map(|(account_list2, index2)| account_list2.slot_list()[index2]) + .get_with_and_then(&pubkey2, None, None, false, |(slot, account_info)| { + (slot, account_info) + }) .unwrap(); assert_eq!(slot1, current_slot); assert_eq!(slot1, slot2); diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index a515dff54f8dda..3faae999bf1b9d 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1135,6 +1135,27 @@ impl + Into> AccountsIndex { self.get_bin(pubkey).get_internal(pubkey, callback) } + /// Gets the index's entry for `pubkey`, with `ancestors` and `max_root`, + /// and applies `callback` to it + #[cfg(test)] + pub(crate) fn get_with_and_then( + &self, + pubkey: &Pubkey, + ancestors: Option<&Ancestors>, + max_root: Option, + should_add_to_in_mem_cache: bool, + mut callback: impl FnMut((Slot, T)) -> R, + ) -> Option { + self.get_and_then(pubkey, |entry| { + let callback_result = entry.and_then(|entry| { + let slot_list = entry.slot_list.read().unwrap(); + self.latest_slot(ancestors, &slot_list, max_root) + .map(|found_index| callback(slot_list[found_index])) + }); + (should_add_to_in_mem_cache, callback_result) + }) + } + /// Gets the index's entry for `pubkey` and clones it /// /// Prefer `get_and_then()` whenever possible. @@ -1148,6 +1169,18 @@ impl + Into> AccountsIndex { self.get_and_then(pubkey, |entry| (false, entry.is_some())) } + /// Is `pubkey`, with `ancestors` and `max_root`, in the index? 
+ #[cfg(test)] + pub(crate) fn contains_with( + &self, + pubkey: &Pubkey, + ancestors: Option<&Ancestors>, + max_root: Option, + ) -> bool { + self.get_with_and_then(pubkey, ancestors, max_root, false, |_| ()) + .is_some() + } + fn slot_list_mut( &self, pubkey: &Pubkey, @@ -2157,8 +2190,8 @@ pub mod tests { let index = AccountsIndex::::default_for_tests(); let ancestors = Ancestors::default(); let key = &key; - assert!(index.get_for_tests(key, Some(&ancestors), None).is_none()); - assert!(index.get_for_tests(key, None, None).is_none()); + assert!(!index.contains_with(key, Some(&ancestors), None)); + assert!(!index.contains_with(key, None, None)); let mut num = 0; index.unchecked_scan_accounts( @@ -2286,8 +2319,8 @@ pub mod tests { assert!(gc.is_empty()); let ancestors = Ancestors::default(); - assert!(index.get_for_tests(&key, Some(&ancestors), None).is_none()); - assert!(index.get_for_tests(&key, None, None).is_none()); + assert!(!index.contains_with(&key, Some(&ancestors), None)); + assert!(!index.contains_with(&key, None, None)); let mut num = 0; index.unchecked_scan_accounts( @@ -2356,10 +2389,8 @@ pub mod tests { index.set_startup(Startup::Normal); let mut ancestors = Ancestors::default(); - assert!(index - .get_for_tests(pubkey, Some(&ancestors), None) - .is_none()); - assert!(index.get_for_tests(pubkey, None, None).is_none()); + assert!(!index.contains_with(pubkey, Some(&ancestors), None)); + assert!(!index.contains_with(pubkey, None, None)); let mut num = 0; index.unchecked_scan_accounts( @@ -2370,9 +2401,7 @@ pub mod tests { ); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index - .get_for_tests(pubkey, Some(&ancestors), None) - .is_some()); + assert!(index.contains_with(pubkey, Some(&ancestors), None)); assert_eq!(index.ref_count_from_storage(pubkey), 1); index.unchecked_scan_accounts( "", @@ -2394,10 +2423,8 @@ pub mod tests { index.set_startup(Startup::Normal); let mut ancestors = Ancestors::default(); - assert!(index - .get_for_tests(pubkey, Some(&ancestors), None) - .is_none()); - assert!(index.get_for_tests(pubkey, None, None).is_none()); + assert!(!index.contains_with(pubkey, Some(&ancestors), None)); + assert!(!index.contains_with(pubkey, None, None)); let mut num = 0; index.unchecked_scan_accounts( @@ -2408,9 +2435,7 @@ pub mod tests { ); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index - .get_for_tests(pubkey, Some(&ancestors), None) - .is_some()); + assert!(index.contains_with(pubkey, Some(&ancestors), None)); assert_eq!(index.ref_count_from_storage(pubkey), 1); index.unchecked_scan_accounts( "", @@ -2672,8 +2697,8 @@ pub mod tests { assert_eq!(1, account_maps_stats_len(&index)); let mut ancestors = Ancestors::default(); - assert!(index.get_for_tests(&key, Some(&ancestors), None).is_none()); - assert!(index.get_for_tests(&key, None, None).is_none()); + assert!(!index.contains_with(&key, Some(&ancestors), None)); + assert!(!index.contains_with(&key, None, None)); let mut num = 0; index.unchecked_scan_accounts( @@ -2684,7 +2709,7 @@ pub mod tests { ); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index.get_for_tests(&key, Some(&ancestors), None).is_some()); + assert!(index.contains_with(&key, Some(&ancestors), None)); index.unchecked_scan_accounts( "", &ancestors, @@ -2712,7 +2737,7 @@ pub mod tests { assert!(gc.is_empty()); let ancestors = vec![(1, 1)].into_iter().collect(); - assert!(index.get_for_tests(&key, Some(&ancestors), None).is_none()); + assert!(!index.contains_with(&key, Some(&ancestors), None)); let mut num = 0; 
index.unchecked_scan_accounts( @@ -2837,8 +2862,18 @@ pub mod tests { assert!(gc.is_empty()); let ancestors = vec![(0, 0)].into_iter().collect(); - let (list, idx) = index.get_for_tests(&key, Some(&ancestors), None).unwrap(); - assert_eq!(list.slot_list()[idx], (0, true)); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, 0); + assert!(account_info); + }, + ) + .unwrap(); let mut num = 0; let mut found_key = false; @@ -3062,8 +3097,12 @@ pub mod tests { assert!(gc.is_empty()); index.add_root(0); - let (list, idx) = index.get_for_tests(&key, None, None).unwrap(); - assert_eq!(list.slot_list()[idx], (0, true)); + index + .get_with_and_then(&key, None, None, false, |(slot, account_info)| { + assert_eq!(slot, 0); + assert!(account_info); + }) + .unwrap(); } #[test] @@ -3132,9 +3171,18 @@ pub mod tests { UPSERT_POPULATE_RECLAIMS, ); assert!(gc.is_empty()); - let (list, idx) = index.get_for_tests(&key, Some(&ancestors), None).unwrap(); - assert_eq!(list.slot_list()[idx], (0, true)); - drop(list); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, 0); + assert!(account_info); + }, + ) + .unwrap(); let mut gc = Vec::new(); index.upsert( @@ -3148,8 +3196,18 @@ pub mod tests { UPSERT_POPULATE_RECLAIMS, ); assert_eq!(gc, vec![(0, true)]); - let (list, idx) = index.get_for_tests(&key, Some(&ancestors), None).unwrap(); - assert_eq!(list.slot_list()[idx], (0, false)); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, 0); + assert!(!account_info); + }, + ) + .unwrap(); } #[test] @@ -3181,11 +3239,31 @@ pub mod tests { UPSERT_POPULATE_RECLAIMS, ); assert!(gc.is_empty()); - let (list, idx) = index.get_for_tests(&key, Some(&ancestors), None).unwrap(); - assert_eq!(list.slot_list()[idx], (0, true)); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, 0); + assert!(account_info); + }, + ) + .unwrap(); let ancestors = vec![(1, 0)].into_iter().collect(); - let (list, idx) = index.get_for_tests(&key, Some(&ancestors), None).unwrap(); - assert_eq!(list.slot_list()[idx], (1, false)); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, 1); + assert!(!account_info); + }, + ) + .unwrap(); } #[test] @@ -3251,8 +3329,12 @@ pub mod tests { // Updating index should not purge older roots, only purges // previous updates within the same slot assert_eq!(gc, vec![]); - let (list, idx) = index.get_for_tests(&key, None, None).unwrap(); - assert_eq!(list.slot_list()[idx], (3, true)); + index + .get_with_and_then(&key, None, None, false, |(slot, account_info)| { + assert_eq!(slot, 3); + assert!(account_info); + }) + .unwrap(); let mut num = 0; let mut found_key = false; From 9f581113bd435b63cdb1e29e48250ade7712332e Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 23 Feb 2024 17:06:22 -0800 Subject: [PATCH 254/401] Scheduler: Leader-Slot metrics for Scheduler (#35087) --- .../scheduler_controller.rs | 174 ++++++++++++------ .../scheduler_metrics.rs | 151 ++++++++++----- 2 files changed, 227 insertions(+), 98 deletions(-) diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index b0c5e0f6ab3265..12e8f7bf8bf0bf 100644 --- 
a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -54,9 +54,11 @@ pub(crate) struct SchedulerController { container: TransactionStateContainer, /// State for scheduling and communicating with worker threads. scheduler: PrioGraphScheduler, - /// Metrics tracking counts on transactions in different states. + /// Metrics tracking counts on transactions in different states + /// over an interval and during a leader slot. count_metrics: SchedulerCountMetrics, - /// Metrics tracking time spent in different code sections. + /// Metrics tracking time spent in difference code sections + /// over an interval and during a leader slot. timing_metrics: SchedulerTimingMetrics, /// Metric report handles for the worker threads. worker_metrics: Vec>, @@ -97,7 +99,15 @@ impl SchedulerController { // bypass sanitization and buffering and immediately drop the packets. let (decision, decision_time_us) = measure_us!(self.decision_maker.make_consume_or_forward_decision()); - saturating_add_assign!(self.timing_metrics.decision_time_us, decision_time_us); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.decision_time_us, decision_time_us); + }); + + let new_leader_slot = decision.bank_start().map(|b| b.working_bank.slot()); + self.count_metrics + .maybe_report_and_reset_slot(new_leader_slot); + self.timing_metrics + .maybe_report_and_reset_slot(new_leader_slot); self.process_transactions(&decision)?; self.receive_completed()?; @@ -106,11 +116,15 @@ impl SchedulerController { } // Report metrics only if there is data. // Reset intervals when appropriate, regardless of report. - let should_report = self.count_metrics.has_data(); + let should_report = self.count_metrics.interval_has_data(); + let priority_min_max = self.container.get_min_max_priority(); + self.count_metrics.update(|count_metrics| { + count_metrics.update_priority_stats(priority_min_max); + }); self.count_metrics - .update_priority_stats(self.container.get_min_max_priority()); - self.count_metrics.maybe_report_and_reset(should_report); - self.timing_metrics.maybe_report_and_reset(should_report); + .maybe_report_and_reset_interval(should_report); + self.timing_metrics + .maybe_report_and_reset_interval(should_report); self.worker_metrics .iter() .for_each(|metrics| metrics.maybe_report_and_reset()); @@ -133,31 +147,41 @@ impl SchedulerController { }, |_| true // no pre-lock filter for now )?); - saturating_add_assign!( - self.count_metrics.num_scheduled, - scheduling_summary.num_scheduled - ); - saturating_add_assign!( - self.count_metrics.num_unschedulable, - scheduling_summary.num_unschedulable - ); - saturating_add_assign!( - self.count_metrics.num_schedule_filtered_out, - scheduling_summary.num_filtered_out - ); - saturating_add_assign!( - self.timing_metrics.schedule_filter_time_us, - scheduling_summary.filter_time_us - ); - saturating_add_assign!(self.timing_metrics.schedule_time_us, schedule_time_us); + + self.count_metrics.update(|count_metrics| { + saturating_add_assign!( + count_metrics.num_scheduled, + scheduling_summary.num_scheduled + ); + saturating_add_assign!( + count_metrics.num_unschedulable, + scheduling_summary.num_unschedulable + ); + saturating_add_assign!( + count_metrics.num_schedule_filtered_out, + scheduling_summary.num_filtered_out + ); + }); + + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!( + timing_metrics.schedule_filter_time_us, + 
scheduling_summary.filter_time_us + ); + saturating_add_assign!(timing_metrics.schedule_time_us, schedule_time_us); + }); } BufferedPacketsDecision::Forward => { let (_, clear_time_us) = measure_us!(self.clear_container()); - saturating_add_assign!(self.timing_metrics.clear_time_us, clear_time_us); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.clear_time_us, clear_time_us); + }); } BufferedPacketsDecision::ForwardAndHold => { let (_, clean_time_us) = measure_us!(self.clean_queue()); - saturating_add_assign!(self.timing_metrics.clean_time_us, clean_time_us); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.clean_time_us, clean_time_us); + }); } BufferedPacketsDecision::Hold => {} } @@ -192,10 +216,15 @@ impl SchedulerController { /// Clears the transaction state container. /// This only clears pending transactions, and does **not** clear in-flight transactions. fn clear_container(&mut self) { + let mut num_dropped_on_clear: usize = 0; while let Some(id) = self.container.pop() { self.container.remove_by_id(&id.id); - saturating_add_assign!(self.count_metrics.num_dropped_on_clear, 1); + saturating_add_assign!(num_dropped_on_clear, 1); } + + self.count_metrics.update(|count_metrics| { + saturating_add_assign!(count_metrics.num_dropped_on_clear, num_dropped_on_clear); + }); } /// Clean unprocessable transactions from the queue. These will be transactions that are @@ -215,7 +244,7 @@ impl SchedulerController { const CHUNK_SIZE: usize = 128; let mut error_counters = TransactionErrorMetrics::default(); - + let mut num_dropped_on_age_and_status: usize = 0; for chunk in transaction_ids.chunks(CHUNK_SIZE) { let lock_results = vec![Ok(()); chunk.len()]; let sanitized_txs: Vec<_> = chunk @@ -238,23 +267,36 @@ impl SchedulerController { for ((result, _nonce, _lamports), id) in check_results.into_iter().zip(chunk.iter()) { if result.is_err() { - saturating_add_assign!(self.count_metrics.num_dropped_on_age_and_status, 1); + saturating_add_assign!(num_dropped_on_age_and_status, 1); self.container.remove_by_id(&id.id); } } } + + self.count_metrics.update(|count_metrics| { + saturating_add_assign!( + count_metrics.num_dropped_on_age_and_status, + num_dropped_on_age_and_status + ); + }); } /// Receives completed transactions from the workers and updates metrics. 
fn receive_completed(&mut self) -> Result<(), SchedulerError> { let ((num_transactions, num_retryable), receive_completed_time_us) = measure_us!(self.scheduler.receive_completed(&mut self.container)?); - saturating_add_assign!(self.count_metrics.num_finished, num_transactions); - saturating_add_assign!(self.count_metrics.num_retryable, num_retryable); - saturating_add_assign!( - self.timing_metrics.receive_completed_time_us, - receive_completed_time_us - ); + + self.count_metrics.update(|count_metrics| { + saturating_add_assign!(count_metrics.num_finished, num_transactions); + saturating_add_assign!(count_metrics.num_retryable, num_retryable); + }); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!( + timing_metrics.receive_completed_time_us, + receive_completed_time_us + ); + }); + Ok(()) } @@ -281,22 +323,33 @@ impl SchedulerController { let (received_packet_results, receive_time_us) = measure_us!(self .packet_receiver .receive_packets(recv_timeout, remaining_queue_capacity)); - saturating_add_assign!(self.timing_metrics.receive_time_us, receive_time_us); + + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.receive_time_us, receive_time_us); + }); match received_packet_results { Ok(receive_packet_results) => { let num_received_packets = receive_packet_results.deserialized_packets.len(); - saturating_add_assign!(self.count_metrics.num_received, num_received_packets); + + self.count_metrics.update(|count_metrics| { + saturating_add_assign!(count_metrics.num_received, num_received_packets); + }); + if should_buffer { let (_, buffer_time_us) = measure_us!( self.buffer_packets(receive_packet_results.deserialized_packets) ); - saturating_add_assign!(self.timing_metrics.buffer_time_us, buffer_time_us); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.buffer_time_us, buffer_time_us); + }); } else { - saturating_add_assign!( - self.count_metrics.num_dropped_on_receive, - num_received_packets - ); + self.count_metrics.update(|count_metrics| { + saturating_add_assign!( + count_metrics.num_dropped_on_receive, + num_received_packets + ); + }); } } Err(RecvTimeoutError::Timeout) => {} @@ -348,6 +401,8 @@ impl SchedulerController { let post_lock_validation_count = transactions.len(); let mut post_transaction_check_count: usize = 0; + let mut num_dropped_on_capacity: usize = 0; + let mut num_buffered: usize = 0; for ((transaction, fee_budget_limits), _) in transactions .into_iter() .zip(fee_budget_limits_vec) @@ -370,9 +425,9 @@ impl SchedulerController { priority, cost, ) { - saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); + saturating_add_assign!(num_dropped_on_capacity, 1); } - saturating_add_assign!(self.count_metrics.num_buffered, 1); + saturating_add_assign!(num_buffered, 1); } // Update metrics for transactions that were dropped. 
@@ -382,18 +437,25 @@ impl SchedulerController { let num_dropped_on_transaction_checks = post_lock_validation_count.saturating_sub(post_transaction_check_count); - saturating_add_assign!( - self.count_metrics.num_dropped_on_sanitization, - num_dropped_on_sanitization - ); - saturating_add_assign!( - self.count_metrics.num_dropped_on_validate_locks, - num_dropped_on_lock_validation - ); - saturating_add_assign!( - self.count_metrics.num_dropped_on_receive_transaction_checks, - num_dropped_on_transaction_checks - ); + self.count_metrics.update(|count_metrics| { + saturating_add_assign!( + count_metrics.num_dropped_on_capacity, + num_dropped_on_capacity + ); + saturating_add_assign!(count_metrics.num_buffered, num_buffered); + saturating_add_assign!( + count_metrics.num_dropped_on_sanitization, + num_dropped_on_sanitization + ); + saturating_add_assign!( + count_metrics.num_dropped_on_validate_locks, + num_dropped_on_lock_validation + ); + saturating_add_assign!( + count_metrics.num_dropped_on_receive_transaction_checks, + num_dropped_on_transaction_checks + ); + }); } } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index a3891fdf245ea5..33999f0ef20a18 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -1,15 +1,45 @@ use { itertools::MinMaxResult, - solana_sdk::timing::AtomicInterval, - std::ops::{Deref, DerefMut}, + solana_sdk::{clock::Slot, timing::AtomicInterval}, }; #[derive(Default)] pub struct SchedulerCountMetrics { + interval: IntervalSchedulerCountMetrics, + slot: SlotSchedulerCountMetrics, +} + +impl SchedulerCountMetrics { + pub fn update(&mut self, update: impl Fn(&mut SchedulerCountMetricsInner)) { + update(&mut self.interval.metrics); + update(&mut self.slot.metrics); + } + + pub fn maybe_report_and_reset_slot(&mut self, slot: Option) { + self.slot.maybe_report_and_reset(slot); + } + + pub fn maybe_report_and_reset_interval(&mut self, should_report: bool) { + self.interval.maybe_report_and_reset(should_report); + } + + pub fn interval_has_data(&self) -> bool { + self.interval.metrics.has_data() + } +} + +#[derive(Default)] +struct IntervalSchedulerCountMetrics { interval: AtomicInterval, metrics: SchedulerCountMetricsInner, } +#[derive(Default)] +struct SlotSchedulerCountMetrics { + slot: Option, + metrics: SchedulerCountMetricsInner, +} + #[derive(Default)] pub struct SchedulerCountMetricsInner { /// Number of packets received. 
@@ -49,35 +79,36 @@ pub struct SchedulerCountMetricsInner { pub max_prioritization_fees: u64, } -impl Deref for SchedulerCountMetrics { - type Target = SchedulerCountMetricsInner; - fn deref(&self) -> &Self::Target { - &self.metrics - } -} - -impl DerefMut for SchedulerCountMetrics { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.metrics - } -} - -impl SchedulerCountMetrics { - pub fn maybe_report_and_reset(&mut self, should_report: bool) { +impl IntervalSchedulerCountMetrics { + fn maybe_report_and_reset(&mut self, should_report: bool) { const REPORT_INTERVAL_MS: u64 = 1000; if self.interval.should_update(REPORT_INTERVAL_MS) { if should_report { - self.report("banking_stage_scheduler_counts"); + self.metrics.report("banking_stage_scheduler_counts", None); + } + self.metrics.reset(); + } + } +} + +impl SlotSchedulerCountMetrics { + fn maybe_report_and_reset(&mut self, slot: Option) { + if self.slot != slot { + // Only report if there was an assigned slot. + if self.slot.is_some() { + self.metrics + .report("banking_stage_scheduler_slot_counts", self.slot); } - self.reset(); + self.metrics.reset(); + self.slot = slot; } } } impl SchedulerCountMetricsInner { - fn report(&self, name: &'static str) { - datapoint_info!( - name, + fn report(&self, name: &'static str, slot: Option) { + let mut datapoint = create_datapoint!( + @point name, ("num_received", self.num_received, i64), ("num_buffered", self.num_buffered, i64), ("num_scheduled", self.num_scheduled, i64), @@ -115,6 +146,10 @@ impl SchedulerCountMetricsInner { ("min_priority", self.get_min_priority(), i64), ("max_priority", self.get_max_priority(), i64) ); + if let Some(slot) = slot { + datapoint.add_field_i64("slot", slot as i64); + } + solana_metrics::submit(datapoint, log::Level::Info); } pub fn has_data(&self) -> bool { @@ -186,10 +221,37 @@ impl SchedulerCountMetricsInner { #[derive(Default)] pub struct SchedulerTimingMetrics { + interval: IntervalSchedulerTimingMetrics, + slot: SlotSchedulerTimingMetrics, +} + +impl SchedulerTimingMetrics { + pub fn update(&mut self, update: impl Fn(&mut SchedulerTimingMetricsInner)) { + update(&mut self.interval.metrics); + update(&mut self.slot.metrics); + } + + pub fn maybe_report_and_reset_slot(&mut self, slot: Option) { + self.slot.maybe_report_and_reset(slot); + } + + pub fn maybe_report_and_reset_interval(&mut self, should_report: bool) { + self.interval.maybe_report_and_reset(should_report); + } +} + +#[derive(Default)] +struct IntervalSchedulerTimingMetrics { interval: AtomicInterval, metrics: SchedulerTimingMetricsInner, } +#[derive(Default)] +struct SlotSchedulerTimingMetrics { + slot: Option, + metrics: SchedulerTimingMetricsInner, +} + #[derive(Default)] pub struct SchedulerTimingMetricsInner { /// Time spent making processing decisions. 
@@ -210,35 +272,36 @@ pub struct SchedulerTimingMetricsInner { pub receive_completed_time_us: u64, } -impl Deref for SchedulerTimingMetrics { - type Target = SchedulerTimingMetricsInner; - fn deref(&self) -> &Self::Target { - &self.metrics - } -} - -impl DerefMut for SchedulerTimingMetrics { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.metrics - } -} - -impl SchedulerTimingMetrics { - pub fn maybe_report_and_reset(&mut self, should_report: bool) { +impl IntervalSchedulerTimingMetrics { + fn maybe_report_and_reset(&mut self, should_report: bool) { const REPORT_INTERVAL_MS: u64 = 1000; if self.interval.should_update(REPORT_INTERVAL_MS) { if should_report { - self.report("banking_stage_scheduler_timing"); + self.metrics.report("banking_stage_scheduler_timing", None); } - self.reset(); + self.metrics.reset(); + } + } +} + +impl SlotSchedulerTimingMetrics { + fn maybe_report_and_reset(&mut self, slot: Option) { + if self.slot != slot { + // Only report if there was an assigned slot. + if self.slot.is_some() { + self.metrics + .report("banking_stage_scheduler_slot_counts", self.slot); + } + self.metrics.reset(); + self.slot = slot; } } } impl SchedulerTimingMetricsInner { - fn report(&self, name: &'static str) { - datapoint_info!( - name, + fn report(&self, name: &'static str, slot: Option) { + let mut datapoint = create_datapoint!( + @point name, ("decision_time_us", self.decision_time_us, i64), ("receive_time_us", self.receive_time_us, i64), ("buffer_time_us", self.buffer_time_us, i64), @@ -252,6 +315,10 @@ impl SchedulerTimingMetricsInner { i64 ) ); + if let Some(slot) = slot { + datapoint.add_field_i64("slot", slot as i64); + } + solana_metrics::submit(datapoint, log::Level::Info); } fn reset(&mut self) { From e74d5ccca32086b8ab5b7ba1e0584a1292a550f2 Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Sat, 24 Feb 2024 02:31:32 +0000 Subject: [PATCH 255/401] hash: Use `finalize().into()` instead of `try_into()` for hash results (#35300) `sha2` and `sha3` crates already moved to `generic-array` 0.14.7, which means that we can safely convert the hash result to a sized array just by calling `finalize().into()`, which doesn't return any errors. 
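
As a minimal illustration of the pattern this change relies on (a sketch
for this commit message only, not part of the diffs below; the
`hash_bytes` helper is hypothetical and assumes the `sha2` 0.10 /
`digest` 0.10 API):

    use sha2::{Digest, Sha256};

    // Returns a plain [u8; 32] with no fallible conversion step.
    fn hash_bytes(data: &[u8]) -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(data);
        // finalize() yields a GenericArray<u8, U32>; generic-array 0.14
        // implements Into<[u8; 32]> for it, so the old
        // <[u8; 32]>::try_from(...).unwrap() round trip is unnecessary.
        hasher.finalize().into()
    }

The `keccak` hasher below converts the same way; the blake3 call sites
rely instead on blake3's own conversion from its `Hash` type into
`[u8; 32]` rather than going through generic-array.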
--- Cargo.lock | 15 +++++++++------ Cargo.toml | 2 +- frozen-abi/src/hash.rs | 6 ++---- programs/sbf/Cargo.lock | 15 +++++++++------ sdk/program/src/keccak.rs | 4 +--- sdk/program/src/message/legacy.rs | 2 +- sdk/program/src/message/versions/mod.rs | 2 +- 7 files changed, 24 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79e9adfaaf15e5..53b43b2822138b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2888,9 +2888,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] [[package]] name = "kernel32-sys" @@ -4985,9 +4988,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.4" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaedf34ed289ea47c2b741bb72e5357a209512d67bcd4bda44359e5bf0470f56" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", "keccak", @@ -6611,7 +6614,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.8", - "sha3 0.10.4", + "sha3 0.10.8", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -7055,7 +7058,7 @@ dependencies = [ "serde_json", "serde_with", "sha2 0.10.8", - "sha3 0.10.4", + "sha3 0.10.8", "siphasher", "solana-frozen-abi", "solana-frozen-abi-macro", diff --git a/Cargo.toml b/Cargo.toml index 0ec4b780fe13e4..6ff16004e775df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -301,7 +301,7 @@ serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.32" serial_test = "2.0.0" sha2 = "0.10.8" -sha3 = "0.10.4" +sha3 = "0.10.8" signal-hook = "0.3.17" siphasher = "0.3.11" smallvec = "1.13.1" diff --git a/frozen-abi/src/hash.rs b/frozen-abi/src/hash.rs index 58020994623a86..15b57c30b38c16 100644 --- a/frozen-abi/src/hash.rs +++ b/frozen-abi/src/hash.rs @@ -1,6 +1,6 @@ use { sha2::{Digest, Sha256}, - std::{convert::TryFrom, fmt}, + std::fmt, }; const HASH_BYTES: usize = 32; @@ -17,9 +17,7 @@ impl Hasher { self.hasher.update(val); } pub fn result(self) -> Hash { - // At the time of this writing, the sha2 library is stuck on an old version - // of generic_array (0.9.0). Decouple ourselves with a clone to our version. 
- Hash(<[u8; HASH_BYTES]>::try_from(self.hasher.finalize().as_slice()).unwrap()) + Hash(self.hasher.finalize().into()) } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ba4ea3f309d352..ea77521d9fcf26 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2490,9 +2490,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] [[package]] name = "kernel32-sys" @@ -4409,9 +4412,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.4" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaedf34ed289ea47c2b741bb72e5357a209512d67bcd4bda44359e5bf0470f56" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", "keccak", @@ -5381,7 +5384,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.8", - "sha3 0.10.4", + "sha3 0.10.8", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-sdk-macro", @@ -6169,7 +6172,7 @@ dependencies = [ "serde_json", "serde_with", "sha2 0.10.8", - "sha3 0.10.4", + "sha3 0.10.8", "siphasher", "solana-frozen-abi", "solana-frozen-abi-macro", diff --git a/sdk/program/src/keccak.rs b/sdk/program/src/keccak.rs index 6a1cfaf1113b7b..b25b0dfab19521 100644 --- a/sdk/program/src/keccak.rs +++ b/sdk/program/src/keccak.rs @@ -48,9 +48,7 @@ impl Hasher { } } pub fn result(self) -> Hash { - // At the time of this writing, the sha3 library is stuck on an old version - // of generic_array (0.9.0). Decouple ourselves with a clone to our version. - Hash(<[u8; HASH_BYTES]>::try_from(self.hasher.finalize().as_slice()).unwrap()) + Hash(self.hasher.finalize().into()) } } diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index 1a6a9239f4e0aa..32d7411ea4b476 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -481,7 +481,7 @@ impl Message { let mut hasher = blake3::Hasher::new(); hasher.update(b"solana-tx-message-v1"); hasher.update(message_bytes); - Hash(<[u8; crate::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap()) + Hash(hasher.finalize().into()) } pub fn compile_instruction(&self, ix: &Instruction) -> CompiledInstruction { diff --git a/sdk/program/src/message/versions/mod.rs b/sdk/program/src/message/versions/mod.rs index 301490a2aa7e7d..70a1091aec3cf8 100644 --- a/sdk/program/src/message/versions/mod.rs +++ b/sdk/program/src/message/versions/mod.rs @@ -148,7 +148,7 @@ impl VersionedMessage { let mut hasher = blake3::Hasher::new(); hasher.update(b"solana-tx-message-v1"); hasher.update(message_bytes); - Hash(<[u8; crate::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap()) + Hash(hasher.finalize().into()) } } From 2fd2f34b35f1435c77fcb0fc81e5f755bb36cb2d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:03:33 +0800 Subject: [PATCH 256/401] build(deps): bump syn from 2.0.50 to 2.0.51 (#35317) * build(deps): bump syn from 2.0.50 to 2.0.51 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.50 to 2.0.51. 
- [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.50...2.0.51) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 66 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 62 +++++++++++++++++++------------------- 2 files changed, 64 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53b43b2822138b..9c1ef7c057a209 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -459,7 +459,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -607,7 +607,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -775,7 +775,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "syn_derive", ] @@ -1535,7 +1535,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1546,7 +1546,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1608,7 +1608,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1732,7 +1732,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1838,7 +1838,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -2102,7 +2102,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3376,7 +3376,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3450,7 +3450,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3946,7 +3946,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -4114,7 +4114,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -4796,7 +4796,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -4850,7 +4850,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -4900,7 +4900,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -6031,7 +6031,7 @@ 
dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7081,7 +7081,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7796,7 +7796,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7808,7 +7808,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.50", + "syn 2.0.51", "thiserror", ] @@ -7866,7 +7866,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8054,9 +8054,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" dependencies = [ "proc-macro2", "quote", @@ -8072,7 +8072,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8243,7 +8243,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8255,7 +8255,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "test-case-core", ] @@ -8291,7 +8291,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8428,7 +8428,7 @@ source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7 dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8674,7 +8674,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8977,7 +8977,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "wasm-bindgen-shared", ] @@ -9011,7 +9011,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9305,7 +9305,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -9325,7 +9325,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ea77521d9fcf26..413846d72a34d2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -425,7 +425,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -573,7 +573,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -726,7 +726,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "syn_derive", ] @@ -1238,7 +1238,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1249,7 +1249,7 @@ checksum = 
"29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1424,7 +1424,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1533,7 +1533,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1780,7 +1780,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3019,7 +3019,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3092,7 +3092,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3547,7 +3547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3688,7 +3688,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -4266,7 +4266,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -4311,7 +4311,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -5060,7 +5060,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -6192,7 +6192,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -6732,7 +6732,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -6744,7 +6744,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.50", + "syn 2.0.51", "thiserror", ] @@ -6792,7 +6792,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -6980,9 +6980,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" dependencies = [ "proc-macro2", "quote", @@ -6998,7 +6998,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7155,7 +7155,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7167,7 +7167,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "test-case-core", ] @@ -7203,7 +7203,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7326,7 +7326,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7544,7 +7544,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -7826,7 +7826,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "wasm-bindgen-shared", ] @@ -7860,7 +7860,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8145,7 +8145,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -8165,7 +8165,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] From 6c6e691c808a5fe2ca52407f6ad31404155f388a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:04:25 +0800 Subject: [PATCH 257/401] build(deps): bump socket2 from 0.5.5 to 0.5.6 (#35318) * build(deps): bump socket2 from 0.5.5 to 0.5.6 Bumps [socket2](https://github.com/rust-lang/socket2) from 0.5.5 to 0.5.6. - [Release notes](https://github.com/rust-lang/socket2/releases) - [Changelog](https://github.com/rust-lang/socket2/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/socket2/compare/v0.5.5...v0.5.6) --- updated-dependencies: - dependency-name: socket2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c1ef7c057a209..db44628b30dec7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2511,7 +2511,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -4166,7 +4166,7 @@ checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", - "socket2 0.5.5", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -5104,12 +5104,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6475,7 +6475,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_derive", - "socket2 0.5.5", + "socket2 0.5.6", "solana-logger", "solana-sdk", "solana-version", diff --git a/Cargo.toml b/Cargo.toml index 6ff16004e775df..89d163684a22bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -306,7 +306,7 @@ signal-hook = "0.3.17" siphasher = "0.3.11" smallvec = "1.13.1" smpl_jwt = "0.7.1" -socket2 = "0.5.5" +socket2 = "0.5.6" soketto = "0.7" solana-account-decoder = { path = "account-decoder", version = "=1.19.0" } solana-accounts-db = { path = "accounts-db", version = 
"=1.19.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 413846d72a34d2..dd903dc12765bb 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2124,7 +2124,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3734,7 +3734,7 @@ checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", - "socket2 0.5.5", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -4528,12 +4528,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -5284,7 +5284,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_derive", - "socket2 0.5.5", + "socket2 0.5.6", "solana-logger", "solana-sdk", "solana-version", From c8ee4f59ade88f8e5c097a6bc1569fd9a2d26c35 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 26 Feb 2024 15:58:40 +0000 Subject: [PATCH 258/401] uses struct instead of tuple for Merkle shreds variant (#35303) Working towards adding a new Merkle shred variant with retransmitter's signature, the commit uses struct instead of tuple to describe Merkle shred variant. --- ledger/src/shred.rs | 252 ++++++++++++++++++++++++--------- ledger/src/shred/common.rs | 4 +- ledger/src/shred/merkle.rs | 165 +++++++++++++++------ ledger/src/shred/shred_data.rs | 6 +- 4 files changed, 316 insertions(+), 111 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index e3c896f71befa8..c2219c1370d47c 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -200,8 +200,8 @@ enum ShredVariant { // 0b0110_???? MerkleCode chained // 0b1000_???? MerkleData // 0b1001_???? MerkleData chained - MerkleCode(/*proof_size:*/ u8, /*chained:*/ bool), // 0b01?0_???? - MerkleData(/*proof_size:*/ u8, /*chained:*/ bool), // 0b100?_???? + MerkleCode { proof_size: u8, chained: bool }, // 0b01?0_???? + MerkleData { proof_size: u8, chained: bool }, // 0b100?_???? } /// A common header that is present in data and code shred headers @@ -390,11 +390,11 @@ impl Shred { let shred = legacy::ShredData::from_payload(shred)?; Self::from(ShredData::from(shred)) } - ShredVariant::MerkleCode(..) => { + ShredVariant::MerkleCode { .. } => { let shred = merkle::ShredCode::from_payload(shred)?; Self::from(ShredCode::from(shred)) } - ShredVariant::MerkleData(..) => { + ShredVariant::MerkleData { .. 
} => { let shred = merkle::ShredData::from_payload(shred)?; Self::from(ShredData::from(shred)) } @@ -653,12 +653,18 @@ pub mod layout { let chunk = shred.get(self::legacy::SIGNED_MESSAGE_OFFSETS)?; SignedData::Chunk(chunk) } - ShredVariant::MerkleCode(proof_size, chained) => { + ShredVariant::MerkleCode { + proof_size, + chained, + } => { let merkle_root = self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained)?; SignedData::MerkleRoot(merkle_root) } - ShredVariant::MerkleData(proof_size, chained) => { + ShredVariant::MerkleData { + proof_size, + chained, + } => { let merkle_root = self::merkle::ShredData::get_merkle_root(shred, proof_size, chained)?; SignedData::MerkleRoot(merkle_root) @@ -677,8 +683,8 @@ pub mod layout { // Merkle shreds sign merkle tree root which can be recovered from // the merkle proof embedded in the payload but itself is not // stored the payload. - ShredVariant::MerkleCode(..) => None, - ShredVariant::MerkleData(..) => None, + ShredVariant::MerkleCode { .. } => None, + ShredVariant::MerkleData { .. } => None, } } @@ -695,12 +701,14 @@ pub mod layout { pub fn get_merkle_root(shred: &[u8]) -> Option { match get_shred_variant(shred).ok()? { ShredVariant::LegacyCode | ShredVariant::LegacyData => None, - ShredVariant::MerkleCode(proof_size, chained) => { - merkle::ShredCode::get_merkle_root(shred, proof_size, chained) - } - ShredVariant::MerkleData(proof_size, chained) => { - merkle::ShredData::get_merkle_root(shred, proof_size, chained) - } + ShredVariant::MerkleCode { + proof_size, + chained, + } => merkle::ShredCode::get_merkle_root(shred, proof_size, chained), + ShredVariant::MerkleData { + proof_size, + chained, + } => merkle::ShredData::get_merkle_root(shred, proof_size, chained), } } @@ -719,9 +727,8 @@ pub mod layout { let shred = get_shred(packet).unwrap(); let merkle_proof_size = match get_shred_variant(shred).unwrap() { ShredVariant::LegacyCode | ShredVariant::LegacyData => None, - ShredVariant::MerkleCode(proof_size, _) | ShredVariant::MerkleData(proof_size, _) => { - Some(proof_size) - } + ShredVariant::MerkleCode { proof_size, .. } + | ShredVariant::MerkleData { proof_size, .. } => Some(proof_size), }; let coin_flip: bool = rng.gen(); if coin_flip { @@ -802,8 +809,8 @@ impl From for ShredType { match shred_variant { ShredVariant::LegacyCode => ShredType::Code, ShredVariant::LegacyData => ShredType::Data, - ShredVariant::MerkleCode(..) => ShredType::Code, - ShredVariant::MerkleData(..) => ShredType::Data, + ShredVariant::MerkleCode { .. } => ShredType::Code, + ShredVariant::MerkleData { .. 
} => ShredType::Data, } } } @@ -813,10 +820,22 @@ impl From for u8 { match shred_variant { ShredVariant::LegacyCode => u8::from(ShredType::Code), ShredVariant::LegacyData => u8::from(ShredType::Data), - ShredVariant::MerkleCode(proof_size, false) => proof_size | 0x40, - ShredVariant::MerkleCode(proof_size, true) => proof_size | 0x60, - ShredVariant::MerkleData(proof_size, false) => proof_size | 0x80, - ShredVariant::MerkleData(proof_size, true) => proof_size | 0x90, + ShredVariant::MerkleCode { + proof_size, + chained: false, + } => proof_size | 0x40, + ShredVariant::MerkleCode { + proof_size, + chained: true, + } => proof_size | 0x60, + ShredVariant::MerkleData { + proof_size, + chained: false, + } => proof_size | 0x80, + ShredVariant::MerkleData { + proof_size, + chained: true, + } => proof_size | 0x90, } } } @@ -831,14 +850,22 @@ impl TryFrom for ShredVariant { } else { let proof_size = shred_variant & 0x0F; match shred_variant & 0xF0 { - 0x40 => Ok(ShredVariant::MerkleCode( - proof_size, /*chained:*/ false, - )), - 0x60 => Ok(ShredVariant::MerkleCode(proof_size, /*chained:*/ true)), - 0x80 => Ok(ShredVariant::MerkleData( - proof_size, /*chained:*/ false, - )), - 0x90 => Ok(ShredVariant::MerkleData(proof_size, /*chained:*/ true)), + 0x40 => Ok(ShredVariant::MerkleCode { + proof_size, + chained: false, + }), + 0x60 => Ok(ShredVariant::MerkleCode { + proof_size, + chained: true, + }), + 0x80 => Ok(ShredVariant::MerkleData { + proof_size, + chained: false, + }), + 0x90 => Ok(ShredVariant::MerkleData { + proof_size, + chained: true, + }), _ => Err(Error::InvalidShredVariant), } } @@ -858,7 +885,7 @@ pub(crate) fn recover( ShredVariant::LegacyData | ShredVariant::LegacyCode => { Shredder::try_recovery(shreds, reed_solomon_cache) } - ShredVariant::MerkleCode(..) | ShredVariant::MerkleData(..) => { + ShredVariant::MerkleCode { .. } | ShredVariant::MerkleData { .. } => { let shreds = shreds .into_iter() .map(merkle::Shred::try_from) @@ -996,20 +1023,20 @@ pub fn should_discard_shred( return true; } } - ShredVariant::MerkleCode(_, /*chained:*/ false) => { + ShredVariant::MerkleCode { chained: false, .. } => { stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1); } - ShredVariant::MerkleCode(_, /*chained:*/ true) => { + ShredVariant::MerkleCode { chained: true, .. } => { if !enable_chained_merkle_shreds(slot) { return true; } stats.num_shreds_merkle_code_chained = stats.num_shreds_merkle_code_chained.saturating_add(1); } - ShredVariant::MerkleData(_, /*chained:*/ false) => { + ShredVariant::MerkleData { chained: false, .. } => { stats.num_shreds_merkle_data = stats.num_shreds_merkle_data.saturating_add(1); } - ShredVariant::MerkleData(_, /*chained:*/ true) => { + ShredVariant::MerkleData { chained: true, .. } => { if !enable_chained_merkle_shreds(slot) { return true; } @@ -1133,8 +1160,11 @@ mod tests { ); assert_eq!( SIZE_OF_SHRED_VARIANT, - bincode::serialized_size(&ShredVariant::MerkleCode(15, /*chained:*/ true)).unwrap() - as usize + bincode::serialized_size(&ShredVariant::MerkleCode { + proof_size: 15, + chained: true, + }) + .unwrap() as usize ); assert_eq!( SIZE_OF_SHRED_SLOT, @@ -1438,114 +1468,204 @@ mod tests { ); // Merkle coding shred. 
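// [Editorial aside -- not part of the upstream diff] The assertions that
// follow exercise the nibble packing documented on the enum above: the high
// nibble selects the variant and the low nibble carries proof_size. A
// standalone sketch of that encoding (illustrative names, not the crate's API):
fn encode_variant_byte(is_code: bool, chained: bool, proof_size: u8) -> u8 {
    assert!(proof_size <= 0x0F); // proof_size must fit in the low nibble
    let tag: u8 = match (is_code, chained) {
        (true, false) => 0x40,  // MerkleCode
        (true, true) => 0x60,   // MerkleCode, chained
        (false, false) => 0x80, // MerkleData
        (false, true) => 0x90,  // MerkleData, chained
    };
    tag | proof_size
}
// e.g. encode_variant_byte(true, false, 5) == 0b0100_0101, matching the
// MerkleCode case asserted below; decoding inverts this by masking with 0xF0
// and 0x0F, as in the TryFrom impl above.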
assert_eq!( - u8::from(ShredVariant::MerkleCode(5, /*chained:*/ false)), + u8::from(ShredVariant::MerkleCode { + proof_size: 5, + chained: false, + }), 0b0100_0101 ); assert_eq!( - u8::from(ShredVariant::MerkleCode(5, /*chained:*/ true)), + u8::from(ShredVariant::MerkleCode { + proof_size: 5, + chained: true, + }), 0b0110_0101 ); for chained in [false, true] { assert_eq!( - ShredType::from(ShredVariant::MerkleCode(5, chained)), + ShredType::from(ShredVariant::MerkleCode { + proof_size: 5, + chained, + }), ShredType::Code ); } assert_matches!( ShredVariant::try_from(0b0100_0101), - Ok(ShredVariant::MerkleCode(5, /*chained:*/ false)) + Ok(ShredVariant::MerkleCode { + proof_size: 5, + chained: false, + }) ); assert_matches!( ShredVariant::try_from(0b0110_0101), - Ok(ShredVariant::MerkleCode(5, /*chained:*/ true)) + Ok(ShredVariant::MerkleCode { + proof_size: 5, + chained: true, + }) ); - let buf = bincode::serialize(&ShredVariant::MerkleCode(5, /*chained:*/ false)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleCode { + proof_size: 5, + chained: false, + }) + .unwrap(); assert_eq!(buf, vec![0b0100_0101]); assert_matches!( bincode::deserialize::(&[0b0100_0101]), - Ok(ShredVariant::MerkleCode(5, /*chained:*/ false)) + Ok(ShredVariant::MerkleCode { + proof_size: 5, + chained: false, + }) ); - let buf = bincode::serialize(&ShredVariant::MerkleCode(5, /*chained:*/ true)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleCode { + proof_size: 5, + chained: true, + }) + .unwrap(); assert_eq!(buf, vec![0b0110_0101]); assert_matches!( bincode::deserialize::(&[0b0110_0101]), - Ok(ShredVariant::MerkleCode(5, /*chained:*/ true)) + Ok(ShredVariant::MerkleCode { + proof_size: 5, + chained: true, + }) ); for (proof_size, chained) in iproduct!(0..=15u8, [false, true]) { let byte = proof_size | if chained { 0b0110_0000 } else { 0b0100_0000 }; assert_eq!( - u8::from(ShredVariant::MerkleCode(proof_size, chained)), + u8::from(ShredVariant::MerkleCode { + proof_size, + chained, + }), byte ); assert_eq!( - ShredType::from(ShredVariant::MerkleCode(proof_size, chained)), + ShredType::from(ShredVariant::MerkleCode { + proof_size, + chained, + }), ShredType::Code ); assert_eq!( ShredVariant::try_from(byte).unwrap(), - ShredVariant::MerkleCode(proof_size, chained) + ShredVariant::MerkleCode { + proof_size, + chained, + }, ); - let buf = bincode::serialize(&ShredVariant::MerkleCode(proof_size, chained)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleCode { + proof_size, + chained, + }) + .unwrap(); assert_eq!(buf, vec![byte]); assert_eq!( bincode::deserialize::(&[byte]).unwrap(), - ShredVariant::MerkleCode(proof_size, chained) + ShredVariant::MerkleCode { + proof_size, + chained, + } ); } // Merkle data shred. 
assert_eq!( - u8::from(ShredVariant::MerkleData(10, /*chained:*/ false)), + u8::from(ShredVariant::MerkleData { + proof_size: 10, + chained: false, + }), 0b1000_1010 ); assert_eq!( - u8::from(ShredVariant::MerkleData(10, /*chained:*/ true)), + u8::from(ShredVariant::MerkleData { + proof_size: 10, + chained: true, + }), 0b1001_1010 ); for chained in [false, true] { assert_eq!( - ShredType::from(ShredVariant::MerkleData(10, chained)), + ShredType::from(ShredVariant::MerkleData { + proof_size: 10, + chained, + }), ShredType::Data ); } assert_matches!( ShredVariant::try_from(0b1000_1010), - Ok(ShredVariant::MerkleData(10, /*chained:*/ false)) + Ok(ShredVariant::MerkleData { + proof_size: 10, + chained: false, + }) ); assert_matches!( ShredVariant::try_from(0b1001_1010), - Ok(ShredVariant::MerkleData(10, /*chained:*/ true)) + Ok(ShredVariant::MerkleData { + proof_size: 10, + chained: true, + }) ); - let buf = bincode::serialize(&ShredVariant::MerkleData(10, /*chained:*/ false)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleData { + proof_size: 10, + chained: false, + }) + .unwrap(); assert_eq!(buf, vec![0b1000_1010]); assert_matches!( bincode::deserialize::(&[0b1000_1010]), - Ok(ShredVariant::MerkleData(10, /*chained:*/ false)) + Ok(ShredVariant::MerkleData { + proof_size: 10, + chained: false, + }) ); - let buf = bincode::serialize(&ShredVariant::MerkleData(10, /*chained:*/ true)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleData { + proof_size: 10, + chained: true, + }) + .unwrap(); assert_eq!(buf, vec![0b1001_1010]); assert_matches!( bincode::deserialize::(&[0b1001_1010]), - Ok(ShredVariant::MerkleData(10, /*chained:*/ true)) + Ok(ShredVariant::MerkleData { + proof_size: 10, + chained: true, + }) ); for (proof_size, chained) in iproduct!(0..=15u8, [false, true]) { let byte = proof_size | if chained { 0b1001_0000 } else { 0b1000_0000 }; assert_eq!( - u8::from(ShredVariant::MerkleData(proof_size, chained)), + u8::from(ShredVariant::MerkleData { + proof_size, + chained, + }), byte ); assert_eq!( - ShredType::from(ShredVariant::MerkleData(proof_size, chained)), + ShredType::from(ShredVariant::MerkleData { + proof_size, + chained, + }), ShredType::Data ); assert_eq!( ShredVariant::try_from(byte).unwrap(), - ShredVariant::MerkleData(proof_size, chained) + ShredVariant::MerkleData { + proof_size, + chained, + } ); - let buf = bincode::serialize(&ShredVariant::MerkleData(proof_size, chained)).unwrap(); + let buf = bincode::serialize(&ShredVariant::MerkleData { + proof_size, + chained, + }) + .unwrap(); assert_eq!(buf, vec![byte]); assert_eq!( bincode::deserialize::(&[byte]).unwrap(), - ShredVariant::MerkleData(proof_size, chained) + ShredVariant::MerkleData { + proof_size, + chained, + } ); } } diff --git a/ledger/src/shred/common.rs b/ledger/src/shred/common.rs index 64b4c775469a24..af05532a3e361c 100644 --- a/ledger/src/shred/common.rs +++ b/ledger/src/shred/common.rs @@ -56,7 +56,7 @@ macro_rules! impl_shred_common { self.common_header.index = index; bincode::serialize_into(&mut self.payload[..], &self.common_header).unwrap(); } - ShredVariant::MerkleCode(..) | ShredVariant::MerkleData(..) => { + ShredVariant::MerkleCode { .. } | ShredVariant::MerkleData { .. } => { panic!("Not Implemented!"); } } @@ -69,7 +69,7 @@ macro_rules! impl_shred_common { self.common_header.slot = slot; bincode::serialize_into(&mut self.payload[..], &self.common_header).unwrap(); } - ShredVariant::MerkleCode(..) | ShredVariant::MerkleData(..) => { + ShredVariant::MerkleCode { .. 
} | ShredVariant::MerkleData { .. } => { panic!("Not Implemented!"); } } diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index ebc4a711b8c774..f92c3616f5c86e 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -114,8 +114,8 @@ impl Shred { fn from_payload(shred: Vec) -> Result { match shred::layout::get_shred_variant(&shred)? { ShredVariant::LegacyCode | ShredVariant::LegacyData => Err(Error::InvalidShredVariant), - ShredVariant::MerkleCode(..) => Ok(Self::ShredCode(ShredCode::from_payload(shred)?)), - ShredVariant::MerkleData(..) => Ok(Self::ShredData(ShredData::from_payload(shred)?)), + ShredVariant::MerkleCode { .. } => Ok(Self::ShredCode(ShredCode::from_payload(shred)?)), + ShredVariant::MerkleData { .. } => Ok(Self::ShredData(ShredData::from_payload(shred)?)), } } } @@ -138,7 +138,7 @@ impl ShredData { // proof_size is the number of merkle proof entries. fn proof_size(&self) -> Result { match self.common_header.shred_variant { - ShredVariant::MerkleData(proof_size, _) => Ok(proof_size), + ShredVariant::MerkleData { proof_size, .. } => Ok(proof_size), _ => Err(Error::InvalidShredVariant), } } @@ -160,7 +160,11 @@ impl ShredData { // Where the merkle proof starts in the shred binary. fn proof_offset(&self) -> Result { - let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleData { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; Self::get_proof_offset(proof_size, chained) @@ -173,8 +177,10 @@ impl ShredData { } fn chained_merkle_root_offset(&self) -> Result { - let ShredVariant::MerkleData(proof_size, /*chained:*/ true) = - self.common_header.shred_variant + let ShredVariant::MerkleData { + proof_size, + chained: true, + } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; @@ -225,7 +231,11 @@ impl ShredData { // Deserialize headers. let mut cursor = Cursor::new(&shard[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - let ShredVariant::MerkleData(proof_size, chained) = common_header.shred_variant else { + let ShredVariant::MerkleData { + proof_size, + chained, + } = common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; if ShredCode::capacity(proof_size, chained)? != shard_size { @@ -264,7 +274,10 @@ impl ShredData { pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), - ShredVariant::MerkleData(proof_size, chained) + ShredVariant::MerkleData { + proof_size, + chained, + }, ); // Shred index in the erasure batch. let index = { @@ -287,7 +300,7 @@ impl ShredCode { // proof_size is the number of merkle proof entries. fn proof_size(&self) -> Result { match self.common_header.shred_variant { - ShredVariant::MerkleCode(proof_size, _) => Ok(proof_size), + ShredVariant::MerkleCode { proof_size, .. } => Ok(proof_size), _ => Err(Error::InvalidShredVariant), } } @@ -307,7 +320,11 @@ impl ShredCode { // Where the merkle proof starts in the shred binary. 
fn proof_offset(&self) -> Result { - let ShredVariant::MerkleCode(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleCode { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; Self::get_proof_offset(proof_size, chained) @@ -320,8 +337,10 @@ impl ShredCode { } fn chained_merkle_root_offset(&self) -> Result { - let ShredVariant::MerkleCode(proof_size, /*chained:*/ true) = - self.common_header.shred_variant + let ShredVariant::MerkleCode { + proof_size, + chained: true, + } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; @@ -371,7 +390,11 @@ impl ShredCode { chained_merkle_root: &Option, mut shard: Vec, ) -> Result { - let ShredVariant::MerkleCode(proof_size, chained) = common_header.shred_variant else { + let ShredVariant::MerkleCode { + proof_size, + chained, + } = common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; let shard_size = shard.len(); @@ -418,7 +441,10 @@ impl ShredCode { pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), - ShredVariant::MerkleCode(proof_size, chained) + ShredVariant::MerkleCode { + proof_size, + chained, + }, ); // Shred index in the erasure batch. let index = { @@ -461,7 +487,7 @@ impl<'a> ShredTrait<'a> for ShredData { payload.truncate(Self::SIZE_OF_PAYLOAD); let mut cursor = Cursor::new(&payload[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - if !matches!(common_header.shred_variant, ShredVariant::MerkleData(..)) { + if !matches!(common_header.shred_variant, ShredVariant::MerkleData { .. }) { return Err(Error::InvalidShredVariant); } let data_header = deserialize_from_with_limit(&mut cursor)?; @@ -485,7 +511,11 @@ impl<'a> ShredTrait<'a> for ShredData { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleData { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; @@ -499,7 +529,11 @@ impl<'a> ShredTrait<'a> for ShredData { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleData { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; @@ -510,7 +544,7 @@ impl<'a> ShredTrait<'a> for ShredData { fn sanitize(&self) -> Result<(), Error> { let shred_variant = self.common_header.shred_variant; - if !matches!(shred_variant, ShredVariant::MerkleData(..)) { + if !matches!(shred_variant, ShredVariant::MerkleData { .. 
}) { return Err(Error::InvalidShredVariant); } let _ = self.merkle_proof()?; @@ -532,7 +566,7 @@ impl<'a> ShredTrait<'a> for ShredCode { fn from_payload(mut payload: Vec) -> Result { let mut cursor = Cursor::new(&payload[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - if !matches!(common_header.shred_variant, ShredVariant::MerkleCode(..)) { + if !matches!(common_header.shred_variant, ShredVariant::MerkleCode { .. }) { return Err(Error::InvalidShredVariant); } let coding_header = deserialize_from_with_limit(&mut cursor)?; @@ -561,7 +595,11 @@ impl<'a> ShredTrait<'a> for ShredCode { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let ShredVariant::MerkleCode(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleCode { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; @@ -575,7 +613,11 @@ impl<'a> ShredTrait<'a> for ShredCode { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - let ShredVariant::MerkleCode(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleCode { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; @@ -586,7 +628,7 @@ impl<'a> ShredTrait<'a> for ShredCode { fn sanitize(&self) -> Result<(), Error> { let shred_variant = self.common_header.shred_variant; - if !matches!(shred_variant, ShredVariant::MerkleCode(..)) { + if !matches!(shred_variant, ShredVariant::MerkleCode { .. }) { return Err(Error::InvalidShredVariant); } let _ = self.merkle_proof()?; @@ -605,7 +647,11 @@ impl ShredDataTrait for ShredData { } fn data(&self) -> Result<&[u8], Error> { - let ShredVariant::MerkleData(proof_size, chained) = self.common_header.shred_variant else { + let ShredVariant::MerkleData { + proof_size, + chained, + } = self.common_header.shred_variant + else { return Err(Error::InvalidShredVariant); }; let data_buffer_size = Self::capacity(proof_size, chained)?; @@ -739,10 +785,13 @@ pub(super) fn recover( Some((common_header, coding_header, chained_merkle_root)) }) .ok_or(TooFewParityShards)?; - debug_assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode(..)); + debug_assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode { .. }); let (proof_size, chained) = match common_header.shred_variant { - ShredVariant::MerkleCode(proof_size, chained) => (proof_size, chained), - ShredVariant::MerkleData(..) | ShredVariant::LegacyCode | ShredVariant::LegacyData => { + ShredVariant::MerkleCode { + proof_size, + chained, + } => (proof_size, chained), + ShredVariant::MerkleData { .. 
} | ShredVariant::LegacyCode | ShredVariant::LegacyData => { return Err(Error::InvalidShredVariant); } }; @@ -763,7 +812,11 @@ pub(super) fn recover( && fec_set_index == &common_header.fec_set_index && match shred { Shred::ShredData(_) => { - shred_variant == &ShredVariant::MerkleData(proof_size, chained) + shred_variant + == &ShredVariant::MerkleData { + proof_size, + chained, + } } Shred::ShredCode(shred) => { let CodingShredHeader { @@ -771,7 +824,11 @@ pub(super) fn recover( num_coding_shreds, position: _, } = shred.coding_header; - shred_variant == &ShredVariant::MerkleCode(proof_size, chained) + shred_variant + == &ShredVariant::MerkleCode { + proof_size, + chained, + } && num_data_shreds == coding_header.num_data_shreds && num_coding_shreds == coding_header.num_coding_shreds } @@ -824,7 +881,11 @@ pub(super) fn recover( version, fec_set_index, } = shred.common_header; - if shred_variant != ShredVariant::MerkleData(proof_size, chained) + let expected_shred_variant = ShredVariant::MerkleData { + proof_size, + chained, + }; + if shred_variant != expected_shred_variant || common_header.slot != slot || common_header.version != version || common_header.fec_set_index != fec_set_index @@ -938,7 +999,10 @@ pub(super) fn make_shreds_from_data( let chunk_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_size; let mut common_header = ShredCommonHeader { signature: Signature::default(), - shred_variant: ShredVariant::MerkleData(proof_size, chained), + shred_variant: ShredVariant::MerkleData { + proof_size, + chained, + }, slot, index: next_shred_index, version: shred_version, @@ -989,7 +1053,10 @@ pub(super) fn make_shreds_from_data( .then_some((proof_size, data_buffer_size)) }) .ok_or(Error::UnknownProofSize)?; - common_header.shred_variant = ShredVariant::MerkleData(proof_size, chained); + common_header.shred_variant = ShredVariant::MerkleData { + proof_size, + chained, + }; common_header.fec_set_index = common_header.index; let chunks = if data.is_empty() { // Generate one data shred with empty data. @@ -1132,10 +1199,11 @@ fn make_erasure_batch( let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot); let num_coding_shreds = erasure_batch_size - num_data_shreds; let proof_size = get_proof_size(erasure_batch_size); - debug_assert!(shreds - .iter() - .all(|shred| shred.common_header.shred_variant - == ShredVariant::MerkleData(proof_size, chained))); + debug_assert!(shreds.iter().all(|shred| shred.common_header.shred_variant + == ShredVariant::MerkleData { + proof_size, + chained, + })); let mut common_header = match shreds.first() { None => return Err(Error::from(TooFewShards)), Some(shred) => shred.common_header, @@ -1159,7 +1227,10 @@ fn make_erasure_batch( let mut shreds: Vec<_> = shreds.into_iter().map(Shred::ShredData).collect(); // Initialize coding shreds from erasure coding shards. 
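// [Editorial aside -- not part of the upstream diff] `get_proof_size` above
// maps the erasure batch size to the number of Merkle proof entries; its body
// is not shown in this patch. One plausible sketch, assuming the proof length
// equals the depth of a binary Merkle tree over the batch, i.e.
// ceil(log2(erasure_batch_size)):
fn proof_size_sketch(erasure_batch_size: usize) -> u8 {
    assert!(erasure_batch_size > 0);
    // Bits needed to represent erasure_batch_size - 1, which equals
    // ceil(log2(erasure_batch_size)).
    let depth = usize::BITS - (erasure_batch_size - 1).leading_zeros();
    u8::try_from(depth).unwrap()
}
// e.g. proof_size_sketch(64) == 6 and proof_size_sketch(33) == 6 -- one
// sibling hash per tree level, which keeps proof_size inside the 4-bit
// nibble of the variant byte for batches of up to 2^15 shreds.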
common_header.index = next_code_index; - common_header.shred_variant = ShredVariant::MerkleCode(proof_size, chained); + common_header.shred_variant = ShredVariant::MerkleCode { + proof_size, + chained, + }; let mut coding_header = CodingShredHeader { num_data_shreds: num_data_shreds as u16, num_coding_shreds: num_coding_shreds as u16, @@ -1356,7 +1427,10 @@ mod test { let capacity = ShredData::capacity(proof_size, chained).unwrap(); let common_header = ShredCommonHeader { signature: Signature::default(), - shred_variant: ShredVariant::MerkleData(proof_size, chained), + shred_variant: ShredVariant::MerkleData { + proof_size, + chained, + }, slot: 145_865_705, index: 1835, version: rng.gen(), @@ -1411,7 +1485,10 @@ mod test { .unwrap(); for (i, code) in parity.into_iter().enumerate() { let common_header = ShredCommonHeader { - shred_variant: ShredVariant::MerkleCode(proof_size, chained), + shred_variant: ShredVariant::MerkleCode { + proof_size, + chained, + }, index: common_header.index + i as u32 + 7, ..common_header }; @@ -1457,7 +1534,7 @@ mod test { if shreds.iter().all(|shred| { matches!( shred.common_header().shred_variant, - ShredVariant::MerkleData(..) + ShredVariant::MerkleData { .. } ) }) { assert_matches!( @@ -1672,7 +1749,10 @@ mod test { assert_eq!(common_header.index, next_code_index + num_coding_shreds); assert_eq!( common_header.shred_variant, - ShredVariant::MerkleCode(proof_size, chained) + ShredVariant::MerkleCode { + proof_size, + chained, + } ); num_coding_shreds += 1; } @@ -1680,7 +1760,10 @@ mod test { assert_eq!(common_header.index, next_shred_index + num_data_shreds); assert_eq!( common_header.shred_variant, - ShredVariant::MerkleData(proof_size, chained) + ShredVariant::MerkleData { + proof_size, + chained, + } ); assert!(common_header.fec_set_index <= common_header.index); assert_eq!( diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index 5b9965afd787c8..725ec90e65a14c 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -97,8 +97,10 @@ impl ShredData { // Possibly zero pads bytes stored in blockstore. pub(crate) fn resize_stored_shred(shred: Vec) -> Result, Error> { match shred::layout::get_shred_variant(&shred)? { - ShredVariant::LegacyCode | ShredVariant::MerkleCode(..) => Err(Error::InvalidShredType), - ShredVariant::MerkleData(..) => { + ShredVariant::LegacyCode | ShredVariant::MerkleCode { .. } => { + Err(Error::InvalidShredType) + } + ShredVariant::MerkleData { .. 
} => { if shred.len() != merkle::ShredData::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(shred.len())); } From 0ab425b43b726612b4abce7649acd874b49bce52 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 26 Feb 2024 17:32:47 +0000 Subject: [PATCH 259/401] splits test_shred_variant_compat into separate test-cases (#35306) --- ledger/src/shred.rs | 148 +++++--------------------------------------- 1 file changed, 15 insertions(+), 133 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index c2219c1370d47c..d22d89943df78a 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -1102,10 +1102,10 @@ mod tests { super::*, assert_matches::assert_matches, bincode::serialized_size, - itertools::iproduct, rand::Rng, rand_chacha::{rand_core::SeedableRng, ChaChaRng}, solana_sdk::{shred_version, signature::Signer, signer::keypair::keypair_from_seed}, + test_case::test_case, }; const SIZE_OF_SHRED_INDEX: usize = 4; @@ -1466,72 +1466,13 @@ mod tests { bincode::deserialize::(&[0b1010_0101]), Ok(ShredVariant::LegacyData) ); - // Merkle coding shred. - assert_eq!( - u8::from(ShredVariant::MerkleCode { - proof_size: 5, - chained: false, - }), - 0b0100_0101 - ); - assert_eq!( - u8::from(ShredVariant::MerkleCode { - proof_size: 5, - chained: true, - }), - 0b0110_0101 - ); - for chained in [false, true] { - assert_eq!( - ShredType::from(ShredVariant::MerkleCode { - proof_size: 5, - chained, - }), - ShredType::Code - ); - } - assert_matches!( - ShredVariant::try_from(0b0100_0101), - Ok(ShredVariant::MerkleCode { - proof_size: 5, - chained: false, - }) - ); - assert_matches!( - ShredVariant::try_from(0b0110_0101), - Ok(ShredVariant::MerkleCode { - proof_size: 5, - chained: true, - }) - ); - let buf = bincode::serialize(&ShredVariant::MerkleCode { - proof_size: 5, - chained: false, - }) - .unwrap(); - assert_eq!(buf, vec![0b0100_0101]); - assert_matches!( - bincode::deserialize::(&[0b0100_0101]), - Ok(ShredVariant::MerkleCode { - proof_size: 5, - chained: false, - }) - ); - let buf = bincode::serialize(&ShredVariant::MerkleCode { - proof_size: 5, - chained: true, - }) - .unwrap(); - assert_eq!(buf, vec![0b0110_0101]); - assert_matches!( - bincode::deserialize::(&[0b0110_0101]), - Ok(ShredVariant::MerkleCode { - proof_size: 5, - chained: true, - }) - ); - for (proof_size, chained) in iproduct!(0..=15u8, [false, true]) { - let byte = proof_size | if chained { 0b0110_0000 } else { 0b0100_0000 }; + } + + #[test_case(false, 0b0100_0000)] + #[test_case(true, 0b0110_0000)] + fn test_shred_variant_compat_merkle_code(chained: bool, byte: u8) { + for proof_size in 0..=15u8 { + let byte = byte | proof_size; assert_eq!( u8::from(ShredVariant::MerkleCode { proof_size, @@ -1567,72 +1508,13 @@ mod tests { } ); } - // Merkle data shred. 
- assert_eq!( - u8::from(ShredVariant::MerkleData { - proof_size: 10, - chained: false, - }), - 0b1000_1010 - ); - assert_eq!( - u8::from(ShredVariant::MerkleData { - proof_size: 10, - chained: true, - }), - 0b1001_1010 - ); - for chained in [false, true] { - assert_eq!( - ShredType::from(ShredVariant::MerkleData { - proof_size: 10, - chained, - }), - ShredType::Data - ); - } - assert_matches!( - ShredVariant::try_from(0b1000_1010), - Ok(ShredVariant::MerkleData { - proof_size: 10, - chained: false, - }) - ); - assert_matches!( - ShredVariant::try_from(0b1001_1010), - Ok(ShredVariant::MerkleData { - proof_size: 10, - chained: true, - }) - ); - let buf = bincode::serialize(&ShredVariant::MerkleData { - proof_size: 10, - chained: false, - }) - .unwrap(); - assert_eq!(buf, vec![0b1000_1010]); - assert_matches!( - bincode::deserialize::(&[0b1000_1010]), - Ok(ShredVariant::MerkleData { - proof_size: 10, - chained: false, - }) - ); - let buf = bincode::serialize(&ShredVariant::MerkleData { - proof_size: 10, - chained: true, - }) - .unwrap(); - assert_eq!(buf, vec![0b1001_1010]); - assert_matches!( - bincode::deserialize::(&[0b1001_1010]), - Ok(ShredVariant::MerkleData { - proof_size: 10, - chained: true, - }) - ); - for (proof_size, chained) in iproduct!(0..=15u8, [false, true]) { - let byte = proof_size | if chained { 0b1001_0000 } else { 0b1000_0000 }; + } + + #[test_case(false, 0b1000_0000)] + #[test_case(true, 0b1001_0000)] + fn test_shred_variant_compat_merkle_data(chained: bool, byte: u8) { + for proof_size in 0..=15u8 { + let byte = byte | proof_size; assert_eq!( u8::from(ShredVariant::MerkleData { proof_size, From 8235feefc408853675e3711e6c2690521d5420b2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 26 Feb 2024 14:17:33 -0500 Subject: [PATCH 260/401] Removes get_for_tests() (#35311) --- accounts-db/src/accounts_index.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 3faae999bf1b9d..68e548c5eb3f35 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -2170,18 +2170,6 @@ pub mod tests { } } - impl + Into> AccountsIndex { - /// provides the ability to refactor this function on the api without bloody changes - pub fn get_for_tests( - &self, - pubkey: &Pubkey, - ancestors: Option<&Ancestors>, - max_root: Option, - ) -> AccountIndexGetResult { - self.get(pubkey, ancestors, max_root) - } - } - const COLLECT_ALL_UNSORTED_FALSE: bool = false; #[test] From 8143fc3f4ae1fe89a7c362639c39ba7b68d92d5c Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 26 Feb 2024 14:19:18 -0500 Subject: [PATCH 261/401] Replaces ReadAccountMapEntry in read_index_for_accessor_or_load_slow() (#35220) --- accounts-db/src/accounts_db.rs | 42 +++++++++---------------------- accounts-db/src/accounts_index.rs | 1 - 2 files changed, 12 insertions(+), 31 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 00648c99b7d6ad..5153d858559599 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5086,36 +5086,18 @@ impl AccountsDb { max_root: Option, clone_in_lock: bool, ) -> Option<(Slot, StorageLocation, Option>)> { - let (lock, index) = match self.accounts_index.get(pubkey, Some(ancestors), max_root) { - AccountIndexGetResult::Found(lock, index) => (lock, index), - // we bail out pretty early for missing. 
- AccountIndexGetResult::NotFound => { - return None; - } - }; - - let slot_list = lock.slot_list(); - let (slot, info) = slot_list[index]; - let storage_location = info.storage_location(); - let some_from_slow_path = if clone_in_lock { - // the fast path must have failed.... so take the slower approach - // of copying potentially large Account::data inside the lock. - - // calling check_and_get_loaded_account is safe as long as we're guaranteed to hold - // the lock during the time and there should be no purge thanks to alive ancestors - // held by our caller. - Some(self.get_account_accessor(slot, pubkey, &storage_location)) - } else { - None - }; - - Some((slot, storage_location, some_from_slow_path)) - // `lock` is dropped here rather pretty quickly with clone_in_lock = false, - // so the entry could be raced for mutation by other subsystems, - // before we actually provision an account data for caller's use from now on. - // This is traded for less contention and resultant performance, introducing fair amount of - // delicate handling in retry_to_get_account_accessor() below ;) - // you're warned! + self.accounts_index.get_with_and_then( + pubkey, + Some(ancestors), + max_root, + true, + |(slot, account_info)| { + let storage_location = account_info.storage_location(); + let account_accessor = clone_in_lock + .then(|| self.get_account_accessor(slot, pubkey, &storage_location)); + (slot, storage_location, account_accessor) + }, + ) } fn retry_to_get_account_accessor<'a>( diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 68e548c5eb3f35..3ddbb8e535e2fb 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1137,7 +1137,6 @@ impl + Into> AccountsIndex { /// Gets the index's entry for `pubkey`, with `ancestors` and `max_root`, /// and applies `callback` to it - #[cfg(test)] pub(crate) fn get_with_and_then( &self, pubkey: &Pubkey, From bf2e8ee32f2280692f94f1f21d6fe2cb82700852 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 26 Feb 2024 18:20:21 -0500 Subject: [PATCH 262/401] AccountsIndex::get_cloned() *must* add entry to in-mem cache (#35322) --- accounts-db/src/accounts_index.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 3ddbb8e535e2fb..5ac5d9a7a14908 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1158,9 +1158,11 @@ impl + Into> AccountsIndex { /// Gets the index's entry for `pubkey` and clones it /// /// Prefer `get_and_then()` whenever possible. - /// NOTE: The entry is *not* added to the in-mem cache. pub fn get_cloned(&self, pubkey: &Pubkey) -> Option> { - self.get_and_then(pubkey, |entry| (false, entry.cloned())) + // We *must* add the index entry to the in-mem cache! + // If the index entry is only on-disk, returning a clone would allow the entry + // to be modified, but those modifications would be lost on drop! + self.get_and_then(pubkey, |entry| (true, entry.cloned())) } /// Is `pubkey` in the index? 
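[Editorial aside -- not part of the patch series] The two accounts-index commits above turn on the same subtlety: an entry may live only in a disk-backed tier, and the bool returned by the lookup callback decides whether that entry is promoted into the in-memory cache. Below is a minimal two-tier sketch of that contract -- invented names, with plain HashMaps standing in for the real in-mem map and disk buckets, so illustrative only:

    use std::{collections::HashMap, sync::Arc};

    /// Hot in-mem cache in front of a slower disk-backed tier.
    struct TwoTierIndex<V> {
        in_mem: HashMap<u64, Arc<V>>,
        on_disk: HashMap<u64, Arc<V>>,
    }

    impl<V> TwoTierIndex<V> {
        /// Applies `callback` to the entry for `key`; the bool returned by
        /// the callback controls whether an entry found only in the slow
        /// tier is promoted into the in-mem cache.
        fn get_and_then<R>(
            &mut self,
            key: u64,
            callback: impl FnOnce(Option<&Arc<V>>) -> (bool, R),
        ) -> R {
            if let Some(entry) = self.in_mem.get(&key) {
                return callback(Some(entry)).1;
            }
            let loaded = self.on_disk.get(&key).cloned();
            let (promote, result) = callback(loaded.as_ref());
            if promote {
                if let Some(entry) = loaded {
                    self.in_mem.insert(key, entry);
                }
            }
            result
        }

        /// Clones the entry and *must* promote it: the clone shares state
        /// with the cached Arc, so modifications made through it would be
        /// lost when an unpromoted, disk-only entry is dropped.
        fn get_cloned(&mut self, key: u64) -> Option<Arc<V>> {
            self.get_and_then(key, |entry| (true, entry.cloned()))
        }
    }

Under this model a read-only peek may safely return false, but a cloning getter must return true, which is exactly the invariant the commit above restores.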
From 09925a11ebdb80b88c8d21b107378646474b30db Mon Sep 17 00:00:00 2001
From: steviez
Date: Mon, 26 Feb 2024 20:27:03 -0600
Subject: [PATCH 263/401] Remove the Blockstore thread pool used for fetching Entries (#34768)

There are several cases for fetching entries from the Blockstore:
- Fetching entries for block replay
- Fetching entries for CompletedDataSetService
- Fetching entries to service RPC getBlock requests

Each of these operations occurs in a different calling thread. However, the
current implementation utilizes a shared thread-pool within the Blockstore
function. There are several problems with this:
- The thread pool is shared between all of the listed cases, despite block
  replay being the most critical. These other services shouldn't be able to
  interfere with block replay
- The thread pool is overprovisioned for the average use; thread utilization
  on both regular validators and RPC nodes shows that many of the threads see
  very little activity. But the existence of these threads introduces
  "accounting" overhead
- rocksdb exposes an API to fetch multiple items at once, potentially with
  some parallelization under the hood. Using parallelization in our API and
  the underlying rocksdb is overkill and we're doing more damage than good.

This change removes that thread pool completely, and instead fetches all of
the desired entries in a single call. This has been observed to cause a minor
degradation in the time spent within the Blockstore
get_slot_entries_with_shred_info() function. Namely, some buffer copying and
deserialization that previously occurred in parallel now occur serially.
However, the metric that tracks the amount of time spent replaying blocks
(inclusive of fetch) is unchanged. Thus, despite spending marginally more
time to fetch/copy/deserialize with only a single thread, the gains from not
thrashing everything else with the pool keep us at parity.
---
 ledger/src/blockstore.rs | 106 +++++++++++++++++++++------------------
 1 file changed, 56 insertions(+), 50 deletions(-)

diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index cda801bb296e45..c01b1806a8fa27 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -28,6 +28,7 @@ use {
     bincode::{deserialize, serialize},
     crossbeam_channel::{bounded, Receiver, Sender, TrySendError},
     dashmap::DashSet,
+    itertools::Itertools,
     log::*,
     rand::Rng,
     rayon::{
@@ -44,7 +45,6 @@ use {
         datapoint_debug, datapoint_error,
         poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo},
     },
-    solana_rayon_threadlimit::get_max_thread_count,
     solana_runtime::bank::Bank,
     solana_sdk::{
         account::ReadableAccount,
@@ -97,11 +97,6 @@ pub use {
 // get_max_thread_count to match number of threads in the old code.
 // see: https://github.com/solana-labs/solana/pull/24853
 lazy_static!
{
-    static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
-        .num_threads(get_max_thread_count())
-        .thread_name(|i| format!("solBstore{i:02}"))
-        .build()
-        .unwrap();
     static ref PAR_THREAD_POOL_ALL_CPUS: ThreadPool = rayon::ThreadPoolBuilder::new()
         .num_threads(num_cpus::get())
         .thread_name(|i| format!("solBstoreAll{i:02}"))
@@ -3097,29 +3092,7 @@ impl Blockstore {
             .map(|(_, end_index)| u64::from(*end_index) - start_index + 1)
             .unwrap_or(0);

-        let entries: Result>> = if completed_ranges.len() <= 1 {
-            completed_ranges
-                .into_iter()
-                .map(|(start_index, end_index)| {
-                    self.get_entries_in_data_block(slot, start_index, end_index, Some(&slot_meta))
-                })
-                .collect()
-        } else {
-            PAR_THREAD_POOL.install(|| {
-                completed_ranges
-                    .into_par_iter()
-                    .map(|(start_index, end_index)| {
-                        self.get_entries_in_data_block(
-                            slot,
-                            start_index,
-                            end_index,
-                            Some(&slot_meta),
-                        )
-                    })
-                    .collect()
-            })
-        };
-        let entries: Vec = entries?.into_iter().flatten().collect();
+        let entries = self.get_slot_entries_in_block(slot, completed_ranges, Some(&slot_meta))?;
         Ok((entries, num_shreds, slot_meta.is_full()))
     }

@@ -3229,14 +3202,24 @@ impl Blockstore {
             .collect()
     }

-    pub fn get_entries_in_data_block(
+    /// Fetch the entries corresponding to all of the shred indices in `completed_ranges`.
+    /// This function takes advantage of the fact that `completed_ranges` are both
+    /// contiguous and in sorted order. To clarify, suppose completed_ranges is as follows:
+    ///   completed_ranges = [..., (s_i, e_i), (s_i+1, e_i+1), ...]
+    /// Then, the following statements are true:
+    ///   s_i < e_i < s_i+1 < e_i+1
+    ///   e_i + 1 == s_i+1
+    fn get_slot_entries_in_block(
         &self,
         slot: Slot,
-        start_index: u32,
-        end_index: u32,
+        completed_ranges: CompletedRanges,
         slot_meta: Option<&SlotMeta>,
     ) -> Result> {
-        let keys: Vec<(Slot, u64)> = (start_index..=end_index)
+        assert!(!completed_ranges.is_empty());
+
+        let (all_ranges_start_index, _) = *completed_ranges.first().unwrap();
+        let (_, all_ranges_end_index) = *completed_ranges.last().unwrap();
+        let keys: Vec<(Slot, u64)> = (all_ranges_start_index..=all_ranges_end_index)
             .map(|index| (slot, u64::from(index)))
             .collect();

@@ -3246,7 +3229,6 @@
             .into_iter()
             .collect();
         let data_shreds = data_shreds?;
-
         let data_shreds: Result> = data_shreds
             .into_iter()
@@ -3262,8 +3244,8 @@
                             idx,
                             slot_meta.consumed,
                             slot_meta.completed_data_indexes,
-                            start_index,
-                            end_index
+                            all_ranges_start_index,
+                            all_ranges_end_index
                         );
                     }
                 }
@@ -3281,21 +3263,46 @@
             })
             .collect();
         let data_shreds = data_shreds?;
-        let last_shred = data_shreds.last().unwrap();
-        assert!(last_shred.data_complete() || last_shred.last_in_slot());
-        let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| {
-            BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
-                "Could not reconstruct data block from constituent shreds, error: {e:?}"
-            ))))
-        })?;
+        completed_ranges
+            .into_iter()
+            .map(|(start_index, end_index)| {
+                // The indices from completed_ranges refer to shred indices in the
+                // entire block; map those indices to indices within data_shreds
+                let range_start_index = (start_index - all_ranges_start_index) as usize;
+                let range_end_index = (end_index - all_ranges_start_index) as usize;
+                let range_shreds = &data_shreds[range_start_index..=range_end_index];
+
+                let last_shred = range_shreds.last().unwrap();
+                assert!(last_shred.data_complete() || last_shred.last_in_slot());
+                trace!("{:?} data shreds in last FEC
set", data_shreds.len()); + + Shredder::deshred(range_shreds) + .map_err(|e| { + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + format!("could not reconstruct entries buffer from shreds: {e:?}"), + ))) + }) + .and_then(|payload| { + bincode::deserialize::>(&payload).map_err(|e| { + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + format!("could not reconstruct entries: {e:?}"), + ))) + }) + }) + }) + .flatten_ok() + .collect() + } - debug!("{:?} shreds in last FEC set", data_shreds.len(),); - bincode::deserialize::>(&deshred_payload).map_err(|e| { - BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!( - "could not reconstruct entries: {e:?}" - )))) - }) + pub fn get_entries_in_data_block( + &self, + slot: Slot, + start_index: u32, + end_index: u32, + slot_meta: Option<&SlotMeta>, + ) -> Result> { + self.get_slot_entries_in_block(slot, vec![(start_index, end_index)], slot_meta) } fn get_any_valid_slot_entries(&self, slot: Slot, start_index: u64) -> Vec { @@ -4795,7 +4802,6 @@ pub mod tests { assert_matches::assert_matches, bincode::serialize, crossbeam_channel::unbounded, - itertools::Itertools, rand::{seq::SliceRandom, thread_rng}, solana_account_decoder::parse_token::UiTokenAmount, solana_entry::entry::{next_entry, next_entry_mut}, From 8ad125d0c0688aaf2b62bb95b535ff988ed7f9ac Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Tue, 27 Feb 2024 02:08:29 -0500 Subject: [PATCH 264/401] rpc: optimize `getTokenLargestAccounts` (#35315) * rpc: optimize `getTokenLargestAccounts` * use tuple instead of struct * untuple Co-authored-by: Tyera --------- Co-authored-by: Tyera --- rpc/src/rpc.rs | 55 ++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 82eda9489ef247..7bde6b837f2a13 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -100,8 +100,8 @@ use { }, std::{ any::type_name, - cmp::{max, min}, - collections::{HashMap, HashSet}, + cmp::{max, min, Reverse}, + collections::{BinaryHeap, HashMap, HashSet}, convert::TryFrom, net::SocketAddr, str::FromStr, @@ -1861,36 +1861,39 @@ impl JsonRpcRequestProcessor { "Invalid param: not a Token mint".to_string(), )); } - let mut token_balances: Vec<_> = self - .get_filtered_spl_token_accounts_by_mint(&bank, &mint_owner, mint, vec![])? - .into_iter() - .map(|(address, account)| { - let amount = StateWithExtensions::::unpack(account.data()) - .map(|account| account.base.amount) - .unwrap_or(0); - (address, amount) - }) - .collect(); - let sort_largest = |a: &(_, u64), b: &(_, u64)| b.1.cmp(&a.1); + let mut token_balances = + BinaryHeap::>::with_capacity(NUM_LARGEST_ACCOUNTS); + for (address, account) in + self.get_filtered_spl_token_accounts_by_mint(&bank, &mint_owner, mint, vec![])? 
+ { + let amount = StateWithExtensions::::unpack(account.data()) + .map(|account| account.base.amount) + .unwrap_or(0); - let largest_token_balances = if token_balances.len() > NUM_LARGEST_ACCOUNTS { - token_balances - .select_nth_unstable_by(NUM_LARGEST_ACCOUNTS, sort_largest) - .0 - } else { - token_balances.as_mut_slice() - }; - largest_token_balances.sort_unstable_by(sort_largest); + let new_entry = (amount, address); + if token_balances.len() >= NUM_LARGEST_ACCOUNTS { + let Reverse(entry) = token_balances + .peek() + .expect("BinaryHeap::peek should succeed when len > 0"); + if *entry >= new_entry { + continue; + } + token_balances.pop(); + } + token_balances.push(Reverse(new_entry)); + } - let largest_token_balances = largest_token_balances - .iter() - .map(|(address, amount)| RpcTokenAccountBalance { + let token_balances = token_balances + .into_sorted_vec() + .into_iter() + .map(|Reverse((amount, address))| RpcTokenAccountBalance { address: address.to_string(), - amount: token_amount_to_ui_amount(*amount, decimals), + amount: token_amount_to_ui_amount(amount, decimals), }) .collect(); - Ok(new_response(&bank, largest_token_balances)) + + Ok(new_response(&bank, token_balances)) } pub fn get_token_accounts_by_owner( From 8be9930c980129e449301e6bfaa8cd768c20a023 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Tue, 27 Feb 2024 17:58:26 -0300 Subject: [PATCH 265/401] Bump platform tools version (#35330) Co-authored-by: Dmitri Makarov --- programs/sbf/rust/sanity/src/lib.rs | 23 +++++++++++++++++++++++ programs/sbf/tests/programs.rs | 2 +- sdk/bpf/scripts/install.sh | 6 +++--- sdk/cargo-build-sbf/src/main.rs | 2 +- sdk/program/Cargo.toml | 2 +- sdk/sbf/c/sbf.mk | 2 +- sdk/sbf/scripts/install.sh | 4 ++-- 7 files changed, 32 insertions(+), 9 deletions(-) diff --git a/programs/sbf/rust/sanity/src/lib.rs b/programs/sbf/rust/sanity/src/lib.rs index 0c820eaa95eaea..cf7a72b4efddfa 100644 --- a/programs/sbf/rust/sanity/src/lib.rs +++ b/programs/sbf/rust/sanity/src/lib.rs @@ -16,6 +16,21 @@ struct SStruct { z: u64, } +#[allow(dead_code)] +#[repr(C)] +enum TestEnum { + VariantOne, + VariantTwo, +} + +#[allow(dead_code)] +#[allow(clippy::enum_clike_unportable_variant)] +#[repr(C)] +enum Test64BitEnum { + VariantOne, + VariantTwo = 0xFFFFFFFFF, +} + #[inline(never)] fn return_sstruct() -> SStruct { SStruct { x: 1, y: 2, z: 3 } @@ -72,6 +87,14 @@ pub fn process_instruction( assert!(1.9986f64 < num && num < 2.0f64); } + { + // #[repr(C) enums must not change size between compiler version + // 32-bit for #[repr(C)] enum + assert_eq!(std::mem::size_of::(), 4); + // 64-bit for enum with a declared value + assert_eq!(std::mem::size_of::(), 8); + } + check_type_assumptions(); sol_log_compute_units(); diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 1635850bb2a9c5..b29d78422dca51 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -1356,7 +1356,7 @@ fn assert_instruction_count() { #[cfg(feature = "sbf_c")] { programs.extend_from_slice(&[ - ("alloc", 11502), + ("alloc", 14575), ("sbf_to_sbf", 313), ("multiple_static", 208), ("noop", 5), diff --git a/sdk/bpf/scripts/install.sh b/sdk/bpf/scripts/install.sh index 55d2cbc19f4dc6..b7b59362c07a38 100755 --- a/sdk/bpf/scripts/install.sh +++ b/sdk/bpf/scripts/install.sh @@ -109,16 +109,16 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install Rust-BPF -version=v1.39 +version=v1.41 if [[ ! -e bpf-tools-$version.md || ! 
-e bpf-tools ]]; then ( set -e rm -rf bpf-tools* rm -rf xargo job="download \ - https://github.com/solana-labs/bpf-tools/releases/download \ + https://github.com/anza-xyz/platform-tools/releases/download \ $version \ - solana-bpf-tools-${machine}-${arch}.tar.bz2 \ + platform-tools-${machine}-${arch}.tar.bz2 \ bpf-tools" get $version bpf-tools "$job" ) diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 79a4a1378ed6ee..0da59ff230b385 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -913,7 +913,7 @@ fn main() { // The following line is scanned by CI configuration script to // separate cargo caches according to the version of platform-tools. - let platform_tools_version = String::from("v1.39"); + let platform_tools_version = String::from("v1.41"); let rust_base_version = get_base_rust_version(platform_tools_version.as_str()); let version = format!( "{}\nplatform-tools {}\n{}", diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 7bc414472f525f..04d93bcb4729c6 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -9,7 +9,7 @@ repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -rust-version = "1.72.0" # solana platform-tools rust version +rust-version = "1.75.0" # solana platform-tools rust version [dependencies] bincode = { workspace = true } diff --git a/sdk/sbf/c/sbf.mk b/sdk/sbf/c/sbf.mk index 6699daf120e639..84c152e2dd26e0 100644 --- a/sdk/sbf/c/sbf.mk +++ b/sdk/sbf/c/sbf.mk @@ -15,7 +15,7 @@ OUT_DIR ?= ./out OS := $(shell uname) LLVM_DIR = $(LOCAL_PATH)../dependencies/platform-tools/llvm -LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/15.0.4/include +LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/17/include COMPILER_RT_DIR = $(LOCAL_PATH)../dependencies/platform-tools/rust/lib/rustlib/sbf-solana-solana/lib STD_INC_DIRS := $(LLVM_DIR)/include STD_LIB_DIRS := $(LLVM_DIR)/lib diff --git a/sdk/sbf/scripts/install.sh b/sdk/sbf/scripts/install.sh index 08f5f79e1b3417..12343a413ed7b7 100755 --- a/sdk/sbf/scripts/install.sh +++ b/sdk/sbf/scripts/install.sh @@ -109,13 +109,13 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install platform tools -version=v1.39 +version=v1.41 if [[ ! -e platform-tools-$version.md || ! 
-e platform-tools ]]; then ( set -e rm -rf platform-tools* job="download \ - https://github.com/solana-labs/platform-tools/releases/download \ + https://github.com/anza-xyz/platform-tools/releases/download \ $version \ platform-tools-${machine}-${arch}.tar.bz2 \ platform-tools" From 94698b8dd0c2d48fe44da17fc24d6ec1dd05c322 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 27 Feb 2024 14:59:07 -0600 Subject: [PATCH 266/401] Name PubSubService tokio threads (#35331) Also add logs for service starting/stopping --- rpc/src/rpc_pubsub_service.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 76227b9708e8e6..3e32503691d78e 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -91,7 +91,9 @@ impl PubSubService { let thread_hdl = Builder::new() .name("solRpcPubSub".to_string()) .spawn(move || { + info!("PubSubService has started"); let runtime = tokio::runtime::Builder::new_multi_thread() + .thread_name("solRpcPubSubRt") .worker_threads(pubsub_config.worker_threads) .enable_all() .build() @@ -102,8 +104,9 @@ impl PubSubService { subscription_control, tripwire, )) { - error!("pubsub service failed: {}", err); + error!("PubSubService has stopped due to error: {err}"); }; + info!("PubSubService has stopped"); }) .expect("thread spawn failed"); From da088681ba8720cf56f8e3d1352b106b3b611135 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 27 Feb 2024 18:08:25 -0500 Subject: [PATCH 267/401] Adds safer alternatives to get_internal() (#35325) --- accounts-db/src/accounts_index.rs | 17 ++++++----- accounts-db/src/in_mem_accounts_index.rs | 39 ++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 11 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 5ac5d9a7a14908..7a5c75669a9a1e 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1130,9 +1130,9 @@ impl + Into> AccountsIndex { pub fn get_and_then( &self, pubkey: &Pubkey, - callback: impl FnOnce(Option<&AccountMapEntry>) -> (bool, R), + callback: impl FnOnce(Option<&AccountMapEntryInner>) -> (bool, R), ) -> R { - self.get_bin(pubkey).get_internal(pubkey, callback) + self.get_bin(pubkey).get_internal_inner(pubkey, callback) } /// Gets the index's entry for `pubkey`, with `ancestors` and `max_root`, @@ -1159,10 +1159,8 @@ impl + Into> AccountsIndex { /// /// Prefer `get_and_then()` whenever possible. pub fn get_cloned(&self, pubkey: &Pubkey) -> Option> { - // We *must* add the index entry to the in-mem cache! - // If the index entry is only on-disk, returning a clone would allow the entry - // to be modified, but those modifications would be lost on drop! - self.get_and_then(pubkey, |entry| (true, entry.cloned())) + self.get_bin(pubkey) + .get_internal_cloned(pubkey, |entry| entry) } /// Is `pubkey` in the index? @@ -1443,6 +1441,9 @@ impl + Into> AccountsIndex { lock = Some(&self.account_maps[bin]); last_bin = bin; } + // SAFETY: The caller must ensure that if `provide_entry_in_callback` is true, and + // if it's possible for `callback` to clone the entry Arc, then it must also add + // the entry to the in-mem cache if the entry is made dirty. 
lock.as_ref().unwrap().get_internal(pubkey, |entry| { let mut cache = false; match entry { @@ -1830,7 +1831,7 @@ impl + Into> AccountsIndex { pub fn ref_count_from_storage(&self, pubkey: &Pubkey) -> RefCount { let map = self.get_bin(pubkey); - map.get_internal(pubkey, |entry| { + map.get_internal_inner(pubkey, |entry| { ( false, entry.map(|entry| entry.ref_count()).unwrap_or_default(), @@ -4073,7 +4074,7 @@ pub mod tests { let map = index.get_bin(&key); for expected in [false, true] { - assert!(map.get_internal(&key, |entry| { + assert!(map.get_internal_inner(&key, |entry| { // check refcount BEFORE the unref assert_eq!(u64::from(!expected), entry.unwrap().ref_count()); // first time, ref count was at 1, we can unref once. Unref should return false. diff --git a/accounts-db/src/in_mem_accounts_index.rs b/accounts-db/src/in_mem_accounts_index.rs index 1e8e8a8fd73822..054fd7589df79f 100644 --- a/accounts-db/src/in_mem_accounts_index.rs +++ b/accounts-db/src/in_mem_accounts_index.rs @@ -320,7 +320,7 @@ impl + Into> InMemAccountsIndex Option> { - self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone))) + self.get_internal_cloned(pubkey, |entry| entry) } /// set age of 'entry' to the future @@ -331,7 +331,40 @@ impl + Into> InMemAccountsIndex( + pub(crate) fn get_internal_inner( + &self, + pubkey: &K, + // return true if item should be added to in_mem cache + callback: impl for<'a> FnOnce(Option<&AccountMapEntryInner>) -> (bool, RT), + ) -> RT { + // SAFETY: The entry Arc is not passed to `callback`, so + // it cannot live beyond this function call. + self.get_internal(pubkey, |entry| callback(entry.map(Arc::as_ref))) + } + + /// lookup 'pubkey' in the index (in_mem or disk). + /// call 'callback' whether found or not + pub(crate) fn get_internal_cloned( + &self, + pubkey: &K, + callback: impl for<'a> FnOnce(Option>) -> RT, + ) -> RT { + // SAFETY: Since we're passing the entry Arc clone to `callback`, we must + // also add the entry to the in-mem cache. + self.get_internal(pubkey, |entry| (true, callback(entry.map(Arc::clone)))) + } + + /// lookup 'pubkey' in index (in_mem or disk). + /// call 'callback' whether found or not + /// + /// # Safety + /// + /// If the item is on-disk (and not in-mem), add if the item is/could be made dirty + /// *after* `callback` finishes (e.g. the entry Arc is cloned and saved by the caller), + /// then the disk entry *must* also be added to the in-mem cache. + /// + /// Prefer `get_internal_inner()` or `get_internal_cloned()` for safe alternatives. 
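    //
    // A usage sketch of the two safe wrappers (hypothetical `index` and
    // `pubkey` bindings; the method signatures are the ones introduced in this
    // patch):
    //
    //     // Borrow-only read: the `&AccountMapEntryInner` cannot escape the
    //     // callback, so the entry need not be promoted to the in-mem cache
    //     // (hence the `false` in the returned tuple).
    //     let ref_count = index.get_internal_inner(&pubkey, |entry| {
    //         (false, entry.map(|entry| entry.ref_count()).unwrap_or_default())
    //     });
    //
    //     // Cloning read: the wrapper unconditionally caches the entry, so
    //     // modifications made through the cloned Arc cannot be lost when an
    //     // on-disk-only entry is dropped.
    //     let cloned_entry = index.get_internal_cloned(&pubkey, |entry| entry);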
+ pub(crate) fn get_internal( &self, pubkey: &K, // return true if item should be added to in_mem cache @@ -446,7 +479,7 @@ impl + Into> InMemAccountsIndex FnOnce(&mut RwLockWriteGuard<'a, SlotList>) -> RT, ) -> Option { - self.get_internal(pubkey, |entry| { + self.get_internal_inner(pubkey, |entry| { ( true, entry.map(|entry| { From a4e1a9ac980f8a9ab0c9b30350349e1b3c7de48c Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 27 Feb 2024 20:01:29 -0500 Subject: [PATCH 268/401] Adds AccountsIndex::get_account_info_with_and_then() (#35336) --- accounts-db/src/accounts_index.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 7a5c75669a9a1e..b021881d4c40d0 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1143,18 +1143,30 @@ impl + Into> AccountsIndex { ancestors: Option<&Ancestors>, max_root: Option, should_add_to_in_mem_cache: bool, - mut callback: impl FnMut((Slot, T)) -> R, + callback: impl FnOnce((Slot, T)) -> R, ) -> Option { self.get_and_then(pubkey, |entry| { let callback_result = entry.and_then(|entry| { - let slot_list = entry.slot_list.read().unwrap(); - self.latest_slot(ancestors, &slot_list, max_root) - .map(|found_index| callback(slot_list[found_index])) + self.get_account_info_with_and_then(entry, ancestors, max_root, callback) }); (should_add_to_in_mem_cache, callback_result) }) } + /// Gets the account info (and slot) in `entry`, with `ancestors` and `max_root`, + /// and applies `callback` to it + fn get_account_info_with_and_then( + &self, + entry: &AccountMapEntryInner, + ancestors: Option<&Ancestors>, + max_root: Option, + callback: impl FnOnce((Slot, T)) -> R, + ) -> Option { + let slot_list = entry.slot_list.read().unwrap(); + self.latest_slot(ancestors, &slot_list, max_root) + .map(|found_index| callback(slot_list[found_index])) + } + /// Gets the index's entry for `pubkey` and clones it /// /// Prefer `get_and_then()` whenever possible. From 6ee3bb973c60c7d45be06a51a89d08479bf0fad8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Feb 2024 11:36:14 +0800 Subject: [PATCH 269/401] build(deps): bump tempfile from 3.10.0 to 3.10.1 (#35328) * build(deps): bump tempfile from 3.10.0 to 3.10.1 Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.10.0 to 3.10.1. - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.10.0...v3.10.1) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index db44628b30dec7..45c53adc642127 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2511,7 +2511,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -8199,9 +8199,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", "fastrand", diff --git a/Cargo.toml b/Cargo.toml index 89d163684a22bc..c3de71a23f0ae7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -410,7 +410,7 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" -tempfile = "3.10.0" +tempfile = "3.10.1" test-case = "3.3.1" thiserror = "1.0.57" tiny-bip39 = "0.8.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dd903dc12765bb..c2a51067252331 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7111,9 +7111,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", "fastrand", From e6f8cdce012e57684c7992b1a04c2b47d401a5ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 28 Feb 2024 09:20:11 +0100 Subject: [PATCH 270/401] Refactor - `LoadedPrograms::assign_program()` (#35233) * Forbids all program replacements except for reloads and builtins. * Adds test_assign_program_failure() and test_assign_program_success(). * Explicitly disallows LoadedProgramType::DelayVisibility to be inserted in the global cache. --- Cargo.lock | 1 + program-runtime/Cargo.toml | 1 + program-runtime/src/loaded_programs.rs | 204 ++++++++++++++++--------- 3 files changed, 133 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45c53adc642127..cd4c17c38fca18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6652,6 +6652,7 @@ dependencies = [ "solana-metrics", "solana-sdk", "solana_rbpf", + "test-case", "thiserror", ] diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index ed4b2a60aa3f0a..afec7352e1fb70 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -35,6 +35,7 @@ assert_matches = { workspace = true } libsecp256k1 = { workspace = true } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } +test-case = { workspace = true } [lib] crate-type = ["lib"] diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 2739d44c36f4cd..1c29adc8c6c246 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -711,6 +711,10 @@ impl LoadedPrograms { /// Insert a single entry. 
It's typically called during transaction loading, /// when the cache doesn't contain the entry corresponding to program `key`. pub fn assign_program(&mut self, key: Pubkey, entry: Arc) -> bool { + debug_assert!(!matches!( + &entry.program, + LoadedProgramType::DelayVisibility + )); let slot_versions = &mut self.entries.entry(key).or_default().slot_versions; match slot_versions.binary_search_by(|at| { at.effective_slot @@ -719,33 +723,39 @@ impl LoadedPrograms { }) { Ok(index) => { let existing = slot_versions.get_mut(index).unwrap(); - if std::mem::discriminant(&existing.program) - != std::mem::discriminant(&entry.program) - { - // Copy over the usage counter to the new entry - entry.tx_usage_counter.fetch_add( - existing.tx_usage_counter.load(Ordering::Relaxed), - Ordering::Relaxed, - ); - entry.ix_usage_counter.fetch_add( - existing.ix_usage_counter.load(Ordering::Relaxed), - Ordering::Relaxed, - ); - *existing = entry.clone(); - self.stats.reloads.fetch_add(1, Ordering::Relaxed); - false - } else { - // Something is wrong, I can feel it ... - self.stats.replacements.fetch_add(1, Ordering::Relaxed); - true + match (&existing.program, &entry.program) { + (LoadedProgramType::Builtin(_), LoadedProgramType::Builtin(_)) + | (LoadedProgramType::Unloaded(_), LoadedProgramType::LegacyV0(_)) + | (LoadedProgramType::Unloaded(_), LoadedProgramType::LegacyV1(_)) + | (LoadedProgramType::Unloaded(_), LoadedProgramType::Typed(_)) => {} + #[cfg(test)] + (LoadedProgramType::Unloaded(_), LoadedProgramType::TestLoaded(_)) => {} + _ => { + // Something is wrong, I can feel it ... + error!("LoadedPrograms::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry); + debug_assert!(false, "Unexpected replacement of an entry"); + self.stats.replacements.fetch_add(1, Ordering::Relaxed); + return true; + } } + // Copy over the usage counter to the new entry + entry.tx_usage_counter.fetch_add( + existing.tx_usage_counter.load(Ordering::Relaxed), + Ordering::Relaxed, + ); + entry.ix_usage_counter.fetch_add( + existing.ix_usage_counter.load(Ordering::Relaxed), + Ordering::Relaxed, + ); + *existing = Arc::clone(&entry); + self.stats.reloads.fetch_add(1, Ordering::Relaxed); } Err(index) => { self.stats.insertions.fetch_add(1, Ordering::Relaxed); - slot_versions.insert(index, entry.clone()); - false + slot_versions.insert(index, Arc::clone(&entry)); } } + false } pub fn prune_by_deployment_slot(&mut self, slot: Slot) { @@ -1151,6 +1161,7 @@ mod tests { Arc, RwLock, }, }, + test_case::{test_case, test_matrix}, }; static MOCK_ENVIRONMENT: std::sync::OnceLock = @@ -1340,15 +1351,6 @@ mod tests { programs.push((program2, *deployment_slot, usage_counter)); }); - for slot in 21..31 { - set_tombstone( - &mut cache, - program2, - slot, - LoadedProgramType::DelayVisibility, - ); - } - for slot in 31..41 { insert_unloaded_program(&mut cache, program2, slot); } @@ -1400,12 +1402,12 @@ mod tests { // Test that the cache is constructed with the expected number of entries. 
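        // The expected tombstone count falls from 30 to 20 in this patch
        // because the setup loop that planted LoadedProgramType::DelayVisibility
        // tombstones for slots 21..31 was deleted: assign_program() now
        // debug_asserts that DelayVisibility entries are never inserted into
        // the global cache.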
assert_eq!(num_loaded, 8); assert_eq!(num_unloaded, 30); - assert_eq!(num_tombstones, 30); + assert_eq!(num_tombstones, 20); // Evicting to 2% should update cache with // * 5 active entries // * 33 unloaded entries (3 active programs will get unloaded) - // * 30 tombstones (tombstones are not evicted) + // * 20 tombstones (tombstones are not evicted) cache.evict_using_2s_random_selection(Percentage::from(2), 21); let num_loaded = num_matching_entries(&cache, |program_type| { @@ -1426,7 +1428,7 @@ mod tests { // Test that expected number of loaded entries get evicted/unloaded. assert_eq!(num_loaded, 5); assert_eq!(num_unloaded, 33); - assert_eq!(num_tombstones, 30); + assert_eq!(num_tombstones, 20); } #[test] @@ -1487,15 +1489,6 @@ mod tests { programs.push((program2, *deployment_slot, usage_counter)); }); - for slot in 21..31 { - set_tombstone( - &mut cache, - program2, - slot, - LoadedProgramType::DelayVisibility, - ); - } - for slot in 31..41 { insert_unloaded_program(&mut cache, program2, slot); } @@ -1546,12 +1539,12 @@ mod tests { assert_eq!(num_loaded, 8); assert_eq!(num_unloaded, 30); - assert_eq!(num_tombstones, 30); + assert_eq!(num_tombstones, 20); // Evicting to 2% should update cache with // * 5 active entries // * 33 unloaded entries (3 active programs will get unloaded) - // * 30 tombstones (tombstones are not evicted) + // * 20 tombstones (tombstones are not evicted) cache.sort_and_unload(Percentage::from(2)); // Check that every program is still in the cache. programs.iter().for_each(|entry| { @@ -1591,7 +1584,7 @@ mod tests { assert_eq!(num_loaded, 5); assert_eq!(num_unloaded, 33); - assert_eq!(num_tombstones, 30); + assert_eq!(num_tombstones, 20); } #[test] @@ -1673,36 +1666,102 @@ mod tests { } } - #[test] - fn test_assign_program_tombstones() { + #[test_matrix( + ( + LoadedProgramType::FailedVerification(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::Closed, + LoadedProgramType::TestLoaded(Arc::new(BuiltinProgram::new_mock())), + ), + ( + LoadedProgramType::FailedVerification(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::Closed, + LoadedProgramType::Unloaded(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::TestLoaded(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::Builtin(BuiltinProgram::new_mock()), + ) + )] + #[test_matrix( + (LoadedProgramType::Unloaded(Arc::new(BuiltinProgram::new_mock())),), + ( + LoadedProgramType::FailedVerification(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::Closed, + LoadedProgramType::Unloaded(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::Builtin(BuiltinProgram::new_mock()), + ) + )] + #[test_matrix( + (LoadedProgramType::Builtin(BuiltinProgram::new_mock()),), + ( + LoadedProgramType::FailedVerification(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::Closed, + LoadedProgramType::Unloaded(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::TestLoaded(Arc::new(BuiltinProgram::new_mock())), + ) + )] + #[should_panic(expected = "Unexpected replacement of an entry")] + fn test_assign_program_failure(old: LoadedProgramType, new: LoadedProgramType) { let mut cache = new_mock_cache::(); - let program1 = Pubkey::new_unique(); - let env = cache.environments.program_runtime_v1.clone(); - - set_tombstone( - &mut cache, - program1, - 10, - LoadedProgramType::FailedVerification(env.clone()), - ); - assert_eq!(cache.entries.get(&program1).unwrap().slot_versions.len(), 1); - set_tombstone(&mut cache, program1, 10, LoadedProgramType::Closed); - 
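        // The tombstone test being deleted here relied on replacing cache
        // entries in place, which assign_program() no longer tolerates. The
        // test_matrix/test_case coverage that replaces it pins down the only
        // transitions left legal -- Builtin -> Builtin and Unloaded ->
        // LegacyV0 / LegacyV1 / Typed (plus Unloaded -> TestLoaded under
        // cfg(test)); anything else trips the "Unexpected replacement of an
        // entry" debug_assert.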
assert_eq!(cache.entries.get(&program1).unwrap().slot_versions.len(), 1); - set_tombstone( - &mut cache, - program1, - 10, - LoadedProgramType::FailedVerification(env.clone()), + let program_id = Pubkey::new_unique(); + assert!(!cache.assign_program( + program_id, + Arc::new(LoadedProgram { + program: old, + account_size: 0, + deployment_slot: 10, + effective_slot: 11, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), + }), + )); + cache.assign_program( + program_id, + Arc::new(LoadedProgram { + program: new, + account_size: 0, + deployment_slot: 10, + effective_slot: 11, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), + }), ); - assert_eq!(cache.entries.get(&program1).unwrap().slot_versions.len(), 1); + } - // Fail on exact replacement - assert!(cache.assign_program( - program1, - Arc::new(LoadedProgram::new_tombstone( - 10, - LoadedProgramType::FailedVerification(env) - )) + #[test_case( + LoadedProgramType::Unloaded(Arc::new(BuiltinProgram::new_mock())), + LoadedProgramType::TestLoaded(Arc::new(BuiltinProgram::new_mock())) + )] + #[test_case( + LoadedProgramType::Builtin(BuiltinProgram::new_mock()), + LoadedProgramType::Builtin(BuiltinProgram::new_mock()) + )] + fn test_assign_program_success(old: LoadedProgramType, new: LoadedProgramType) { + let mut cache = new_mock_cache::(); + let program_id = Pubkey::new_unique(); + assert!(!cache.assign_program( + program_id, + Arc::new(LoadedProgram { + program: old, + account_size: 0, + deployment_slot: 10, + effective_slot: 11, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), + }), + )); + assert!(!cache.assign_program( + program_id, + Arc::new(LoadedProgram { + program: new, + account_size: 0, + deployment_slot: 10, + effective_slot: 11, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), + }), )); } @@ -2383,7 +2442,6 @@ mod tests { for loaded_program_type in [ LoadedProgramType::FailedVerification(cache.environments.program_runtime_v1.clone()), LoadedProgramType::Closed, - LoadedProgramType::DelayVisibility, // Never inserted in the global cache LoadedProgramType::Unloaded(cache.environments.program_runtime_v1.clone()), LoadedProgramType::Builtin(BuiltinProgram::new_mock()), ] { From 089cead024481f7a90c1fa868057ffc1db2225c1 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:06:55 -0600 Subject: [PATCH 271/401] add bench for precompiled programs (#35310) * add bench for ed25519 instruction * add bench for secp256k1 instruction * Apply suggestions from code review Co-authored-by: Andrew Fitzgerald * prepare unique txs for benching * use iter::Cycle for endless loop --------- Co-authored-by: Andrew Fitzgerald --- sdk/benches/ed25519_instructions.rs | 94 +++++++++++++++++++++++++++ sdk/benches/secp256k1_instructions.rs | 94 +++++++++++++++++++++++++++ 2 files changed, 188 insertions(+) create mode 100644 sdk/benches/ed25519_instructions.rs create mode 100644 sdk/benches/secp256k1_instructions.rs diff --git a/sdk/benches/ed25519_instructions.rs b/sdk/benches/ed25519_instructions.rs new file mode 100644 index 00000000000000..4dcbbc0e035353 --- /dev/null +++ b/sdk/benches/ed25519_instructions.rs @@ -0,0 +1,94 @@ +#![feature(test)] + +extern crate test; +use { + 
rand0_7::{thread_rng, Rng}, + solana_sdk::{ + ed25519_instruction::new_ed25519_instruction, + feature_set::FeatureSet, + hash::Hash, + signature::{Keypair, Signer}, + transaction::Transaction, + }, + test::Bencher, +}; + +// 5K transactions should be enough for benching loop +const TX_COUNT: u16 = 5120; + +// prepare a bunch of unique txs +fn create_test_transactions(message_length: u16) -> Vec { + (0..TX_COUNT) + .map(|_| { + let mut rng = thread_rng(); + let privkey = ed25519_dalek::Keypair::generate(&mut rng); + let message: Vec = (0..message_length).map(|_| rng.gen_range(0, 255)).collect(); + let instruction = new_ed25519_instruction(&privkey, &message); + let mint_keypair = Keypair::new(); + + Transaction::new_signed_with_payer( + &[instruction.clone()], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + ) + }) + .collect() +} + +#[bench] +fn bench_ed25519_len_032(b: &mut Bencher) { + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(32); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} + +#[bench] +fn bench_ed25519_len_128(b: &mut Bencher) { + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(128); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} + +#[bench] +fn bench_ed25519_len_32k(b: &mut Bencher) { + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(32 * 1024); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} + +#[bench] +fn bench_ed25519_len_max(b: &mut Bencher) { + let required_extra_space = 113_u16; // len for pubkey, sig, and offsets + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(u16::MAX - required_extra_space); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} diff --git a/sdk/benches/secp256k1_instructions.rs b/sdk/benches/secp256k1_instructions.rs new file mode 100644 index 00000000000000..339c50dc639aef --- /dev/null +++ b/sdk/benches/secp256k1_instructions.rs @@ -0,0 +1,94 @@ +#![feature(test)] + +extern crate test; +use { + rand0_7::{thread_rng, Rng}, + solana_sdk::{ + feature_set::FeatureSet, + hash::Hash, + secp256k1_instruction::new_secp256k1_instruction, + signature::{Keypair, Signer}, + transaction::Transaction, + }, + test::Bencher, +}; + +// 5K transactions should be enough for benching loop +const TX_COUNT: u16 = 5120; + +// prepare a bunch of unique txs +fn create_test_transactions(message_length: u16) -> Vec { + (0..TX_COUNT) + .map(|_| { + let mut rng = thread_rng(); + let secp_privkey = libsecp256k1::SecretKey::random(&mut thread_rng()); + let message: Vec = (0..message_length).map(|_| rng.gen_range(0, 255)).collect(); + let secp_instruction = new_secp256k1_instruction(&secp_privkey, &message); + let mint_keypair = Keypair::new(); + + Transaction::new_signed_with_payer( + &[secp_instruction.clone()], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + ) + }) + .collect() +} + +#[bench] +fn bench_secp256k1_len_032(b: &mut Bencher) { + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(32); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + 
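        // `txs` is built once up front and `tx_iter` cycles over it endlessly,
        // so keypair generation and signing stay outside the timed closure;
        // each iteration pays only for the lookup plus the precompile
        // verification being benchmarked.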
.verify_precompiles(&feature_set) + .unwrap(); + }); +} + +#[bench] +fn bench_secp256k1_len_256(b: &mut Bencher) { + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(256); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} + +#[bench] +fn bench_secp256k1_len_32k(b: &mut Bencher) { + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(32 * 1024); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} + +#[bench] +fn bench_secp256k1_len_max(b: &mut Bencher) { + let required_extra_space = 113_u16; // len for pubkey, sig, and offsets + let feature_set = FeatureSet::all_enabled(); + let txs = create_test_transactions(u16::MAX - required_extra_space); + let mut tx_iter = txs.iter().cycle(); + b.iter(|| { + tx_iter + .next() + .unwrap() + .verify_precompiles(&feature_set) + .unwrap(); + }); +} From 695fe1e7c8c9244db0dc9fcf82694458ed119cf4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Feb 2024 23:38:18 +0800 Subject: [PATCH 272/401] build(deps): bump ahash from 0.8.9 to 0.8.10 (#35347) * build(deps): bump ahash from 0.8.9 to 0.8.10 Bumps [ahash](https://github.com/tkaitchuck/ahash) from 0.8.9 to 0.8.10. - [Release notes](https://github.com/tkaitchuck/ahash/releases) - [Commits](https://github.com/tkaitchuck/ahash/compare/v0.8.9...v0.8.10) --- updated-dependencies: - dependency-name: ahash dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd4c17c38fca18..e936c66931cd6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" +checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -2344,7 +2344,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.10", ] [[package]] @@ -6503,7 +6503,7 @@ dependencies = [ name = "solana-perf" version = "1.19.0" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.10", "assert_matches", "bincode", "bv", diff --git a/Cargo.toml b/Cargo.toml index c3de71a23f0ae7..3b6a20013220e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,7 +138,7 @@ edition = "2021" Inflector = "0.11.4" aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" -ahash = "0.8.9" +ahash = "0.8.10" anyhow = "1.0.80" arbitrary = "1.3.2" ark-bn254 = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c2a51067252331..b41a66a56cdc18 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.9" +version = "0.8.10" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" +checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -1976,7 +1976,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.10", ] [[package]] @@ -5302,7 +5302,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" name = "solana-perf" version = "1.19.0" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.10", "bincode", "bv", "caps", From f340c1c181f99b80ddb9c9371f9e355a0efc218d Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 11:43:33 -0500 Subject: [PATCH 273/401] Replaces ReadAccountMapEntry in do_scan_secondary_index() (#35219) --- accounts-db/src/accounts_index.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index b021881d4c40d0..2f3ba4b581daa3 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1110,17 +1110,17 @@ impl + Into> AccountsIndex { F: FnMut(&Pubkey, (&T, Slot)), { for pubkey in index.get(index_key) { - // Maybe these reads from the AccountsIndex can be batched every time it - // grabs the read lock as well... - if let AccountIndexGetResult::Found(list_r, index) = - self.get(&pubkey, Some(ancestors), max_root) - { - let entry = &list_r.slot_list()[index]; - func(&pubkey, (&entry.1, entry.0)); - } if config.is_aborted() { break; } + if let Some(entry) = self.get_cloned(&pubkey) { + self.get_account_info_with_and_then( + &entry, + Some(ancestors), + max_root, + |(slot, account_info)| func(&pubkey, (&account_info, slot)), + ); + }; } } From 8f3e960640aa35737dc8570042b500ef6e953e24 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Wed, 28 Feb 2024 14:17:45 -0300 Subject: [PATCH 274/401] Add tests for private functions in SVM `account_loader.rs` (#35334) --- sdk/src/transaction/sanitized.rs | 14 + svm/src/account_loader.rs | 676 ++++++++++++++++++++++++++++++- 2 files changed, 685 insertions(+), 5 deletions(-) diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 4189f1b64b86e2..b7383b4a0a454c 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -271,6 +271,20 @@ impl SanitizedTransaction { Ok(()) } } + + #[cfg(feature = "dev-context-only-utils")] + pub fn new_for_tests( + message: SanitizedMessage, + signatures: Vec, + is_simple_vote_tx: bool, + ) -> SanitizedTransaction { + SanitizedTransaction { + message, + message_hash: Hash::new_unique(), + signatures, + is_simple_vote_tx, + } + } } #[cfg(test)] diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 334ad7679561ee..197d46250bcfcb 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -276,6 +276,7 @@ fn load_transaction_accounts( .map(|instruction| { let mut account_indices = Vec::new(); let mut program_index = instruction.program_id_index as usize; + // This command may never return error, because the transaction is sanitized let (program_id, program_account) = accounts .get(program_index) .ok_or(TransactionError::ProgramAccountNotFound)?; @@ -452,7 +453,7 @@ pub fn validate_fee_payer( ) } -pub fn 
construct_instructions_account(message: &SanitizedMessage) -> AccountSharedData { +fn construct_instructions_account(message: &SanitizedMessage) -> AccountSharedData { AccountSharedData::from(Account { data: construct_instructions_data(&message.decompile_instructions()), owner: sysvar::id(), @@ -464,31 +465,38 @@ pub fn construct_instructions_account(message: &SanitizedMessage) -> AccountShar mod tests { use { super::*, + crate::transaction_processor::TransactionProcessingCallback, nonce::state::Versions as NonceVersions, solana_program_runtime::{ compute_budget_processor, + loaded_programs::LoadedProgram, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ - account::{AccountSharedData, WritableAccount}, + account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable, compute_budget::ComputeBudgetInstruction, epoch_schedule::EpochSchedule, feature_set::FeatureSet, hash::Hash, instruction::CompiledInstruction, - message::{Message, SanitizedMessage}, + message::{ + v0::{LoadedAddresses, LoadedMessage}, + LegacyMessage, Message, MessageHeader, SanitizedMessage, + }, nonce, + pubkey::Pubkey, rent::Rent, rent_collector::RentCollector, - signature::{Keypair, Signer}, + signature::{Keypair, Signature, Signer}, system_program, sysvar, transaction::{Result, Transaction, TransactionError}, transaction_context::TransactionAccount, }, - std::{convert::TryFrom, sync::Arc}, + std::{borrow::Cow, convert::TryFrom, sync::Arc}, }; + #[derive(Default)] struct TestCallbacks { accounts_map: HashMap, rent_collector: RentCollector, @@ -1380,4 +1388,662 @@ mod tests { &rent_collector, ); } + + #[test] + fn test_construct_instructions_account() { + let loaded_message = LoadedMessage { + message: Cow::Owned(solana_sdk::message::v0::Message::default()), + loaded_addresses: Cow::Owned(LoadedAddresses::default()), + is_writable_account_cache: vec![false], + }; + let message = SanitizedMessage::V0(loaded_message); + let shared_data = construct_instructions_account(&message); + let expected = AccountSharedData::from(Account { + data: construct_instructions_data(&message.decompile_instructions()), + owner: sysvar::id(), + ..Account::default() + }); + assert_eq!(shared_data, expected); + } + + #[test] + fn test_account_shared_data_from_program() { + let key = Keypair::new().pubkey(); + let other_key = Keypair::new().pubkey(); + + let mut accounts: HashMap = HashMap::new(); + + let result = account_shared_data_from_program(&key, &accounts); + assert_eq!(result.err(), Some(TransactionError::AccountNotFound)); + + accounts.insert(key, (&other_key, 32)); + + let result = account_shared_data_from_program(&key, &accounts); + let mut expected = AccountSharedData::default(); + expected.set_owner(other_key); + expected.set_executable(true); + expected.set_data_from_slice(create_executable_meta(&other_key)); + + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_load_transaction_accounts_failure() { + let message = Message::default(); + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mock_bank = TestCallbacks::default(); + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = + SanitizedTransaction::new_for_tests(sanitized_message, vec![], false); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, 
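            // (Message::default() carries no signer and no fee payer, so this
            // call must fail with MissingSignatureForFee before any account is
            // loaded.)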
+ ); + + assert_eq!(result.err(), Some(TransactionError::MissingSignatureForFee)); + } + + #[test] + fn test_load_transaction_accounts_fail_to_validate_fee_payer() { + let message = Message { + account_keys: vec![Pubkey::new_from_array([0; 32])], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mock_bank = TestCallbacks::default(); + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + + assert_eq!(result.err(), Some(TransactionError::AccountNotFound)); + } + + #[test] + fn test_load_transaction_accounts_native_loader() { + let key1 = Keypair::new(); + let message = Message { + account_keys: vec![key1.pubkey(), native_loader::id()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + mock_bank + .accounts_map + .insert(native_loader::id(), AccountSharedData::default()); + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + mock_bank + .accounts_map + .get_mut(&key1.pubkey()) + .unwrap() + .set_lamports(200 - 32); + + assert_eq!( + result.unwrap(), + LoadedTransaction { + accounts: vec![ + ( + key1.pubkey(), + mock_bank.accounts_map[&key1.pubkey()].clone() + ), + ( + native_loader::id(), + mock_bank.accounts_map[&native_loader::id()].clone() + ) + ], + program_indices: vec![vec![]], + rent: 0, + rent_debits: RentDebits::default() + } + ); + } + + #[test] + fn test_load_transaction_accounts_program_account_not_found_but_loaded() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + + let message = Message { + account_keys: vec![key1.pubkey(), key2.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let mut loaded_programs = LoadedProgramsForTxBatch::default(); + 
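        // key2 is planted in the per-batch program cache below but never given
        // an account in the mock bank, so account loading must still fail with
        // AccountNotFound.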
loaded_programs.replenish(key2.pubkey(), Arc::new(LoadedProgram::default())); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + + assert_eq!(result.err(), Some(TransactionError::AccountNotFound)); + } + + #[test] + fn test_load_transaction_accounts_program_account_no_data() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + + let message = Message { + account_keys: vec![key1.pubkey(), key2.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0, 1], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + } + + #[test] + fn test_load_transaction_accounts_invalid_program_for_execution() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + + let message = Message { + account_keys: vec![key1.pubkey(), key2.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![0, 1], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + + assert_eq!( + result.err(), + Some(TransactionError::InvalidProgramForExecution) + ); + } + + #[test] + fn test_load_transaction_accounts_native_loader_owner() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_owner(native_loader::id()); + 
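        // (Together with the executable flag set just below, native-loader
        // ownership makes key1 a well-formed program account, so this is the
        // test's happy path: loading succeeds and program_indices points at
        // key1.)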
account_data.set_executable(true); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key2.pubkey(), account_data); + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + mock_bank + .accounts_map + .get_mut(&key2.pubkey()) + .unwrap() + .set_lamports(200 - 32); + + assert_eq!( + result.unwrap(), + LoadedTransaction { + accounts: vec![ + ( + key2.pubkey(), + mock_bank.accounts_map[&key2.pubkey()].clone() + ), + ( + key1.pubkey(), + mock_bank.accounts_map[&key1.pubkey()].clone() + ), + ], + program_indices: vec![vec![1]], + rent: 0, + rent_debits: RentDebits::default() + } + ); + } + + #[test] + fn test_load_transaction_accounts_program_account_not_found_after_all_checks() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key2.pubkey(), account_data); + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + mock_bank + .accounts_map + .get_mut(&key2.pubkey()) + .unwrap() + .set_lamports(200 - 32); + + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + } + + #[test] + fn test_load_transaction_accounts_program_account_invalid_program_for_execution_last_check() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(key3.pubkey()); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key2.pubkey(), account_data); + + mock_bank + 
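        // key3 is key1's declared owner, but it is inserted as a bare default
        // account -- neither executable nor owned by the native loader -- so
        // the final owner check rejects the program.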
.accounts_map + .insert(key3.pubkey(), AccountSharedData::default()); + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + mock_bank + .accounts_map + .get_mut(&key2.pubkey()) + .unwrap() + .set_lamports(200 - 32); + + assert_eq!( + result.err(), + Some(TransactionError::InvalidProgramForExecution) + ); + } + + #[test] + fn test_load_transaction_accounts_program_success_complete() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey()], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(key3.pubkey()); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key2.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(native_loader::id()); + mock_bank.accounts_map.insert(key3.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + mock_bank + .accounts_map + .get_mut(&key2.pubkey()) + .unwrap() + .set_lamports(200 - 32); + + assert_eq!( + result.unwrap(), + LoadedTransaction { + accounts: vec![ + ( + key2.pubkey(), + mock_bank.accounts_map[&key2.pubkey()].clone() + ), + ( + key1.pubkey(), + mock_bank.accounts_map[&key1.pubkey()].clone() + ), + ( + key3.pubkey(), + mock_bank.accounts_map[&key3.pubkey()].clone() + ), + ], + program_indices: vec![vec![2, 1]], + rent: 0, + rent_debits: RentDebits::default() + } + ); + } + + #[test] + fn test_load_transaction_accounts_program_builtin_saturating_add() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + let key4 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey(), key4.pubkey()], + header: MessageHeader::default(), + instructions: vec![ + CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }, + CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![], + }, + ], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + 
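        // Here the whole loader chain is made valid: key1 is executable and
        // owned by key3, and key3 (set up below) is executable and owned by
        // the native loader, so loading succeeds and program_indices records
        // both links as vec![vec![2, 1]].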
account_data.set_executable(true); + account_data.set_owner(key3.pubkey()); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key2.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(native_loader::id()); + mock_bank.accounts_map.insert(key3.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let result = load_transaction_accounts( + &mock_bank, + &sanitized_transaction, + 32, + &mut error_counter, + None, + &HashMap::new(), + &loaded_programs, + ); + mock_bank + .accounts_map + .get_mut(&key2.pubkey()) + .unwrap() + .set_lamports(200 - 32); + + let mut account_data = AccountSharedData::default(); + account_data.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + result.unwrap(), + LoadedTransaction { + accounts: vec![ + ( + key2.pubkey(), + mock_bank.accounts_map[&key2.pubkey()].clone() + ), + ( + key1.pubkey(), + mock_bank.accounts_map[&key1.pubkey()].clone() + ), + (key4.pubkey(), account_data), + ( + key3.pubkey(), + mock_bank.accounts_map[&key3.pubkey()].clone() + ), + ], + program_indices: vec![vec![3, 1], vec![3, 1]], + rent: 0, + rent_debits: RentDebits::default() + } + ); + } } From 64021989021b833a4ca3f7e91bb714542ab2330e Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 14:38:19 -0500 Subject: [PATCH 275/401] Replaces ReadAccountMapEntry in calculate_accounts_hash_from_index() (#35349) --- accounts-db/src/accounts_db.rs | 47 +++++++++++-------------------- accounts-db/src/accounts_index.rs | 2 +- 2 files changed, 18 insertions(+), 31 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 5153d858559599..3402c42f4eb2d5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -41,11 +41,10 @@ use { ZeroLamportAccounts, }, accounts_index::{ - AccountIndexGetResult, AccountMapEntry, AccountSecondaryIndexes, AccountsIndex, - AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, DiskIndexValue, - IndexKey, IndexValue, IsCached, RefCount, ScanConfig, ScanResult, SlotList, - UpsertReclaim, ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, - ACCOUNTS_INDEX_CONFIG_FOR_TESTING, + AccountMapEntry, AccountSecondaryIndexes, AccountsIndex, AccountsIndexConfig, + AccountsIndexRootsStats, AccountsIndexScanResult, DiskIndexValue, IndexKey, IndexValue, + IsCached, RefCount, ScanConfig, ScanResult, SlotList, UpsertReclaim, ZeroLamport, + ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, accounts_index_storage::Startup, accounts_partition::RentPayingAccountsByPartition, @@ -6864,28 +6863,20 @@ impl AccountsDb { let result: Vec = pubkeys .iter() .filter_map(|pubkey| { - if let AccountIndexGetResult::Found(lock, index) = - self.accounts_index.get(pubkey, config.ancestors, Some(max_slot)) - { - let (slot, account_info) = &lock.slot_list()[index]; - if !account_info.is_zero_lamport() { - // Because we're keeping the `lock' here, there is no need - // to use retry_to_get_account_accessor() - // In other words, flusher/shrinker/cleaner is blocked to - // cause any Accessor(None) situation. 
- // Anyway this race condition concern is currently a moot - // point because calculate_accounts_hash() should not - // currently race with clean/shrink because the full hash - // is synchronous with clean/shrink in - // AccountsBackgroundService + let index_entry = self.accounts_index.get_cloned(pubkey)?; + self.accounts_index.get_account_info_with_and_then( + &index_entry, + config.ancestors, + Some(max_slot), + |(slot, account_info)| { + if account_info.is_zero_lamport() { return None; } self.get_account_accessor( - *slot, + slot, pubkey, &account_info.storage_location(), ) .get_loaded_account() - .and_then( - |loaded_account| { + .and_then(|loaded_account| { let mut loaded_hash = loaded_account.loaded_hash(); let balance = loaded_account.lamports(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); @@ -6905,14 +6896,10 @@ impl AccountsDb { sum += balance as u128; Some(loaded_hash.0) - }, - ) - } else { - None - } - } else { - None - } + }) + }, + ) + .flatten() }) .collect(); let mut total = total_lamports.lock().unwrap(); diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 2f3ba4b581daa3..5221ac43449869 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1155,7 +1155,7 @@ impl + Into> AccountsIndex { /// Gets the account info (and slot) in `entry`, with `ancestors` and `max_root`, /// and applies `callback` to it - fn get_account_info_with_and_then( + pub(crate) fn get_account_info_with_and_then( &self, entry: &AccountMapEntryInner, ancestors: Option<&Ancestors>, From 7c48cbb7aa702c68608cd5928975ec99dc274feb Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 14:45:08 -0500 Subject: [PATCH 276/401] Replaces InMemAccountsIndex::get() with AccountsIndex::get_cloned() (#35352) --- accounts-db/src/accounts_db.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 3402c42f4eb2d5..167cbdbfae5db0 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9068,11 +9068,10 @@ impl AccountsDb { let mut lookup_time = Measure::start("lookup_time"); for account_info in storage.accounts.account_iter() { let key = account_info.pubkey(); - let lock = self.accounts_index.get_bin(key); - let x = lock.get(key).unwrap(); - let sl = x.slot_list.read().unwrap(); + let index_entry = self.accounts_index.get_cloned(key).unwrap(); + let slot_list = index_entry.slot_list.read().unwrap(); let mut count = 0; - for (slot2, account_info2) in sl.iter() { + for (slot2, account_info2) in slot_list.iter() { if slot2 == slot { count += 1; let ai = AccountInfo::new( From 140818221c0cd9fd5f7f0095b4bc2da7893f809b Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 28 Feb 2024 13:47:27 -0600 Subject: [PATCH 277/401] Rename SamplePerformanceService thread for consistency (#35332) - Rename thread - Add uniform service start/stop logs - Misc cleanup with variables / constants / exit flag check --- core/src/sample_performance_service.rs | 33 ++++++++++---------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/core/src/sample_performance_service.rs b/core/src/sample_performance_service.rs index a2ba724ee0d6da..7970f0c7c2d566 100644 --- a/core/src/sample_performance_service.rs +++ b/core/src/sample_performance_service.rs @@ -11,8 +11,8 @@ use { }, }; -const SAMPLE_INTERVAL: u64 = 60; -const SLEEP_INTERVAL: u64 = 500; +const SAMPLE_INTERVAL: Duration = Duration::from_secs(60); +const 
SLEEP_INTERVAL: Duration = Duration::from_millis(500); pub struct SamplePerformanceService { thread_hdl: JoinHandle<()>, @@ -26,34 +26,26 @@ impl SamplePerformanceService { ) -> Self { let bank_forks = bank_forks.clone(); - info!("Starting SamplePerformance service"); let thread_hdl = Builder::new() - .name("sample-performance".to_string()) + .name("solSamplePerf".to_string()) .spawn(move || { + info!("SamplePerformanceService has started"); Self::run(bank_forks, blockstore, exit); + info!("SamplePerformanceService has stopped"); }) .unwrap(); Self { thread_hdl } } - pub fn run( - bank_forks: Arc>, - blockstore: Arc, - exit: Arc, - ) { + fn run(bank_forks: Arc>, blockstore: Arc, exit: Arc) { let mut snapshot = StatsSnapshot::from_forks(&bank_forks); + let mut last_sample_time = Instant::now(); - let mut now = Instant::now(); - loop { - if exit.load(Ordering::Relaxed) { - break; - } - - let elapsed = now.elapsed(); - - if elapsed.as_secs() >= SAMPLE_INTERVAL { - now = Instant::now(); + while !exit.load(Ordering::Relaxed) { + let elapsed = last_sample_time.elapsed(); + if elapsed >= SAMPLE_INTERVAL { + last_sample_time = Instant::now(); let new_snapshot = StatsSnapshot::from_forks(&bank_forks); let (num_transactions, num_non_vote_transactions, num_slots) = @@ -78,8 +70,7 @@ impl SamplePerformanceService { error!("write_perf_sample failed: slot {:?} {:?}", highest_slot, e); } } - - sleep(Duration::from_millis(SLEEP_INTERVAL)); + sleep(SLEEP_INTERVAL); } } From 98ec72e6edeee0f81a1a7b407a5e16f53c2f28b5 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Wed, 28 Feb 2024 12:30:24 -0800 Subject: [PATCH 278/401] change default `bench-tps` client to `tpu-client` (#35335) * change default bench-tps client to tpu-client * remote client default to tpu-client * add --use-tpu-client back in. 
hide --use-thin-client * address nit, inform of future thinclient deprecation --- bench-tps/src/cli.rs | 41 +++++++++++++++++++++++++------------ net/net.sh | 4 ++-- net/remote/remote-client.sh | 17 ++++++++------- 3 files changed, 38 insertions(+), 24 deletions(-) diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index d172329bed8662..35c570aec5b7f8 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -34,7 +34,7 @@ pub enum ExternalClientType { impl Default for ExternalClientType { fn default() -> Self { - Self::ThinClient + Self::TpuClient } } @@ -167,19 +167,19 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .long("rpc-addr") .value_name("HOST:PORT") .takes_value(true) - .conflicts_with("tpu_client") .conflicts_with("rpc_client") .requires("tpu_addr") + .requires("thin_client") .help("Specify custom rpc_addr to create thin_client"), ) .arg( Arg::with_name("tpu_addr") .long("tpu-addr") .value_name("HOST:PORT") - .conflicts_with("tpu_client") .conflicts_with("rpc_client") .takes_value(true) .requires("rpc_addr") + .requires("thin_client") .help("Specify custom tpu_addr to create thin_client"), ) .arg( @@ -316,6 +316,7 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .arg( Arg::with_name("rpc_client") .long("use-rpc-client") + .conflicts_with("thin_client") .conflicts_with("tpu_client") .takes_value(false) .help("Submit transactions with a RpcClient") @@ -324,22 +325,33 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { Arg::with_name("tpu_client") .long("use-tpu-client") .conflicts_with("rpc_client") + .conflicts_with("thin_client") .takes_value(false) .help("Submit transactions with a TpuClient") ) + .arg( + Arg::with_name("thin_client") + .long("use-thin-client") + .conflicts_with("rpc_client") + .conflicts_with("tpu_client") + .takes_value(false) + .hidden(hidden_unless_forced()) + .help("Submit transactions with a ThinClient. Note: usage is discouraged. 
\ + ThinClient will be deprecated.") + ) .arg( Arg::with_name("tpu_disable_quic") .long("tpu-disable-quic") .takes_value(false) - .help("Do not submit transactions via QUIC; only affects ThinClient (default) \ - or TpuClient sends"), + .help("Do not submit transactions via QUIC; only affects ThinClient \ + or TpuClient (default) sends"), ) .arg( Arg::with_name("tpu_connection_pool_size") .long("tpu-connection-pool-size") .takes_value(true) - .help("Controls the connection pool size per remote address; only affects ThinClient (default) \ - or TpuClient sends"), + .help("Controls the connection pool size per remote address; only affects ThinClient \ + or TpuClient (default) sends"), ) .arg( Arg::with_name("compute_unit_price") @@ -442,10 +454,10 @@ pub fn parse_args(matches: &ArgMatches) -> Result { return Err("could not parse identity path"); } - if matches.is_present("tpu_client") { - args.external_client_type = ExternalClientType::TpuClient; - } else if matches.is_present("rpc_client") { + if matches.is_present("rpc_client") { args.external_client_type = ExternalClientType::RpcClient; + } else if matches.is_present("thin_client") { + args.external_client_type = ExternalClientType::ThinClient; } if matches.is_present("tpu_disable_quic") { @@ -679,7 +691,7 @@ mod tests { } ); - // select different client type + // select different client type and CommitmentConfig let keypair = read_keypair_file(&keypair_file_name).unwrap(); let matches = build_args("1.0.0").get_matches_from(vec![ "solana-bench-tps", @@ -687,7 +699,9 @@ mod tests { &keypair_file_name, "-u", "http://123.4.5.6:8899", - "--use-tpu-client", + "--use-rpc-client", + "--commitment-config", + "finalized", ]); let actual = parse_args(&matches).unwrap(); assert_eq!( @@ -696,7 +710,8 @@ mod tests { json_rpc_url: "http://123.4.5.6:8899".to_string(), websocket_url: "ws://123.4.5.6:8900/".to_string(), id: keypair, - external_client_type: ExternalClientType::TpuClient, + external_client_type: ExternalClientType::RpcClient, + commitment_config: CommitmentConfig::finalized(), ..Config::default() } ); diff --git a/net/net.sh b/net/net.sh index fe52116250545d..14b639a26dc1ea 100755 --- a/net/net.sh +++ b/net/net.sh @@ -118,7 +118,7 @@ Operate a configured testnet - Enable UDP for tpu transactions --client-type - - Specify backend client type for bench-tps. Valid options are (thin-client|rpc-client|tpu-client), thin-client is default + - Specify backend client type for bench-tps. 
Valid options are (thin-client|rpc-client|tpu-client), tpu-client is default sanity/start-specific options: -F - Discard validator nodes that didn't bootup successfully @@ -834,7 +834,7 @@ waitForNodeInit=true extraPrimordialStakes=0 disableQuic=false enableUdp=false -clientType=thin-client +clientType=tpu-client maybeUseUnstakedConnection="" command=$1 diff --git a/net/remote/remote-client.sh b/net/remote/remote-client.sh index 6a70fc5db9ddb0..8042bd19618083 100755 --- a/net/remote/remote-client.sh +++ b/net/remote/remote-client.sh @@ -11,7 +11,7 @@ if [[ -n $4 ]]; then fi benchTpsExtraArgs="$5" clientIndex="$6" -clientType="${7:-thin-client}" +clientType="${7:-tpu-client}" maybeUseUnstakedConnection="$8" missing() { @@ -43,19 +43,19 @@ skip) exit 1 esac -TPU_CLIENT=false +THIN_CLIENT=false RPC_CLIENT=false case "$clientType" in thin-client) - TPU_CLIENT=false + THIN_CLIENT=true RPC_CLIENT=false ;; tpu-client) - TPU_CLIENT=true + THIN_CLIENT=false RPC_CLIENT=false ;; rpc-client) - TPU_CLIENT=false + THIN_CLIENT=false RPC_CLIENT=true ;; *) @@ -74,12 +74,11 @@ solana-bench-tps) args=() - if ${TPU_CLIENT}; then - args+=(--use-tpu-client) + if ${THIN_CLIENT}; then + args+=(--entrypoint "$entrypointIp:8001") + args+=(--use-thin-client) elif ${RPC_CLIENT}; then args+=(--use-rpc-client) - else - args+=(--entrypoint "$entrypointIp:8001") fi if [[ -z "$maybeUseUnstakedConnection" ]]; then From a7a41e76318a4b0494b16981c08c40c0022ef6f4 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 28 Feb 2024 20:31:40 +0000 Subject: [PATCH 279/401] adds Merkle shred variant with retransmitter's signature (#35293) Moving towards locking down Turbine propagation path, the commit reserves a buffer within shred payload for retransmitter's signature. --- core/src/repair/repair_generic_traversal.rs | 2 +- ledger/src/blockstore.rs | 2 +- ledger/src/shred.rs | 116 ++++++++++--- ledger/src/shred/merkle.rs | 174 +++++++++++++------- ledger/src/shred/shred_data.rs | 11 +- 5 files changed, 224 insertions(+), 81 deletions(-) diff --git a/core/src/repair/repair_generic_traversal.rs b/core/src/repair/repair_generic_traversal.rs index 901b20c6241da1..f33a9b91e28bd8 100644 --- a/core/src/repair/repair_generic_traversal.rs +++ b/core/src/repair/repair_generic_traversal.rs @@ -270,7 +270,7 @@ pub mod test { &mut processed_slots, 1, ); - assert_eq!(repairs, [ShredRepairType::Shred(1, 3)]); + assert_eq!(repairs, [ShredRepairType::Shred(1, 4)]); } fn add_tree_with_missing_shreds( diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index c01b1806a8fa27..867761639d95d3 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -7451,7 +7451,7 @@ pub mod tests { #[test] fn test_insert_multiple_is_last() { solana_logger::setup(); - let (shreds, _) = make_slot_entries(0, 0, 19, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(0, 0, 18, /*merkle_variant:*/ true); let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index d22d89943df78a..24d5000b65311b 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -198,10 +198,20 @@ enum ShredVariant { // the shred variant: // 0b0100_???? MerkleCode // 0b0110_???? MerkleCode chained + // 0b0111_???? MerkleCode chained resigned // 0b1000_???? MerkleData // 0b1001_???? MerkleData chained - MerkleCode { proof_size: u8, chained: bool }, // 0b01?0_???? 
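    // (Worked example, per the From<ShredVariant> impl below: proof_size = 6
    // encodes as 0x46 for plain MerkleCode, 0x66 chained, and 0x76 chained
    // resigned; the MerkleData equivalents are 0x86, 0x96 and 0xb6.)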
- MerkleData { proof_size: u8, chained: bool }, // 0b100?_???? + // 0b1011_???? MerkleData chained resigned + MerkleCode { + proof_size: u8, + chained: bool, + resigned: bool, + }, // 0b01??_???? + MerkleData { + proof_size: u8, + chained: bool, + resigned: bool, + }, // 0b10??_???? } /// A common header that is present in data and code shred headers @@ -656,17 +666,19 @@ pub mod layout { ShredVariant::MerkleCode { proof_size, chained, + resigned, } => { let merkle_root = - self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained)?; + self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned)?; SignedData::MerkleRoot(merkle_root) } ShredVariant::MerkleData { proof_size, chained, + resigned, } => { let merkle_root = - self::merkle::ShredData::get_merkle_root(shred, proof_size, chained)?; + self::merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned)?; SignedData::MerkleRoot(merkle_root) } }; @@ -704,11 +716,13 @@ pub mod layout { ShredVariant::MerkleCode { proof_size, chained, - } => merkle::ShredCode::get_merkle_root(shred, proof_size, chained), + resigned, + } => merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned), ShredVariant::MerkleData { proof_size, chained, - } => merkle::ShredData::get_merkle_root(shred, proof_size, chained), + resigned, + } => merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned), } } @@ -725,10 +739,18 @@ pub mod layout { *byte = rng.gen::().max(1u8).wrapping_add(*byte); } let shred = get_shred(packet).unwrap(); - let merkle_proof_size = match get_shred_variant(shred).unwrap() { + let merkle_variant = match get_shred_variant(shred).unwrap() { ShredVariant::LegacyCode | ShredVariant::LegacyData => None, - ShredVariant::MerkleCode { proof_size, .. } - | ShredVariant::MerkleData { proof_size, .. } => Some(proof_size), + ShredVariant::MerkleCode { + proof_size, + resigned, + .. + } + | ShredVariant::MerkleData { + proof_size, + resigned, + .. + } => Some((proof_size, resigned)), }; let coin_flip: bool = rng.gen(); if coin_flip { @@ -736,12 +758,13 @@ pub mod layout { modify_packet(rng, packet, 0..SIGNATURE_BYTES); } else { // Corrupt one byte within the signed data offsets. - let offsets = merkle_proof_size - .map(|merkle_proof_size| { + let offsets = merkle_variant + .map(|(proof_size, resigned)| { // Need to corrupt the merkle proof. // Proof entries are each 20 bytes at the end of shreds. 
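            // (For resigned shreds the retransmitter's signature trails the
            // proof, so the corruptible proof region ends SIZE_OF_SIGNATURE
            // bytes before the end of the shred; the new code below shifts
            // the range accordingly.)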
- let offset = usize::from(merkle_proof_size) * 20; - shred.len() - offset..shred.len() + let offset = usize::from(proof_size) * 20; + let size = shred.len() - if resigned { SIZE_OF_SIGNATURE } else { 0 }; + size - offset..size }) .or_else(|| get_signed_data_offsets(shred)); modify_packet(rng, packet, offsets.unwrap()); @@ -823,19 +846,43 @@ impl From for u8 { ShredVariant::MerkleCode { proof_size, chained: false, + resigned: false, } => proof_size | 0x40, ShredVariant::MerkleCode { proof_size, chained: true, + resigned: false, } => proof_size | 0x60, + ShredVariant::MerkleCode { + proof_size, + chained: true, + resigned: true, + } => proof_size | 0x70, ShredVariant::MerkleData { proof_size, chained: false, + resigned: false, } => proof_size | 0x80, ShredVariant::MerkleData { proof_size, chained: true, + resigned: false, } => proof_size | 0x90, + ShredVariant::MerkleData { + proof_size, + chained: true, + resigned: true, + } => proof_size | 0xb0, + ShredVariant::MerkleCode { + proof_size: _, + chained: false, + resigned: true, + } + | ShredVariant::MerkleData { + proof_size: _, + chained: false, + resigned: true, + } => panic!("Invalid shred variant: {shred_variant:?}"), } } } @@ -853,18 +900,32 @@ impl TryFrom for ShredVariant { 0x40 => Ok(ShredVariant::MerkleCode { proof_size, chained: false, + resigned: false, }), 0x60 => Ok(ShredVariant::MerkleCode { proof_size, chained: true, + resigned: false, + }), + 0x70 => Ok(ShredVariant::MerkleCode { + proof_size, + chained: true, + resigned: true, }), 0x80 => Ok(ShredVariant::MerkleData { proof_size, chained: false, + resigned: false, }), 0x90 => Ok(ShredVariant::MerkleData { proof_size, chained: true, + resigned: false, + }), + 0xb0 => Ok(ShredVariant::MerkleData { + proof_size, + chained: true, + resigned: true, }), _ => Err(Error::InvalidShredVariant), } @@ -1058,7 +1119,9 @@ pub fn max_entries_per_n_shred( shred_data_size: Option, ) -> u64 { // Default 32:32 erasure batches yields 64 shreds; log2(64) = 6. 
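    // (A Merkle proof over an n-leaf tree needs ceil(log2(n)) entries of 20
    // bytes each, so every shred in a 64-shred batch carries a 6-entry proof.)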
- let merkle_variant = Some((/*proof_size:*/ 6, /*chained:*/ false)); + let merkle_variant = Some(( + /*proof_size:*/ 6, /*chained:*/ false, /*resigned:*/ false, + )); let data_buffer_size = ShredData::capacity(merkle_variant).unwrap(); let shred_data_size = shred_data_size.unwrap_or(data_buffer_size) as u64; let vec_size = bincode::serialized_size(&vec![entry]).unwrap(); @@ -1163,6 +1226,7 @@ mod tests { bincode::serialized_size(&ShredVariant::MerkleCode { proof_size: 15, chained: true, + resigned: true }) .unwrap() as usize ); @@ -1468,15 +1532,17 @@ mod tests { ); } - #[test_case(false, 0b0100_0000)] - #[test_case(true, 0b0110_0000)] - fn test_shred_variant_compat_merkle_code(chained: bool, byte: u8) { + #[test_case(false, false, 0b0100_0000)] + #[test_case(true, false, 0b0110_0000)] + #[test_case(true, true, 0b0111_0000)] + fn test_shred_variant_compat_merkle_code(chained: bool, resigned: bool, byte: u8) { for proof_size in 0..=15u8 { let byte = byte | proof_size; assert_eq!( u8::from(ShredVariant::MerkleCode { proof_size, chained, + resigned, }), byte ); @@ -1484,6 +1550,7 @@ mod tests { ShredType::from(ShredVariant::MerkleCode { proof_size, chained, + resigned, }), ShredType::Code ); @@ -1492,11 +1559,13 @@ mod tests { ShredVariant::MerkleCode { proof_size, chained, + resigned, }, ); let buf = bincode::serialize(&ShredVariant::MerkleCode { proof_size, chained, + resigned, }) .unwrap(); assert_eq!(buf, vec![byte]); @@ -1505,20 +1574,23 @@ mod tests { ShredVariant::MerkleCode { proof_size, chained, + resigned, } ); } } - #[test_case(false, 0b1000_0000)] - #[test_case(true, 0b1001_0000)] - fn test_shred_variant_compat_merkle_data(chained: bool, byte: u8) { + #[test_case(false, false, 0b1000_0000)] + #[test_case(true, false, 0b1001_0000)] + #[test_case(true, true, 0b1011_0000)] + fn test_shred_variant_compat_merkle_data(chained: bool, resigned: bool, byte: u8) { for proof_size in 0..=15u8 { let byte = byte | proof_size; assert_eq!( u8::from(ShredVariant::MerkleData { proof_size, chained, + resigned, }), byte ); @@ -1526,6 +1598,7 @@ mod tests { ShredType::from(ShredVariant::MerkleData { proof_size, chained, + resigned, }), ShredType::Data ); @@ -1534,11 +1607,13 @@ mod tests { ShredVariant::MerkleData { proof_size, chained, + resigned } ); let buf = bincode::serialize(&ShredVariant::MerkleData { proof_size, chained, + resigned, }) .unwrap(); assert_eq!(buf, vec![byte]); @@ -1547,6 +1622,7 @@ mod tests { ShredVariant::MerkleData { proof_size, chained, + resigned } ); } diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index f92c3616f5c86e..b785eeb6dc32cc 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -53,6 +53,7 @@ type MerkleProofEntry = [u8; 20]; // Layout: {common, data} headers | data buffer // | [Merkle root of the previous erasure batch if chained] // | Merkle proof +// | [Retransmitter's signature if resigned] // The slice past signature till the end of the data buffer is erasure coded. // The slice past signature and before the merkle proof is hashed to generate // the Merkle tree. The root of the Merkle tree is signed. @@ -66,6 +67,7 @@ pub struct ShredData { // Layout: {common, coding} headers | erasure coded shard // | [Merkle root of the previous erasure batch if chained] // | Merkle proof +// | [Retransmitter's signature if resigned] // The slice past signature and before the merkle proof is hashed to generate // the Merkle tree. The root of the Merkle tree is signed. 
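// (Worked offsets, assuming the usual 32-byte hash for the chained Merkle
// root, 20-byte proof entries, and a 64-byte Ed25519 signature: with
// proof_size = 6, chained and resigned, the erasure-coded shard therefore
// stops 32 + 6 * 20 + 64 = 216 bytes short of the end of the payload.)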
#[derive(Clone, Debug, Eq, PartialEq)] @@ -145,15 +147,17 @@ impl ShredData { // Maximum size of ledger data that can be embedded in a data-shred. // Also equal to: - // ShredCode::capacity(proof_size).unwrap() + // ShredCode::capacity(proof_size, chained, resigned).unwrap() // - ShredData::SIZE_OF_HEADERS // + SIZE_OF_SIGNATURE - pub(super) fn capacity(proof_size: u8, chained: bool) -> Result { + pub(super) fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Result { + debug_assert!(chained || !resigned); Self::SIZE_OF_PAYLOAD .checked_sub( Self::SIZE_OF_HEADERS + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } - + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY, + + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + + if resigned { SIZE_OF_SIGNATURE } else { 0 }, ) .ok_or(Error::InvalidProofSize(proof_size)) } @@ -163,16 +167,17 @@ impl ShredData { let ShredVariant::MerkleData { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - Self::get_proof_offset(proof_size, chained) + Self::get_proof_offset(proof_size, chained, resigned) } - fn get_proof_offset(proof_size: u8, chained: bool) -> Result { + fn get_proof_offset(proof_size: u8, chained: bool, resigned: bool) -> Result { Ok(Self::SIZE_OF_HEADERS - + Self::capacity(proof_size, chained)? + + Self::capacity(proof_size, chained, resigned)? + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }) } @@ -180,11 +185,12 @@ impl ShredData { let ShredVariant::MerkleData { proof_size, chained: true, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?) + Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true, resigned)?) } fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { @@ -234,11 +240,12 @@ impl ShredData { let ShredVariant::MerkleData { proof_size, chained, + resigned, } = common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - if ShredCode::capacity(proof_size, chained)? != shard_size { + if ShredCode::capacity(proof_size, chained, resigned)? != shard_size { return Err(Error::InvalidShardSize(shard_size)); } let data_header = deserialize_from_with_limit(&mut cursor)?; @@ -271,12 +278,18 @@ impl ShredData { Ok(()) } - pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option { + pub(super) fn get_merkle_root( + shred: &[u8], + proof_size: u8, + chained: bool, + resigned: bool, + ) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), ShredVariant::MerkleData { proof_size, chained, + resigned, }, ); // Shred index in the erasure batch. @@ -289,7 +302,7 @@ impl ShredData { .map(usize::try_from)? .ok()? }; - let proof_offset = Self::get_proof_offset(proof_size, chained).ok()?; + let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?; let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; get_merkle_root(index, node, proof).ok() @@ -306,14 +319,16 @@ impl ShredCode { } // Size of buffer embedding erasure codes. - fn capacity(proof_size: u8, chained: bool) -> Result { + fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Result { + debug_assert!(chained || !resigned); // Merkle proof is generated and signed after coding shreds are // generated. Coding shred headers cannot be erasure coded either. 
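        // (Note that resigned implies chained: the debug_assert above mirrors
        // the encoding, which deliberately has no unchained resigned variant;
        // see the panicking arm in the From<ShredVariant> impl.)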
Self::SIZE_OF_PAYLOAD .checked_sub( Self::SIZE_OF_HEADERS + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } - + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY, + + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + + if resigned { SIZE_OF_SIGNATURE } else { 0 }, ) .ok_or(Error::InvalidProofSize(proof_size)) } @@ -323,16 +338,17 @@ impl ShredCode { let ShredVariant::MerkleCode { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - Self::get_proof_offset(proof_size, chained) + Self::get_proof_offset(proof_size, chained, resigned) } - fn get_proof_offset(proof_size: u8, chained: bool) -> Result { + fn get_proof_offset(proof_size: u8, chained: bool, resigned: bool) -> Result { Ok(Self::SIZE_OF_HEADERS - + Self::capacity(proof_size, chained)? + + Self::capacity(proof_size, chained, resigned)? + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }) } @@ -340,11 +356,12 @@ impl ShredCode { let ShredVariant::MerkleCode { proof_size, chained: true, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?) + Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true, resigned)?) } fn chained_merkle_root(&self) -> Result { @@ -393,12 +410,13 @@ impl ShredCode { let ShredVariant::MerkleCode { proof_size, chained, + resigned, } = common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; let shard_size = shard.len(); - if Self::capacity(proof_size, chained)? != shard_size { + if Self::capacity(proof_size, chained, resigned)? != shard_size { return Err(Error::InvalidShardSize(shard_size)); } if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD { @@ -438,12 +456,18 @@ impl ShredCode { Ok(()) } - pub(super) fn get_merkle_root(shred: &[u8], proof_size: u8, chained: bool) -> Option { + pub(super) fn get_merkle_root( + shred: &[u8], + proof_size: u8, + chained: bool, + resigned: bool, + ) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), ShredVariant::MerkleCode { proof_size, chained, + resigned, }, ); // Shred index in the erasure batch. @@ -458,7 +482,7 @@ impl ShredCode { .ok()?; num_data_shreds.checked_add(position)? 
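            // (Coding shreds are appended after all data shreds as leaves of
            // the Merkle tree, so their leaf index is num_data_shreds +
            // position.)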
}; - let proof_offset = Self::get_proof_offset(proof_size, chained).ok()?; + let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?; let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; get_merkle_root(index, node, proof).ok() @@ -472,9 +496,10 @@ impl<'a> ShredTrait<'a> for ShredData { // Also equal to: // ShredData::SIZE_OF_HEADERS - // + ShredData::capacity(proof_size, chained).unwrap() + // + ShredData::capacity(proof_size, chained, resigned).unwrap() // + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } // + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + // + if resigned { SIZE_OF_SIGNATURE } else { 0 } const SIZE_OF_PAYLOAD: usize = ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS + SIZE_OF_SIGNATURE; const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; @@ -514,11 +539,12 @@ impl<'a> ShredTrait<'a> for ShredData { let ShredVariant::MerkleData { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; let mut shard = self.payload; shard.truncate(offset); shard.drain(..SIZE_OF_SIGNATURE); @@ -532,11 +558,12 @@ impl<'a> ShredTrait<'a> for ShredData { let ShredVariant::MerkleData { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; self.payload .get(SIZE_OF_SIGNATURE..offset) .ok_or(Error::InvalidPayloadSize(self.payload.len())) @@ -598,11 +625,12 @@ impl<'a> ShredTrait<'a> for ShredCode { let ShredVariant::MerkleCode { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; let mut shard = self.payload; shard.truncate(offset); shard.drain(..Self::SIZE_OF_HEADERS); @@ -616,11 +644,12 @@ impl<'a> ShredTrait<'a> for ShredCode { let ShredVariant::MerkleCode { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained)?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; self.payload .get(Self::SIZE_OF_HEADERS..offset) .ok_or(Error::InvalidPayloadSize(self.payload.len())) @@ -650,11 +679,12 @@ impl ShredDataTrait for ShredData { let ShredVariant::MerkleData { proof_size, chained, + resigned, } = self.common_header.shred_variant else { return Err(Error::InvalidShredVariant); }; - let data_buffer_size = Self::capacity(proof_size, chained)?; + let data_buffer_size = Self::capacity(proof_size, chained, resigned)?; let size = usize::from(self.data_header.size); if size > self.payload.len() || size < Self::SIZE_OF_HEADERS @@ -786,11 +816,12 @@ pub(super) fn recover( }) .ok_or(TooFewParityShards)?; debug_assert_matches!(common_header.shred_variant, ShredVariant::MerkleCode { .. 
}); - let (proof_size, chained) = match common_header.shred_variant { + let (proof_size, chained, resigned) = match common_header.shred_variant { ShredVariant::MerkleCode { proof_size, chained, - } => (proof_size, chained), + resigned, + } => (proof_size, chained, resigned), ShredVariant::MerkleData { .. } | ShredVariant::LegacyCode | ShredVariant::LegacyData => { return Err(Error::InvalidShredVariant); } @@ -816,6 +847,7 @@ pub(super) fn recover( == &ShredVariant::MerkleData { proof_size, chained, + resigned, } } Shred::ShredCode(shred) => { @@ -828,6 +860,7 @@ pub(super) fn recover( == &ShredVariant::MerkleCode { proof_size, chained, + resigned, } && num_data_shreds == coding_header.num_data_shreds && num_coding_shreds == coding_header.num_coding_shreds @@ -884,6 +917,7 @@ pub(super) fn recover( let expected_shred_variant = ShredVariant::MerkleData { proof_size, chained, + resigned, }; if shred_variant != expected_shred_variant || common_header.slot != slot @@ -992,16 +1026,18 @@ pub(super) fn make_shreds_from_data( } let now = Instant::now(); let chained = chained_merkle_root.is_some(); + let resigned = chained && is_last_in_slot; let erasure_batch_size = shredder::get_erasure_batch_size(DATA_SHREDS_PER_FEC_BLOCK, is_last_in_slot); let proof_size = get_proof_size(erasure_batch_size); - let data_buffer_size = ShredData::capacity(proof_size, chained)?; + let data_buffer_size = ShredData::capacity(proof_size, chained, resigned)?; let chunk_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_size; let mut common_header = ShredCommonHeader { signature: Signature::default(), shred_variant: ShredVariant::MerkleData { proof_size, chained, + resigned, }, slot, index: next_shred_index, @@ -1044,7 +1080,7 @@ pub(super) fn make_shreds_from_data( // which can embed the remaining data. let (proof_size, data_buffer_size) = (1u8..32) .find_map(|proof_size| { - let data_buffer_size = ShredData::capacity(proof_size, chained).ok()?; + let data_buffer_size = ShredData::capacity(proof_size, chained, resigned).ok()?; let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size; let num_data_shreds = num_data_shreds.max(1); let erasure_batch_size = @@ -1056,6 +1092,7 @@ pub(super) fn make_shreds_from_data( common_header.shred_variant = ShredVariant::MerkleData { proof_size, chained, + resigned, }; common_header.fec_set_index = common_header.index; let chunks = if data.is_empty() { @@ -1076,7 +1113,7 @@ pub(super) fn make_shreds_from_data( // Only the very last shred may have residual data buffer. debug_assert!(shreds.iter().rev().skip(1).all(|shred| { let proof_size = shred.proof_size().unwrap(); - let capacity = ShredData::capacity(proof_size, chained).unwrap(); + let capacity = ShredData::capacity(proof_size, chained, resigned).unwrap(); shred.data().unwrap().len() == capacity })); // Adjust flags for the very last shred. 
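The capacity logic above closes a loop: the proof size fixes each shred's data
capacity, the capacity fixes how many data shreds the residual payload needs,
that count fixes the erasure batch size, and the batch size fixes the proof
size again. Below is a minimal, self-contained sketch of that fixed-point
search; the constants and the batch-size rule are illustrative assumptions
standing in for the real definitions in this crate, not the actual
implementation.

const PAYLOAD: usize = 1203; // assumed data-shred payload size
const HEADERS: usize = 88; // assumed size of {common, data} headers
const MERKLE_ROOT: usize = 32; // chained Merkle root
const PROOF_ENTRY: usize = 20; // per-level Merkle proof entry
const SIGNATURE: usize = 64; // retransmitter's signature

// Number of proof entries for a Merkle tree over `n` leaves: ceil(log2(n)).
fn proof_size_for(num_shreds: usize) -> u8 {
    num_shreds.next_power_of_two().trailing_zeros() as u8
}

// Data bytes left once headers, the optional chained root, the proof, and
// the optional retransmitter signature are reserved.
fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Option<usize> {
    PAYLOAD.checked_sub(
        HEADERS
            + if chained { MERKLE_ROOT } else { 0 }
            + usize::from(proof_size) * PROOF_ENTRY
            + if resigned { SIGNATURE } else { 0 },
    )
}

// Smallest proof size consistent with the erasure batch it implies.
// `erasure_batch_size` maps data-shred count to total batch size; it stands
// in for shredder::get_erasure_batch_size.
fn find_proof_size(
    data_len: usize,
    erasure_batch_size: impl Fn(usize) -> usize,
    chained: bool,
    resigned: bool,
) -> Option<u8> {
    (1u8..32).find(|&proof_size| {
        let Some(cap) = capacity(proof_size, chained, resigned) else {
            return false;
        };
        let num_data_shreds = ((data_len + cap - 1) / cap).max(1);
        proof_size_for(erasure_batch_size(num_data_shreds)) == proof_size
    })
}

fn main() {
    // With a hypothetical 1:1 data-to-coding ratio and 3000 residual bytes:
    // 4 data + 4 coding shreds form 8 leaves, which need 3 proof entries.
    assert_eq!(find_proof_size(3000, |n| 2 * n, true, true), Some(3));
}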
@@ -1196,6 +1233,7 @@ fn make_erasure_batch( ) -> Result<(/*merkle root:*/ Hash, Vec), Error> { let num_data_shreds = shreds.len(); let chained = chained_merkle_root.is_some(); + let resigned = chained && is_last_in_slot; let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot); let num_coding_shreds = erasure_batch_size - num_data_shreds; let proof_size = get_proof_size(erasure_batch_size); @@ -1203,6 +1241,7 @@ fn make_erasure_batch( == ShredVariant::MerkleData { proof_size, chained, + resigned })); let mut common_header = match shreds.first() { None => return Err(Error::from(TooFewShards)), @@ -1230,6 +1269,7 @@ fn make_erasure_batch( common_header.shred_variant = ShredVariant::MerkleCode { proof_size, chained, + resigned, }; let mut coding_header = CodingShredHeader { num_data_shreds: num_data_shreds as u16, @@ -1300,59 +1340,69 @@ mod test { }; // Total size of a data shred including headers and merkle proof. - fn shred_data_size_of_payload(proof_size: u8, chained: bool) -> usize { + fn shred_data_size_of_payload(proof_size: u8, chained: bool, resigned: bool) -> usize { + assert!(chained || !resigned); ShredData::SIZE_OF_HEADERS - + ShredData::capacity(proof_size, chained).unwrap() + + ShredData::capacity(proof_size, chained, resigned).unwrap() + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + + if resigned { SIZE_OF_SIGNATURE } else { 0 } } // Merkle proof is generated and signed after coding shreds are generated. // All payload excluding merkle proof and the signature are erasure coded. // Therefore the data capacity is equal to erasure encoded shard size minus // size of erasure encoded header. - fn shred_data_capacity(proof_size: u8, chained: bool) -> usize { + fn shred_data_capacity(proof_size: u8, chained: bool, resigned: bool) -> usize { const SIZE_OF_ERASURE_ENCODED_HEADER: usize = ShredData::SIZE_OF_HEADERS - SIZE_OF_SIGNATURE; - ShredCode::capacity(proof_size, chained).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER + ShredCode::capacity(proof_size, chained, resigned).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER } - fn shred_data_size_of_erasure_encoded_slice(proof_size: u8, chained: bool) -> usize { + fn shred_data_size_of_erasure_encoded_slice( + proof_size: u8, + chained: bool, + resigned: bool, + ) -> usize { ShredData::SIZE_OF_PAYLOAD - SIZE_OF_SIGNATURE - if chained { SIZE_OF_MERKLE_ROOT } else { 0 } - usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + - if resigned { SIZE_OF_SIGNATURE } else { 0 } } - #[test_case(false)] - #[test_case(true)] - fn test_shred_data_size_of_payload(chained: bool) { + #[test_case(false, false)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_shred_data_size_of_payload(chained: bool, resigned: bool) { for proof_size in 0..0x15 { assert_eq!( ShredData::SIZE_OF_PAYLOAD, - shred_data_size_of_payload(proof_size, chained) + shred_data_size_of_payload(proof_size, chained, resigned) ); } } - #[test_case(false)] - #[test_case(true)] - fn test_shred_data_capacity(chained: bool) { + #[test_case(false, false)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_shred_data_capacity(chained: bool, resigned: bool) { for proof_size in 0..0x15 { assert_eq!( - ShredData::capacity(proof_size, chained).unwrap(), - shred_data_capacity(proof_size, chained) + ShredData::capacity(proof_size, chained, resigned).unwrap(), + shred_data_capacity(proof_size, chained, resigned) ); } } - #[test_case(false)] - #[test_case(true)] - fn 
test_shred_code_capacity(chained: bool) { + #[test_case(false, false)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_shred_code_capacity(chained: bool, resigned: bool) { for proof_size in 0..0x15 { assert_eq!( - ShredCode::capacity(proof_size, chained).unwrap(), - shred_data_size_of_erasure_encoded_slice(proof_size, chained), + ShredCode::capacity(proof_size, chained, resigned).unwrap(), + shred_data_size_of_erasure_encoded_slice(proof_size, chained, resigned), ); } } @@ -1393,13 +1443,16 @@ mod test { } } - #[test_case(37, false)] - #[test_case(37, true)] - #[test_case(64, false)] - #[test_case(64, true)] - #[test_case(73, false)] - #[test_case(73, true)] - fn test_recover_merkle_shreds(num_shreds: usize, chained: bool) { + #[test_case(37, false, false)] + #[test_case(37, true, false)] + #[test_case(37, true, true)] + #[test_case(64, false, false)] + #[test_case(64, true, false)] + #[test_case(64, true, true)] + #[test_case(73, false, false)] + #[test_case(73, true, false)] + #[test_case(73, true, true)] + fn test_recover_merkle_shreds(num_shreds: usize, chained: bool, resigned: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); for num_data_shreds in 1..num_shreds { @@ -1407,6 +1460,7 @@ mod test { run_recover_merkle_shreds( &mut rng, chained, + resigned, num_data_shreds, num_coding_shreds, &reed_solomon_cache, @@ -1417,6 +1471,7 @@ mod test { fn run_recover_merkle_shreds( rng: &mut R, chained: bool, + resigned: bool, num_data_shreds: usize, num_coding_shreds: usize, reed_solomon_cache: &ReedSolomonCache, @@ -1424,12 +1479,13 @@ mod test { let keypair = Keypair::new(); let num_shreds = num_data_shreds + num_coding_shreds; let proof_size = get_proof_size(num_shreds); - let capacity = ShredData::capacity(proof_size, chained).unwrap(); + let capacity = ShredData::capacity(proof_size, chained, resigned).unwrap(); let common_header = ShredCommonHeader { signature: Signature::default(), shred_variant: ShredVariant::MerkleData { proof_size, chained, + resigned, }, slot: 145_865_705, index: 1835, @@ -1488,6 +1544,7 @@ mod test { shred_variant: ShredVariant::MerkleCode { proof_size, chained, + resigned, }, index: common_header.index + i as u32 + 7, ..common_header @@ -1660,6 +1717,7 @@ mod test { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); let keypair = Keypair::new(); let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); + let resigned = chained && is_last_in_slot; let slot = 149_745_689; let parent_slot = slot - rng.gen_range(1..65536); let shred_version = rng.gen(); @@ -1752,6 +1810,7 @@ mod test { ShredVariant::MerkleCode { proof_size, chained, + resigned } ); num_coding_shreds += 1; @@ -1763,6 +1822,7 @@ mod test { ShredVariant::MerkleData { proof_size, chained, + resigned } ); assert!(common_header.fec_set_index <= common_header.index); diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index 725ec90e65a14c..15f407172cfc4b 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -114,11 +114,18 @@ impl ShredData { // merkle_proof_size is the number of merkle proof entries. // None indicates a legacy data-shred. 
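    // e.g. ShredData::capacity(Some((6, true, true))) asks for the capacity
    // of a chained, resigned data shred carrying a 6-entry proof, while
    // ShredData::capacity(None) still selects the legacy capacity.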
pub fn capacity( - merkle_variant: Option<(/*proof_size:*/ u8, /*chained:*/ bool)>, + merkle_variant: Option<( + u8, // proof_size + bool, // chained + bool, // resigned + )>, ) -> Result { match merkle_variant { None => Ok(legacy::ShredData::CAPACITY), - Some((proof_size, chained)) => merkle::ShredData::capacity(proof_size, chained), + Some((proof_size, chained, resigned)) => { + debug_assert!(chained || !resigned); + merkle::ShredData::capacity(proof_size, chained, resigned) + } } } From 6aaaf858c98a59a7c266ee4b302eb932a929c638 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 15:55:05 -0500 Subject: [PATCH 280/401] Adds more info to panic message in AccountsHashVerifier (#35353) --- accounts-db/src/accounts_db.rs | 12 ++++++++++++ core/src/accounts_hash_verifier.rs | 25 ++++++++++++++++++++----- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 167cbdbfae5db0..f077f7a412b32e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7430,6 +7430,11 @@ impl AccountsDb { self.accounts_hashes.lock().unwrap().get(&slot).cloned() } + /// Get all accounts hashes + pub fn get_accounts_hashes(&self) -> HashMap { + self.accounts_hashes.lock().unwrap().clone() + } + /// Set the incremental accounts hash for `slot` /// /// returns the previous incremental accounts hash for `slot` @@ -7466,6 +7471,13 @@ impl AccountsDb { .cloned() } + /// Get all incremental accounts hashes + pub fn get_incremental_accounts_hashes( + &self, + ) -> HashMap { + self.incremental_accounts_hashes.lock().unwrap().clone() + } + /// Purge accounts hashes that are older than `last_full_snapshot_slot` /// /// Should only be called by AccountsHashVerifier, since it consumes the accounts hashes and diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 43a3911e402bc4..0e427d0675a2b1 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -297,11 +297,26 @@ impl AccountsHashVerifier { else { panic!("Calculating incremental accounts hash requires a base slot"); }; - let (base_accounts_hash, base_capitalization) = accounts_package - .accounts - .accounts_db - .get_accounts_hash(base_slot) - .expect("incremental snapshot requires accounts hash and capitalization from the full snapshot it is based on"); + let accounts_db = &accounts_package.accounts.accounts_db; + let Some((base_accounts_hash, base_capitalization)) = + accounts_db.get_accounts_hash(base_slot) + else { + panic!( + "incremental snapshot requires accounts hash and capitalization \ + from the full snapshot it is based on \n\ + package: {accounts_package:?} \n\ + accounts hashes: {:?} \n\ + incremental accounts hashes: {:?} \n\ + full snapshot archives: {:?} \n\ + bank snapshots: {:?}", + accounts_db.get_accounts_hashes(), + accounts_db.get_incremental_accounts_hashes(), + snapshot_utils::get_full_snapshot_archives( + &snapshot_config.full_snapshot_archives_dir, + ), + snapshot_utils::get_bank_snapshots(&snapshot_config.bank_snapshots_dir), + ); + }; let (incremental_accounts_hash, incremental_capitalization) = Self::_calculate_incremental_accounts_hash(accounts_package, base_slot); let bank_incremental_snapshot_persistence = BankIncrementalSnapshotPersistence { From 140c21f8a906fda41feb251d0b32b82ea8760652 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 16:08:00 -0500 Subject: [PATCH 281/401] Removes ReadAccountMapEntry (#35351) --- accounts-db/src/accounts_index.rs | 
95 +------------------------------ 1 file changed, 1 insertion(+), 94 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 5221ac43449869..7c4baf1ee95908 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -13,7 +13,6 @@ use { secondary_index::*, }, log::*, - ouroboros::self_referencing, rand::{thread_rng, Rng}, rayon::{ iter::{IntoParallelIterator, ParallelIterator}, @@ -37,7 +36,7 @@ use { path::PathBuf, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, Mutex, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard, + Arc, Mutex, OnceLock, RwLock, RwLockWriteGuard, }, }, thiserror::Error, @@ -339,48 +338,6 @@ impl AccountMapEntryInner { } } -pub enum AccountIndexGetResult { - /// (index entry, index in slot list) - Found(ReadAccountMapEntry, usize), - NotFound, -} - -#[self_referencing] -pub struct ReadAccountMapEntry { - owned_entry: AccountMapEntry, - #[borrows(owned_entry)] - #[covariant] - slot_list_guard: RwLockReadGuard<'this, SlotList>, -} - -impl Debug for ReadAccountMapEntry { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self.borrow_owned_entry()) - } -} - -impl ReadAccountMapEntry { - pub fn from_account_map_entry(account_map_entry: AccountMapEntry) -> Self { - ReadAccountMapEntryBuilder { - owned_entry: account_map_entry, - slot_list_guard_builder: |lock| lock.slot_list.read().unwrap(), - } - .build() - } - - pub fn slot_list(&self) -> &SlotList { - self.borrow_slot_list_guard() - } - - pub fn ref_count(&self) -> RefCount { - self.borrow_owned_entry().ref_count() - } - - pub fn addref(&self) { - self.borrow_owned_entry().addref(); - } -} - /// can be used to pre-allocate structures for insertion into accounts index outside of lock pub enum PreAllocatedAccountMapEntry { Entry(AccountMapEntry), @@ -1490,28 +1447,6 @@ impl + Into> AccountsIndex { }); } - /// Get an account - /// The latest account that appears in `ancestors` or `roots` is returned. - pub fn get( - &self, - pubkey: &Pubkey, - ancestors: Option<&Ancestors>, - max_root: Option, - ) -> AccountIndexGetResult { - let read_account_map_entry = self - .get_bin(pubkey) - .get(pubkey) - .map(ReadAccountMapEntry::from_account_map_entry); - - read_account_map_entry - .and_then(|locked_entry| { - let slot_list = locked_entry.slot_list(); - self.latest_slot(ancestors, slot_list, max_root) - .map(|found_index| AccountIndexGetResult::Found(locked_entry, found_index)) - }) - .unwrap_or(AccountIndexGetResult::NotFound) - } - // Get the maximum root <= `max_allowed_root` from the given `slice` fn get_newest_root_in_slot_list( alive_roots: &RollingBitField, @@ -2076,34 +2011,6 @@ impl + Into> AccountsIndex { } } -// These functions/fields are only usable from a dev context (i.e. 
tests and benches) -#[cfg(feature = "dev-context-only-utils")] -impl AccountIndexGetResult { - pub fn unwrap(self) -> (ReadAccountMapEntry, usize) { - match self { - AccountIndexGetResult::Found(lock, size) => (lock, size), - _ => { - panic!("trying to unwrap AccountIndexGetResult with non-Success result"); - } - } - } - - pub fn is_none(&self) -> bool { - !self.is_some() - } - - pub fn is_some(&self) -> bool { - matches!(self, AccountIndexGetResult::Found(_lock, _size)) - } - - pub fn map, usize)) -> V>(self, f: F) -> Option { - match self { - AccountIndexGetResult::Found(lock, size) => Some(f((lock, size))), - _ => None, - } - } -} - #[cfg(test)] pub mod tests { use { From 2e10b3b64f390bb4239480a75a803c3ae8eb0273 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 17:57:55 -0500 Subject: [PATCH 282/401] Removes InMemAccountsIndex::get() (#35354) --- accounts-db/src/in_mem_accounts_index.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/accounts-db/src/in_mem_accounts_index.rs b/accounts-db/src/in_mem_accounts_index.rs index 054fd7589df79f..918a5c41f9cd2a 100644 --- a/accounts-db/src/in_mem_accounts_index.rs +++ b/accounts-db/src/in_mem_accounts_index.rs @@ -318,11 +318,6 @@ impl + Into> InMemAccountsIndex Option> { - self.get_internal_cloned(pubkey, |entry| entry) - } - /// set age of 'entry' to the future /// if 'is_cached', age will be set farther fn set_age_to_future(&self, entry: &AccountMapEntry, is_cached: bool) { From 9146236f021f576e48d27a22d7c4fdbad32db79f Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Feb 2024 17:58:14 -0500 Subject: [PATCH 283/401] Removes ouroboros dependency (#35355) --- Cargo.lock | 31 ------------------------------- Cargo.toml | 1 - accounts-db/Cargo.toml | 1 - programs/sbf/Cargo.lock | 31 ------------------------------- runtime/Cargo.toml | 1 - 5 files changed, 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e936c66931cd6a..81d6aa64125445 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -104,12 +104,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "aliasable" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" - [[package]] name = "alloc-no-stdlib" version = "2.0.3" @@ -3589,29 +3583,6 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -[[package]] -name = "ouroboros" -version = "0.15.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1358bd1558bd2a083fed428ffeda486fbfb323e698cdda7794259d592ca72db" -dependencies = [ - "aliasable", - "ouroboros_macro", -] - -[[package]] -name = "ouroboros_macro" -version = "0.15.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" -dependencies = [ - "Inflector", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "parity-tokio-ipc" version = "0.9.0" @@ -5228,7 +5199,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum", - "ouroboros", "percentage", "qualifier_attr", "rand 0.8.5", @@ -6953,7 +6923,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum", - "ouroboros", "percentage", "qualifier_attr", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index 3b6a20013220e9..804e9ba19077da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -258,7 +258,6 @@ num-traits = "0.2" num_cpus = "1.16.0" num_enum = 
"0.7.2" openssl = "0.10" -ouroboros = "0.15.6" parking_lot = "0.12" pbkdf2 = { version = "0.11.0", default-features = false } pem = "1.1.1" diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 22cad43217c33b..b986c17de0636b 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -34,7 +34,6 @@ num-derive = { workspace = true } num-traits = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } -ouroboros = { workspace = true } percentage = { workspace = true } qualifier_attr = { workspace = true } rand = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b41a66a56cdc18..1d11ce6e65542e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -105,12 +105,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "aliasable" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" - [[package]] name = "alloc-no-stdlib" version = "2.0.3" @@ -3228,29 +3222,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "ouroboros" -version = "0.15.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1358bd1558bd2a083fed428ffeda486fbfb323e698cdda7794259d592ca72db" -dependencies = [ - "aliasable", - "ouroboros_macro", -] - -[[package]] -name = "ouroboros_macro" -version = "0.15.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" -dependencies = [ - "Inflector", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "parity-tokio-ipc" version = "0.9.0" @@ -4602,7 +4573,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum", - "ouroboros", "percentage", "qualifier_attr", "rand 0.8.5", @@ -5664,7 +5634,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum", - "ouroboros", "percentage", "qualifier_attr", "rand 0.8.5", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index b14ffab2076ca3..02553d4215909d 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -38,7 +38,6 @@ num-derive = { workspace = true } num-traits = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } -ouroboros = { workspace = true } percentage = { workspace = true } qualifier_attr = { workspace = true } rand = { workspace = true } From 312f786abffe808becbdc67be6031aa831dd02df Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 29 Feb 2024 08:25:22 +0800 Subject: [PATCH 284/401] Rename `SanitizedMessage::try_from` to `try_from_legacy_message` (#35338) * Simplify and refactor tx message creation in tests * Rename SanitizedMessage::try_from to try_from_legacy_message --- banks-server/src/banks_server.rs | 3 +- program-runtime/src/message_processor.rs | 119 ++++++++++---------- programs/sbf/tests/programs.rs | 6 +- rpc/src/rpc.rs | 2 +- runtime/benches/prioritization_fee_cache.rs | 2 +- runtime/src/bank/tests.rs | 74 ++++++------ runtime/src/bank_client.rs | 3 +- runtime/src/compute_budget_details.rs | 9 +- runtime/src/prioritization_fee_cache.rs | 2 +- sdk/benches/serialize_instructions.rs | 25 ++-- sdk/program/src/message/sanitized.rs | 58 +++++----- sdk/program/src/sysvar/instructions.rs | 33 +++--- sdk/src/nonce_info.rs | 2 +- svm/src/account_loader.rs | 4 +- svm/tests/rent_state.rs | 2 +- 15 files changed, 173 insertions(+), 171 deletions(-) diff --git a/banks-server/src/banks_server.rs 
b/banks-server/src/banks_server.rs index 22f63e9f60a0d5..b3028c0132ed48 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -31,7 +31,6 @@ use { }, solana_svm::transaction_results::TransactionExecutionResult, std::{ - convert::TryFrom, io, net::{Ipv4Addr, SocketAddr}, sync::{atomic::AtomicBool, Arc, RwLock}, @@ -418,7 +417,7 @@ impl Banks for BanksServer { commitment: CommitmentLevel, ) -> Option { let bank = self.bank(commitment); - let sanitized_message = SanitizedMessage::try_from(message).ok()?; + let sanitized_message = SanitizedMessage::try_from_legacy_message(message).ok()?; bank.get_fee_for_message(&sanitized_message) } } diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index a428cf930efeca..507197298479d9 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -180,12 +180,12 @@ mod tests { solana_sdk::{ account::{AccountSharedData, ReadableAccount}, instruction::{AccountMeta, Instruction, InstructionError}, - message::{AccountKeys, LegacyMessage, Message}, + message::{AccountKeys, Message}, native_loader::{self, create_loadable_account_for_test}, pubkey::Pubkey, rent::Rent, secp256k1_instruction::new_secp256k1_instruction, - secp256k1_program, + secp256k1_program, system_program, }, }; @@ -198,6 +198,10 @@ mod tests { ModifyReadonly, } + fn new_sanitized_message(message: Message) -> SanitizedMessage { + SanitizedMessage::try_from_legacy_message(message).unwrap() + } + #[test] fn test_process_message_readonly_handling() { #[derive(Serialize, Deserialize)] @@ -272,21 +276,20 @@ mod tests { AccountMeta::new_readonly(readonly_pubkey, false), ]; - let message = - SanitizedMessage::Legacy(LegacyMessage::new(Message::new_with_compiled_instructions( - 1, - 0, - 2, - account_keys.clone(), - Hash::default(), - AccountKeys::new(&account_keys, None).compile_instructions(&[ - Instruction::new_with_bincode( - mock_system_program_id, - &MockSystemInstruction::Correct, - account_metas.clone(), - ), - ]), - ))); + let message = new_sanitized_message(Message::new_with_compiled_instructions( + 1, + 0, + 2, + account_keys.clone(), + Hash::default(), + AccountKeys::new(&account_keys, None).compile_instructions(&[ + Instruction::new_with_bincode( + mock_system_program_id, + &MockSystemInstruction::Correct, + account_metas.clone(), + ), + ]), + )); let sysvar_cache = SysvarCache::default(); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = MessageProcessor::process_message( @@ -322,21 +325,20 @@ mod tests { 0 ); - let message = - SanitizedMessage::Legacy(LegacyMessage::new(Message::new_with_compiled_instructions( - 1, - 0, - 2, - account_keys.clone(), - Hash::default(), - AccountKeys::new(&account_keys, None).compile_instructions(&[ - Instruction::new_with_bincode( - mock_system_program_id, - &MockSystemInstruction::TransferLamports { lamports: 50 }, - account_metas.clone(), - ), - ]), - ))); + let message = new_sanitized_message(Message::new_with_compiled_instructions( + 1, + 0, + 2, + account_keys.clone(), + Hash::default(), + AccountKeys::new(&account_keys, None).compile_instructions(&[ + Instruction::new_with_bincode( + mock_system_program_id, + &MockSystemInstruction::TransferLamports { lamports: 50 }, + account_metas.clone(), + ), + ]), + )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = MessageProcessor::process_message( &message, @@ -361,21 +363,20 @@ mod tests { )) ); - let message = - 
SanitizedMessage::Legacy(LegacyMessage::new(Message::new_with_compiled_instructions( - 1, - 0, - 2, - account_keys.clone(), - Hash::default(), - AccountKeys::new(&account_keys, None).compile_instructions(&[ - Instruction::new_with_bincode( - mock_system_program_id, - &MockSystemInstruction::ChangeData { data: 50 }, - account_metas, - ), - ]), - ))); + let message = new_sanitized_message(Message::new_with_compiled_instructions( + 1, + 0, + 2, + account_keys.clone(), + Hash::default(), + AccountKeys::new(&account_keys, None).compile_instructions(&[ + Instruction::new_with_bincode( + mock_system_program_id, + &MockSystemInstruction::ChangeData { data: 50 }, + account_metas, + ), + ]), + )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = MessageProcessor::process_message( &message, @@ -496,14 +497,14 @@ mod tests { ]; // Try to borrow mut the same account - let message = SanitizedMessage::Legacy(LegacyMessage::new(Message::new( + let message = new_sanitized_message(Message::new( &[Instruction::new_with_bincode( mock_program_id, &MockSystemInstruction::BorrowFail, account_metas.clone(), )], Some(transaction_context.get_key_of_account_at_index(0).unwrap()), - ))); + )); let sysvar_cache = SysvarCache::default(); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = MessageProcessor::process_message( @@ -530,14 +531,14 @@ mod tests { ); // Try to borrow mut the same account in a safe way - let message = SanitizedMessage::Legacy(LegacyMessage::new(Message::new( + let message = new_sanitized_message(Message::new( &[Instruction::new_with_bincode( mock_program_id, &MockSystemInstruction::MultiBorrowMut, account_metas.clone(), )], Some(transaction_context.get_key_of_account_at_index(0).unwrap()), - ))); + )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = MessageProcessor::process_message( &message, @@ -557,7 +558,7 @@ mod tests { assert!(result.is_ok()); // Do work on the same transaction account but at different instruction accounts - let message = SanitizedMessage::Legacy(LegacyMessage::new(Message::new( + let message = new_sanitized_message(Message::new( &[Instruction::new_with_bincode( mock_program_id, &MockSystemInstruction::DoWork { @@ -567,7 +568,7 @@ mod tests { account_metas, )], Some(transaction_context.get_key_of_account_at_index(0).unwrap()), - ))); + )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = MessageProcessor::process_message( &message, @@ -623,6 +624,10 @@ mod tests { let mut mock_program_account = AccountSharedData::new(1, 0, &native_loader::id()); mock_program_account.set_executable(true); let accounts = vec![ + ( + Pubkey::new_unique(), + AccountSharedData::new(1, 0, &system_program::id()), + ), (secp256k1_program::id(), secp256k1_account), (mock_program_id, mock_program_account), ]; @@ -642,13 +647,13 @@ mod tests { } } }; - let message = SanitizedMessage::Legacy(LegacyMessage::new(Message::new( + let message = new_sanitized_message(Message::new( &[ new_secp256k1_instruction(&secret_key, b"hello"), Instruction::new_with_bytes(mock_program_id, &[], vec![]), ], - None, - ))); + Some(transaction_context.get_key_of_account_at_index(0).unwrap()), + )); let sysvar_cache = SysvarCache::default(); let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); programs_loaded_for_tx_batch.replenish( @@ -658,7 +663,7 @@ mod tests { let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let result = 
MessageProcessor::process_message( &message, - &[vec![0], vec![1]], + &[vec![1], vec![2]], &mut transaction_context, None, &programs_loaded_for_tx_batch, diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index b29d78422dca51..dc4867ce7e40fd 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -200,7 +200,7 @@ fn execute_transactions( } .expect("lamports_per_signature must be available"); let fee = bank.get_fee_for_message_with_lamports_per_signature( - &SanitizedMessage::try_from(tx.message().clone()).unwrap(), + &SanitizedMessage::try_from_legacy_message(tx.message().clone()).unwrap(), lamports_per_signature, ); @@ -3705,7 +3705,7 @@ fn test_program_fees() { Some(&mint_keypair.pubkey()), ); - let sanitized_message = SanitizedMessage::try_from(message.clone()).unwrap(); + let sanitized_message = SanitizedMessage::try_from_legacy_message(message.clone()).unwrap(); let expected_normal_fee = fee_structure.calculate_fee( &sanitized_message, congestion_multiplier, @@ -3729,7 +3729,7 @@ fn test_program_fees() { ], Some(&mint_keypair.pubkey()), ); - let sanitized_message = SanitizedMessage::try_from(message.clone()).unwrap(); + let sanitized_message = SanitizedMessage::try_from_legacy_message(message.clone()).unwrap(); let expected_prioritized_fee = fee_structure.calculate_fee( &sanitized_message, congestion_multiplier, diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 7bde6b837f2a13..caeb0953109fbb 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -5070,7 +5070,7 @@ pub mod tests { let prioritization_fee_cache = &self.meta.prioritization_fee_cache; let transactions: Vec<_> = transactions .into_iter() - .map(|tx| SanitizedTransaction::try_from_legacy_transaction(tx).unwrap()) + .map(SanitizedTransaction::from_transaction_for_tests) .collect(); prioritization_fee_cache.update(&bank, transactions.iter()); } diff --git a/runtime/benches/prioritization_fee_cache.rs b/runtime/benches/prioritization_fee_cache.rs index 506aac4fb729a3..8c6bf1fe0a7d68 100644 --- a/runtime/benches/prioritization_fee_cache.rs +++ b/runtime/benches/prioritization_fee_cache.rs @@ -36,7 +36,7 @@ fn build_sanitized_transaction( Some(signer_account), )); - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap() + SanitizedTransaction::from_transaction_for_tests(transaction) } #[bench] diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 02bb7f5c08a0de..2283899d3ca30d 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -117,7 +117,7 @@ use { }, std::{ collections::{HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, fs::File, io::Read, str::FromStr, @@ -195,6 +195,10 @@ fn create_genesis_config(lamports: u64) -> (GenesisConfig, Keypair) { solana_sdk::genesis_config::create_genesis_config(lamports) } +fn new_sanitized_message(message: Message) -> SanitizedMessage { + SanitizedMessage::try_from_legacy_message(message).unwrap() +} + #[test] fn test_race_register_tick_freeze() { solana_logger::setup(); @@ -2666,7 +2670,7 @@ fn test_bank_tx_compute_unit_fee() { let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let expected_fee_paid = calculate_test_fee( - &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + &new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))), genesis_config .fee_rate_governor .create_fee_calculator() @@ -2794,7 +2798,7 @@ fn test_bank_blockhash_fee_structure() { 
assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(bank.get_balance(&key), 1); let cheap_fee = calculate_test_fee( - &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + &new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))), cheap_lamports_per_signature, &bank.fee_structure, ); @@ -2810,7 +2814,7 @@ fn test_bank_blockhash_fee_structure() { assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(bank.get_balance(&key), 1); let expensive_fee = calculate_test_fee( - &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + &new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))), expensive_lamports_per_signature, &bank.fee_structure, ); @@ -2856,7 +2860,7 @@ fn test_bank_blockhash_compute_unit_fee_structure() { assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(bank.get_balance(&key), 1); let cheap_fee = calculate_test_fee( - &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + &new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))), cheap_lamports_per_signature, &bank.fee_structure, ); @@ -2872,7 +2876,7 @@ fn test_bank_blockhash_compute_unit_fee_structure() { assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(bank.get_balance(&key), 1); let expensive_fee = calculate_test_fee( - &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), + &new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))), expensive_lamports_per_signature, &bank.fee_structure, ); @@ -2979,8 +2983,7 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() { .fee_rate_governor .burn( calculate_test_fee( - &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))) - .unwrap(), + &new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))), genesis_config .fee_rate_governor .create_fee_calculator() @@ -5275,7 +5278,7 @@ fn test_nonce_transaction() { recent_message.recent_blockhash = bank.last_blockhash(); let mut expected_balance = 4_650_000 - bank - .get_fee_for_message(&recent_message.try_into().unwrap()) + .get_fee_for_message(&new_sanitized_message(recent_message)) .unwrap(); assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance); assert_eq!(bank.get_balance(&nonce_pubkey), 250_000); @@ -5334,7 +5337,7 @@ fn test_nonce_transaction() { let mut recent_message = nonce_tx.message.clone(); recent_message.recent_blockhash = bank.last_blockhash(); expected_balance -= bank - .get_fee_for_message(&SanitizedMessage::try_from(recent_message).unwrap()) + .get_fee_for_message(&new_sanitized_message(recent_message)) .unwrap(); assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance); assert_ne!( @@ -5402,7 +5405,7 @@ fn test_nonce_transaction_with_tx_wide_caps() { recent_message.recent_blockhash = bank.last_blockhash(); let mut expected_balance = 4_650_000 - bank - .get_fee_for_message(&recent_message.try_into().unwrap()) + .get_fee_for_message(&new_sanitized_message(recent_message)) .unwrap(); assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance); assert_eq!(bank.get_balance(&nonce_pubkey), 250_000); @@ -5461,7 +5464,7 @@ fn test_nonce_transaction_with_tx_wide_caps() { let mut recent_message = nonce_tx.message.clone(); recent_message.recent_blockhash = bank.last_blockhash(); expected_balance -= bank - .get_fee_for_message(&SanitizedMessage::try_from(recent_message).unwrap()) + .get_fee_for_message(&new_sanitized_message(recent_message)) 
.unwrap(); assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance); assert_ne!( @@ -5593,7 +5596,7 @@ fn test_nonce_payer() { bank.get_balance(&nonce_pubkey), nonce_starting_balance - bank - .get_fee_for_message(&recent_message.try_into().unwrap()) + .get_fee_for_message(&new_sanitized_message(recent_message)) .unwrap() ); assert_ne!( @@ -5660,7 +5663,7 @@ fn test_nonce_payer_tx_wide_cap() { bank.get_balance(&nonce_pubkey), nonce_starting_balance - bank - .get_fee_for_message(&recent_message.try_into().unwrap()) + .get_fee_for_message(&new_sanitized_message(recent_message)) .unwrap() ); assert_ne!( @@ -10034,8 +10037,7 @@ fn calculate_test_fee( #[test] fn test_calculate_fee() { // Default: no fee. - let message = - SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); + let message = new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))); assert_eq!( calculate_test_fee( &message, @@ -10066,7 +10068,7 @@ fn test_calculate_fee() { let key1 = Pubkey::new_unique(); let ix0 = system_instruction::transfer(&key0, &key1, 1); let ix1 = system_instruction::transfer(&key1, &key0, 1); - let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&key0))).unwrap(); + let message = new_sanitized_message(Message::new(&[ix0, ix1], Some(&key0))); assert_eq!( calculate_test_fee( &message, @@ -10091,8 +10093,7 @@ fn test_calculate_fee_compute_units() { // One signature, no unit request - let message = - SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); + let message = new_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))); assert_eq!( calculate_test_fee(&message, 1, &fee_structure,), max_fee + lamports_per_signature @@ -10102,8 +10103,7 @@ fn test_calculate_fee_compute_units() { let ix0 = system_instruction::transfer(&Pubkey::new_unique(), &Pubkey::new_unique(), 1); let ix1 = system_instruction::transfer(&Pubkey::new_unique(), &Pubkey::new_unique(), 1); - let message = - SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&Pubkey::new_unique()))).unwrap(); + let message = new_sanitized_message(Message::new(&[ix0, ix1], Some(&Pubkey::new_unique()))); assert_eq!( calculate_test_fee(&message, 1, &fee_structure,), max_fee + 3 * lamports_per_signature @@ -10129,15 +10129,14 @@ fn test_calculate_fee_compute_units() { PrioritizationFeeType::ComputeUnitPrice(PRIORITIZATION_FEE_RATE), requested_compute_units as u64, ); - let message = SanitizedMessage::try_from(Message::new( + let message = new_sanitized_message(Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit(requested_compute_units), ComputeBudgetInstruction::set_compute_unit_price(PRIORITIZATION_FEE_RATE), Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ], Some(&Pubkey::new_unique()), - )) - .unwrap(); + )); let fee = calculate_test_fee(&message, 1, &fee_structure); assert_eq!( fee, @@ -10161,14 +10160,13 @@ fn test_calculate_prioritization_fee() { ); let prioritization_fee = prioritization_fee_details.get_fee(); - let message = SanitizedMessage::try_from(Message::new( + let message = new_sanitized_message(Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit(request_units), ComputeBudgetInstruction::set_compute_unit_price(request_unit_price), ], Some(&Pubkey::new_unique()), - )) - .unwrap(); + )); let fee = calculate_test_fee( &message, @@ -10202,24 +10200,22 @@ fn test_calculate_fee_secp256k1() { data: vec![1], }; - let message = SanitizedMessage::try_from(Message::new( + let message = 
new_sanitized_message(Message::new( &[ ix0.clone(), secp_instruction1.clone(), secp_instruction2.clone(), ], Some(&key0), - )) - .unwrap(); + )); assert_eq!(calculate_test_fee(&message, 1, &fee_structure,), 2); secp_instruction1.data = vec![0]; secp_instruction2.data = vec![10]; - let message = SanitizedMessage::try_from(Message::new( + let message = new_sanitized_message(Message::new( &[ix0, secp_instruction1, secp_instruction2], Some(&key0), - )) - .unwrap(); + )); assert_eq!(calculate_test_fee(&message, 1, &fee_structure,), 11); } @@ -10745,7 +10741,7 @@ fn test_invalid_rent_state_changes_fee_payer() { .unwrap(); // Dummy message to determine fee amount - let dummy_message = SanitizedMessage::try_from(Message::new_with_blockhash( + let dummy_message = new_sanitized_message(Message::new_with_blockhash( &[system_instruction::transfer( &rent_exempt_fee_payer.pubkey(), &recipient, @@ -10753,8 +10749,7 @@ fn test_invalid_rent_state_changes_fee_payer() { )], Some(&rent_exempt_fee_payer.pubkey()), &recent_blockhash, - )) - .unwrap(); + )); let fee = bank.get_fee_for_message(&dummy_message).unwrap(); // RentPaying fee-payer can remain RentPaying @@ -11814,7 +11809,7 @@ fn test_calculate_fee_with_congestion_multiplier() { let key1 = Pubkey::new_unique(); let ix0 = system_instruction::transfer(&key0, &key1, 1); let ix1 = system_instruction::transfer(&key1, &key0, 1); - let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&key0))).unwrap(); + let message = new_sanitized_message(Message::new(&[ix0, ix1], Some(&key0))); // assert when lamports_per_signature is less than BASE_LAMPORTS, turnning on/off // congestion_multiplier has no effect on fee. @@ -11843,7 +11838,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { lamports_per_signature: signature_fee, ..FeeStructure::default() }; - let message = SanitizedMessage::try_from(Message::new( + let message = new_sanitized_message(Message::new( &[ system_instruction::transfer(&key0, &key1, 1), ComputeBudgetInstruction::set_compute_unit_limit(request_cu as u32), @@ -11851,8 +11846,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { ComputeBudgetInstruction::set_compute_unit_price(lamports_per_cu * 1_000_000), ], Some(&key0), - )) - .unwrap(); + )); // assert when request_heap_frame is presented in tx, prioritization fee will be counted // into transaction fee diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index 7fe6418d4110b2..22a1631085870f 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -19,7 +19,6 @@ use { transport::{Result, TransportError}, }, std::{ - convert::TryFrom, io, sync::{Arc, Mutex}, thread::{sleep, Builder}, @@ -286,7 +285,7 @@ impl SyncClient for BankClient { } fn get_fee_for_message(&self, message: &Message) -> Result { - SanitizedMessage::try_from(message.clone()) + SanitizedMessage::try_from_legacy_message(message.clone()) .ok() .and_then(|sanitized_message| self.bank.get_fee_for_message(&sanitized_message)) .ok_or_else(|| { diff --git a/runtime/src/compute_budget_details.rs b/runtime/src/compute_budget_details.rs index 69756d4567ff70..72b10a11b33bcc 100644 --- a/runtime/src/compute_budget_details.rs +++ b/runtime/src/compute_budget_details.rs @@ -95,8 +95,7 @@ mod tests { ); // assert for SanitizedTransaction - let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); assert_eq!( 
sanitized_transaction.get_compute_budget_details(false), Some(ComputeBudgetDetails { @@ -133,8 +132,7 @@ mod tests { ); // assert for SanitizedTransaction - let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); assert_eq!( sanitized_transaction.get_compute_budget_details(false), Some(ComputeBudgetDetails { @@ -171,8 +169,7 @@ mod tests { ); // assert for SanitizedTransaction - let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); assert_eq!( sanitized_transaction.get_compute_budget_details(false), Some(ComputeBudgetDetails { diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index 839519020ff42f..0490f594451b9c 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -459,7 +459,7 @@ mod tests { Some(signer_account), )); - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap() + SanitizedTransaction::from_transaction_for_tests(transaction) } // update fee cache is asynchronous, this test helper blocks until update is completed. diff --git a/sdk/benches/serialize_instructions.rs b/sdk/benches/serialize_instructions.rs index 955bb948fca0d2..adf36497ec67d4 100644 --- a/sdk/benches/serialize_instructions.rs +++ b/sdk/benches/serialize_instructions.rs @@ -9,7 +9,6 @@ use { pubkey::{self, Pubkey}, sysvar::instructions::{self, construct_instructions_data}, }, - std::convert::TryFrom, test::Bencher, }; @@ -30,9 +29,11 @@ fn bench_bincode_instruction_serialize(b: &mut Bencher) { #[bench] fn bench_construct_instructions_data(b: &mut Bencher) { let instructions = make_instructions(); - let message = - SanitizedMessage::try_from(Message::new(&instructions, Some(&Pubkey::new_unique()))) - .unwrap(); + let message = SanitizedMessage::try_from_legacy_message(Message::new( + &instructions, + Some(&Pubkey::new_unique()), + )) + .unwrap(); b.iter(|| { let instructions = message.decompile_instructions(); test::black_box(construct_instructions_data(&instructions)); @@ -51,9 +52,11 @@ fn bench_bincode_instruction_deserialize(b: &mut Bencher) { #[bench] fn bench_manual_instruction_deserialize(b: &mut Bencher) { let instructions = make_instructions(); - let message = - SanitizedMessage::try_from(Message::new(&instructions, Some(&Pubkey::new_unique()))) - .unwrap(); + let message = SanitizedMessage::try_from_legacy_message(Message::new( + &instructions, + Some(&Pubkey::new_unique()), + )) + .unwrap(); let serialized = construct_instructions_data(&message.decompile_instructions()); b.iter(|| { for i in 0..instructions.len() { @@ -66,9 +69,11 @@ fn bench_manual_instruction_deserialize(b: &mut Bencher) { #[bench] fn bench_manual_instruction_deserialize_single(b: &mut Bencher) { let instructions = make_instructions(); - let message = - SanitizedMessage::try_from(Message::new(&instructions, Some(&Pubkey::new_unique()))) - .unwrap(); + let message = SanitizedMessage::try_from_legacy_message(Message::new( + &instructions, + Some(&Pubkey::new_unique()), + )) + .unwrap(); let serialized = construct_instructions_data(&message.decompile_instructions()); b.iter(|| { #[allow(deprecated)] diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 098a781ea4dbf7..d4c7638e136a72 100644 --- 
a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -98,14 +98,6 @@ impl From for SanitizeMessageError { } } -impl TryFrom for SanitizedMessage { - type Error = SanitizeMessageError; - fn try_from(message: legacy::Message) -> Result { - message.sanitize()?; - Ok(Self::Legacy(LegacyMessage::new(message))) - } -} - impl SanitizedMessage { /// Create a sanitized message from a sanitized versioned message. /// If the input message uses address tables, attempt to look up the @@ -126,6 +118,12 @@ impl SanitizedMessage { }) } + /// Create a sanitized legacy message + pub fn try_from_legacy_message(message: legacy::Message) -> Result { + message.sanitize()?; + Ok(Self::Legacy(LegacyMessage::new(message))) + } + /// Return true if this message contains duplicate account keys pub fn has_duplicates(&self) -> bool { match self { @@ -374,14 +372,14 @@ mod tests { use {super::*, crate::message::v0, std::collections::HashSet}; #[test] - fn test_try_from_message() { + fn test_try_from_legacy_message() { let legacy_message_with_no_signers = legacy::Message { account_keys: vec![Pubkey::new_unique()], ..legacy::Message::default() }; assert_eq!( - SanitizedMessage::try_from(legacy_message_with_no_signers).err(), + SanitizedMessage::try_from_legacy_message(legacy_message_with_no_signers).err(), Some(SanitizeMessageError::IndexOutOfBounds), ); } @@ -396,14 +394,16 @@ mod tests { CompiledInstruction::new(2, &(), vec![0, 1]), ]; - let message = SanitizedMessage::try_from(legacy::Message::new_with_compiled_instructions( - 1, - 0, - 2, - vec![key0, key1, loader_key], - Hash::default(), - instructions, - )) + let message = SanitizedMessage::try_from_legacy_message( + legacy::Message::new_with_compiled_instructions( + 1, + 0, + 2, + vec![key0, key1, loader_key], + Hash::default(), + instructions, + ), + ) .unwrap(); assert!(message.is_non_loader_key(0)); @@ -420,7 +420,7 @@ mod tests { let key4 = Pubkey::new_unique(); let key5 = Pubkey::new_unique(); - let legacy_message = SanitizedMessage::try_from(legacy::Message { + let legacy_message = SanitizedMessage::try_from_legacy_message(legacy::Message { header: MessageHeader { num_required_signatures: 2, num_readonly_signed_accounts: 1, @@ -464,14 +464,16 @@ mod tests { CompiledInstruction::new(3, &(), vec![0, 0]), ]; - let message = SanitizedMessage::try_from(legacy::Message::new_with_compiled_instructions( - 2, - 1, - 2, - vec![signer0, signer1, non_signer, loader_key], - Hash::default(), - instructions, - )) + let message = SanitizedMessage::try_from_legacy_message( + legacy::Message::new_with_compiled_instructions( + 2, + 1, + 2, + vec![signer0, signer1, non_signer, loader_key], + Hash::default(), + instructions, + ), + ) .unwrap(); assert_eq!( @@ -502,7 +504,7 @@ mod tests { let key4 = Pubkey::new_unique(); let key5 = Pubkey::new_unique(); - let legacy_message = SanitizedMessage::try_from(legacy::Message { + let legacy_message = SanitizedMessage::try_from_legacy_message(legacy::Message { header: MessageHeader { num_required_signatures: 2, num_readonly_signed_accounts: 1, diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs index a5a31735795832..28d5674177b838 100644 --- a/sdk/program/src/sysvar/instructions.rs +++ b/sdk/program/src/sysvar/instructions.rs @@ -302,9 +302,12 @@ mod tests { message::{Message as LegacyMessage, SanitizedMessage}, pubkey::Pubkey, }, - std::convert::TryFrom, }; + fn new_sanitized_message(message: LegacyMessage) -> SanitizedMessage { + 
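        // Test-only helper: sanitize a legacy message through the new
        // `try_from_legacy_message` constructor, panicking if sanitization fails.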
SanitizedMessage::try_from_legacy_message(message).unwrap() + } + #[test] fn test_load_store_instruction() { let mut data = [4u8; 10]; @@ -327,11 +330,11 @@ mod tests { &0, vec![AccountMeta::new(Pubkey::new_unique(), false)], ); - let sanitized_message = SanitizedMessage::try_from(LegacyMessage::new( + let message = LegacyMessage::new( &[instruction0.clone(), instruction1.clone()], Some(&Pubkey::new_unique()), - )) - .unwrap(); + ); + let sanitized_message = new_sanitized_message(message); let key = id(); let mut lamports = 0; @@ -381,11 +384,9 @@ mod tests { &0, vec![AccountMeta::new(Pubkey::new_unique(), false)], ); - let sanitized_message = SanitizedMessage::try_from(LegacyMessage::new( - &[instruction0, instruction1], - Some(&Pubkey::new_unique()), - )) - .unwrap(); + let message = + LegacyMessage::new(&[instruction0, instruction1], Some(&Pubkey::new_unique())); + let sanitized_message = new_sanitized_message(message); let key = id(); let mut lamports = 0; @@ -435,15 +436,15 @@ mod tests { &0, vec![AccountMeta::new(Pubkey::new_unique(), false)], ); - let sanitized_message = SanitizedMessage::try_from(LegacyMessage::new( + let message = LegacyMessage::new( &[ instruction0.clone(), instruction1.clone(), instruction2.clone(), ], Some(&Pubkey::new_unique()), - )) - .unwrap(); + ); + let sanitized_message = new_sanitized_message(message); let key = id(); let mut lamports = 0; @@ -538,7 +539,7 @@ mod tests { ]; let message = LegacyMessage::new(&instructions, Some(&id1)); - let sanitized_message = SanitizedMessage::try_from(message).unwrap(); + let sanitized_message = new_sanitized_message(message); let serialized = serialize_instructions(&sanitized_message.decompile_instructions()); // assert that deserialize_instruction is compatible with SanitizedMessage::serialize_instructions @@ -560,9 +561,9 @@ mod tests { Instruction::new_with_bincode(program_id0, &0, vec![AccountMeta::new(id1, true)]), ]; - let message = - SanitizedMessage::try_from(LegacyMessage::new(&instructions, Some(&id1))).unwrap(); - let serialized = serialize_instructions(&message.decompile_instructions()); + let message = LegacyMessage::new(&instructions, Some(&id1)); + let sanitized_message = new_sanitized_message(message); + let serialized = serialize_instructions(&sanitized_message.decompile_instructions()); assert_eq!( deserialize_instruction(instructions.len(), &serialized).unwrap_err(), SanitizeError::IndexOutOfBounds, diff --git a/sdk/src/nonce_info.rs b/sdk/src/nonce_info.rs index 585f9fa2e3a687..c29d3db6bdb944 100644 --- a/sdk/src/nonce_info.rs +++ b/sdk/src/nonce_info.rs @@ -133,7 +133,7 @@ mod tests { instructions: &[Instruction], payer: Option<&Pubkey>, ) -> SanitizedMessage { - Message::new(instructions, payer).try_into().unwrap() + SanitizedMessage::try_from_legacy_message(Message::new(instructions, payer)).unwrap() } #[test] diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 197d46250bcfcb..58bd7c6161d396 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -686,7 +686,7 @@ mod tests { instructions, ); - let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); + let message = SanitizedMessage::try_from_legacy_message(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, @@ -1215,7 +1215,7 @@ mod tests { Hash::default(), ); - let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); + let message = SanitizedMessage::try_from_legacy_message(tx.message().clone()).unwrap(); let fee 
= FeeStructure::default().calculate_fee( &message, lamports_per_signature, diff --git a/svm/tests/rent_state.rs b/svm/tests/rent_state.rs index d24a32ac352fbf..f3ea728f6b874f 100644 --- a/svm/tests/rent_state.rs +++ b/svm/tests/rent_state.rs @@ -55,7 +55,7 @@ fn test_rent_state_list_len() { last_block_hash, ); let num_accounts = tx.message().account_keys.len(); - let sanitized_tx = SanitizedTransaction::try_from_legacy_transaction(tx).unwrap(); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); let mut error_counters = TransactionErrorMetrics::default(); let loaded_txs = load_accounts( &bank, From 990ca1d0b89104fe69c0f4fdb570c0227034664d Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 28 Feb 2024 17:36:45 -0800 Subject: [PATCH 285/401] Add limit to looping in banking-stage (#35342) --- core/src/banking_stage/consumer.rs | 3 ++- program-runtime/src/loaded_programs.rs | 9 ++++++--- runtime/src/bank.rs | 6 +++++- svm/src/transaction_processor.rs | 26 ++++++++++++++++++++++---- 4 files changed, 35 insertions(+), 9 deletions(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 81de74022432d9..f4ac6c6040eda8 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -598,7 +598,8 @@ impl Consumer { transaction_status_sender_enabled, &mut execute_and_commit_timings.execute_timings, None, // account_overrides - self.log_messages_bytes_limit + self.log_messages_bytes_limit, + true, )); execute_and_commit_timings.load_execute_us = load_execute_us; diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 1c29adc8c6c246..8e3e670469c45c 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -195,7 +195,7 @@ impl Stats { ("reloads", reloads, i64), ("insertions", insertions, i64), ("lost_insertions", lost_insertions, i64), - ("replacements", replacements, i64), + ("replace_entry", replacements, i64), ("one_hit_wonders", one_hit_wonders, i64), ("prunes_orphan", prunes_orphan, i64), ("prunes_environment", prunes_environment, i64), @@ -618,6 +618,7 @@ pub struct LoadedProgramsForTxBatch { entries: HashMap>, slot: Slot, pub environments: ProgramRuntimeEnvironments, + pub hit_max_limit: bool, } impl LoadedProgramsForTxBatch { @@ -626,6 +627,7 @@ impl LoadedProgramsForTxBatch { entries: HashMap::new(), slot, environments, + hit_max_limit: false, } } @@ -964,7 +966,7 @@ impl LoadedPrograms { slot: Slot, key: Pubkey, loaded_program: Arc, - ) { + ) -> bool { let second_level = self.entries.entry(key).or_default(); debug_assert_eq!( second_level.cooperative_loading_lock, @@ -985,8 +987,9 @@ impl LoadedPrograms { { self.stats.lost_insertions.fetch_add(1, Ordering::Relaxed); } - self.assign_program(key, loaded_program); + let was_occupied = self.assign_program(key, loaded_program); self.loading_task_waiter.notify(); + was_occupied } pub fn merge(&mut self, tx_batch_cache: &LoadedProgramsForTxBatch) { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ccd3f7c522737f..d72e3771cb4408 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4299,6 +4299,7 @@ impl Bank { &mut timings, Some(&account_overrides), None, + true, ); let post_simulation_accounts = loaded_transactions @@ -4537,7 +4538,7 @@ impl Bank { balances } - #[allow(clippy::type_complexity)] + #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn load_and_execute_transactions( &self, batch: &TransactionBatch, @@ -4548,6 +4549,7 @@ impl 
Bank { timings: &mut ExecuteTimings, account_overrides: Option<&AccountOverrides>, log_messages_bytes_limit: Option, + limit_to_load_programs: bool, ) -> LoadAndExecuteTransactionsOutput { let sanitized_txs = batch.sanitized_transactions(); debug!("processing transactions: {}", sanitized_txs.len()); @@ -4614,6 +4616,7 @@ impl Bank { account_overrides, self.builtin_programs.iter(), log_messages_bytes_limit, + limit_to_load_programs, ); let mut signature_count = 0; @@ -5663,6 +5666,7 @@ impl Bank { timings, None, log_messages_bytes_limit, + false, ); let (last_blockhash, lamports_per_signature) = diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 0c456c918d68ff..62f06585fff4ac 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -190,6 +190,7 @@ impl TransactionBatchProcessor { account_overrides: Option<&AccountOverrides>, builtin_programs: impl Iterator, log_messages_bytes_limit: Option, + limit_to_load_programs: bool, ) -> LoadAndExecuteSanitizedTransactionsOutput { let mut program_accounts_map = Self::filter_executable_program_accounts( callbacks, @@ -202,9 +203,18 @@ impl TransactionBatchProcessor { program_accounts_map.insert(*builtin_program, (&native_loader, 0)); } - let programs_loaded_for_tx_batch = Rc::new(RefCell::new( - self.replenish_program_cache(callbacks, &program_accounts_map), - )); + let programs_loaded_for_tx_batch = Rc::new(RefCell::new(self.replenish_program_cache( + callbacks, + &program_accounts_map, + limit_to_load_programs, + ))); + + if programs_loaded_for_tx_batch.borrow().hit_max_limit { + return LoadAndExecuteSanitizedTransactionsOutput { + loaded_transactions: vec![], + execution_results: vec![], + }; + } let mut load_time = Measure::start("accounts_load"); let mut loaded_transactions = load_accounts( @@ -356,6 +366,7 @@ impl TransactionBatchProcessor { &self, callback: &CB, program_accounts_map: &HashMap, + limit_to_load_programs: bool, ) -> LoadedProgramsForTxBatch { let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = if self.check_program_modification_slot { @@ -401,7 +412,14 @@ impl TransactionBatchProcessor { } // Submit our last completed loading task. if let Some((key, program)) = program_to_store.take() { - loaded_programs_cache.finish_cooperative_loading_task(self.slot, key, program); + if loaded_programs_cache + .finish_cooperative_loading_task(self.slot, key, program) + && limit_to_load_programs + { + let mut ret = LoadedProgramsForTxBatch::default(); + ret.hit_max_limit = true; + return ret; + } } // Figure out which program needs to be loaded next. 
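            // When `limit_to_load_programs` is true (as the banking-stage
            // consumer requests), a `finish_cooperative_loading_task` call
            // that finds its entry already occupied makes this function bail
            // out with `hit_max_limit` set instead of continuing to extract
            // and reload programs.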
let program_to_load = loaded_programs_cache.extract( From c9c2fbbdd6e481c9e0c2552ff74fd272bc40499e Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 29 Feb 2024 10:27:33 +0800 Subject: [PATCH 286/401] Add `Message::is_maybe_writable` (#35340) --- sdk/program/src/message/legacy.rs | 26 +++++++++++++-- sdk/program/src/message/versions/mod.rs | 2 +- sdk/program/src/message/versions/v0/mod.rs | 7 ++-- sdk/src/transaction/mod.rs | 34 ++----------------- sdk/src/transaction/versioned/mod.rs | 39 ++-------------------- transaction-status/src/parse_accounts.rs | 2 +- 6 files changed, 33 insertions(+), 77 deletions(-) diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index 32d7411ea4b476..780259cd07fca4 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -548,12 +548,32 @@ impl Message { self.is_key_called_as_program(i) && !self.is_upgradeable_loader_present() } - pub fn is_writable(&self, i: usize) -> bool { - (i < (self.header.num_required_signatures - self.header.num_readonly_signed_accounts) + /// Returns true if the account at the specified index was requested to be + /// writable. This method should not be used directly. + fn is_writable_index(&self, i: usize) -> bool { + i < (self.header.num_required_signatures - self.header.num_readonly_signed_accounts) as usize || (i >= self.header.num_required_signatures as usize && i < self.account_keys.len() - - self.header.num_readonly_unsigned_accounts as usize)) + - self.header.num_readonly_unsigned_accounts as usize) + } + + /// Returns true if the account at the specified index should be write + /// locked when loaded for transaction processing in the runtime. This + /// method differs from `is_maybe_writable` because it is aware of the + /// latest reserved accounts which are not allowed to be write locked. + pub fn is_writable(&self, i: usize) -> bool { + (self.is_writable_index(i)) + && !is_builtin_key_or_sysvar(&self.account_keys[i]) + && !self.demote_program_id(i) + } + + /// Returns true if the account at the specified index is writable by the + /// instructions in this message. Since the dynamic set of reserved accounts + /// isn't used here to demote write locks, this shouldn't be used in the + /// runtime. + pub fn is_maybe_writable(&self, i: usize) -> bool { + (self.is_writable_index(i)) && !is_builtin_key_or_sysvar(&self.account_keys[i]) && !self.demote_program_id(i) } diff --git a/sdk/program/src/message/versions/mod.rs b/sdk/program/src/message/versions/mod.rs index 70a1091aec3cf8..f1481bfcbcd816 100644 --- a/sdk/program/src/message/versions/mod.rs +++ b/sdk/program/src/message/versions/mod.rs @@ -79,7 +79,7 @@ impl VersionedMessage { /// used in the runtime. pub fn is_maybe_writable(&self, index: usize) -> bool { match self { - Self::Legacy(message) => message.is_writable(index), + Self::Legacy(message) => message.is_maybe_writable(index), Self::V0(message) => message.is_maybe_writable(index), } } diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs index df001bb19ce0bc..57f82c2703c0c7 100644 --- a/sdk/program/src/message/versions/v0/mod.rs +++ b/sdk/program/src/message/versions/v0/mod.rs @@ -334,9 +334,10 @@ impl Message { .any(|&key| key == bpf_loader_upgradeable::id()) } - /// Returns true if the account at the specified index was requested as writable. - /// Before loading addresses, we can't demote write locks for dynamically loaded - /// addresses so this should not be used by the runtime. 
+ /// Returns true if the account at the specified index was requested as + /// writable. Before loading addresses and without the reserved account keys + /// set, we can't demote write locks properly so this should not be used by + /// the runtime. pub fn is_maybe_writable(&self, key_index: usize) -> bool { self.is_writable_index(key_index) && !{ diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 4173a93b62215e..00095510876b22 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -1074,6 +1074,7 @@ impl Transaction { } } +/// Returns true if transaction begins with an advance nonce instruction. pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> { let message = tx.message(); message @@ -1090,11 +1091,6 @@ pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> { limited_deserialize(&instruction.data), Ok(SystemInstruction::AdvanceNonceAccount) ) - // Nonce account is writable - && matches!( - instruction.accounts.first(), - Some(index) if message.is_writable(*index as usize) - ) }) } @@ -1119,7 +1115,7 @@ mod tests { hash::hash, instruction::AccountMeta, signature::{Keypair, Presigner, Signer}, - system_instruction, sysvar, + system_instruction, }, bincode::{deserialize, serialize, serialized_size}, std::mem::size_of, @@ -1583,32 +1579,6 @@ mod tests { assert!(uses_durable_nonce(&tx).is_none()); } - #[test] - fn tx_uses_ro_nonce_account() { - let from_keypair = Keypair::new(); - let from_pubkey = from_keypair.pubkey(); - let nonce_keypair = Keypair::new(); - let nonce_pubkey = nonce_keypair.pubkey(); - let account_metas = vec![ - AccountMeta::new_readonly(nonce_pubkey, false), - #[allow(deprecated)] - AccountMeta::new_readonly(sysvar::recent_blockhashes::id(), false), - AccountMeta::new_readonly(nonce_pubkey, true), - ]; - let nonce_instruction = Instruction::new_with_bincode( - system_program::id(), - &system_instruction::SystemInstruction::AdvanceNonceAccount, - account_metas, - ); - let tx = Transaction::new_signed_with_payer( - &[nonce_instruction], - Some(&from_pubkey), - &[&from_keypair, &nonce_keypair], - Hash::default(), - ); - assert!(uses_durable_nonce(&tx).is_none()); - } - #[test] fn tx_uses_nonce_wrong_first_nonce_ix_fail() { let from_keypair = Keypair::new(); diff --git a/sdk/src/transaction/versioned/mod.rs b/sdk/src/transaction/versioned/mod.rs index 9faecf2dceb7eb..ea06037d030587 100644 --- a/sdk/src/transaction/versioned/mod.rs +++ b/sdk/src/transaction/versioned/mod.rs @@ -185,10 +185,7 @@ impl VersionedTransaction { .collect() } - /// Returns true if transaction begins with a valid advance nonce - /// instruction. Since dynamically loaded addresses can't have write locks - /// demoted without loading addresses, this shouldn't be used in the - /// runtime. + /// Returns true if transaction begins with an advance nonce instruction. 
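    /// This does not check that the nonce account is writable; only the
    /// system program id and the `AdvanceNonceAccount` instruction data are
    /// inspected.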
pub fn uses_durable_nonce(&self) -> bool { let message = &self.message; message @@ -205,11 +202,6 @@ impl VersionedTransaction { limited_deserialize(&instruction.data), Ok(SystemInstruction::AdvanceNonceAccount) ) - // Nonce account is writable - && matches!( - instruction.accounts.first(), - Some(index) if message.is_maybe_writable(*index as usize) - ) }) .is_some() } @@ -222,7 +214,7 @@ mod tests { crate::{ message::Message as LegacyMessage, signer::{keypair::Keypair, Signer}, - system_instruction, sysvar, + system_instruction, }, solana_program::{ instruction::{AccountMeta, Instruction}, @@ -327,33 +319,6 @@ mod tests { assert!(!tx.uses_durable_nonce()); } - #[test] - fn tx_uses_ro_nonce_account() { - let from_keypair = Keypair::new(); - let from_pubkey = from_keypair.pubkey(); - let nonce_keypair = Keypair::new(); - let nonce_pubkey = nonce_keypair.pubkey(); - let account_metas = vec![ - AccountMeta::new_readonly(nonce_pubkey, false), - #[allow(deprecated)] - AccountMeta::new_readonly(sysvar::recent_blockhashes::id(), false), - AccountMeta::new_readonly(nonce_pubkey, true), - ]; - let nonce_instruction = Instruction::new_with_bincode( - system_program::id(), - &system_instruction::SystemInstruction::AdvanceNonceAccount, - account_metas, - ); - let tx = Transaction::new_signed_with_payer( - &[nonce_instruction], - Some(&from_pubkey), - &[&from_keypair, &nonce_keypair], - Hash::default(), - ); - let tx = VersionedTransaction::from(tx); - assert!(!tx.uses_durable_nonce()); - } - #[test] fn tx_uses_nonce_wrong_first_nonce_ix_fail() { let from_keypair = Keypair::new(); diff --git a/transaction-status/src/parse_accounts.rs b/transaction-status/src/parse_accounts.rs index 6ad0ec82a6fdad..5388c15ecf858b 100644 --- a/transaction-status/src/parse_accounts.rs +++ b/transaction-status/src/parse_accounts.rs @@ -21,7 +21,7 @@ pub fn parse_legacy_message_accounts(message: &Message) -> Vec { for (i, account_key) in message.account_keys.iter().enumerate() { accounts.push(ParsedAccount { pubkey: account_key.to_string(), - writable: message.is_writable(i), + writable: message.is_maybe_writable(i), signer: message.is_signer(i), source: Some(ParsedAccountSource::Transaction), }); From 83de6a5930dc507d8db703033dc6280f4758118f Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 29 Feb 2024 06:51:18 -0500 Subject: [PATCH 287/401] Moves in_mem_accounts_index.rs into accounts_index directory (#35360) --- accounts-db/src/accounts_db.rs | 10 +++++----- accounts-db/src/accounts_index.rs | 3 ++- .../src/{ => accounts_index}/in_mem_accounts_index.rs | 6 +++--- accounts-db/src/accounts_index_storage.rs | 6 ++++-- accounts-db/src/bucket_map_holder.rs | 6 ++++-- accounts-db/src/lib.rs | 1 - 6 files changed, 18 insertions(+), 14 deletions(-) rename accounts-db/src/{ => accounts_index}/in_mem_accounts_index.rs (99%) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f077f7a412b32e..2909def64fc986 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -41,10 +41,11 @@ use { ZeroLamportAccounts, }, accounts_index::{ - AccountMapEntry, AccountSecondaryIndexes, AccountsIndex, AccountsIndexConfig, - AccountsIndexRootsStats, AccountsIndexScanResult, DiskIndexValue, IndexKey, IndexValue, - IsCached, RefCount, ScanConfig, ScanResult, SlotList, UpsertReclaim, ZeroLamport, - ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, + in_mem_accounts_index::StartupStats, AccountMapEntry, AccountSecondaryIndexes, + AccountsIndex, AccountsIndexConfig, 
AccountsIndexRootsStats, AccountsIndexScanResult, + DiskIndexValue, IndexKey, IndexValue, IsCached, RefCount, ScanConfig, ScanResult, + SlotList, UpsertReclaim, ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, + ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, accounts_index_storage::Startup, accounts_partition::RentPayingAccountsByPartition, @@ -62,7 +63,6 @@ use { }, contains::Contains, epoch_accounts_hash::EpochAccountsHashManager, - in_mem_accounts_index::StartupStats, partitioned_rewards::{PartitionedEpochRewardsConfig, TestPartitionedEpochRewards}, pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 7c4baf1ee95908..bd57e0803846fc 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1,3 +1,4 @@ +pub(crate) mod in_mem_accounts_index; use { crate::{ accounts_index_storage::{AccountsIndexStorage, Startup}, @@ -5,13 +6,13 @@ use { ancestors::Ancestors, bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, contains::Contains, - in_mem_accounts_index::{InMemAccountsIndex, InsertNewEntryResults, StartupStats}, inline_spl_token::{self, GenericTokenAccount}, inline_spl_token_2022, pubkey_bins::PubkeyBinCalculator24, rolling_bit_field::RollingBitField, secondary_index::*, }, + in_mem_accounts_index::{InMemAccountsIndex, InsertNewEntryResults, StartupStats}, log::*, rand::{thread_rng, Rng}, rayon::{ diff --git a/accounts-db/src/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs similarity index 99% rename from accounts-db/src/in_mem_accounts_index.rs rename to accounts-db/src/accounts_index/in_mem_accounts_index.rs index 918a5c41f9cd2a..3df05ee5a28127 100644 --- a/accounts-db/src/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -326,7 +326,7 @@ impl + Into> InMemAccountsIndex( + pub(super) fn get_internal_inner( &self, pubkey: &K, // return true if item should be added to in_mem cache @@ -339,7 +339,7 @@ impl + Into> InMemAccountsIndex( + pub(super) fn get_internal_cloned( &self, pubkey: &K, callback: impl for<'a> FnOnce(Option>) -> RT, @@ -359,7 +359,7 @@ impl + Into> InMemAccountsIndex( + pub(super) fn get_internal( &self, pubkey: &K, // return true if item should be added to in_mem cache diff --git a/accounts-db/src/accounts_index_storage.rs b/accounts-db/src/accounts_index_storage.rs index db5ae2b35f7fa1..3a654c84c25f97 100644 --- a/accounts-db/src/accounts_index_storage.rs +++ b/accounts-db/src/accounts_index_storage.rs @@ -1,8 +1,10 @@ use { crate::{ - accounts_index::{AccountsIndexConfig, DiskIndexValue, IndexValue}, + accounts_index::{ + in_mem_accounts_index::InMemAccountsIndex, AccountsIndexConfig, DiskIndexValue, + IndexValue, + }, bucket_map_holder::BucketMapHolder, - in_mem_accounts_index::InMemAccountsIndex, waitable_condvar::WaitableCondvar, }, std::{ diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index fc7bf3ba4131f0..bc7e19112e516f 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -1,8 +1,10 @@ use { crate::{ - accounts_index::{AccountsIndexConfig, DiskIndexValue, IndexLimitMb, IndexValue}, + accounts_index::{ + in_mem_accounts_index::{InMemAccountsIndex, StartupStats}, + AccountsIndexConfig, DiskIndexValue, IndexLimitMb, IndexValue, + }, bucket_map_holder_stats::BucketMapHolderStats, - in_mem_accounts_index::{InMemAccountsIndex, StartupStats}, 
waitable_condvar::WaitableCondvar, }, solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig}, diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 792de99c49b8d9..7883f852d1e3f2 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -27,7 +27,6 @@ pub mod cache_hash_data_stats; pub mod contains; pub mod epoch_accounts_hash; pub mod hardened_unpack; -pub mod in_mem_accounts_index; pub mod inline_spl_token; pub mod inline_spl_token_2022; pub mod partitioned_rewards; From 996de53309ee5cd4b21856082d03f02c43ac616b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 00:50:47 +0800 Subject: [PATCH 288/401] build(deps): bump syn from 2.0.51 to 2.0.52 (#35371) * build(deps): bump syn from 2.0.51 to 2.0.52 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.51 to 2.0.52. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.51...2.0.52) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 66 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 62 +++++++++++++++++++------------------- 2 files changed, 64 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81d6aa64125445..9dd9e5859adb26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -453,7 +453,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -601,7 +601,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -769,7 +769,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "syn_derive", ] @@ -1529,7 +1529,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1540,7 +1540,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1602,7 +1602,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1726,7 +1726,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1832,7 +1832,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -2096,7 +2096,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3370,7 +3370,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3444,7 +3444,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3917,7 +3917,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -4085,7 +4085,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -4767,7 +4767,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -4821,7 +4821,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -4871,7 +4871,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -6001,7 +6001,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7051,7 +7051,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7766,7 +7766,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7778,7 +7778,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.51", + "syn 2.0.52", "thiserror", ] @@ -7836,7 +7836,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8024,9 +8024,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.51" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -8042,7 +8042,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8213,7 +8213,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8225,7 +8225,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "test-case-core", ] @@ -8261,7 +8261,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8398,7 +8398,7 @@ source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7 dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8644,7 +8644,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8947,7 +8947,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "wasm-bindgen-shared", ] @@ -8981,7 +8981,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9275,7 +9275,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -9295,7 +9295,7 @@ checksum = 
"ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1d11ce6e65542e..84054e1b4c99c0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -419,7 +419,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -567,7 +567,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -720,7 +720,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "syn_derive", ] @@ -1232,7 +1232,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1243,7 +1243,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1418,7 +1418,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1527,7 +1527,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1774,7 +1774,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3013,7 +3013,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3086,7 +3086,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3518,7 +3518,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3659,7 +3659,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -4237,7 +4237,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -4282,7 +4282,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -5030,7 +5030,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -6161,7 +6161,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -6701,7 +6701,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -6713,7 +6713,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.51", + "syn 2.0.52", "thiserror", ] @@ -6761,7 +6761,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -6949,9 +6949,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.51" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -6967,7 +6967,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7124,7 +7124,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7136,7 +7136,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "test-case-core", ] @@ -7172,7 +7172,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7295,7 +7295,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7513,7 +7513,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -7795,7 +7795,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "wasm-bindgen-shared", ] @@ -7829,7 +7829,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8114,7 +8114,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -8134,7 +8134,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] From eef4afa782557bf8c6c15bc03e525756fa542507 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 00:51:01 +0800 Subject: [PATCH 289/401] build(deps): bump crossbeam-channel from 0.5.11 to 0.5.12 (#35372) * build(deps): bump crossbeam-channel from 0.5.11 to 0.5.12 Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.11 to 0.5.12. - [Release notes](https://github.com/crossbeam-rs/crossbeam/releases) - [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md) - [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.11...crossbeam-channel-0.5.12) --- updated-dependencies: - dependency-name: crossbeam-channel dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dd9e5859adb26..3dee65badb8327 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1390,9 +1390,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] diff --git a/Cargo.toml b/Cargo.toml index 804e9ba19077da..ab42c916f8a396 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -181,7 +181,7 @@ const_format = "0.2.32" core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" -crossbeam-channel = "0.5.11" +crossbeam-channel = "0.5.12" csv = "1.3.0" ctrlc = "3.4.2" curve25519-dalek = "3.2.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 84054e1b4c99c0..019c339d1c7e14 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1122,9 +1122,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] From 5f54f6a7d9739154591c1a454c16aa04c5cfdc64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 00:51:44 +0800 Subject: [PATCH 290/401] build(deps): bump log from 0.4.20 to 0.4.21 (#35370) * build(deps): bump log from 0.4.20 to 0.4.21 Bumps [log](https://github.com/rust-lang/log) from 0.4.20 to 0.4.21. - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.20...0.4.21) --- updated-dependencies: - dependency-name: log dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3dee65badb8327..cc42e6da02df20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3043,9 +3043,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lru" diff --git a/Cargo.toml b/Cargo.toml index ab42c916f8a396..8cc38b69144d3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ libc = "0.2.153" libloading = "0.7.4" libsecp256k1 = "0.6.0" light-poseidon = "0.2.0" -log = "0.4.20" +log = "0.4.21" lru = "0.7.7" lz4 = "1.24.0" memmap2 = "0.5.10" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 019c339d1c7e14..93e2a243e2004d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2684,9 +2684,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lru" From bdc5cceb183c99e3d3c471af3ece2a2f7f7978d5 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 29 Feb 2024 14:31:13 -0500 Subject: [PATCH 291/401] Purges all bank snapshots after fastboot (#35350) --- ledger/src/bank_forks_utils.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index b14d9facdd4c19..cc5b196b39f454 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -337,6 +337,15 @@ fn bank_forks_from_snapshot( source: err, path: bank_snapshot.snapshot_path(), })?; + + // If the node crashes before taking the next bank snapshot, the next startup will attempt + // to load from the same bank snapshot again. And if `shrink` has run, the account storage + // files that are hard linked in bank snapshot will be *different* than what the bank + // snapshot expects. This would cause the node to crash again. To prevent that, purge all + // the bank snapshots here. In the above scenario, this will cause the node to load from a + // snapshot archive next time, which is safe. 
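(A minimal sketch of the retain-newest-N purge semantics that the call just below appears to rely on; `slots_to_purge` is a hypothetical helper, not the real `snapshot_utils` API. With a retain count of 0 and no snapshot-kind filter, as in the call below, every bank snapshot is selected for removal:)

    // Keep only the newest `num_to_retain` snapshot slots; return the rest for deletion.
    fn slots_to_purge(mut snapshot_slots: Vec<u64>, num_to_retain: usize) -> Vec<u64> {
        snapshot_slots.sort_unstable();
        let cutoff = snapshot_slots.len().saturating_sub(num_to_retain);
        snapshot_slots.truncate(cutoff);
        snapshot_slots
    }

    fn main() {
        assert_eq!(slots_to_purge(vec![10, 20, 30], 0), vec![10, 20, 30]); // purge all
        assert_eq!(slots_to_purge(vec![10, 20, 30], 1), vec![10, 20]);     // keep newest
    }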
+    snapshot_utils::purge_old_bank_snapshots(&snapshot_config.bank_snapshots_dir, 0, None);
+
     bank
 };

From e8c87e86ef2632ce6e1e974d8492578e92fae554 Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Thu, 29 Feb 2024 12:05:20 -0800
Subject: [PATCH 292/401] local-cluster: fix flaky optimistic_confirmation
 tests (#35356)

* local-cluster: fix flaky optimistic_confirmation tests

* pr feedback: latest_vote -> newest_vote, reword some comments
---
 ledger/src/leader_schedule.rs        |  2 +-
 local-cluster/tests/local_cluster.rs | 44 ++++++++++++++++++++++------
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/ledger/src/leader_schedule.rs b/ledger/src/leader_schedule.rs
index b0f16c1cf37a94..f13f37031a79f2 100644
--- a/ledger/src/leader_schedule.rs
+++ b/ledger/src/leader_schedule.rs
@@ -13,7 +13,7 @@ pub struct FixedSchedule {
 }

 /// Stake-weighted leader schedule for one epoch.
-#[derive(Debug, Default, PartialEq, Eq)]
+#[derive(Debug, Default, PartialEq, Eq, Clone)]
 pub struct LeaderSchedule {
     slot_leaders: Vec<Pubkey>,
     // Inverted index from pubkeys to indices where they are the leader.
diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs
index 6f7de16df296b1..3b18ba44bf2d03 100644
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -3122,7 +3122,7 @@ fn test_optimistic_confirmation_violation_without_tower() {
 // `A` should not be able to generate a switching proof.
 //
 fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: bool) {
-    solana_logger::setup_with("debug");
+    solana_logger::setup_with("info");

     // First set up the cluster with 4 nodes
     let slots_per_epoch = 2048;
@@ -3172,22 +3172,25 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
     // below only for slots <= `next_slot_on_a`, validator A will not know how its last vote chains
     // to the other forks, and may violate switching proofs on restart.
     let mut default_config = ValidatorConfig::default_for_test();
-    // Split leader schedule 50-50 between validators B and C, don't give validator A any slots because
-    // it's going to be deleting its ledger, so may create versions of slots it's already created, but
-    // on a different fork.
+    // Ensure B can make leader blocks up till the fork slot, and give the remaining slots to C.
+    // Don't give validator A any slots because it's going to be deleting its ledger, so it may create
+    // versions of slots it's already created, but on a different fork.
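(As a rough model of the schedule construction that follows: `create_custom_leader_schedule` is fed `(pubkey, slot_count)` pairs, and each pair appears to contribute that many consecutive leader slots. The stand-in below is hypothetical, using `[u8; 32]` in place of `Pubkey` so it is self-contained:)

    fn toy_leader_schedule(entries: &[([u8; 32], usize)]) -> Vec<[u8; 32]> {
        entries
            .iter()
            .flat_map(|&(pubkey, n)| std::iter::repeat(pubkey).take(n))
            .collect()
    }

    fn main() {
        let b = [1u8; 32]; // stand-in for validator_b_pubkey
        let c = [2u8; 32]; // stand-in for validator_c_pubkey
        let schedule = toy_leader_schedule(&[(b, 4), (c, 8)]);
        assert!(schedule[..4].iter().all(|pk| *pk == b)); // B leads through the fork slot
        assert!(schedule[4..].iter().all(|pk| *pk == c)); // C takes the remainder
    }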
 let validator_to_slots = vec![
     // Ensure validator b is leader for slots <= `next_slot_on_a`
     (validator_b_pubkey, next_slot_on_a as usize + 1),
     (validator_c_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize),
 ];
+    // Trick C into not producing any blocks, in case its leader slots come up before it gets killed
+    let c_validator_to_slots = vec![(validator_b_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize)];
+    let c_leader_schedule = create_custom_leader_schedule(c_validator_to_slots.into_iter());
     let leader_schedule = create_custom_leader_schedule(validator_to_slots.into_iter());
     for slot in 0..=next_slot_on_a {
         assert_eq!(leader_schedule[slot], validator_b_pubkey);
     }

     default_config.fixed_leader_schedule = Some(FixedSchedule {
-        leader_schedule: Arc::new(leader_schedule),
+        leader_schedule: Arc::new(leader_schedule.clone()),
     });
     let mut validator_configs =
         make_identical_validator_configs(&default_config, node_stakes.len());
@@ -3195,6 +3198,10 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
     // Disable voting on validators C, and D
     validator_configs[2].voting_disabled = true;
     validator_configs[3].voting_disabled = true;
+    // C should not produce any blocks at this time
+    validator_configs[2].fixed_leader_schedule = Some(FixedSchedule {
+        leader_schedule: Arc::new(c_leader_schedule),
+    });

     let mut config = ClusterConfig {
         cluster_lamports: DEFAULT_CLUSTER_LAMPORTS + node_stakes.iter().sum::<u64>(),
@@ -3336,6 +3343,10 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
     // Run validator C only to make it produce and vote on its own fork.
     info!("Restart validator C again!!!");
     validator_c_info.config.voting_disabled = false;
+    // C should now produce blocks
+    validator_c_info.config.fixed_leader_schedule = Some(FixedSchedule {
+        leader_schedule: Arc::new(leader_schedule),
+    });
     cluster.restart_node(
         &validator_c_pubkey,
         validator_c_info,
@@ -3343,10 +3354,25 @@
     );
     let mut votes_on_c_fork = std::collections::BTreeSet::new(); // S4 and S5
-    for _ in 0..100 {
+    let mut last_vote = 0;
+    let now = Instant::now();
+    loop {
+        let elapsed = now.elapsed();
+        assert!(
+            elapsed <= Duration::from_secs(30),
+            "C failed to create a fork past {} in {} seconds,
+            last_vote {},
+            votes_on_c_fork: {:?}",
+            base_slot,
+            elapsed.as_secs(),
+            last_vote,
+            votes_on_c_fork,
+        );
         sleep(Duration::from_millis(100));

-        if let Some((last_vote, _)) = last_vote_in_tower(&val_c_ledger_path, &validator_c_pubkey) {
+        if let Some((newest_vote, _)) = last_vote_in_tower(&val_c_ledger_path, &validator_c_pubkey)
+        {
+            last_vote = newest_vote;
             if last_vote != base_slot {
                 votes_on_c_fork.insert(last_vote);
                 // Collect 4 votes
@@ -3357,7 +3383,7 @@
         }
     }
     assert!(!votes_on_c_fork.is_empty());
-    info!("collected validator C's votes: {:?}", votes_on_c_fork);
+    info!("Collected validator C's votes: {:?}", votes_on_c_fork);

     // Step 4:
     // verify whether there was violation or not

From 9bb59aa30f48fe50878cc7df88974b77c379582c Mon Sep 17 00:00:00 2001
From: Sean Young
Date: Fri, 1 Mar 2024 01:39:30 -0700
Subject: [PATCH 293/401] ledger-tool: verify: add --record-slots and
 --verify-slots (#34246)

ledger-tool: verify: add --record-slots, --record-slots-config and
--verify-slots

This adds:

 --record-slots       Write the slot hashes to this file.
 --record-slots-config hash-only|accounts
                      Store the bank (=accounts) json file, or not.
 --verify-slots       Verify slot hashes against this file.

The first case can be used to dump a list of (slot, hash) to a json file
during a replay. The second case can be used to check slot hashes against
previously recorded values.

This is useful for debugging consensus failures, e.g.:

 # on good commit/branch
 ledger-tool verify --record-slots good.json --record-slots-config=accounts

 # on bad commit or potentially consensus breaking branch
 ledger-tool verify --verify-slots good.json

On a hash mismatch an error will be logged with the expected hash vs the
computed hash.
---
 ledger-tool/src/main.rs               | 149 +++++++++++++++++++++-
 ledger/src/blockstore_processor.rs    |  11 +-
 runtime/src/bank/bank_hash_details.rs |  35 ++++--
 3 files changed, 185 insertions(+), 10 deletions(-)

diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 778b1a3201ab0b..13eb3c21031031 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -41,6 +41,7 @@ use {
     solana_ledger::{
         blockstore::{create_new_ledger, Blockstore},
         blockstore_options::{AccessType, LedgerColumnOptions},
+        blockstore_processor::ProcessSlotCallback,
         use_snapshot_archives_at_startup,
     },
     solana_measure::{measure, measure::Measure},
@@ -88,7 +89,7 @@ use {
     str::FromStr,
     sync::{
         atomic::{AtomicBool, Ordering},
-        Arc, RwLock,
+        Arc, Mutex, RwLock,
     },
 },
};
@@ -1060,6 +1061,28 @@
                         information that went into computing the completed bank's bank hash. \
                         The file will be written within <LEDGER_DIR>/bank_hash_details/",
                 ),
+        )
+        .arg(
+            Arg::with_name("record_slots")
+                .long("record-slots")
+                .default_value("slots.json")
+                .value_name("FILENAME")
+                .help("Record slots to a file"),
+        )
+        .arg(
+            Arg::with_name("verify_slots")
+                .long("verify-slots")
+                .default_value("slots.json")
+                .value_name("FILENAME")
+                .help("Verify slots match contents of file"),
+        )
+        .arg(
+            Arg::with_name("record_slots_config")
+                .long("record-slots-config")
+                .default_value("hash-only")
+                .possible_values(&["hash-only", "accounts"])
+                .requires("record_slots")
+                .help("In the slot recording, include bank details or not"),
         ),
 )
 .subcommand(
@@ -1621,7 +1644,114 @@
         },
     );

-    let process_options = parse_process_options(&ledger_path, arg_matches);
+    let mut process_options = parse_process_options(&ledger_path, arg_matches);
+
+    // .default_value() does not work with .conflicts_with() in clap 2.33
+    // .conflicts_with("verify_slots")
+    // https://github.com/clap-rs/clap/issues/1605#issuecomment-722326915
+    // So open-code the conflicts_with() here
+    if arg_matches.occurrences_of("record_slots") > 0
+        && arg_matches.occurrences_of("verify_slots") > 0
+    {
+        eprintln!(
+            "error: The argument '--verify-slots <FILENAME>' cannot be used with '--record-slots <FILENAME>'"
+        );
+        exit(1);
+    }
+
+    let (slot_callback, record_slots_file, recorded_slots) = if arg_matches
+        .occurrences_of("record_slots")
+        > 0
+    {
+        let filename = Path::new(arg_matches.value_of_os("record_slots").unwrap());
+
+        let file = File::create(filename).unwrap_or_else(|err| {
+            eprintln!("Unable to write to file: {}: {:#}", filename.display(), err);
+            exit(1);
+        });
+
+        let include_bank =
+            match arg_matches.value_of("record_slots_config").unwrap() {
+                "hash-only" => false,
+                "accounts" => true,
+                _ => unreachable!(),
+            };
+
+        let slot_hashes = Arc::new(Mutex::new(Vec::new()));
+
+        let slot_callback = Arc::new({
+            let slots = Arc::clone(&slot_hashes);
+            move |bank: &Bank| {
+                let slot_details = if include_bank {
+                    bank_hash_details::BankHashSlotDetails::try_from(bank).unwrap()
+                } else {
+                    bank_hash_details::BankHashSlotDetails {
+                        slot: bank.slot(),
+                        bank_hash: bank.hash().to_string(),
+                        ..Default::default()
+                    }
+                };
+
+                slots.lock().unwrap().push(slot_details);
+            }
+        });
+
+        (
+            Some(slot_callback as ProcessSlotCallback),
+            Some(file),
+            Some(slot_hashes),
+        )
+    } else if arg_matches.occurrences_of("verify_slots") > 0 {
+        let filename = Path::new(arg_matches.value_of_os("verify_slots").unwrap());
+
+        let file = File::open(filename).unwrap_or_else(|err| {
+            eprintln!("Unable to read file: {}: {err:#}", filename.display());
+            exit(1);
+        });
+
+        let reader = std::io::BufReader::new(file);
+
+        let details: bank_hash_details::BankHashDetails =
+            serde_json::from_reader(reader).unwrap_or_else(|err| {
+                eprintln!("Error loading slots file: {err:#}");
+                exit(1);
+            });
+
+        let slots = Arc::new(Mutex::new(details.bank_hash_details));
+
+        let slot_callback = Arc::new(move |bank: &Bank| {
+            if slots.lock().unwrap().is_empty() {
+                error!(
+                    "Expected slot: not found got slot: {} hash: {}",
+                    bank.slot(),
+                    bank.hash()
+                );
+            } else {
+                let bank_hash_details::BankHashSlotDetails {
+                    slot: expected_slot,
+                    bank_hash: expected_hash,
+                    ..
+                } = slots.lock().unwrap().remove(0);
+                if bank.slot() != expected_slot
+                    || bank.hash().to_string() != expected_hash
+                {
+                    error!("Expected slot: {expected_slot} hash: {expected_hash} got slot: {} hash: {}",
+                        bank.slot(), bank.hash());
+                } else {
+                    info!(
+                        "Expected slot: {expected_slot} hash: {expected_hash} correct"
+                    );
+                }
+            }
+        });
+
+        (Some(slot_callback as ProcessSlotCallback), None, None)
+    } else {
+        (None, None, None)
+    };
+
+    process_options.slot_callback = slot_callback;
+
     let print_accounts_stats = arg_matches.is_present("print_accounts_stats");
     let write_bank_file = arg_matches.is_present("write_bank_file");
     let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
@@ -1653,6 +1783,21 @@ fn main() {
                 })
                 .ok();
         }
+
+        if let Some(recorded_slots_file) = record_slots_file {
+            if let Ok(recorded_slots) = recorded_slots.clone().unwrap().lock() {
+                let bank_hashes =
+                    bank_hash_details::BankHashDetails::new(recorded_slots.to_vec());
+
+                // writing the json file ends up with a syscall for each number, comma, indentation etc.
+                // use BufWriter to speed things up
+
+                let writer = std::io::BufWriter::new(recorded_slots_file);
+
+                serde_json::to_writer_pretty(writer, &bank_hashes).unwrap();
+            }
+        }
+
         exit_signal.store(true, Ordering::Relaxed);
         system_monitor_service.join().unwrap();
     }
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index 63edb23e01cc18..2e172870d6e5f7 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -676,8 +676,9 @@ pub enum BlockstoreProcessorError {
     RootBankWithMismatchedCapitalization(Slot),
 }

-/// Callback for accessing bank state while processing the blockstore
-pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;
+/// Callback for accessing bank state after each slot is confirmed while
+/// processing the blockstore
+pub type ProcessSlotCallback = Arc<dyn Fn(&Bank) + Sync + Send>;

 #[derive(Default, Clone)]
 pub struct ProcessOptions {
@@ -685,6 +686,7 @@ pub struct ProcessOptions {
     pub run_verification: bool,
     pub full_leader_cache: bool,
     pub halt_at_slot: Option<Slot>,
+    pub slot_callback: Option<ProcessSlotCallback>,
     pub new_hard_forks: Option<Vec<Slot>>,
     pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
     pub account_indexes: AccountSecondaryIndexes,
@@ -1810,6 +1812,11 @@ fn process_single_slot(
     result?
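(To make the new hook concrete: a self-contained sketch of how a `ProcessSlotCallback` gets built and invoked once per frozen bank. The `Bank` and `ProcessOptions` here are hypothetical stand-ins, not the real types; the real invocation site, after `bank.freeze()`, is in the hunk continuing below:)

    use std::sync::{Arc, Mutex};

    struct Bank { slot: u64 } // stand-in with just the field this sketch needs
    type ProcessSlotCallback = Arc<dyn Fn(&Bank) + Sync + Send>;
    #[derive(Default)]
    struct ProcessOptions { slot_callback: Option<ProcessSlotCallback> }

    fn main() {
        let seen = Arc::new(Mutex::new(Vec::new()));
        let callback: ProcessSlotCallback = Arc::new({
            let seen = Arc::clone(&seen);
            move |bank: &Bank| seen.lock().unwrap().push(bank.slot)
        });
        let opts = ProcessOptions { slot_callback: Some(callback) };
        // What process_single_slot does after freezing each completed bank:
        if let Some(slot_callback) = &opts.slot_callback {
            slot_callback(&Bank { slot: 42 });
        }
        assert_eq!(*seen.lock().unwrap(), vec![42]);
    }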
} bank.freeze(); // all banks handled by this routine are created from complete slots + + if let Some(slot_callback) = &opts.slot_callback { + slot_callback(bank); + } + if blockstore.is_primary_access() { blockstore.insert_bank_hash(bank.slot(), bank.hash(), false); } diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index 9072f6a12f1496..25c1dbfc7df332 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -22,7 +22,7 @@ use { }; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub(crate) struct BankHashDetails { +pub struct BankHashDetails { /// The client version pub version: String, /// The encoding format for account data buffers @@ -66,17 +66,35 @@ impl BankHashDetails { } /// The components that go into a bank hash calculation for a single bank/slot. -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub(crate) struct BankHashSlotDetails { +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Default)] +pub struct BankHashSlotDetails { pub slot: Slot, pub bank_hash: String, + #[serde(skip_serializing_if = "String::is_empty")] + #[serde(default)] pub parent_bank_hash: String, + #[serde(skip_serializing_if = "String::is_empty")] + #[serde(default)] pub accounts_delta_hash: String, + #[serde(skip_serializing_if = "u64_is_zero")] + #[serde(default)] pub signature_count: u64, + #[serde(skip_serializing_if = "String::is_empty")] + #[serde(default)] pub last_blockhash: String, + #[serde(skip_serializing_if = "bankhashaccounts_is_empty")] + #[serde(default)] pub accounts: BankHashAccounts, } +fn u64_is_zero(val: &u64) -> bool { + *val == 0 +} + +fn bankhashaccounts_is_empty(accounts: &BankHashAccounts) -> bool { + accounts.accounts.is_empty() +} + impl BankHashSlotDetails { pub fn new( slot: Slot, @@ -141,8 +159,8 @@ impl TryFrom<&Bank> for BankHashSlotDetails { /// Wrapper around a Vec<_> to facilitate custom Serialize/Deserialize trait /// implementations. -#[derive(Clone, Debug, Eq, PartialEq)] -pub(crate) struct BankHashAccounts { +#[derive(Clone, Debug, Eq, PartialEq, Default)] +pub struct BankHashAccounts { pub accounts: Vec, } @@ -257,7 +275,12 @@ pub fn write_bank_hash_details_file(bank: &Bank) -> std::result::Result<(), Stri _ = std::fs::create_dir_all(parent_dir); let file = std::fs::File::create(&path) .map_err(|err| format!("Unable to create file at {}: {err}", path.display()))?; - serde_json::to_writer_pretty(file, &details) + + // writing the json file ends up with a syscall for each number, comma, indentation etc. 
+    // use BufWriter to speed things up
+    let writer = std::io::BufWriter::new(file);
+
+    serde_json::to_writer_pretty(writer, &details)
         .map_err(|err| format!("Unable to write file at {}: {err}", path.display()))?;
     }

     Ok(())

From e3b9d7fbb3f127221fbbc8d4fa7255c01202dbc0 Mon Sep 17 00:00:00 2001
From: Han Yang
Date: Fri, 1 Mar 2024 04:34:32 -0700
Subject: [PATCH 294/401] SDK: refactor `Signer` and `Signers` traits (#34984)

* read_keypair_file\((.+?)\)[\n\r\s]+.unwrap\(\)[\n\r\s]+.into\(\) ->
  Box::new(read_keypair_file().unwrap()),
  Presigner::new\((.*?)\).into\(\) -> Box::new(Presigner::new())

* compiles

* moar general

* doc

* Result impls FromIterator

* doc
---
 cli/src/cli.rs            |  43 ++++---
 cli/src/cluster_query.rs  |   2 +-
 cli/src/nonce.rs          |  30 ++---
 cli/src/program.rs        |  72 ++++++-----
 cli/src/program_v4.rs     |  24 ++--
 cli/src/stake.rs          | 244 ++++++++++++++++----------------
 cli/src/vote.rs           |  96 ++++++++-------
 sdk/src/signer/mod.rs     |  13 +-
 sdk/src/signer/signers.rs | 163 ++++++------------------
 tokens/src/commands.rs    |  62 ++++------
 10 files changed, 301 insertions(+), 448 deletions(-)

diff --git a/cli/src/cli.rs b/cli/src/cli.rs
index e5bf78670d7dca..99a0de0a719c69 100644
--- a/cli/src/cli.rs
+++ b/cli/src/cli.rs
@@ -1796,7 +1796,11 @@ mod tests {
         let keypair0_pubkey = keypair0.pubkey();
         let keypair0_clone = keypair_from_seed(&[1u8; 32]).unwrap();
         let keypair0_clone_pubkey = keypair0.pubkey();
-        let signers = vec![None, Some(keypair0.into()), Some(keypair0_clone.into())];
+        let signers: Vec<Option<Box<dyn Signer>>> = vec![
+            None,
+            Some(Box::new(keypair0)),
+            Some(Box::new(keypair0_clone)),
+        ];
         let signer_info = default_signer
             .generate_unique_signers(signers, &matches, &mut None)
             .unwrap();
@@ -1808,7 +1812,8 @@
         let keypair0 = keypair_from_seed(&[1u8; 32]).unwrap();
         let keypair0_pubkey = keypair0.pubkey();
         let keypair0_clone = keypair_from_seed(&[1u8; 32]).unwrap();
-        let signers = vec![Some(keypair0.into()), Some(keypair0_clone.into())];
+        let signers: Vec<Option<Box<dyn Signer>>> =
+            vec![Some(Box::new(keypair0)), Some(Box::new(keypair0_clone))];
         let signer_info = default_signer
             .generate_unique_signers(signers, &matches, &mut None)
             .unwrap();
@@ -1825,11 +1830,11 @@
         let presigner0_pubkey = presigner0.pubkey();
         let presigner1 = Presigner::new(&keypair1.pubkey(), &keypair1.sign_message(&message));
         let presigner1_pubkey = presigner1.pubkey();
-        let signers = vec![
-            Some(keypair0.into()),
-            Some(presigner0.into()),
-            Some(presigner1.into()),
-            Some(keypair1.into()),
+        let signers: Vec<Option<Box<dyn Signer>>> = vec![
+            Some(Box::new(keypair0)),
+            Some(Box::new(presigner0)),
+            Some(Box::new(presigner1)),
+            Some(Box::new(keypair1)),
         ];
         let signer_info = default_signer
             .generate_unique_signers(signers, &matches, &mut None)
@@ -1913,7 +1918,7 @@
                     pubkey: None,
                     use_lamports_unit: true,
                 },
-                signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
+                signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())],
             }
         );
@@ -1978,7 +1983,7 @@
                     seed: "seed".to_string(),
                     program_id: stake::program::id(),
                 },
-                signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
+                signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())],
             }
         );
@@ -2020,7 +2025,7 @@
                 command: CliCommand::SignOffchainMessage {
                     message: message.clone()
                 },
-                signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
+                signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())],
             }
         );
@@ -2040,7 +2045,7 @@
                     signature,
                     message
                 },
-                signers:
vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); } @@ -2460,7 +2465,7 @@ mod tests { derived_address_program_id: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2488,7 +2493,7 @@ mod tests { derived_address_program_id: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2521,7 +2526,7 @@ mod tests { derived_address_program_id: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2557,7 +2562,7 @@ mod tests { derived_address_program_id: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2601,7 +2606,7 @@ mod tests { derived_address_program_id: None, compute_unit_price: None, }, - signers: vec![Presigner::new(&from_pubkey, &from_sig).into()], + signers: vec![Box::new(Presigner::new(&from_pubkey, &from_sig))], } ); @@ -2647,8 +2652,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&nonce_authority_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&nonce_authority_file).unwrap()) ], } ); @@ -2686,7 +2691,7 @@ mod tests { derived_address_program_id: Some(stake::program::id()), compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap()),], } ); } diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 87dc35810def95..cadd4154658689 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -2399,7 +2399,7 @@ mod tests { print_timestamp: true, compute_unit_price: None, }, - signers: vec![default_keypair.into()], + signers: vec![Box::new(default_keypair)], } ); } diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index bc6fd981cea951..e37129e3e9f326 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -728,7 +728,7 @@ mod tests { new_authority: Pubkey::default(), compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -752,8 +752,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -777,8 +777,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()) ], } ); @@ -804,8 +804,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - 
read_keypair_file(&keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()) ], } ); @@ -839,7 +839,7 @@ mod tests { memo: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -862,8 +862,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -909,7 +909,7 @@ mod tests { lamports: 42_000_000_000, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -940,8 +940,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -986,8 +986,8 @@ mod tests { compute_unit_price: Some(99), }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); diff --git a/cli/src/program.rs b/cli/src/program.rs index 7605daf5912721..92c3c657adc40a 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -2789,7 +2789,7 @@ mod tests { allow_excessive_balance: false, skip_fee_check: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -2817,7 +2817,7 @@ mod tests { allow_excessive_balance: false, skip_fee_check: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -2848,8 +2848,8 @@ mod tests { skip_fee_check: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&buffer_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&buffer_keypair_file).unwrap()), ], } ); @@ -2879,7 +2879,7 @@ mod tests { allow_excessive_balance: false, skip_fee_check: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -2911,8 +2911,8 @@ mod tests { skip_fee_check: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&program_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&program_keypair_file).unwrap()), ], } ); @@ -2945,8 +2945,8 @@ mod tests { skip_fee_check: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], } ); @@ -2974,7 +2974,7 @@ mod tests { skip_fee_check: false, allow_excessive_balance: false, }), - signers: 
vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); } @@ -3008,7 +3008,7 @@ mod tests { max_len: None, skip_fee_check: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3033,7 +3033,7 @@ mod tests { max_len: Some(42), skip_fee_check: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3062,8 +3062,8 @@ mod tests { skip_fee_check: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&buffer_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&buffer_keypair_file).unwrap()), ], } ); @@ -3093,8 +3093,8 @@ mod tests { skip_fee_check: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], } ); @@ -3129,9 +3129,9 @@ mod tests { skip_fee_check: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&buffer_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&buffer_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], } ); @@ -3175,7 +3175,7 @@ mod tests { dump_transaction_message: true, blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3203,7 +3203,7 @@ mod tests { dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3236,10 +3236,8 @@ mod tests { blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&new_authority_pubkey_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&new_authority_pubkey_file).unwrap()), ], } ); @@ -3266,7 +3264,7 @@ mod tests { dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3295,8 +3293,8 @@ mod tests { blockhash_query: BlockhashQuery::default(), }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], } ); @@ -3330,7 +3328,7 @@ mod tests { buffer_authority_index: Some(0), new_buffer_authority: new_authority_pubkey, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3354,7 +3352,7 @@ mod tests { buffer_authority_index: Some(0), new_buffer_authority: new_authority_keypair.pubkey(), }), - signers: 
vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); } @@ -3521,7 +3519,7 @@ mod tests { use_lamports_unit: false, bypass_warning: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3544,7 +3542,7 @@ mod tests { use_lamports_unit: false, bypass_warning: true, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); @@ -3569,8 +3567,8 @@ mod tests { bypass_warning: false, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], } ); @@ -3594,7 +3592,7 @@ mod tests { use_lamports_unit: false, bypass_warning: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap()),], } ); @@ -3616,7 +3614,7 @@ mod tests { use_lamports_unit: true, bypass_warning: false, }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap()),], } ); } @@ -3648,7 +3646,7 @@ mod tests { program_pubkey, additional_bytes }), - signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } ); } diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 1f76c0594e6e7a..115f902127ad14 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -1666,9 +1666,9 @@ mod tests { authority_signer_index: 2, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&program_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&program_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -1712,8 +1712,8 @@ mod tests { buffer_signer_index: None, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -1744,9 +1744,9 @@ mod tests { authority_signer_index: 2, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&buffer_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&buffer_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -1787,8 +1787,8 @@ mod tests { authority_signer_index: 1, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); @@ -1829,8 +1829,8 @@ mod tests { authority_signer_index: 1, }), signers: vec![ - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into() + Box::new(read_keypair_file(&keypair_file).unwrap()), + 
Box::new(read_keypair_file(&authority_keypair_file).unwrap()) ], } ); diff --git a/cli/src/stake.rs b/cli/src/stake.rs index f4bb8329278d94..45c6e12e072d4e 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -2874,7 +2874,7 @@ mod tests { no_wait: false, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap()),], }, ); let (withdraw_authority_keypair_file, mut tmp_file) = make_tmp_file(); @@ -2924,13 +2924,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), ], }, ); @@ -2977,10 +2973,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), ], }, ); @@ -3013,7 +3007,7 @@ mod tests { no_wait: false, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap()),], }, ); let test_stake_authorize = test_commands.clone().get_matches_from(vec![ @@ -3048,10 +3042,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()), ], }, ); @@ -3088,10 +3080,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), ], }, ); @@ -3124,7 +3114,7 @@ mod tests { no_wait: false, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap()),], }, ); let test_stake_authorize = test_commands.clone().get_matches_from(vec![ @@ -3159,10 +3149,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), ], }, ); @@ -3198,7 +3186,7 @@ mod tests { no_wait: true, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -3246,8 +3234,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + 
Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3298,14 +3286,10 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), ], }, ); @@ -3352,11 +3336,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3390,8 +3372,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3427,11 +3409,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3468,11 +3448,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3506,8 +3484,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3543,11 +3521,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_keypair_file) - .unwrap() - .into(), - read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], }, ); @@ -3584,8 +3560,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - 
read_keypair_file(&authority_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), ], } ); @@ -3625,7 +3601,7 @@ mod tests { no_wait: false, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); // Test Authorize Subcommand w/ offline feepayer @@ -3672,8 +3648,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&pubkey, &sig).into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&pubkey, &sig)) ], } ); @@ -3728,9 +3704,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&pubkey, &sig).into(), - Presigner::new(&pubkey2, &sig2).into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&pubkey, &sig)), + Box::new(Presigner::new(&pubkey2, &sig2)), ], } ); @@ -3769,7 +3745,7 @@ mod tests { no_wait: false, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); // Test Authorize Subcommand w/ nonce @@ -3817,8 +3793,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - nonce_authority_keypair.into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(nonce_authority_keypair) ], } ); @@ -3860,8 +3836,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&fee_payer_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&fee_payer_keypair_file).unwrap()), ], } ); @@ -3907,8 +3883,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&fee_payer_pubkey, &sig).into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&fee_payer_pubkey, &sig)) ], } ); @@ -3958,8 +3934,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - stake_account_keypair.into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(stake_account_keypair) ], } ); @@ -3999,8 +3975,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()) ], } ); @@ -4039,9 +4015,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - stake_account_keypair.into(), - withdrawer_keypair.into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(stake_account_keypair), + Box::new(withdrawer_keypair), ], } ); @@ -4112,8 +4088,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - Presigner::new(&offline_pubkey, &offline_sig).into(), - read_keypair_file(&keypair_file).unwrap().into() + Box::new(Presigner::new(&offline_pubkey, &offline_sig)), + 
Box::new(read_keypair_file(&keypair_file).unwrap()) ], } ); @@ -4145,7 +4121,7 @@ mod tests { redelegation_stake_account: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4179,10 +4155,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()) ], } ); @@ -4213,7 +4187,7 @@ mod tests { redelegation_stake_account: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4249,7 +4223,7 @@ mod tests { redelegation_stake_account: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4280,7 +4254,7 @@ mod tests { redelegation_stake_account: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4322,8 +4296,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&key1, &sig1).into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&key1, &sig1)) ], } ); @@ -4372,9 +4346,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&key1, &sig1).into(), - Presigner::new(&key2, &sig2).into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&key1, &sig1)), + Box::new(Presigner::new(&key2, &sig2)), ], } ); @@ -4410,8 +4384,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&fee_payer_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&fee_payer_keypair_file).unwrap()) ], } ); @@ -4453,10 +4427,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&redelegation_stake_account_keypair_file) - .unwrap() - .into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&redelegation_stake_account_keypair_file).unwrap()) ], } ); @@ -4489,7 +4461,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4523,7 +4495,7 @@ mod tests { fee_payer: 0, compute_unit_price: Some(99), }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4558,10 +4530,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + 
Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()) ], } ); @@ -4597,8 +4567,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&custodian_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&custodian_keypair_file).unwrap()) ], } ); @@ -4647,10 +4617,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into(), - Presigner::new(&offline_pubkey, &offline_sig).into() + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()), + Box::new(Presigner::new(&offline_pubkey, &offline_sig)) ], } ); @@ -4678,7 +4646,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4706,7 +4674,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4736,10 +4704,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&stake_authority_keypair_file) - .unwrap() - .into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&stake_authority_keypair_file).unwrap()) ], } ); @@ -4774,7 +4740,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4803,7 +4769,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -4843,8 +4809,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&key1, &sig1).into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&key1, &sig1)) ], } ); @@ -4891,9 +4857,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - Presigner::new(&key1, &sig1).into(), - Presigner::new(&key2, &sig2).into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(Presigner::new(&key1, &sig1)), + Box::new(Presigner::new(&key2, &sig2)), ], } ); @@ -4924,8 +4890,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&fee_payer_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&fee_payer_keypair_file).unwrap()) ], } ); @@ -4965,10 +4931,8 @@ mod tests { rent_exempt_reserve: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&split_stake_account_keypair_file) - .unwrap() - .into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&split_stake_account_keypair_file).unwrap()) ], } ); @@ -5033,11 +4997,9 @@ mod tests { rent_exempt_reserve: None, }, signers: vec![ - Presigner::new(&stake_auth_pubkey, &stake_sig).into(), - 
Presigner::new(&nonce_auth_pubkey, &nonce_sig).into(), - read_keypair_file(&split_stake_account_keypair_file) - .unwrap() - .into(), + Box::new(Presigner::new(&stake_auth_pubkey, &stake_sig)), + Box::new(Presigner::new(&nonce_auth_pubkey, &nonce_sig)), + Box::new(read_keypair_file(&split_stake_account_keypair_file).unwrap()), ], } ); @@ -5070,7 +5032,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap()),], } ); } diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 9107d170058d2e..9b668818e92add 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -1517,7 +1517,7 @@ mod tests { new_authorized: None, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -1551,8 +1551,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authorized_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authorized_keypair_file).unwrap()), ], } ); @@ -1586,8 +1586,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authorized_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authorized_keypair_file).unwrap()), ], } ); @@ -1635,8 +1635,11 @@ mod tests { compute_unit_price: None, }, signers: vec![ - Presigner::new(&pubkey2, &sig2).into(), - Presigner::new(&authorized_keypair.pubkey(), &authorized_sig).into(), + Box::new(Presigner::new(&pubkey2, &sig2)), + Box::new(Presigner::new( + &authorized_keypair.pubkey(), + &authorized_sig + )), ], } ); @@ -1672,8 +1675,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&voter_keypair_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&voter_keypair_file).unwrap()) ], } ); @@ -1704,9 +1707,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&authorized_keypair_file).unwrap().into(), - read_keypair_file(&voter_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&authorized_keypair_file).unwrap()), + Box::new(read_keypair_file(&voter_keypair_file).unwrap()), ], } ); @@ -1758,9 +1761,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&identity_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&identity_keypair_file).unwrap()), ], } ); @@ -1792,9 +1795,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&identity_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()), + 
Box::new(read_keypair_file(&identity_keypair_file).unwrap()), ], } ); @@ -1833,9 +1836,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&identity_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&identity_keypair_file).unwrap()), ], } ); @@ -1886,10 +1889,10 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into(), - Presigner::new(&identity_keypair.pubkey(), &identity_sig).into(), - Presigner::new(&pubkey2, &sig2).into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(Presigner::new(&identity_keypair.pubkey(), &identity_sig)), + Box::new(Presigner::new(&pubkey2, &sig2)), ], } ); @@ -1929,9 +1932,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), Box::new(keypair), - read_keypair_file(&identity_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&identity_keypair_file).unwrap()), ], } ); @@ -1968,9 +1971,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&keypair_file).unwrap().into(), - read_keypair_file(&identity_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&identity_keypair_file).unwrap()), ], } ); @@ -1999,9 +2002,9 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), Box::new(read_keypair_file(&keypair_file).unwrap()), - read_keypair_file(&identity_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&identity_keypair_file).unwrap()), ], } ); @@ -2030,7 +2033,7 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), + Box::new(read_keypair_file(&default_keypair_file).unwrap()), Box::new(read_keypair_file(&keypair_file).unwrap()), ], } @@ -2061,7 +2064,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2090,7 +2093,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2125,8 +2128,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_file).unwrap()) ], } ); @@ -2163,7 +2166,9 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&withdraw_authority_file).unwrap().into()], + signers: vec![Box::new( + read_keypair_file(&withdraw_authority_file).unwrap() + )], } ); @@ 
-2204,7 +2209,10 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![Presigner::new(&withdraw_authority.pubkey(), &authorized_sig).into(),], + signers: vec![Box::new(Presigner::new( + &withdraw_authority.pubkey(), + &authorized_sig + )),], } ); @@ -2226,7 +2234,7 @@ mod tests { fee_payer: 0, compute_unit_price: None, }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], + signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], } ); @@ -2254,8 +2262,8 @@ mod tests { compute_unit_price: None, }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_file).unwrap()) ], } ); @@ -2286,8 +2294,8 @@ mod tests { compute_unit_price: Some(99), }, signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - read_keypair_file(&withdraw_authority_file).unwrap().into() + Box::new(read_keypair_file(&default_keypair_file).unwrap()), + Box::new(read_keypair_file(&withdraw_authority_file).unwrap()) ], } ); diff --git a/sdk/src/signer/mod.rs b/sdk/src/signer/mod.rs index 79aab63d3ab208..4c90d2d10ff293 100644 --- a/sdk/src/signer/mod.rs +++ b/sdk/src/signer/mod.rs @@ -85,17 +85,8 @@ pub trait Signer { fn is_interactive(&self) -> bool; } -impl From for Box -where - T: Signer + 'static, -{ - fn from(signer: T) -> Self { - Box::new(signer) - } -} - -/// This impl allows using Signer with types like Box/Rc/Arc. -impl> Signer for Container { +/// This implements `Signer` for all ptr types - `Box/Rc/Arc/&/&mut` etc +impl> Signer for Container { #[inline] fn pubkey(&self) -> Pubkey { self.deref().pubkey() diff --git a/sdk/src/signer/signers.rs b/sdk/src/signer/signers.rs index f4cfc7dc9618a0..ad06e9ff3f3f24 100644 --- a/sdk/src/signer/signers.rs +++ b/sdk/src/signer/signers.rs @@ -1,11 +1,8 @@ #![cfg(feature = "full")] -use { - crate::{ - pubkey::Pubkey, - signature::{Signature, Signer, SignerError}, - }, - std::sync::Arc, +use crate::{ + pubkey::Pubkey, + signature::{Signature, Signer, SignerError}, }; /// Convenience trait for working with mixed collections of `Signer`s @@ -17,130 +14,44 @@ pub trait Signers { fn is_interactive(&self) -> bool; } -macro_rules! 
default_keypairs_impl { - () => { - fn pubkeys(&self) -> Vec { - self.iter().map(|keypair| keypair.pubkey()).collect() - } - - fn try_pubkeys(&self) -> Result, SignerError> { - let mut pubkeys = Vec::new(); - for keypair in self.iter() { - pubkeys.push(keypair.try_pubkey()?); - } - Ok(pubkeys) - } - - fn sign_message(&self, message: &[u8]) -> Vec { - self.iter() - .map(|keypair| keypair.sign_message(message)) - .collect() - } - - fn try_sign_message(&self, message: &[u8]) -> Result, SignerError> { - let mut signatures = Vec::new(); - for keypair in self.iter() { - signatures.push(keypair.try_sign_message(message)?); - } - Ok(signatures) - } - - fn is_interactive(&self) -> bool { - self.iter().any(|s| s.is_interactive()) - } - }; -} - -impl Signers for [&T] { - default_keypairs_impl!(); -} - -impl Signers for [Box] { - default_keypairs_impl!(); -} - -impl Signers for Vec> { - default_keypairs_impl!(); -} - -impl Signers for [Arc] { - default_keypairs_impl!(); -} - -impl Signers for [Arc; 0] { - default_keypairs_impl!(); -} - -impl Signers for [Arc; 1] { - default_keypairs_impl!(); -} - -impl Signers for [Arc; 2] { - default_keypairs_impl!(); -} - -impl Signers for [Arc; 3] { - default_keypairs_impl!(); -} - -impl Signers for [Arc; 4] { - default_keypairs_impl!(); -} - -impl Signers for Vec> { - default_keypairs_impl!(); -} - -impl Signers for Vec<&dyn Signer> { - default_keypairs_impl!(); -} - -impl Signers for [&dyn Signer] { - default_keypairs_impl!(); -} - -impl Signers for [&dyn Signer; 0] { - default_keypairs_impl!(); -} - -impl Signers for [&dyn Signer; 1] { - default_keypairs_impl!(); -} - -impl Signers for [&dyn Signer; 2] { - default_keypairs_impl!(); -} - -impl Signers for [&dyn Signer; 3] { - default_keypairs_impl!(); -} - -impl Signers for [&dyn Signer; 4] { - default_keypairs_impl!(); -} - -impl Signers for [&T; 0] { - default_keypairs_impl!(); -} - -impl Signers for [&T; 1] { - default_keypairs_impl!(); -} +/// Any `T` where `T` impls `IntoIterator` yielding +/// `Signer`s implements `Signers`. +/// +/// This includes [&dyn Signer], [Box], +/// [&dyn Signer; N], Vec, Vec, etc. +/// +/// When used as a generic function param, `&T` +/// should be used instead of `T` where T: Signers, due to the `?Sized` bounds on T. +/// E.g. 
[Signer] implements `Signers`, but `&[Signer]` does not +impl Signers for T +where + for<'a> &'a T: IntoIterator, +{ + fn pubkeys(&self) -> Vec { + self.into_iter().map(|keypair| keypair.pubkey()).collect() + } -impl Signers for [&T; 2] { - default_keypairs_impl!(); -} + fn try_pubkeys(&self) -> Result, SignerError> { + self.into_iter() + .map(|keypair| keypair.try_pubkey()) + .collect() + } -impl Signers for [&T; 3] { - default_keypairs_impl!(); -} + fn sign_message(&self, message: &[u8]) -> Vec { + self.into_iter() + .map(|keypair| keypair.sign_message(message)) + .collect() + } -impl Signers for [&T; 4] { - default_keypairs_impl!(); -} + fn try_sign_message(&self, message: &[u8]) -> Result, SignerError> { + self.into_iter() + .map(|keypair| keypair.try_sign_message(message)) + .collect() + } -impl Signers for Vec<&T> { - default_keypairs_impl!(); + fn is_interactive(&self) -> bool { + self.into_iter().any(|s| s.is_interactive()) + } } #[cfg(test)] diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs index 8219ffa858ec24..ef570f8ef6a9f5 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -1850,8 +1850,8 @@ mod tests { lockup_date: None, }]; let args = DistributeTokensArgs { - sender_keypair: read_keypair_file(sender_keypair_file).unwrap().into(), - fee_payer: read_keypair_file(fee_payer).unwrap().into(), + sender_keypair: Box::new(read_keypair_file(sender_keypair_file).unwrap()), + fee_payer: Box::new(read_keypair_file(fee_payer).unwrap()), dry_run: false, input_csv: "".to_string(), transaction_db: "".to_string(), @@ -1893,12 +1893,8 @@ mod tests { let unfunded_payer = Keypair::new(); let unfunded_payer_keypair_file = tmp_file_path("keypair_file", &unfunded_payer.pubkey()); write_keypair_file(&unfunded_payer, &unfunded_payer_keypair_file).unwrap(); - args.sender_keypair = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); - args.fee_payer = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); + args.sender_keypair = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) @@ -1933,12 +1929,9 @@ mod tests { .send_and_confirm_transaction_with_spinner(&transaction) .unwrap(); - args.sender_keypair = read_keypair_file(&partially_funded_payer_keypair_file) - .unwrap() - .into(); - args.fee_payer = read_keypair_file(&partially_funded_payer_keypair_file) - .unwrap() - .into(); + args.sender_keypair = + Box::new(read_keypair_file(&partially_funded_payer_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&partially_funded_payer_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) .unwrap_err(); @@ -1999,10 +1992,8 @@ mod tests { let unfunded_payer = Keypair::new(); let unfunded_payer_keypair_file = tmp_file_path("keypair_file", &unfunded_payer.pubkey()); write_keypair_file(&unfunded_payer, &unfunded_payer_keypair_file).unwrap(); - args.sender_keypair = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); - args.fee_payer = read_keypair_file(&sender_keypair_file).unwrap().into(); + args.sender_keypair = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&sender_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, 
&client, &args) @@ -2015,10 +2006,8 @@ mod tests { } // Unfunded fee payer - args.sender_keypair = read_keypair_file(&sender_keypair_file).unwrap().into(); - args.fee_payer = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); + args.sender_keypair = Box::new(read_keypair_file(&sender_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) @@ -2145,12 +2134,8 @@ mod tests { let unfunded_payer = Keypair::new(); let unfunded_payer_keypair_file = tmp_file_path("keypair_file", &unfunded_payer.pubkey()); write_keypair_file(&unfunded_payer, &unfunded_payer_keypair_file).unwrap(); - args.sender_keypair = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); - args.fee_payer = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); + args.sender_keypair = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) @@ -2185,12 +2170,9 @@ mod tests { .send_and_confirm_transaction_with_spinner(&transaction) .unwrap(); - args.sender_keypair = read_keypair_file(&partially_funded_payer_keypair_file) - .unwrap() - .into(); - args.fee_payer = read_keypair_file(&partially_funded_payer_keypair_file) - .unwrap() - .into(); + args.sender_keypair = + Box::new(read_keypair_file(&partially_funded_payer_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&partially_funded_payer_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) .unwrap_err(); @@ -2258,10 +2240,8 @@ mod tests { let unfunded_payer = Keypair::new(); let unfunded_payer_keypair_file = tmp_file_path("keypair_file", &unfunded_payer.pubkey()); write_keypair_file(&unfunded_payer, &unfunded_payer_keypair_file).unwrap(); - args.sender_keypair = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); - args.fee_payer = read_keypair_file(&sender_keypair_file).unwrap().into(); + args.sender_keypair = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&sender_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) @@ -2274,10 +2254,8 @@ mod tests { } // Unfunded fee payer - args.sender_keypair = read_keypair_file(&sender_keypair_file).unwrap().into(); - args.fee_payer = read_keypair_file(&unfunded_payer_keypair_file) - .unwrap() - .into(); + args.sender_keypair = Box::new(read_keypair_file(&sender_keypair_file).unwrap()); + args.fee_payer = Box::new(read_keypair_file(&unfunded_payer_keypair_file).unwrap()); let err_result = check_payer_balances(&[one_signer_message(&client)], &allocations, &client, &args) From 245530b28e6b85d78c23424a8302bb9ec240b3a6 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 1 Mar 2024 07:11:38 -0500 Subject: [PATCH 295/401] Uses purge_all_bank_snapshots() (#35380) --- ledger/src/bank_forks_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index cc5b196b39f454..17412c1801ac68 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -344,7 +344,7 @@ fn bank_forks_from_snapshot( // snapshot 
expects. This would cause the node to crash again. To prevent that, purge all // the bank snapshots here. In the above scenario, this will cause the node to load from a // snapshot archive next time, which is safe. - snapshot_utils::purge_old_bank_snapshots(&snapshot_config.bank_snapshots_dir, 0, None); + snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); bank }; From a7f9fe103fcfa106d7fe801ca53122139363fecd Mon Sep 17 00:00:00 2001 From: Tyera Date: Fri, 1 Mar 2024 09:08:55 -0700 Subject: [PATCH 296/401] Split runtime utilities out of stake_state.rs (#35386) * Add points module * Add rewards module * Hide rewards doc * Fixup ledger-tool imports --- ledger-tool/src/main.rs | 4 +- programs/stake/src/lib.rs | 3 + programs/stake/src/points.rs | 249 +++++++++ programs/stake/src/rewards.rs | 647 +++++++++++++++++++++++ programs/stake/src/stake_state.rs | 838 +----------------------------- runtime/src/bank.rs | 17 +- 6 files changed, 912 insertions(+), 846 deletions(-) create mode 100644 programs/stake/src/points.rs create mode 100644 programs/stake/src/rewards.rs diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 13eb3c21031031..8445782f840931 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -72,7 +72,7 @@ use { system_program, transaction::{MessageHash, SanitizedTransaction, SimpleAddressLoader}, }, - solana_stake_program::stake_state::{self, PointValue}, + solana_stake_program::{points::PointValue, stake_state}, solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::{ self, @@ -2589,7 +2589,7 @@ fn main() { new_credits_observed: Option, skipped_reasons: String, } - use solana_stake_program::stake_state::InflationPointCalculationEvent; + use solana_stake_program::points::InflationPointCalculationEvent; let stake_calculation_details: DashMap = DashMap::new(); let last_point_value = Arc::new(RwLock::new(None)); diff --git a/programs/stake/src/lib.rs b/programs/stake/src/lib.rs index b6d2ff478432b6..5f0f6c5f1342f6 100644 --- a/programs/stake/src/lib.rs +++ b/programs/stake/src/lib.rs @@ -12,6 +12,9 @@ use solana_sdk::{ }; pub mod config; +pub mod points; +#[doc(hidden)] +pub mod rewards; pub mod stake_instruction; pub mod stake_state; diff --git a/programs/stake/src/points.rs b/programs/stake/src/points.rs new file mode 100644 index 00000000000000..d19bd3435a366d --- /dev/null +++ b/programs/stake/src/points.rs @@ -0,0 +1,249 @@ +//! Information about points calculation based on stake state. +//! Used by `solana-runtime`. 
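+//!
+//! A rough sketch of the arithmetic, with illustrative values only (not taken
+//! from this patch): a stake of 1_000 lamports that observes 10 new vote
+//! credits earns `points = 1_000 * 10 = 10_000`. Given an epoch-wide
+//! `PointValue { rewards: 500, points: 100_000 }`, that stake's payout is
+//! `10_000 * 500 / 100_000 = 50` lamports, computed in integer math (see
+//! `rewards.rs`).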
+ +use { + solana_sdk::{ + clock::Epoch, + instruction::InstructionError, + pubkey::Pubkey, + stake::state::{Delegation, Stake, StakeStateV2}, + stake_history::StakeHistory, + }, + solana_vote_program::vote_state::VoteState, + std::cmp::Ordering, +}; + +/// captures a rewards round as lamports to be awarded +/// and the total points over which those lamports +/// are to be distributed +// basically read as rewards/points, but in integers instead of as an f64 +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct PointValue { + pub rewards: u64, // lamports to split + pub points: u128, // over these points +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct CalculatedStakePoints { + pub(crate) points: u128, + pub(crate) new_credits_observed: u64, + pub(crate) force_credits_update_with_skipped_reward: bool, +} + +#[derive(Debug)] +pub enum InflationPointCalculationEvent { + CalculatedPoints(u64, u128, u128, u128), + SplitRewards(u64, u64, u64, PointValue), + EffectiveStakeAtRewardedEpoch(u64), + RentExemptReserve(u64), + Delegation(Delegation, Pubkey), + Commission(u8), + CreditsObserved(u64, Option), + Skipped(SkippedReason), +} + +pub(crate) fn null_tracer() -> Option { + None:: +} + +#[derive(Debug)] +pub enum SkippedReason { + DisabledInflation, + JustActivated, + TooEarlyUnfairSplit, + ZeroPoints, + ZeroPointValue, + ZeroReward, + ZeroCreditsAndReturnZero, + ZeroCreditsAndReturnCurrent, + ZeroCreditsAndReturnRewinded, +} + +impl From for InflationPointCalculationEvent { + fn from(reason: SkippedReason) -> Self { + InflationPointCalculationEvent::Skipped(reason) + } +} + +// utility function, used by runtime +#[doc(hidden)] +pub fn calculate_points( + stake_state: &StakeStateV2, + vote_state: &VoteState, + stake_history: &StakeHistory, + new_rate_activation_epoch: Option, +) -> Result { + if let StakeStateV2::Stake(_meta, stake, _stake_flags) = stake_state { + Ok(calculate_stake_points( + stake, + vote_state, + stake_history, + null_tracer(), + new_rate_activation_epoch, + )) + } else { + Err(InstructionError::InvalidAccountData) + } +} + +fn calculate_stake_points( + stake: &Stake, + vote_state: &VoteState, + stake_history: &StakeHistory, + inflation_point_calc_tracer: Option, + new_rate_activation_epoch: Option, +) -> u128 { + calculate_stake_points_and_credits( + stake, + vote_state, + stake_history, + inflation_point_calc_tracer, + new_rate_activation_epoch, + ) + .points +} + +/// for a given stake and vote_state, calculate how many +/// points were earned (credits * stake) and new value +/// for credits_observed were the points paid +pub(crate) fn calculate_stake_points_and_credits( + stake: &Stake, + new_vote_state: &VoteState, + stake_history: &StakeHistory, + inflation_point_calc_tracer: Option, + new_rate_activation_epoch: Option, +) -> CalculatedStakePoints { + let credits_in_stake = stake.credits_observed; + let credits_in_vote = new_vote_state.credits(); + // if there is no newer credits since observed, return no point + match credits_in_vote.cmp(&credits_in_stake) { + Ordering::Less => { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::ZeroCreditsAndReturnRewinded.into()); + } + // Don't adjust stake.activation_epoch for simplicity: + // - generally fast-forwarding stake.activation_epoch forcibly (for + // artificial re-activation with re-warm-up) skews the stake + // history sysvar. 
And properly handling all the cases + // regarding deactivation epoch/warm-up/cool-down without + // introducing incentive skew is hard. + // - Conceptually, it should be acceptable for the staked SOLs at + // the recreated vote to receive rewards again immediately after + // rewind even if it looks like instant activation. That's + // because it must have passed the required warmed-up at least + // once in the past already + // - Also such a stake account remains to be a part of overall + // effective stake calculation even while the vote account is + // missing for (indefinite) time or remains to be pre-remove + // credits score. It should be treated equally to staking with + // delinquent validator with no differentiation. + + // hint with true to indicate some exceptional credits handling is needed + return CalculatedStakePoints { + points: 0, + new_credits_observed: credits_in_vote, + force_credits_update_with_skipped_reward: true, + }; + } + Ordering::Equal => { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::ZeroCreditsAndReturnCurrent.into()); + } + // don't hint caller and return current value if credits remain unchanged (= delinquent) + return CalculatedStakePoints { + points: 0, + new_credits_observed: credits_in_stake, + force_credits_update_with_skipped_reward: false, + }; + } + Ordering::Greater => {} + } + + let mut points = 0; + let mut new_credits_observed = credits_in_stake; + + for (epoch, final_epoch_credits, initial_epoch_credits) in + new_vote_state.epoch_credits().iter().copied() + { + let stake_amount = u128::from(stake.delegation.stake( + epoch, + stake_history, + new_rate_activation_epoch, + )); + + // figure out how much this stake has seen that + // for which the vote account has a record + let earned_credits = if credits_in_stake < initial_epoch_credits { + // the staker observed the entire epoch + final_epoch_credits - initial_epoch_credits + } else if credits_in_stake < final_epoch_credits { + // the staker registered sometime during the epoch, partial credit + final_epoch_credits - new_credits_observed + } else { + // the staker has already observed or been redeemed this epoch + // or was activated after this epoch + 0 + }; + let earned_credits = u128::from(earned_credits); + + // don't want to assume anything about order of the iterator... 
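+        // so track the running maximum of final_epoch_credits rather than
+        // assuming the last entry is the largest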
+ new_credits_observed = new_credits_observed.max(final_epoch_credits); + + // finally calculate points for this epoch + let earned_points = stake_amount * earned_credits; + points += earned_points; + + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&InflationPointCalculationEvent::CalculatedPoints( + epoch, + stake_amount, + earned_credits, + earned_points, + )); + } + } + + CalculatedStakePoints { + points, + new_credits_observed, + force_credits_update_with_skipped_reward: false, + } +} + +#[cfg(test)] +mod tests { + use {super::*, crate::stake_state::new_stake, solana_sdk::native_token}; + + #[test] + fn test_stake_state_calculate_points_with_typical_values() { + let mut vote_state = VoteState::default(); + + // bootstrap means fully-vested stake at epoch 0 with + // 10_000_000 SOL is a big but not unreasaonable stake + let stake = new_stake( + native_token::sol_to_lamports(10_000_000f64), + &Pubkey::default(), + &vote_state, + std::u64::MAX, + ); + + let epoch_slots: u128 = 14 * 24 * 3600 * 160; + // put 193,536,000 credits in at epoch 0, typical for a 14-day epoch + // this loop takes a few seconds... + for _ in 0..epoch_slots { + vote_state.increment_credits(0, 1); + } + + // no overflow on points + assert_eq!( + u128::from(stake.delegation.stake) * epoch_slots, + calculate_stake_points( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) + ); + } +} diff --git a/programs/stake/src/rewards.rs b/programs/stake/src/rewards.rs new file mode 100644 index 00000000000000..08416996520289 --- /dev/null +++ b/programs/stake/src/rewards.rs @@ -0,0 +1,647 @@ +//! Information about stake and voter rewards based on stake state. +//! Used by `solana-runtime`. + +use { + crate::points::{ + calculate_stake_points_and_credits, CalculatedStakePoints, InflationPointCalculationEvent, + PointValue, SkippedReason, + }, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + account_utils::StateMut, + clock::Epoch, + instruction::InstructionError, + stake::{ + instruction::StakeError, + state::{Stake, StakeStateV2}, + }, + stake_history::StakeHistory, + }, + solana_vote_program::vote_state::VoteState, +}; + +#[derive(Debug, PartialEq, Eq)] +struct CalculatedStakeRewards { + staker_rewards: u64, + voter_rewards: u64, + new_credits_observed: u64, +} + +// utility function, used by runtime +// returns a tuple of (stakers_reward,voters_reward) +#[doc(hidden)] +pub fn redeem_rewards( + rewarded_epoch: Epoch, + stake_state: StakeStateV2, + stake_account: &mut AccountSharedData, + vote_state: &VoteState, + point_value: &PointValue, + stake_history: &StakeHistory, + inflation_point_calc_tracer: Option, + new_rate_activation_epoch: Option, +) -> Result<(u64, u64), InstructionError> { + if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_state { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer( + &InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(stake.stake( + rewarded_epoch, + stake_history, + new_rate_activation_epoch, + )), + ); + inflation_point_calc_tracer(&InflationPointCalculationEvent::RentExemptReserve( + meta.rent_exempt_reserve, + )); + inflation_point_calc_tracer(&InflationPointCalculationEvent::Commission( + vote_state.commission, + )); + } + + if let Some((stakers_reward, voters_reward)) = redeem_stake_rewards( + rewarded_epoch, + &mut stake, + point_value, + vote_state, + stake_history, + 
inflation_point_calc_tracer, + new_rate_activation_epoch, + ) { + stake_account.checked_add_lamports(stakers_reward)?; + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))?; + + Ok((stakers_reward, voters_reward)) + } else { + Err(StakeError::NoCreditsToRedeem.into()) + } + } else { + Err(InstructionError::InvalidAccountData) + } +} + +fn redeem_stake_rewards( + rewarded_epoch: Epoch, + stake: &mut Stake, + point_value: &PointValue, + vote_state: &VoteState, + stake_history: &StakeHistory, + inflation_point_calc_tracer: Option, + new_rate_activation_epoch: Option, +) -> Option<(u64, u64)> { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&InflationPointCalculationEvent::CreditsObserved( + stake.credits_observed, + None, + )); + } + calculate_stake_rewards( + rewarded_epoch, + stake, + point_value, + vote_state, + stake_history, + inflation_point_calc_tracer.as_ref(), + new_rate_activation_epoch, + ) + .map(|calculated_stake_rewards| { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer { + inflation_point_calc_tracer(&InflationPointCalculationEvent::CreditsObserved( + stake.credits_observed, + Some(calculated_stake_rewards.new_credits_observed), + )); + } + stake.credits_observed = calculated_stake_rewards.new_credits_observed; + stake.delegation.stake += calculated_stake_rewards.staker_rewards; + ( + calculated_stake_rewards.staker_rewards, + calculated_stake_rewards.voter_rewards, + ) + }) +} + +/// for a given stake and vote_state, calculate what distributions and what updates should be made +/// returns a tuple in the case of a payout of: +/// * staker_rewards to be distributed +/// * voter_rewards to be distributed +/// * new value for credits_observed in the stake +/// returns None if there's no payout or if any deserved payout is < 1 lamport +fn calculate_stake_rewards( + rewarded_epoch: Epoch, + stake: &Stake, + point_value: &PointValue, + vote_state: &VoteState, + stake_history: &StakeHistory, + inflation_point_calc_tracer: Option, + new_rate_activation_epoch: Option, +) -> Option { + // ensure to run to trigger (optional) inflation_point_calc_tracer + let CalculatedStakePoints { + points, + new_credits_observed, + mut force_credits_update_with_skipped_reward, + } = calculate_stake_points_and_credits( + stake, + vote_state, + stake_history, + inflation_point_calc_tracer.as_ref(), + new_rate_activation_epoch, + ); + + // Drive credits_observed forward unconditionally when rewards are disabled + // or when this is the stake's activation epoch + if point_value.rewards == 0 { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::DisabledInflation.into()); + } + force_credits_update_with_skipped_reward = true; + } else if stake.delegation.activation_epoch == rewarded_epoch { + // not assert!()-ed; but points should be zero + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::JustActivated.into()); + } + force_credits_update_with_skipped_reward = true; + } + + if force_credits_update_with_skipped_reward { + return Some(CalculatedStakeRewards { + staker_rewards: 0, + voter_rewards: 0, + new_credits_observed, + }); + } + + if points == 0 { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::ZeroPoints.into()); + } + return None; + } + if point_value.points 
== 0 { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::ZeroPointValue.into()); + } + return None; + } + + let rewards = points + .checked_mul(u128::from(point_value.rewards)) + .unwrap() + .checked_div(point_value.points) + .unwrap(); + + let rewards = u64::try_from(rewards).unwrap(); + + // don't bother trying to split if fractional lamports got truncated + if rewards == 0 { + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::ZeroReward.into()); + } + return None; + } + let (voter_rewards, staker_rewards, is_split) = vote_state.commission_split(rewards); + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&InflationPointCalculationEvent::SplitRewards( + rewards, + voter_rewards, + staker_rewards, + (*point_value).clone(), + )); + } + + if (voter_rewards == 0 || staker_rewards == 0) && is_split { + // don't collect if we lose a whole lamport somewhere + // is_split means there should be tokens on both sides, + // uncool to move credits_observed if one side didn't get paid + if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { + inflation_point_calc_tracer(&SkippedReason::TooEarlyUnfairSplit.into()); + } + return None; + } + + Some(CalculatedStakeRewards { + staker_rewards, + voter_rewards, + new_credits_observed, + }) +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{points::null_tracer, stake_state::new_stake}, + solana_sdk::{native_token, pubkey::Pubkey}, + }; + + #[test] + fn test_stake_state_redeem_rewards() { + let mut vote_state = VoteState::default(); + // assume stake.stake() is right + // bootstrap means fully-vested stake at epoch 0 + let stake_lamports = 1; + let mut stake = new_stake( + stake_lamports, + &Pubkey::default(), + &vote_state, + std::u64::MAX, + ); + + // this one can't collect now, credits_observed == vote_state.credits() + assert_eq!( + None, + redeem_stake_rewards( + 0, + &mut stake, + &PointValue { + rewards: 1_000_000_000, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // put 2 credits in at epoch 0 + vote_state.increment_credits(0, 1); + vote_state.increment_credits(0, 1); + + // this one should be able to collect exactly 2 + assert_eq!( + Some((stake_lamports * 2, 0)), + redeem_stake_rewards( + 0, + &mut stake, + &PointValue { + rewards: 1, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + assert_eq!( + stake.delegation.stake, + stake_lamports + (stake_lamports * 2) + ); + assert_eq!(stake.credits_observed, 2); + } + + #[test] + fn test_stake_state_calculate_rewards() { + let mut vote_state = VoteState::default(); + // assume stake.stake() is right + // bootstrap means fully-vested stake at epoch 0 + let mut stake = new_stake(1, &Pubkey::default(), &vote_state, std::u64::MAX); + + // this one can't collect now, credits_observed == vote_state.credits() + assert_eq!( + None, + calculate_stake_rewards( + 0, + &stake, + &PointValue { + rewards: 1_000_000_000, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // put 2 credits in at epoch 0 + vote_state.increment_credits(0, 1); + vote_state.increment_credits(0, 1); + + // this one should be able to collect exactly 2 + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: stake.delegation.stake * 2, + 
voter_rewards: 0, + new_credits_observed: 2, + }), + calculate_stake_rewards( + 0, + &stake, + &PointValue { + rewards: 2, + points: 2 // all his + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + stake.credits_observed = 1; + // this one should be able to collect exactly 1 (already observed one) + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: stake.delegation.stake, + voter_rewards: 0, + new_credits_observed: 2, + }), + calculate_stake_rewards( + 0, + &stake, + &PointValue { + rewards: 1, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // put 1 credit in epoch 1 + vote_state.increment_credits(1, 1); + + stake.credits_observed = 2; + // this one should be able to collect the one just added + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: stake.delegation.stake, + voter_rewards: 0, + new_credits_observed: 3, + }), + calculate_stake_rewards( + 1, + &stake, + &PointValue { + rewards: 2, + points: 2 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // put 1 credit in epoch 2 + vote_state.increment_credits(2, 1); + // this one should be able to collect 2 now + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: stake.delegation.stake * 2, + voter_rewards: 0, + new_credits_observed: 4, + }), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 2, + points: 2 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + stake.credits_observed = 0; + // this one should be able to collect everything from t=0 a warmed up stake of 2 + // (2 credits at stake of 1) + (1 credit at a stake of 2) + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: stake.delegation.stake * 2 // epoch 0 + + stake.delegation.stake // epoch 1 + + stake.delegation.stake, // epoch 2 + voter_rewards: 0, + new_credits_observed: 4, + }), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 4, + points: 4 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // same as above, but is a really small commission out of 32 bits, + // verify that None comes back on small redemptions where no one gets paid + vote_state.commission = 1; + assert_eq!( + None, // would be Some((0, 2 * 1 + 1 * 2, 4)), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 4, + points: 4 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + vote_state.commission = 99; + assert_eq!( + None, // would be Some((0, 2 * 1 + 1 * 2, 4)), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 4, + points: 4 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // now one with inflation disabled. no one gets paid, but we still need + // to advance the stake state's credits_observed field to prevent back- + // paying rewards when inflation is turned on. 
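+        // (PointValue { rewards: 0, .. } models disabled inflation: expect a
+        // zero payout while credits_observed still advances to 4)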
+ assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: 0, + voter_rewards: 0, + new_credits_observed: 4, + }), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 0, + points: 4 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // credits_observed remains at previous level when vote_state credits are + // not advancing and inflation is disabled + stake.credits_observed = 4; + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: 0, + voter_rewards: 0, + new_credits_observed: 4, + }), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 0, + points: 4 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + assert_eq!( + CalculatedStakePoints { + points: 0, + new_credits_observed: 4, + force_credits_update_with_skipped_reward: false, + }, + calculate_stake_points_and_credits( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) + ); + + // credits_observed is auto-rewinded when vote_state credits are assumed to have been + // recreated + stake.credits_observed = 1000; + // this is new behavior 1; return the post-recreation rewinded credits from the vote account + assert_eq!( + CalculatedStakePoints { + points: 0, + new_credits_observed: 4, + force_credits_update_with_skipped_reward: true, + }, + calculate_stake_points_and_credits( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) + ); + // this is new behavior 2; don't hint when credits both from stake and vote are identical + stake.credits_observed = 4; + assert_eq!( + CalculatedStakePoints { + points: 0, + new_credits_observed: 4, + force_credits_update_with_skipped_reward: false, + }, + calculate_stake_points_and_credits( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) + ); + + // get rewards and credits observed when not the activation epoch + vote_state.commission = 0; + stake.credits_observed = 3; + stake.delegation.activation_epoch = 1; + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: stake.delegation.stake, // epoch 2 + voter_rewards: 0, + new_credits_observed: 4, + }), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 1, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + + // credits_observed is moved forward for the stake's activation epoch, + // and no rewards are perceived + stake.delegation.activation_epoch = 2; + stake.credits_observed = 3; + assert_eq!( + Some(CalculatedStakeRewards { + staker_rewards: 0, + voter_rewards: 0, + new_credits_observed: 4, + }), + calculate_stake_rewards( + 2, + &stake, + &PointValue { + rewards: 1, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + } + + #[test] + fn test_stake_state_calculate_points_with_typical_values() { + let vote_state = VoteState::default(); + + // bootstrap means fully-vested stake at epoch 0 with + // 10_000_000 SOL is a big but not unreasaonable stake + let stake = new_stake( + native_token::sol_to_lamports(10_000_000f64), + &Pubkey::default(), + &vote_state, + std::u64::MAX, + ); + + // this one can't collect now, credits_observed == vote_state.credits() + assert_eq!( + None, + calculate_stake_rewards( + 0, + &stake, + &PointValue { + rewards: 1_000_000_000, + points: 1 + }, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None, + ) + ); + } +} diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 
17232d083f06ec..eda238f93dd8d7 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -11,7 +11,7 @@ pub use solana_sdk::stake::state::*; use { solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, + account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, clock::{Clock, Epoch}, feature_set::{self, FeatureSet}, @@ -30,44 +30,9 @@ use { }, }, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, - std::{cmp::Ordering, collections::HashSet, convert::TryFrom}, + std::{collections::HashSet, convert::TryFrom}, }; -#[derive(Debug)] -pub enum SkippedReason { - DisabledInflation, - JustActivated, - TooEarlyUnfairSplit, - ZeroPoints, - ZeroPointValue, - ZeroReward, - ZeroCreditsAndReturnZero, - ZeroCreditsAndReturnCurrent, - ZeroCreditsAndReturnRewinded, -} - -impl From for InflationPointCalculationEvent { - fn from(reason: SkippedReason) -> Self { - InflationPointCalculationEvent::Skipped(reason) - } -} - -#[derive(Debug)] -pub enum InflationPointCalculationEvent { - CalculatedPoints(u64, u128, u128, u128), - SplitRewards(u64, u64, u64, PointValue), - EffectiveStakeAtRewardedEpoch(u64), - RentExemptReserve(u64), - Delegation(Delegation, Pubkey), - Commission(u8), - CreditsObserved(u64, Option), - Skipped(SkippedReason), -} - -pub(crate) fn null_tracer() -> Option { - None:: -} - // utility function, used by Stakes, tests pub fn from>(account: &T) -> Option { account.state().ok() @@ -179,300 +144,6 @@ pub(crate) fn new_stake( } } -/// captures a rewards round as lamports to be awarded -/// and the total points over which those lamports -/// are to be distributed -// basically read as rewards/points, but in integers instead of as an f64 -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PointValue { - pub rewards: u64, // lamports to split - pub points: u128, // over these points -} - -fn redeem_stake_rewards( - rewarded_epoch: Epoch, - stake: &mut Stake, - point_value: &PointValue, - vote_state: &VoteState, - stake_history: &StakeHistory, - inflation_point_calc_tracer: Option, - new_rate_activation_epoch: Option, -) -> Option<(u64, u64)> { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&InflationPointCalculationEvent::CreditsObserved( - stake.credits_observed, - None, - )); - } - calculate_stake_rewards( - rewarded_epoch, - stake, - point_value, - vote_state, - stake_history, - inflation_point_calc_tracer.as_ref(), - new_rate_activation_epoch, - ) - .map(|calculated_stake_rewards| { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer { - inflation_point_calc_tracer(&InflationPointCalculationEvent::CreditsObserved( - stake.credits_observed, - Some(calculated_stake_rewards.new_credits_observed), - )); - } - stake.credits_observed = calculated_stake_rewards.new_credits_observed; - stake.delegation.stake += calculated_stake_rewards.staker_rewards; - ( - calculated_stake_rewards.staker_rewards, - calculated_stake_rewards.voter_rewards, - ) - }) -} - -fn calculate_stake_points( - stake: &Stake, - vote_state: &VoteState, - stake_history: &StakeHistory, - inflation_point_calc_tracer: Option, - new_rate_activation_epoch: Option, -) -> u128 { - calculate_stake_points_and_credits( - stake, - vote_state, - stake_history, - inflation_point_calc_tracer, - new_rate_activation_epoch, - ) - .points -} - -#[derive(Debug, PartialEq, Eq)] -struct CalculatedStakePoints { - 
points: u128, - new_credits_observed: u64, - force_credits_update_with_skipped_reward: bool, -} - -/// for a given stake and vote_state, calculate how many -/// points were earned (credits * stake) and new value -/// for credits_observed were the points paid -fn calculate_stake_points_and_credits( - stake: &Stake, - new_vote_state: &VoteState, - stake_history: &StakeHistory, - inflation_point_calc_tracer: Option, - new_rate_activation_epoch: Option, -) -> CalculatedStakePoints { - let credits_in_stake = stake.credits_observed; - let credits_in_vote = new_vote_state.credits(); - // if there is no newer credits since observed, return no point - match credits_in_vote.cmp(&credits_in_stake) { - Ordering::Less => { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::ZeroCreditsAndReturnRewinded.into()); - } - // Don't adjust stake.activation_epoch for simplicity: - // - generally fast-forwarding stake.activation_epoch forcibly (for - // artificial re-activation with re-warm-up) skews the stake - // history sysvar. And properly handling all the cases - // regarding deactivation epoch/warm-up/cool-down without - // introducing incentive skew is hard. - // - Conceptually, it should be acceptable for the staked SOLs at - // the recreated vote to receive rewards again immediately after - // rewind even if it looks like instant activation. That's - // because it must have passed the required warmed-up at least - // once in the past already - // - Also such a stake account remains to be a part of overall - // effective stake calculation even while the vote account is - // missing for (indefinite) time or remains to be pre-remove - // credits score. It should be treated equally to staking with - // delinquent validator with no differentiation. - - // hint with true to indicate some exceptional credits handling is needed - return CalculatedStakePoints { - points: 0, - new_credits_observed: credits_in_vote, - force_credits_update_with_skipped_reward: true, - }; - } - Ordering::Equal => { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::ZeroCreditsAndReturnCurrent.into()); - } - // don't hint caller and return current value if credits remain unchanged (= delinquent) - return CalculatedStakePoints { - points: 0, - new_credits_observed: credits_in_stake, - force_credits_update_with_skipped_reward: false, - }; - } - Ordering::Greater => {} - } - - let mut points = 0; - let mut new_credits_observed = credits_in_stake; - - for (epoch, final_epoch_credits, initial_epoch_credits) in - new_vote_state.epoch_credits().iter().copied() - { - let stake_amount = u128::from(stake.delegation.stake( - epoch, - stake_history, - new_rate_activation_epoch, - )); - - // figure out how much this stake has seen that - // for which the vote account has a record - let earned_credits = if credits_in_stake < initial_epoch_credits { - // the staker observed the entire epoch - final_epoch_credits - initial_epoch_credits - } else if credits_in_stake < final_epoch_credits { - // the staker registered sometime during the epoch, partial credit - final_epoch_credits - new_credits_observed - } else { - // the staker has already observed or been redeemed this epoch - // or was activated after this epoch - 0 - }; - let earned_credits = u128::from(earned_credits); - - // don't want to assume anything about order of the iterator... 
- new_credits_observed = new_credits_observed.max(final_epoch_credits); - - // finally calculate points for this epoch - let earned_points = stake_amount * earned_credits; - points += earned_points; - - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&InflationPointCalculationEvent::CalculatedPoints( - epoch, - stake_amount, - earned_credits, - earned_points, - )); - } - } - - CalculatedStakePoints { - points, - new_credits_observed, - force_credits_update_with_skipped_reward: false, - } -} - -#[derive(Debug, PartialEq, Eq)] -struct CalculatedStakeRewards { - staker_rewards: u64, - voter_rewards: u64, - new_credits_observed: u64, -} - -/// for a given stake and vote_state, calculate what distributions and what updates should be made -/// returns a tuple in the case of a payout of: -/// * staker_rewards to be distributed -/// * voter_rewards to be distributed -/// * new value for credits_observed in the stake -/// returns None if there's no payout or if any deserved payout is < 1 lamport -fn calculate_stake_rewards( - rewarded_epoch: Epoch, - stake: &Stake, - point_value: &PointValue, - vote_state: &VoteState, - stake_history: &StakeHistory, - inflation_point_calc_tracer: Option, - new_rate_activation_epoch: Option, -) -> Option { - // ensure to run to trigger (optional) inflation_point_calc_tracer - let CalculatedStakePoints { - points, - new_credits_observed, - mut force_credits_update_with_skipped_reward, - } = calculate_stake_points_and_credits( - stake, - vote_state, - stake_history, - inflation_point_calc_tracer.as_ref(), - new_rate_activation_epoch, - ); - - // Drive credits_observed forward unconditionally when rewards are disabled - // or when this is the stake's activation epoch - if point_value.rewards == 0 { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::DisabledInflation.into()); - } - force_credits_update_with_skipped_reward = true; - } else if stake.delegation.activation_epoch == rewarded_epoch { - // not assert!()-ed; but points should be zero - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::JustActivated.into()); - } - force_credits_update_with_skipped_reward = true; - } - - if force_credits_update_with_skipped_reward { - return Some(CalculatedStakeRewards { - staker_rewards: 0, - voter_rewards: 0, - new_credits_observed, - }); - } - - if points == 0 { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::ZeroPoints.into()); - } - return None; - } - if point_value.points == 0 { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::ZeroPointValue.into()); - } - return None; - } - - let rewards = points - .checked_mul(u128::from(point_value.rewards)) - .unwrap() - .checked_div(point_value.points) - .unwrap(); - - let rewards = u64::try_from(rewards).unwrap(); - - // don't bother trying to split if fractional lamports got truncated - if rewards == 0 { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::ZeroReward.into()); - } - return None; - } - let (voter_rewards, staker_rewards, is_split) = vote_state.commission_split(rewards); - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - 
inflation_point_calc_tracer(&InflationPointCalculationEvent::SplitRewards( - rewards, - voter_rewards, - staker_rewards, - (*point_value).clone(), - )); - } - - if (voter_rewards == 0 || staker_rewards == 0) && is_split { - // don't collect if we lose a whole lamport somewhere - // is_split means there should be tokens on both sides, - // uncool to move credits_observed if one side didn't get paid - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer(&SkippedReason::TooEarlyUnfairSplit.into()); - } - return None; - } - - Some(CalculatedStakeRewards { - staker_rewards, - voter_rewards, - new_credits_observed, - }) -} - pub fn initialize( stake_account: &mut BorrowedAccount, authorized: &Authorized, @@ -1577,78 +1248,6 @@ fn stake_weighted_credits_observed( } } -// utility function, used by runtime -// returns a tuple of (stakers_reward,voters_reward) -#[doc(hidden)] -pub fn redeem_rewards( - rewarded_epoch: Epoch, - stake_state: StakeStateV2, - stake_account: &mut AccountSharedData, - vote_state: &VoteState, - point_value: &PointValue, - stake_history: &StakeHistory, - inflation_point_calc_tracer: Option, - new_rate_activation_epoch: Option, -) -> Result<(u64, u64), InstructionError> { - if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_state { - if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { - inflation_point_calc_tracer( - &InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(stake.stake( - rewarded_epoch, - stake_history, - new_rate_activation_epoch, - )), - ); - inflation_point_calc_tracer(&InflationPointCalculationEvent::RentExemptReserve( - meta.rent_exempt_reserve, - )); - inflation_point_calc_tracer(&InflationPointCalculationEvent::Commission( - vote_state.commission, - )); - } - - if let Some((stakers_reward, voters_reward)) = redeem_stake_rewards( - rewarded_epoch, - &mut stake, - point_value, - vote_state, - stake_history, - inflation_point_calc_tracer, - new_rate_activation_epoch, - ) { - stake_account.checked_add_lamports(stakers_reward)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))?; - - Ok((stakers_reward, voters_reward)) - } else { - Err(StakeError::NoCreditsToRedeem.into()) - } - } else { - Err(InstructionError::InvalidAccountData) - } -} - -// utility function, used by runtime -#[doc(hidden)] -pub fn calculate_points( - stake_state: &StakeStateV2, - vote_state: &VoteState, - stake_history: &StakeHistory, - new_rate_activation_epoch: Option, -) -> Result { - if let StakeStateV2::Stake(_meta, stake, _stake_flags) = stake_state { - Ok(calculate_stake_points( - stake, - vote_state, - stake_history, - null_tracer(), - new_rate_activation_epoch, - )) - } else { - Err(InstructionError::InvalidAccountData) - } -} - pub type RewriteStakeStatus = (&'static str, (u64, u64), (u64, u64)); // utility function, used by runtime::Stakes, tests @@ -1804,7 +1403,6 @@ mod tests { solana_sdk::{ account::{create_account_shared_data_for_test, AccountSharedData}, epoch_schedule::EpochSchedule, - native_token, pubkey::Pubkey, stake::state::warmup_cooldown_rate, sysvar::{epoch_schedule, SysvarId}, @@ -2526,438 +2124,6 @@ mod tests { } } - #[test] - fn test_stake_state_redeem_rewards() { - let mut vote_state = VoteState::default(); - // assume stake.stake() is right - // bootstrap means fully-vested stake at epoch 0 - let stake_lamports = 1; - let mut stake = new_stake( - stake_lamports, - &Pubkey::default(), - &vote_state, - std::u64::MAX, - ); - - // 
this one can't collect now, credits_observed == vote_state.credits() - assert_eq!( - None, - redeem_stake_rewards( - 0, - &mut stake, - &PointValue { - rewards: 1_000_000_000, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // put 2 credits in at epoch 0 - vote_state.increment_credits(0, 1); - vote_state.increment_credits(0, 1); - - // this one should be able to collect exactly 2 - assert_eq!( - Some((stake_lamports * 2, 0)), - redeem_stake_rewards( - 0, - &mut stake, - &PointValue { - rewards: 1, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - assert_eq!( - stake.delegation.stake, - stake_lamports + (stake_lamports * 2) - ); - assert_eq!(stake.credits_observed, 2); - } - - #[test] - fn test_stake_state_calculate_points_with_typical_values() { - let mut vote_state = VoteState::default(); - - // bootstrap means fully-vested stake at epoch 0 with - // 10_000_000 SOL is a big but not unreasaonable stake - let stake = new_stake( - native_token::sol_to_lamports(10_000_000f64), - &Pubkey::default(), - &vote_state, - std::u64::MAX, - ); - - // this one can't collect now, credits_observed == vote_state.credits() - assert_eq!( - None, - calculate_stake_rewards( - 0, - &stake, - &PointValue { - rewards: 1_000_000_000, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - let epoch_slots: u128 = 14 * 24 * 3600 * 160; - // put 193,536,000 credits in at epoch 0, typical for a 14-day epoch - // this loop takes a few seconds... - for _ in 0..epoch_slots { - vote_state.increment_credits(0, 1); - } - - // no overflow on points - assert_eq!( - u128::from(stake.delegation.stake) * epoch_slots, - calculate_stake_points( - &stake, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None - ) - ); - } - - #[test] - fn test_stake_state_calculate_rewards() { - let mut vote_state = VoteState::default(); - // assume stake.stake() is right - // bootstrap means fully-vested stake at epoch 0 - let mut stake = new_stake(1, &Pubkey::default(), &vote_state, std::u64::MAX); - - // this one can't collect now, credits_observed == vote_state.credits() - assert_eq!( - None, - calculate_stake_rewards( - 0, - &stake, - &PointValue { - rewards: 1_000_000_000, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // put 2 credits in at epoch 0 - vote_state.increment_credits(0, 1); - vote_state.increment_credits(0, 1); - - // this one should be able to collect exactly 2 - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: stake.delegation.stake * 2, - voter_rewards: 0, - new_credits_observed: 2, - }), - calculate_stake_rewards( - 0, - &stake, - &PointValue { - rewards: 2, - points: 2 // all his - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - stake.credits_observed = 1; - // this one should be able to collect exactly 1 (already observed one) - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: stake.delegation.stake, - voter_rewards: 0, - new_credits_observed: 2, - }), - calculate_stake_rewards( - 0, - &stake, - &PointValue { - rewards: 1, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // put 1 credit in epoch 1 - vote_state.increment_credits(1, 1); - - stake.credits_observed = 2; - // this one should be able to collect the one just added - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: stake.delegation.stake, - 
voter_rewards: 0, - new_credits_observed: 3, - }), - calculate_stake_rewards( - 1, - &stake, - &PointValue { - rewards: 2, - points: 2 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // put 1 credit in epoch 2 - vote_state.increment_credits(2, 1); - // this one should be able to collect 2 now - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: stake.delegation.stake * 2, - voter_rewards: 0, - new_credits_observed: 4, - }), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 2, - points: 2 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - stake.credits_observed = 0; - // this one should be able to collect everything from t=0 a warmed up stake of 2 - // (2 credits at stake of 1) + (1 credit at a stake of 2) - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: stake.delegation.stake * 2 // epoch 0 - + stake.delegation.stake // epoch 1 - + stake.delegation.stake, // epoch 2 - voter_rewards: 0, - new_credits_observed: 4, - }), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 4, - points: 4 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // same as above, but is a really small commission out of 32 bits, - // verify that None comes back on small redemptions where no one gets paid - vote_state.commission = 1; - assert_eq!( - None, // would be Some((0, 2 * 1 + 1 * 2, 4)), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 4, - points: 4 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - vote_state.commission = 99; - assert_eq!( - None, // would be Some((0, 2 * 1 + 1 * 2, 4)), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 4, - points: 4 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // now one with inflation disabled. no one gets paid, but we still need - // to advance the stake state's credits_observed field to prevent back- - // paying rewards when inflation is turned on. 
- assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: 0, - voter_rewards: 0, - new_credits_observed: 4, - }), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 0, - points: 4 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // credits_observed remains at previous level when vote_state credits are - // not advancing and inflation is disabled - stake.credits_observed = 4; - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: 0, - voter_rewards: 0, - new_credits_observed: 4, - }), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 0, - points: 4 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - assert_eq!( - CalculatedStakePoints { - points: 0, - new_credits_observed: 4, - force_credits_update_with_skipped_reward: false, - }, - calculate_stake_points_and_credits( - &stake, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None - ) - ); - - // credits_observed is auto-rewinded when vote_state credits are assumed to have been - // recreated - stake.credits_observed = 1000; - // this is new behavior 1; return the post-recreation rewinded credits from the vote account - assert_eq!( - CalculatedStakePoints { - points: 0, - new_credits_observed: 4, - force_credits_update_with_skipped_reward: true, - }, - calculate_stake_points_and_credits( - &stake, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None - ) - ); - // this is new behavior 2; don't hint when credits both from stake and vote are identical - stake.credits_observed = 4; - assert_eq!( - CalculatedStakePoints { - points: 0, - new_credits_observed: 4, - force_credits_update_with_skipped_reward: false, - }, - calculate_stake_points_and_credits( - &stake, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None - ) - ); - - // get rewards and credits observed when not the activation epoch - vote_state.commission = 0; - stake.credits_observed = 3; - stake.delegation.activation_epoch = 1; - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: stake.delegation.stake, // epoch 2 - voter_rewards: 0, - new_credits_observed: 4, - }), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 1, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - - // credits_observed is moved forward for the stake's activation epoch, - // and no rewards are perceived - stake.delegation.activation_epoch = 2; - stake.credits_observed = 3; - assert_eq!( - Some(CalculatedStakeRewards { - staker_rewards: 0, - voter_rewards: 0, - new_credits_observed: 4, - }), - calculate_stake_rewards( - 2, - &stake, - &PointValue { - rewards: 1, - points: 1 - }, - &vote_state, - &StakeHistory::default(), - null_tracer(), - None, - ) - ); - } - #[test] fn test_lockup_is_expired() { let custodian = solana_sdk::pubkey::new_rand(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d72e3771cb4408..84c9e2093ebf50 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -156,8 +156,9 @@ use { }, transaction_context::{TransactionAccount, TransactionReturnData}, }, - solana_stake_program::stake_state::{ - self, InflationPointCalculationEvent, PointValue, StakeStateV2, + solana_stake_program::{ + points::{InflationPointCalculationEvent, PointValue}, + stake_state::StakeStateV2, }, solana_svm::{ account_loader::{TransactionCheckResult, TransactionLoadResult}, @@ -2989,7 +2990,7 @@ impl Bank { return 0; }; - stake_state::calculate_points( + 
solana_stake_program::points::calculate_points( stake_account.stake_state(), vote_state, stake_history, @@ -3026,7 +3027,7 @@ impl Bank { delegations .par_iter() .map(|(_stake_pubkey, stake_account)| { - stake_state::calculate_points( + solana_stake_program::points::calculate_points( stake_account.stake_state(), vote_state, stake_history, @@ -3106,7 +3107,7 @@ impl Bank { let pre_lamport = stake_account.lamports(); - let redeemed = stake_state::redeem_rewards( + let redeemed = solana_stake_program::rewards::redeem_rewards( rewarded_epoch, stake_state, &mut stake_account, @@ -3154,7 +3155,7 @@ impl Bank { }); } else { debug!( - "stake_state::redeem_rewards() failed for {}: {:?}", + "solana_stake_program::rewards::redeem_rewards() failed for {}: {:?}", stake_pubkey, redeemed ); } @@ -3225,7 +3226,7 @@ impl Bank { }); let (mut stake_account, stake_state) = <(AccountSharedData, StakeStateV2)>::from(stake_account); - let redeemed = stake_state::redeem_rewards( + let redeemed = solana_stake_program::rewards::redeem_rewards( rewarded_epoch, stake_state, &mut stake_account, @@ -3261,7 +3262,7 @@ impl Bank { }); } else { debug!( - "stake_state::redeem_rewards() failed for {}: {:?}", + "solana_stake_program::rewards::redeem_rewards() failed for {}: {:?}", stake_pubkey, redeemed ); } From 7399178c7837e71381b022c38481644f7c8d846f Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 1 Mar 2024 08:20:17 -0800 Subject: [PATCH 297/401] Set slot and env in tx batch specific cache (#35377) --- svm/src/transaction_processor.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 62f06585fff4ac..d319e2103838b8 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -416,7 +416,12 @@ impl TransactionBatchProcessor { .finish_cooperative_loading_task(self.slot, key, program) && limit_to_load_programs { - let mut ret = LoadedProgramsForTxBatch::default(); + let mut ret = LoadedProgramsForTxBatch::new( + self.slot, + loaded_programs_cache + .get_environments_for_epoch(self.epoch) + .clone(), + ); ret.hit_max_limit = true; return ret; } From 7c878973e2dcf6476a2bcff5eaebb45afb683235 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 1 Mar 2024 12:30:50 -0600 Subject: [PATCH 298/401] Cleanup ReplayStage loop timing struct (#35361) - Track loop_count in the struct - Rename ReplayTiming ==> ReplayLoopTiming - Make all metrics consistent to end with "_elapsed_us" --- core/src/replay_stage.rs | 244 +++++++++++++++++++++------------------ 1 file changed, 130 insertions(+), 114 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index a80a04d47c1573..46014e3f7912de 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -297,86 +297,97 @@ pub struct ReplayStageConfig { pub replay_slots_concurrently: bool, } +/// Timing information for the ReplayStage main processing loop #[derive(Default)] -pub struct ReplayTiming { - last_print: u64, - collect_frozen_banks_elapsed: u64, - compute_bank_stats_elapsed: u64, - select_vote_and_reset_forks_elapsed: u64, - start_leader_elapsed: u64, - reset_bank_elapsed: u64, - voting_elapsed: u64, +struct ReplayLoopTiming { + last_submit: u64, + loop_count: u64, + collect_frozen_banks_elapsed_us: u64, + compute_bank_stats_elapsed_us: u64, + select_vote_and_reset_forks_elapsed_us: u64, + start_leader_elapsed_us: u64, + reset_bank_elapsed_us: u64, + voting_elapsed_us: u64, generate_vote_us: u64, update_commitment_cache_us: u64, - 
select_forks_elapsed: u64, - compute_slot_stats_elapsed: u64, - generate_new_bank_forks_elapsed: u64, - replay_active_banks_elapsed: u64, - wait_receive_elapsed: u64, - heaviest_fork_failures_elapsed: u64, + select_forks_elapsed_us: u64, + compute_slot_stats_elapsed_us: u64, + generate_new_bank_forks_elapsed_us: u64, + replay_active_banks_elapsed_us: u64, + wait_receive_elapsed_us: u64, + heaviest_fork_failures_elapsed_us: u64, bank_count: u64, - process_ancestor_hashes_duplicate_slots_elapsed: u64, - process_duplicate_confirmed_slots_elapsed: u64, - process_duplicate_slots_elapsed: u64, - process_unfrozen_gossip_verified_vote_hashes_elapsed: u64, - process_popular_pruned_forks_elapsed: u64, - repair_correct_slots_elapsed: u64, - retransmit_not_propagated_elapsed: u64, + process_ancestor_hashes_duplicate_slots_elapsed_us: u64, + process_duplicate_confirmed_slots_elapsed_us: u64, + process_duplicate_slots_elapsed_us: u64, + process_unfrozen_gossip_verified_vote_hashes_elapsed_us: u64, + process_popular_pruned_forks_elapsed_us: u64, + repair_correct_slots_elapsed_us: u64, + retransmit_not_propagated_elapsed_us: u64, generate_new_bank_forks_read_lock_us: u64, generate_new_bank_forks_get_slots_since_us: u64, generate_new_bank_forks_loop_us: u64, generate_new_bank_forks_write_lock_us: u64, - replay_blockstore_us: u64, //< When processing forks concurrently, only captures the longest fork + // When processing multiple forks concurrently, only captures the longest fork + replay_blockstore_us: u64, } -impl ReplayTiming { +impl ReplayLoopTiming { #[allow(clippy::too_many_arguments)] fn update( &mut self, - collect_frozen_banks_elapsed: u64, - compute_bank_stats_elapsed: u64, - select_vote_and_reset_forks_elapsed: u64, - start_leader_elapsed: u64, - reset_bank_elapsed: u64, - voting_elapsed: u64, - select_forks_elapsed: u64, - compute_slot_stats_elapsed: u64, - generate_new_bank_forks_elapsed: u64, - replay_active_banks_elapsed: u64, - wait_receive_elapsed: u64, - heaviest_fork_failures_elapsed: u64, + collect_frozen_banks_elapsed_us: u64, + compute_bank_stats_elapsed_us: u64, + select_vote_and_reset_forks_elapsed_us: u64, + start_leader_elapsed_us: u64, + reset_bank_elapsed_us: u64, + voting_elapsed_us: u64, + select_forks_elapsed_us: u64, + compute_slot_stats_elapsed_us: u64, + generate_new_bank_forks_elapsed_us: u64, + replay_active_banks_elapsed_us: u64, + wait_receive_elapsed_us: u64, + heaviest_fork_failures_elapsed_us: u64, bank_count: u64, - process_ancestor_hashes_duplicate_slots_elapsed: u64, - process_duplicate_confirmed_slots_elapsed: u64, - process_unfrozen_gossip_verified_vote_hashes_elapsed: u64, - process_popular_pruned_forks_elapsed: u64, - process_duplicate_slots_elapsed: u64, - repair_correct_slots_elapsed: u64, - retransmit_not_propagated_elapsed: u64, + process_ancestor_hashes_duplicate_slots_elapsed_us: u64, + process_duplicate_confirmed_slots_elapsed_us: u64, + process_unfrozen_gossip_verified_vote_hashes_elapsed_us: u64, + process_popular_pruned_forks_elapsed_us: u64, + process_duplicate_slots_elapsed_us: u64, + repair_correct_slots_elapsed_us: u64, + retransmit_not_propagated_elapsed_us: u64, ) { - self.collect_frozen_banks_elapsed += collect_frozen_banks_elapsed; - self.compute_bank_stats_elapsed += compute_bank_stats_elapsed; - self.select_vote_and_reset_forks_elapsed += select_vote_and_reset_forks_elapsed; - self.start_leader_elapsed += start_leader_elapsed; - self.reset_bank_elapsed += reset_bank_elapsed; - self.voting_elapsed += voting_elapsed; - 
self.select_forks_elapsed += select_forks_elapsed; - self.compute_slot_stats_elapsed += compute_slot_stats_elapsed; - self.generate_new_bank_forks_elapsed += generate_new_bank_forks_elapsed; - self.replay_active_banks_elapsed += replay_active_banks_elapsed; - self.wait_receive_elapsed += wait_receive_elapsed; - self.heaviest_fork_failures_elapsed += heaviest_fork_failures_elapsed; + self.loop_count += 1; + self.collect_frozen_banks_elapsed_us += collect_frozen_banks_elapsed_us; + self.compute_bank_stats_elapsed_us += compute_bank_stats_elapsed_us; + self.select_vote_and_reset_forks_elapsed_us += select_vote_and_reset_forks_elapsed_us; + self.start_leader_elapsed_us += start_leader_elapsed_us; + self.reset_bank_elapsed_us += reset_bank_elapsed_us; + self.voting_elapsed_us += voting_elapsed_us; + self.select_forks_elapsed_us += select_forks_elapsed_us; + self.compute_slot_stats_elapsed_us += compute_slot_stats_elapsed_us; + self.generate_new_bank_forks_elapsed_us += generate_new_bank_forks_elapsed_us; + self.replay_active_banks_elapsed_us += replay_active_banks_elapsed_us; + self.wait_receive_elapsed_us += wait_receive_elapsed_us; + self.heaviest_fork_failures_elapsed_us += heaviest_fork_failures_elapsed_us; self.bank_count += bank_count; - self.process_ancestor_hashes_duplicate_slots_elapsed += - process_ancestor_hashes_duplicate_slots_elapsed; - self.process_duplicate_confirmed_slots_elapsed += process_duplicate_confirmed_slots_elapsed; - self.process_unfrozen_gossip_verified_vote_hashes_elapsed += - process_unfrozen_gossip_verified_vote_hashes_elapsed; - self.process_popular_pruned_forks_elapsed += process_popular_pruned_forks_elapsed; - self.process_duplicate_slots_elapsed += process_duplicate_slots_elapsed; - self.repair_correct_slots_elapsed += repair_correct_slots_elapsed; - self.retransmit_not_propagated_elapsed += retransmit_not_propagated_elapsed; + self.process_ancestor_hashes_duplicate_slots_elapsed_us += + process_ancestor_hashes_duplicate_slots_elapsed_us; + self.process_duplicate_confirmed_slots_elapsed_us += + process_duplicate_confirmed_slots_elapsed_us; + self.process_unfrozen_gossip_verified_vote_hashes_elapsed_us += + process_unfrozen_gossip_verified_vote_hashes_elapsed_us; + self.process_popular_pruned_forks_elapsed_us += process_popular_pruned_forks_elapsed_us; + self.process_duplicate_slots_elapsed_us += process_duplicate_slots_elapsed_us; + self.repair_correct_slots_elapsed_us += repair_correct_slots_elapsed_us; + self.retransmit_not_propagated_elapsed_us += retransmit_not_propagated_elapsed_us; + + self.maybe_submit(); + } + + fn maybe_submit(&mut self) { let now = timestamp(); - let elapsed_ms = now - self.last_print; + let elapsed_ms = now - self.last_submit; + if elapsed_ms > 1000 { datapoint_info!( "replay-loop-voting-stats", @@ -389,93 +400,98 @@ impl ReplayTiming { ); datapoint_info!( "replay-loop-timing-stats", + ("loop_count", self.loop_count as i64, i64), ("total_elapsed_us", elapsed_ms * 1000, i64), ( - "collect_frozen_banks_elapsed", - self.collect_frozen_banks_elapsed as i64, + "collect_frozen_banks_elapsed_us", + self.collect_frozen_banks_elapsed_us as i64, + i64 + ), + ( + "compute_bank_stats_elapsed_us", + self.compute_bank_stats_elapsed_us as i64, i64 ), ( - "compute_bank_stats_elapsed", - self.compute_bank_stats_elapsed as i64, + "select_vote_and_reset_forks_elapsed_us", + self.select_vote_and_reset_forks_elapsed_us as i64, i64 ), ( - "select_vote_and_reset_forks_elapsed", - self.select_vote_and_reset_forks_elapsed as i64, + 
"start_leader_elapsed_us", + self.start_leader_elapsed_us as i64, i64 ), ( - "start_leader_elapsed", - self.start_leader_elapsed as i64, + "reset_bank_elapsed_us", + self.reset_bank_elapsed_us as i64, i64 ), - ("reset_bank_elapsed", self.reset_bank_elapsed as i64, i64), - ("voting_elapsed", self.voting_elapsed as i64, i64), + ("voting_elapsed_us", self.voting_elapsed_us as i64, i64), ( - "select_forks_elapsed", - self.select_forks_elapsed as i64, + "select_forks_elapsed_us", + self.select_forks_elapsed_us as i64, i64 ), ( - "compute_slot_stats_elapsed", - self.compute_slot_stats_elapsed as i64, + "compute_slot_stats_elapsed_us", + self.compute_slot_stats_elapsed_us as i64, i64 ), ( - "generate_new_bank_forks_elapsed", - self.generate_new_bank_forks_elapsed as i64, + "generate_new_bank_forks_elapsed_us", + self.generate_new_bank_forks_elapsed_us as i64, i64 ), ( - "replay_active_banks_elapsed", - self.replay_active_banks_elapsed as i64, + "replay_active_banks_elapsed_us", + self.replay_active_banks_elapsed_us as i64, i64 ), ( - "process_ancestor_hashes_duplicate_slots_elapsed", - self.process_ancestor_hashes_duplicate_slots_elapsed as i64, + "process_ancestor_hashes_duplicate_slots_elapsed_us", + self.process_ancestor_hashes_duplicate_slots_elapsed_us as i64, i64 ), ( - "process_duplicate_confirmed_slots_elapsed", - self.process_duplicate_confirmed_slots_elapsed as i64, + "process_duplicate_confirmed_slots_elapsed_us", + self.process_duplicate_confirmed_slots_elapsed_us as i64, i64 ), ( - "process_unfrozen_gossip_verified_vote_hashes_elapsed", - self.process_unfrozen_gossip_verified_vote_hashes_elapsed as i64, + "process_unfrozen_gossip_verified_vote_hashes_elapsed_us", + self.process_unfrozen_gossip_verified_vote_hashes_elapsed_us as i64, i64 ), ( - "process_popular_pruned_forks_elapsed", - self.process_popular_pruned_forks_elapsed as i64, + "process_popular_pruned_forks_elapsed_us", + self.process_popular_pruned_forks_elapsed_us as i64, i64 ), ( - "wait_receive_elapsed", - self.wait_receive_elapsed as i64, + "wait_receive_elapsed_us", + self.wait_receive_elapsed_us as i64, i64 ), ( - "heaviest_fork_failures_elapsed", - self.heaviest_fork_failures_elapsed as i64, + "heaviest_fork_failures_elapsed_us", + self.heaviest_fork_failures_elapsed_us as i64, i64 ), ("bank_count", self.bank_count as i64, i64), ( - "process_duplicate_slots_elapsed", - self.process_duplicate_slots_elapsed as i64, + "process_duplicate_slots_elapsed_us", + self.process_duplicate_slots_elapsed_us as i64, i64 ), ( - "repair_correct_slots_elapsed", - self.repair_correct_slots_elapsed as i64, + "repair_correct_slots_elapsed_us", + self.repair_correct_slots_elapsed_us as i64, i64 ), ( - "retransmit_not_propagated_elapsed", - self.retransmit_not_propagated_elapsed as i64, + "retransmit_not_propagated_elapsed_us", + self.retransmit_not_propagated_elapsed_us as i64, i64 ), ( @@ -504,8 +520,8 @@ impl ReplayTiming { i64 ), ); - *self = ReplayTiming::default(); - self.last_print = now; + *self = ReplayLoopTiming::default(); + self.last_submit = now; } } } @@ -615,7 +631,7 @@ impl ReplayStage { let mut last_reset = Hash::default(); let mut partition_info = PartitionInfo::new(); let mut skipped_slots_info = SkippedSlotsInfo::default(); - let mut replay_timing = ReplayTiming::default(); + let mut replay_timing = ReplayLoopTiming::default(); let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); let mut duplicate_confirmed_slots: DuplicateConfirmedSlots = DuplicateConfirmedSlots::default(); @@ -2272,7 +2288,7 @@ impl 
ReplayStage { unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, vote_signatures: &mut Vec, has_new_vote_been_rooted: &mut bool, - replay_timing: &mut ReplayTiming, + replay_timing: &mut ReplayLoopTiming, voting_sender: &Sender, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, drop_bank_sender: &Sender>>, @@ -2604,7 +2620,7 @@ impl ReplayStage { switch_fork_decision: &SwitchForkDecision, vote_signatures: &mut Vec, has_new_vote_been_rooted: bool, - replay_timing: &mut ReplayTiming, + replay_timing: &mut ReplayLoopTiming, voting_sender: &Sender, wait_to_vote_slot: Option, ) { @@ -2697,7 +2713,7 @@ impl ReplayStage { entry_notification_sender: Option<&EntryNotifierSender>, verify_recyclers: &VerifyRecyclers, replay_vote_sender: &ReplayVoteSender, - replay_timing: &mut ReplayTiming, + replay_timing: &mut ReplayLoopTiming, log_messages_bytes_limit: Option, active_bank_slots: &[Slot], prioritization_fee_cache: &PrioritizationFeeCache, @@ -2810,7 +2826,7 @@ impl ReplayStage { entry_notification_sender: Option<&EntryNotifierSender>, verify_recyclers: &VerifyRecyclers, replay_vote_sender: &ReplayVoteSender, - replay_timing: &mut ReplayTiming, + replay_timing: &mut ReplayLoopTiming, log_messages_bytes_limit: Option, bank_slot: Slot, prioritization_fee_cache: &PrioritizationFeeCache, @@ -3157,7 +3173,7 @@ impl ReplayStage { duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, block_metadata_notifier: Option, - replay_timing: &mut ReplayTiming, + replay_timing: &mut ReplayLoopTiming, log_messages_bytes_limit: Option, replay_slots_concurrently: bool, prioritization_fee_cache: &PrioritizationFeeCache, @@ -4140,7 +4156,7 @@ impl ReplayStage { leader_schedule_cache: &Arc, rpc_subscriptions: &Arc, progress: &mut ProgressMap, - replay_timing: &mut ReplayTiming, + replay_timing: &mut ReplayLoopTiming, ) { // Find the next slot that chains to the old slot let mut generate_new_bank_forks_read_lock = @@ -4530,7 +4546,7 @@ pub(crate) mod tests { .unwrap() .get(NUM_CONSECUTIVE_LEADER_SLOTS) .is_none()); - let mut replay_timing = ReplayTiming::default(); + let mut replay_timing = ReplayLoopTiming::default(); ReplayStage::generate_new_bank_forks( &blockstore, &bank_forks, @@ -6351,7 +6367,7 @@ pub(crate) mod tests { .. 
} = vote_simulator; - let mut replay_timing = ReplayTiming::default(); + let mut replay_timing = ReplayLoopTiming::default(); // Create bank 7 and insert to blockstore and bank forks let root_bank = bank_forks.read().unwrap().root_bank(); @@ -7543,7 +7559,7 @@ pub(crate) mod tests { &SwitchForkDecision::SameFork, &mut voted_signatures, has_new_vote_been_rooted, - &mut ReplayTiming::default(), + &mut ReplayLoopTiming::default(), &voting_sender, None, ); @@ -7618,7 +7634,7 @@ pub(crate) mod tests { &SwitchForkDecision::SameFork, &mut voted_signatures, has_new_vote_been_rooted, - &mut ReplayTiming::default(), + &mut ReplayLoopTiming::default(), &voting_sender, None, ); @@ -7819,7 +7835,7 @@ pub(crate) mod tests { &SwitchForkDecision::SameFork, voted_signatures, has_new_vote_been_rooted, - &mut ReplayTiming::default(), + &mut ReplayLoopTiming::default(), voting_sender, None, ); From 532b806bef8a80fe6c80f063b40cb437d8df378f Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Fri, 1 Mar 2024 16:04:08 -0300 Subject: [PATCH 299/401] Add more unit tests to SVM (#35383) --- svm/src/account_loader.rs | 2 + svm/src/account_overrides.rs | 31 ++++ svm/src/account_rent_state.rs | 67 +++++++ svm/src/transaction_account_state_info.rs | 169 +++++++++++++++++ svm/tests/account_loader.rs | 214 ++++++++++++++++++++++ 5 files changed, 483 insertions(+) create mode 100644 svm/tests/account_loader.rs diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 58bd7c6161d396..126be625e9cf15 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -106,6 +106,8 @@ pub fn load_accounts( &loaded_transaction.rent_debits, ) { Ok(nonce) => Some(nonce), + // This error branch is never reached, because `load_transaction_accounts` + // already validates the fee payer account. 
Err(e) => return (Err(e), None), } } else { diff --git a/svm/src/account_overrides.rs b/svm/src/account_overrides.rs index ee8e7ec9e21f94..c88d77d54f30a9 100644 --- a/svm/src/account_overrides.rs +++ b/svm/src/account_overrides.rs @@ -29,3 +29,34 @@ impl AccountOverrides { self.accounts.get(pubkey) } } + +#[cfg(test)] +mod test { + use { + crate::account_overrides::AccountOverrides, + solana_sdk::{account::AccountSharedData, pubkey::Pubkey, sysvar}, + }; + + #[test] + fn test_set_account() { + let mut accounts = AccountOverrides::default(); + let data = AccountSharedData::default(); + let key = Pubkey::new_unique(); + accounts.set_account(&key, Some(data.clone())); + assert_eq!(accounts.get(&key), Some(&data)); + + accounts.set_account(&key, None); + assert!(accounts.get(&key).is_none()); + } + + #[test] + fn test_slot_history() { + let mut accounts = AccountOverrides::default(); + let data = AccountSharedData::default(); + + assert_eq!(accounts.get(&sysvar::slot_history::id()), None); + accounts.set_slot_history(Some(data.clone())); + + assert_eq!(accounts.get(&sysvar::slot_history::id()), Some(&data)); + } +} diff --git a/svm/src/account_rent_state.rs b/svm/src/account_rent_state.rs index 38cda820f8ceb7..6fae6e9033bd39 100644 --- a/svm/src/account_rent_state.rs +++ b/svm/src/account_rent_state.rs @@ -237,4 +237,71 @@ mod tests { }), ); } + + #[test] + fn test_check_rent_state_with_account() { + let pre_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 3, + }; + + let post_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 5, + }; + let account_index = 2 as IndexOfAccount; + let key = Pubkey::new_unique(); + let result = RentState::check_rent_state_with_account( + &pre_rent_state, + &post_rent_state, + &key, + &AccountSharedData::default(), + account_index, + ); + assert_eq!( + result.err(), + Some(TransactionError::InsufficientFundsForRent { + account_index: account_index as u8 + }) + ); + + let result = RentState::check_rent_state_with_account( + &pre_rent_state, + &post_rent_state, + &solana_sdk::incinerator::id(), + &AccountSharedData::default(), + account_index, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_check_rent_state() { + let context = TransactionContext::new( + vec![(Pubkey::new_unique(), AccountSharedData::default())], + Rent::default(), + 20, + 20, + ); + + let pre_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 3, + }; + + let post_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 5, + }; + + let result = + RentState::check_rent_state(Some(&pre_rent_state), Some(&post_rent_state), &context, 0); + assert_eq!( + result.err(), + Some(TransactionError::InsufficientFundsForRent { account_index: 0 }) + ); + + let result = RentState::check_rent_state(None, Some(&post_rent_state), &context, 0); + assert!(result.is_ok()); + } } diff --git a/svm/src/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs index 02d6f0228de2a7..ff5b93f6a6c459 100644 --- a/svm/src/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -10,6 +10,7 @@ use { }, }; +#[derive(PartialEq, Debug)] pub struct TransactionAccountStateInfo { rent_state: Option, // None: readonly account } @@ -67,3 +68,171 @@ impl TransactionAccountStateInfo { Ok(()) } } + +#[cfg(test)] +mod test { + use { + crate::{ + account_rent_state::RentState, + transaction_account_state_info::TransactionAccountStateInfo, + }, + solana_sdk::{ + account::AccountSharedData, + hash::Hash, + 
instruction::CompiledInstruction, + message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, + rent::Rent, + signature::{Keypair, Signer}, + transaction::TransactionError, + transaction_context::TransactionContext, + }, + }; + + #[test] + fn test_new() { + let rent = Rent::default(); + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + let key4 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey(), key4.pubkey()], + header: MessageHeader::default(), + instructions: vec![ + CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }, + CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![], + }, + ], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + + let transaction_accounts = vec![ + (key1.pubkey(), AccountSharedData::default()), + (key2.pubkey(), AccountSharedData::default()), + (key3.pubkey(), AccountSharedData::default()), + ]; + + let context = TransactionContext::new(transaction_accounts, rent.clone(), 20, 20); + let result = TransactionAccountStateInfo::new(&rent, &context, &sanitized_message); + assert_eq!( + result, + vec![ + TransactionAccountStateInfo { + rent_state: Some(RentState::Uninitialized) + }, + TransactionAccountStateInfo { rent_state: None }, + TransactionAccountStateInfo { + rent_state: Some(RentState::Uninitialized) + } + ] + ); + } + + #[test] + #[should_panic(expected = "message and transaction context out of sync, fatal")] + fn test_new_panic() { + let rent = Rent::default(); + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + let key4 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey(), key4.pubkey(), key3.pubkey()], + header: MessageHeader::default(), + instructions: vec![ + CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }, + CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![], + }, + ], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + + let transaction_accounts = vec![ + (key1.pubkey(), AccountSharedData::default()), + (key2.pubkey(), AccountSharedData::default()), + (key3.pubkey(), AccountSharedData::default()), + ]; + + let context = TransactionContext::new(transaction_accounts, rent.clone(), 20, 20); + let _result = TransactionAccountStateInfo::new(&rent, &context, &sanitized_message); + } + + #[test] + fn test_verify_changes() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let pre_rent_state = vec![ + TransactionAccountStateInfo { + rent_state: Some(RentState::Uninitialized), + }, + TransactionAccountStateInfo { + rent_state: Some(RentState::Uninitialized), + }, + ]; + let post_rent_state = vec![TransactionAccountStateInfo { + rent_state: Some(RentState::Uninitialized), + }]; + + let transaction_accounts = vec![ + (key1.pubkey(), AccountSharedData::default()), + (key2.pubkey(), AccountSharedData::default()), + ]; + + let context = TransactionContext::new(transaction_accounts, Rent::default(), 20, 20); + + let result = TransactionAccountStateInfo::verify_changes( + &pre_rent_state, + &post_rent_state, + &context, + ); + assert!(result.is_ok()); + + let pre_rent_state = vec![TransactionAccountStateInfo { + rent_state: Some(RentState::Uninitialized), + }]; + let 
post_rent_state = vec![TransactionAccountStateInfo { + rent_state: Some(RentState::RentPaying { + data_size: 2, + lamports: 5, + }), + }]; + + let transaction_accounts = vec![ + (key1.pubkey(), AccountSharedData::default()), + (key2.pubkey(), AccountSharedData::default()), + ]; + + let context = TransactionContext::new(transaction_accounts, Rent::default(), 20, 20); + let result = TransactionAccountStateInfo::verify_changes( + &pre_rent_state, + &post_rent_state, + &context, + ); + assert_eq!( + result.err(), + Some(TransactionError::InsufficientFundsForRent { account_index: 0 }) + ); + } +} diff --git a/svm/tests/account_loader.rs b/svm/tests/account_loader.rs new file mode 100644 index 00000000000000..dd4cd046046399 --- /dev/null +++ b/svm/tests/account_loader.rs @@ -0,0 +1,214 @@ +use { + crate::mock_bank::MockBankCallback, + solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + fee::FeeStructure, + hash::Hash, + instruction::CompiledInstruction, + message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, + native_loader, + nonce_info::{NonceFull, NoncePartial}, + pubkey::Pubkey, + rent_collector::RENT_EXEMPT_RENT_EPOCH, + rent_debits::RentDebits, + signature::{Keypair, Signature, Signer}, + transaction::{SanitizedTransaction, TransactionError}, + }, + solana_svm::{ + account_loader::{load_accounts, LoadedTransaction, TransactionCheckResult}, + transaction_error_metrics::TransactionErrorMetrics, + }, + std::collections::HashMap, +}; + +mod mock_bank; + +#[test] +fn test_load_accounts_success() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + let key4 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey(), key4.pubkey()], + header: MessageHeader::default(), + instructions: vec![ + CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }, + CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![], + }, + ], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = MockBankCallback::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(key3.pubkey()); + mock_bank + .account_shared_data + .insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank + .account_shared_data + .insert(key2.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(native_loader::id()); + mock_bank + .account_shared_data + .insert(key3.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let lock_results = + (Ok(()), Some(NoncePartial::default()), Some(20u64)) as TransactionCheckResult; + + let results = load_accounts( + &mock_bank, + &[sanitized_transaction], + &[lock_results], + &mut error_counter, + &FeeStructure::default(), + None, + &HashMap::new(), + &loaded_programs, + ); + + let mut account_data = AccountSharedData::default(); + account_data.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + + 
assert_eq!(results.len(), 1); + let (loaded_result, nonce) = results[0].clone(); + assert_eq!( + loaded_result.unwrap(), + LoadedTransaction { + accounts: vec![ + ( + key2.pubkey(), + mock_bank.account_shared_data[&key2.pubkey()].clone() + ), + ( + key1.pubkey(), + mock_bank.account_shared_data[&key1.pubkey()].clone() + ), + (key4.pubkey(), account_data), + ( + key3.pubkey(), + mock_bank.account_shared_data[&key3.pubkey()].clone() + ), + ], + program_indices: vec![vec![3, 1], vec![3, 1]], + rent: 0, + rent_debits: RentDebits::default() + } + ); + + assert_eq!( + nonce.unwrap(), + NonceFull::new( + Pubkey::from([0; 32]), + AccountSharedData::default(), + Some(mock_bank.account_shared_data[&key2.pubkey()].clone()) + ) + ); +} + +#[test] +fn test_load_accounts_error() { + let mock_bank = MockBankCallback::default(); + let message = Message { + account_keys: vec![Pubkey::new_from_array([0; 32])], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + + let lock_results = (Ok(()), Some(NoncePartial::default()), None) as TransactionCheckResult; + let fee_structure = FeeStructure::default(); + + let result = load_accounts( + &mock_bank, + &[sanitized_transaction.clone()], + &[lock_results], + &mut TransactionErrorMetrics::default(), + &fee_structure, + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + assert_eq!( + result, + vec![(Err(TransactionError::BlockhashNotFound), None)] + ); + + let lock_results = + (Ok(()), Some(NoncePartial::default()), Some(20u64)) as TransactionCheckResult; + + let result = load_accounts( + &mock_bank, + &[sanitized_transaction.clone()], + &[lock_results.clone()], + &mut TransactionErrorMetrics::default(), + &fee_structure, + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + assert_eq!(result, vec![(Err(TransactionError::AccountNotFound), None)]); + + let lock_results = ( + Err(TransactionError::InvalidWritableAccount), + Some(NoncePartial::default()), + Some(20u64), + ) as TransactionCheckResult; + + let result = load_accounts( + &mock_bank, + &[sanitized_transaction.clone()], + &[lock_results], + &mut TransactionErrorMetrics::default(), + &fee_structure, + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + assert_eq!( + result, + vec![(Err(TransactionError::InvalidWritableAccount), None)] + ); +} From ede9163633d16e0b747aa7bcabac64b4b2f02c92 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 1 Mar 2024 11:18:42 -0800 Subject: [PATCH 300/401] Comments clarifying non-emptiness of threadset (#35388) --- .../transaction_scheduler/prio_graph_scheduler.rs | 3 ++- .../transaction_scheduler/thread_aware_account_locks.rs | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index e0b53a97ab020e..d983fcf4d163c3 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -388,7 +388,8 @@ impl PrioGraphScheduler { /// If the `chain_thread` is available, this thread will be selected, 
regardless of
     /// load-balancing.
     ///
-    /// Panics if the `thread_set` is empty.
+    /// Panics if the `thread_set` is empty. This should never happen, see comment
+    /// on `ThreadAwareAccountLocks::try_lock_accounts`.
     fn select_thread(
         thread_set: ThreadSet,
         batches_per_thread: &[Vec],
diff --git a/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs b/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs
index d17cc25556f92f..4a9cfd2df9edcf 100644
--- a/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs
+++ b/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs
@@ -65,6 +65,8 @@ impl ThreadAwareAccountLocks {
     /// `allowed_threads` is a set of threads that the caller restricts locking to.
     /// If accounts are schedulable, then they are locked for the thread
     /// selected by the `thread_selector` function.
+    /// `thread_selector` is only called if all accounts are schedulable, meaning
+    /// that the `thread_set` passed to `thread_selector` is non-empty.
     pub(crate) fn try_lock_accounts<'a>(
         &mut self,
         write_account_locks: impl Iterator + Clone,

From 672943224eb6432e122bc184dccc9cf4c5e149db Mon Sep 17 00:00:00 2001
From: Brooks
Date: Fri, 1 Mar 2024 14:22:19 -0500
Subject: [PATCH 301/401] Removes cap_accounts_data_len feature (#35378)

---
 sdk/src/feature_set.rs | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index abecf4fafb6b1d..09faa63fb1b11e 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -262,10 +262,6 @@ pub mod allow_votes_to_directly_update_vote_state {
     solana_sdk::declare_id!("Ff8b1fBeB86q8cjq47ZhsQLgv5EkHu3G1C99zjUfAzrq");
 }
 
-pub mod cap_accounts_data_len {
-    solana_sdk::declare_id!("capRxUrBjNkkCpjrJxPGfPaWijB7q3JoDfsWXAnt46r");
-}
-
 pub mod max_tx_account_locks {
     solana_sdk::declare_id!("CBkDroRDqm8HwHe6ak9cguPjUomrASEkfmxEaZ5CNNxz");
 }
@@ -843,7 +839,6 @@ lazy_static! {
     (reject_non_rent_exempt_vote_withdraws::id(), "fail vote withdraw instructions which leave the account non-rent-exempt"),
     (evict_invalid_stakes_cache_entries::id(), "evict invalid stakes cache entries on epoch boundaries"),
     (allow_votes_to_directly_update_vote_state::id(), "enable direct vote state update"),
-    (cap_accounts_data_len::id(), "cap the accounts data len"),
     (max_tx_account_locks::id(), "enforce max number of locked accounts per transaction"),
     (require_rent_exempt_accounts::id(), "require all new transaction accounts with data to be rent-exempt"),
     (filter_votes_outside_slot_hashes::id(), "filter vote slots older than the slot hashes history"),

From 3ec020f74269b20a1d9f95d04af2241d9b85a729 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Fri, 1 Mar 2024 14:22:25 -0500
Subject: [PATCH 302/401] Removes cap_accounts_data_size_per_block feature (#35381)

---
 sdk/src/feature_set.rs | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index 09faa63fb1b11e..98dc5a4037bd05 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -450,10 +450,6 @@ pub mod vote_authorize_with_seed {
     solana_sdk::declare_id!("6tRxEYKuy2L5nnv5bgn7iT28MxUbYxp5h7F3Ncf1exrT");
 }
 
-pub mod cap_accounts_data_size_per_block {
-    solana_sdk::declare_id!("qywiJyZmqTKspFg2LeuUHqcA5nNvBgobqb9UprywS9N");
-}
-
 pub mod preserve_rent_epoch_for_rent_exempt_accounts {
     solana_sdk::declare_id!("HH3MUYReL2BvqqA3oEcAa7txju5GY6G4nxJ51zvsEjEZ");
}
@@ -885,7 +881,6 @@ lazy_static!
{ (nonce_must_be_authorized::id(), "nonce must be authorized"), (nonce_must_be_advanceable::id(), "durable nonces must be advanceable"), (vote_authorize_with_seed::id(), "An instruction you can use to change a vote accounts authority when the current authority is a derived key #25860"), - (cap_accounts_data_size_per_block::id(), "cap the accounts data size per block #25517"), (stake_redelegate_instruction::id(), "enable the redelegate stake instruction #26294"), (preserve_rent_epoch_for_rent_exempt_accounts::id(), "preserve rent epoch for rent exempt accounts #26479"), (enable_bpf_loader_extend_program_ix::id(), "enable bpf upgradeable loader ExtendProgram instruction #25234"), From 59ed049dc53042d56ce970bb7da46b7a136cf205 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 1 Mar 2024 14:27:48 -0500 Subject: [PATCH 303/401] Gives back 8 bytes to stack buffer for account data when hashing (#35374) --- accounts-db/src/accounts_db.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2909def64fc986..281feb90fc99e4 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6120,18 +6120,16 @@ impl AccountsDb { let mut hasher = blake3::Hasher::new(); // allocate 128 bytes buffer on the stack - const BUF_SIZE: usize = 128; - const TOTAL_FIELD_SIZE: usize = 8 /* lamports */ + 8 /* slot */ + 8 /* rent_epoch */ + 1 /* exec_flag */ + 32 /* owner_key */ + 32 /* pubkey */; - const DATA_SIZE_CAN_FIT: usize = BUF_SIZE - TOTAL_FIELD_SIZE; + const BUFFER_SIZE: usize = 128; + const METADATA_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */; + const REMAINING_SIZE: usize = BUFFER_SIZE - METADATA_SIZE; + let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new(); - let mut buffer = SmallVec::<[u8; BUF_SIZE]>::new(); - - // collect lamports, slot, rent_epoch into buffer to hash + // collect lamports, rent_epoch into buffer to hash buffer.extend_from_slice(&lamports.to_le_bytes()); - buffer.extend_from_slice(&rent_epoch.to_le_bytes()); - if data.len() > DATA_SIZE_CAN_FIT { + if data.len() > REMAINING_SIZE { // For larger accounts whose data can't fit into the buffer, update the hash now. hasher.update(&buffer); buffer.clear(); From 564a9f78a064eff9c78b0edf3735270b5cf4fd14 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 1 Mar 2024 14:28:28 -0500 Subject: [PATCH 304/401] Casts executable bool to integer when computing account hash (#35373) --- accounts-db/src/accounts_db.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 281feb90fc99e4..1f3c36876f4531 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6142,11 +6142,7 @@ impl AccountsDb { } // collect exec_flag, owner, pubkey into buffer to hash - if executable { - buffer.push(1_u8); - } else { - buffer.push(0_u8); - } + buffer.push(executable.into()); buffer.extend_from_slice(owner.as_ref()); buffer.extend_from_slice(pubkey.as_ref()); hasher.update(&buffer); From 7d6f1d59116dc44a229b07867cbac59adf596c69 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 1 Mar 2024 13:36:08 -0600 Subject: [PATCH 305/401] Give streamer::receiver() threads unique names (#35369) The name was previously hard-coded to solReceiver. The use of the same name makes it hard to figure out which thread is which when these threads are handling many services (Gossip, Tvu, etc). 
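For illustration, the change boils down to threading a caller-chosen name into
std::thread::Builder. A minimal sketch follows (not part of the diff below; the
receive loop and the remaining receiver() parameters are elided here):

    use std::thread::{Builder, JoinHandle};

    // Spawn a receiver-style thread under a unique, caller-supplied name so
    // tools such as `ps -T` or a debugger show e.g. "solRcvrTpu00" instead of
    // the previously shared "solReceiver".
    fn spawn_named_receiver(thread_name: String) -> JoinHandle<()> {
        Builder::new()
            .name(thread_name) // e.g. format!("solRcvrTpu{i:02}"), one per socket index
            .spawn(move || {
                // packet receive loop would run here
            })
            .unwrap()
    }

The name format strings ("solRcvrTpu{i:02}", "solRcvrGossip", ...) come straight
from the call sites updated in this patch.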
---
 bench-streamer/src/main.rs                 |  1 +
 core/src/fetch_stage.rs                    | 12 +++++++++---
 core/src/repair/ancestor_hashes_service.rs |  2 ++
 core/src/repair/serve_repair_service.rs    |  1 +
 core/src/shred_fetch_stage.rs              |  9 +++++++--
 gossip/src/gossip_service.rs               |  1 +
 streamer/src/streamer.rs                   |  4 +++-
 7 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs
index 987df411341672..de300345ebad42 100644
--- a/bench-streamer/src/main.rs
+++ b/bench-streamer/src/main.rs
@@ -108,6 +108,7 @@ fn main() -> Result<()> {
         let (s_reader, r_reader) = unbounded();
         read_channels.push(r_reader);
         read_threads.push(receiver(
+            "solRcvrBenStrmr".to_string(),
             Arc::new(read),
             exit.clone(),
             s_reader,
diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs
index b3eb36201fc637..5e972e626166ce 100644
--- a/core/src/fetch_stage.rs
+++ b/core/src/fetch_stage.rs
@@ -159,8 +159,10 @@ impl FetchStage {
         let tpu_threads: Vec<_> = if tpu_enable_udp {
             tpu_sockets
                 .into_iter()
-                .map(|socket| {
+                .enumerate()
+                .map(|(i, socket)| {
                     streamer::receiver(
+                        format!("solRcvrTpu{i:02}"),
                         socket,
                         exit.clone(),
                         sender.clone(),
@@ -180,8 +182,10 @@ impl FetchStage {
         let tpu_forwards_threads: Vec<_> = if tpu_enable_udp {
             tpu_forwards_sockets
                 .into_iter()
-                .map(|socket| {
+                .enumerate()
+                .map(|(i, socket)| {
                     streamer::receiver(
+                        format!("solRcvrTpuFwd{i:02}"),
                         socket,
                         exit.clone(),
                         forward_sender.clone(),
@@ -200,8 +204,10 @@ impl FetchStage {
         let tpu_vote_stats = Arc::new(StreamerReceiveStats::new("tpu_vote_receiver"));
         let tpu_vote_threads: Vec<_> = tpu_vote_sockets
             .into_iter()
-            .map(|socket| {
+            .enumerate()
+            .map(|(i, socket)| {
                 streamer::receiver(
+                    format!("solRcvrTpuVot{i:02}"),
                     socket,
                     exit.clone(),
                     vote_sender.clone(),
diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs
index e980ddb46b4745..87b1f49bc9cfc1 100644
--- a/core/src/repair/ancestor_hashes_service.rs
+++ b/core/src/repair/ancestor_hashes_service.rs
@@ -160,6 +160,7 @@ impl AncestorHashesService {
         let outstanding_requests = Arc::>::default();
         let (response_sender, response_receiver) = unbounded();
         let t_receiver = streamer::receiver(
+            "solRcvrAncHash".to_string(),
            ancestor_hashes_request_socket.clone(),
             exit.clone(),
             response_sender.clone(),
@@ -1294,6 +1295,7 @@ mod test {
         // Set up repair request receiver threads
         let t_request_receiver = streamer::receiver(
+            "solRcvrTest".to_string(),
             Arc::new(responder_node.sockets.serve_repair),
             exit.clone(),
             requests_sender,
diff --git a/core/src/repair/serve_repair_service.rs b/core/src/repair/serve_repair_service.rs
index 9819d0ea43855d..3fe424d0768a85 100644
--- a/core/src/repair/serve_repair_service.rs
+++ b/core/src/repair/serve_repair_service.rs
@@ -38,6 +38,7 @@ impl ServeRepairService {
             serve_repair_socket.local_addr().unwrap()
         );
         let t_receiver = streamer::receiver(
+            "solRcvrServeRep".to_string(),
             serve_repair_socket.clone(),
             exit.clone(),
             request_sender,
diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs
index 481e5333b14198..39cc193adad96e 100644
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -147,6 +147,7 @@ impl ShredFetchStage {
     #[allow(clippy::too_many_arguments)]
     fn packet_modifier(
+        receiver_thread_name: &'static str,
         sockets: Vec>,
         exit: Arc,
         sender: Sender,
@@ -161,9 +162,11 @@ impl ShredFetchStage {
         let (packet_sender, packet_receiver) = unbounded();
         let streamers = sockets
             .into_iter()
-            .map(|s| {
+            .enumerate()
+            .map(|(i, socket)| {
streamer::receiver( - s, + format!("{receiver_thread_name}{i:02}"), + socket, exit.clone(), packet_sender.clone(), recycler.clone(), @@ -211,6 +214,7 @@ impl ShredFetchStage { let recycler = PacketBatchRecycler::warmed(100, 1024); let (mut tvu_threads, tvu_filter) = Self::packet_modifier( + "solRcvrShred", sockets, exit.clone(), sender.clone(), @@ -224,6 +228,7 @@ impl ShredFetchStage { ); let (repair_receiver, repair_handler) = Self::packet_modifier( + "solRcvrShredRep", vec![repair_socket.clone()], exit.clone(), sender.clone(), diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index b587a5e0672421..806ee23a4fb0be 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -50,6 +50,7 @@ impl GossipService { ); let socket_addr_space = *cluster_info.socket_addr_space(); let t_receiver = streamer::receiver( + "solRcvrGossip".to_string(), gossip_socket.clone(), exit.clone(), request_sender, diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 1fd7bfc97404cc..f90d1079fe180d 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -157,6 +157,7 @@ fn recv_loop( } pub fn receiver( + thread_name: String, socket: Arc, exit: Arc, packet_batch_sender: PacketBatchSender, @@ -169,7 +170,7 @@ pub fn receiver( let res = socket.set_read_timeout(Some(Duration::new(1, 0))); assert!(res.is_ok(), "streamer::receiver set_read_timeout error"); Builder::new() - .name("solReceiver".to_string()) + .name(thread_name) .spawn(move || { let _ = recv_loop( &socket, @@ -480,6 +481,7 @@ mod test { let (s_reader, r_reader) = unbounded(); let stats = Arc::new(StreamerReceiveStats::new("test")); let t_receiver = receiver( + "solRcvrTest".to_string(), Arc::new(read), exit.clone(), s_reader, From 5f6d66e87b96f5f5947f83305fe2f0e88a8e7ab5 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Fri, 1 Mar 2024 12:14:52 -0800 Subject: [PATCH 306/401] Deprecate `ThinClient` and remove `ThinClient` from `bench-tps` (#35365) * deprecate ThinClient * switch localcluster bench test to use tpuclient add back in command line args for thinclient. 
add thin-client deprecation README refactor TpuClient connection * remove thin-client from net/ * change 2.0.0 to 1.19.0 --- bench-tps/src/cli.rs | 86 +++++++++++---------------------- bench-tps/src/main.rs | 87 +--------------------------------- bench-tps/src/perf_utils.rs | 1 - bench-tps/tests/bench_tps.rs | 28 +++++++---- client/src/thin_client.rs | 4 +- net/net.sh | 4 +- net/remote/remote-client.sh | 12 +---- thin-client/README.md | 4 ++ thin-client/src/thin_client.rs | 5 ++ 9 files changed, 64 insertions(+), 167 deletions(-) create mode 100644 thin-client/README.md diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index 35c570aec5b7f8..e2ee75fc551400 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -13,7 +13,7 @@ use { }, solana_tpu_client::tpu_client::{DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_USE_QUIC}, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr}, time::Duration, }, }; @@ -24,9 +24,6 @@ const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS pub enum ExternalClientType { // Submits transactions to an Rpc node using an RpcClient RpcClient, - // Submits transactions directly to leaders using a ThinClient, broadcasting to multiple - // leaders when num_nodes > 1 - ThinClient, // Submits transactions directly to leaders using a TpuClient, broadcasting to upcoming leaders // via TpuClient default configuration TpuClient, @@ -53,12 +50,10 @@ pub enum ComputeUnitPrice { /// Holds the configuration for a single run of the benchmark #[derive(PartialEq, Debug)] pub struct Config { - pub entrypoint_addr: SocketAddr, pub json_rpc_url: String, pub websocket_url: String, pub id: Keypair, pub threads: usize, - pub num_nodes: usize, pub duration: Duration, pub tx_count: usize, pub keypair_multiplier: usize, @@ -68,10 +63,8 @@ pub struct Config { pub write_to_client_file: bool, pub read_from_client_file: bool, pub target_lamports_per_signature: u64, - pub multi_client: bool, pub num_lamports_per_account: u64, pub target_slots_per_epoch: u64, - pub target_node: Option, pub external_client_type: ExternalClientType, pub use_quic: bool, pub tpu_connection_pool_size: usize, @@ -89,12 +82,10 @@ impl Eq for Config {} impl Default for Config { fn default() -> Config { Config { - entrypoint_addr: SocketAddr::from((Ipv4Addr::LOCALHOST, 8001)), json_rpc_url: ConfigInput::default().json_rpc_url, websocket_url: ConfigInput::default().websocket_url, id: Keypair::new(), threads: 4, - num_nodes: 1, duration: Duration::new(std::u64::MAX, 0), tx_count: 50_000, keypair_multiplier: 8, @@ -104,10 +95,8 @@ impl Default for Config { write_to_client_file: false, read_from_client_file: false, target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature, - multi_client: true, num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT, target_slots_per_epoch: 0, - target_node: None, external_client_type: ExternalClientType::default(), use_quic: DEFAULT_TPU_USE_QUIC, tpu_connection_pool_size: DEFAULT_TPU_CONNECTION_POOL_SIZE, @@ -169,8 +158,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .takes_value(true) .conflicts_with("rpc_client") .requires("tpu_addr") - .requires("thin_client") - .help("Specify custom rpc_addr to create thin_client"), + .hidden(hidden_unless_forced()) + .help("Specify custom rpc_addr to create thin_client. \ + Note: ThinClient is deprecated. Argument will not be used. 
\ + Use tpc_client or rpc_client instead"), ) .arg( Arg::with_name("tpu_addr") @@ -179,8 +170,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .conflicts_with("rpc_client") .takes_value(true) .requires("rpc_addr") - .requires("thin_client") - .help("Specify custom tpu_addr to create thin_client"), + .hidden(hidden_unless_forced()) + .help("Specify custom tpu_addr to create thin_client. \ + Note: ThinClient is deprecated. Argument will not be used. \ + Use tpc_client or rpc_client instead"), ) .arg( Arg::with_name("entrypoint") @@ -188,7 +181,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .long("entrypoint") .value_name("HOST:PORT") .takes_value(true) - .help("Rendezvous with the cluster at this entry point; defaults to 127.0.0.1:8001"), + .hidden(hidden_unless_forced()) + .help("Rendezvous with the cluster at this entry point; defaults to 127.0.0.1:8001. \ + Note: ThinClient is deprecated. Argument will not be used. \ + Use tpc_client or rpc_client instead"), ) .arg( Arg::with_name("faucet") @@ -213,7 +209,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .long("num-nodes") .value_name("NUM") .takes_value(true) - .help("Wait for NUM nodes to converge"), + .hidden(hidden_unless_forced()) + .help("Wait for NUM nodes to converge. \ + Note: ThinClient is deprecated. Argument will not be used. \ + Use tpc_client or rpc_client instead"), ) .arg( Arg::with_name("threads") @@ -238,7 +237,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .arg( Arg::with_name("no-multi-client") .long("no-multi-client") - .help("Disable multi-client support, only transact with the entrypoint."), + .hidden(hidden_unless_forced()) + .help("Disable multi-client support, only transact with the entrypoint. \ + Note: ThinClient is deprecated. Flag will not be used. \ + Use tpc_client or rpc_client instead"), ) .arg( Arg::with_name("target_node") @@ -246,7 +248,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .requires("no-multi-client") .takes_value(true) .value_name("PUBKEY") - .help("Specify an exact node to send transactions to."), + .hidden(hidden_unless_forced()) + .help("Specify an exact node to send transactions to. \ + Note: ThinClient is deprecated. Argument will not be used. \ + Use tpc_client or rpc_client instead"), ) .arg( Arg::with_name("tx_count") @@ -316,7 +321,6 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .arg( Arg::with_name("rpc_client") .long("use-rpc-client") - .conflicts_with("thin_client") .conflicts_with("tpu_client") .takes_value(false) .help("Submit transactions with a RpcClient") @@ -325,33 +329,20 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { Arg::with_name("tpu_client") .long("use-tpu-client") .conflicts_with("rpc_client") - .conflicts_with("thin_client") .takes_value(false) .help("Submit transactions with a TpuClient") ) - .arg( - Arg::with_name("thin_client") - .long("use-thin-client") - .conflicts_with("rpc_client") - .conflicts_with("tpu_client") - .takes_value(false) - .hidden(hidden_unless_forced()) - .help("Submit transactions with a ThinClient. Note: usage is discouraged. 
\ - ThinClient will be deprecated.") - ) .arg( Arg::with_name("tpu_disable_quic") .long("tpu-disable-quic") .takes_value(false) - .help("Do not submit transactions via QUIC; only affects ThinClient \ - or TpuClient (default) sends"), + .help("Do not submit transactions via QUIC; only affects TpuClient (default) sends"), ) .arg( Arg::with_name("tpu_connection_pool_size") .long("tpu-connection-pool-size") .takes_value(true) - .help("Controls the connection pool size per remote address; only affects ThinClient \ - or TpuClient (default) sends"), + .help("Controls the connection pool size per remote address; only affects TpuClient (default) sends"), ) .arg( Arg::with_name("compute_unit_price") @@ -456,8 +447,6 @@ pub fn parse_args(matches: &ArgMatches) -> Result { if matches.is_present("rpc_client") { args.external_client_type = ExternalClientType::RpcClient; - } else if matches.is_present("thin_client") { - args.external_client_type = ExternalClientType::ThinClient; } if matches.is_present("tpu_disable_quic") { @@ -471,19 +460,10 @@ pub fn parse_args(matches: &ArgMatches) -> Result { .map_err(|_| "can't parse tpu-connection-pool-size")?; } - if let Some(addr) = matches.value_of("entrypoint") { - args.entrypoint_addr = solana_net_utils::parse_host_port(addr) - .map_err(|_| "failed to parse entrypoint address")?; - } - if let Some(t) = matches.value_of("threads") { args.threads = t.to_string().parse().map_err(|_| "can't parse threads")?; } - if let Some(n) = matches.value_of("num-nodes") { - args.num_nodes = n.to_string().parse().map_err(|_| "can't parse num-nodes")?; - } - if let Some(duration) = matches.value_of("duration") { let seconds = duration .to_string() @@ -533,13 +513,6 @@ pub fn parse_args(matches: &ArgMatches) -> Result { .map_err(|_| "can't parse target-lamports-per-signature")?; } - args.multi_client = !matches.is_present("no-multi-client"); - args.target_node = matches - .value_of("target_node") - .map(|target_str| target_str.parse::()) - .transpose() - .map_err(|_| "Failed to parse target-node")?; - if let Some(v) = matches.value_of("num_lamports_per_account") { args.num_lamports_per_account = v .to_string() @@ -611,7 +584,7 @@ mod tests { super::*, solana_sdk::signature::{read_keypair_file, write_keypair_file, Keypair, Signer}, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr}, time::Duration, }, tempfile::{tempdir, TempDir}, @@ -671,8 +644,6 @@ mod tests { "4", "--read-client-keys", "./client-accounts.yml", - "--entrypoint", - "192.1.2.3:8001", ]); let actual = parse_args(&matches).unwrap(); assert_eq!( @@ -686,7 +657,6 @@ mod tests { threads: 4, read_from_client_file: true, client_ids_and_stake_file: "./client-accounts.yml".to_string(), - entrypoint_addr: SocketAddr::from((Ipv4Addr::new(192, 1, 2, 3), 8001)), ..Config::default() } ); diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 7c8244584e8ec0..1560b9346ed28c 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -1,6 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] use { - clap::value_t, log::*, solana_bench_tps::{ bench::{do_bench_tps, max_lamports_for_prioritization}, @@ -11,11 +10,9 @@ use { }, solana_client::{ connection_cache::ConnectionCache, - thin_client::ThinClient, tpu_client::{TpuClient, TpuClientConfig}, }, solana_genesis::Base64Account, - solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client}, solana_rpc_client::rpc_client::RpcClient, solana_sdk::{ commitment_config::CommitmentConfig, @@ -24,12 +21,12 @@ use { signature::{Keypair, 
Signer}, system_program, }, - solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, + solana_streamer::streamer::StakedNodes, std::{ collections::HashMap, fs::File, io::prelude::*, - net::{IpAddr, SocketAddr}, + net::IpAddr, path::Path, process::exit, sync::{Arc, RwLock}, @@ -125,13 +122,8 @@ fn create_connection_cache( #[allow(clippy::too_many_arguments)] fn create_client( external_client_type: &ExternalClientType, - entrypoint_addr: &SocketAddr, json_rpc_url: &str, websocket_url: &str, - multi_client: bool, - rpc_tpu_sockets: Option<(SocketAddr, SocketAddr)>, - num_nodes: usize, - target_node: Option, connection_cache: ConnectionCache, commitment_config: CommitmentConfig, ) -> Arc { @@ -140,53 +132,6 @@ fn create_client( json_rpc_url.to_string(), commitment_config, )), - ExternalClientType::ThinClient => { - let connection_cache = Arc::new(connection_cache); - if let Some((rpc, tpu)) = rpc_tpu_sockets { - Arc::new(ThinClient::new(rpc, tpu, connection_cache)) - } else { - let nodes = - discover_cluster(entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified) - .unwrap_or_else(|err| { - eprintln!("Failed to discover {num_nodes} nodes: {err:?}"); - exit(1); - }); - if multi_client { - let (client, num_clients) = - get_multi_client(&nodes, &SocketAddrSpace::Unspecified, connection_cache); - if nodes.len() < num_clients { - eprintln!( - "Error: Insufficient nodes discovered. Expecting {num_nodes} or more" - ); - exit(1); - } - Arc::new(client) - } else if let Some(target_node) = target_node { - info!("Searching for target_node: {:?}", target_node); - let mut target_client = None; - for node in nodes { - if node.pubkey() == &target_node { - target_client = Some(get_client( - &[node], - &SocketAddrSpace::Unspecified, - connection_cache, - )); - break; - } - } - Arc::new(target_client.unwrap_or_else(|| { - eprintln!("Target node {target_node} not found"); - exit(1); - })) - } else { - Arc::new(get_client( - &nodes, - &SocketAddrSpace::Unspecified, - connection_cache, - )) - } - } - } ExternalClientType::TpuClient => { let rpc_client = Arc::new(RpcClient::new_with_commitment( json_rpc_url.to_string(), @@ -236,20 +181,16 @@ fn main() { }; let cli::Config { - entrypoint_addr, json_rpc_url, websocket_url, id, - num_nodes, tx_count, keypair_multiplier, client_ids_and_stake_file, write_to_client_file, read_from_client_file, target_lamports_per_signature, - multi_client, num_lamports_per_account, - target_node, external_client_type, use_quic, tpu_connection_pool_size, @@ -295,25 +236,6 @@ fn main() { return; } - info!("Connecting to the cluster"); - let rpc_tpu_sockets: Option<(SocketAddr, SocketAddr)> = - if let Ok(rpc_addr) = value_t!(matches, "rpc_addr", String) { - let rpc = rpc_addr.parse().unwrap_or_else(|e| { - eprintln!("RPC address should parse as socketaddr {e:?}"); - exit(1); - }); - let tpu = value_t!(matches, "tpu_addr", String) - .unwrap() - .parse() - .unwrap_or_else(|e| { - eprintln!("TPU address should parse to a socket: {e:?}"); - exit(1); - }); - Some((rpc, tpu)) - } else { - None - }; - let connection_cache = create_connection_cache( json_rpc_url, *tpu_connection_pool_size, @@ -324,13 +246,8 @@ fn main() { ); let client = create_client( external_client_type, - entrypoint_addr, json_rpc_url, websocket_url, - *multi_client, - rpc_tpu_sockets, - *num_nodes, - *target_node, connection_cache, *commitment_config, ); diff --git a/bench-tps/src/perf_utils.rs b/bench-tps/src/perf_utils.rs index 49399723cf0fb1..c677d83a2745d5 100644 --- a/bench-tps/src/perf_utils.rs +++ 
b/bench-tps/src/perf_utils.rs @@ -47,7 +47,6 @@ pub fn sample_txs( let mut txs = match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) { Err(e) => { - // ThinClient with multiple options should pick a better one now. info!("Couldn't get transaction count {:?}", e); sleep(Duration::from_secs(sample_period)); continue; diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index e7cabdac44baed..2efdd6c8ff6ef4 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -8,7 +8,7 @@ use { send_batch::generate_durable_nonce_accounts, }, solana_client::{ - thin_client::ThinClient, + connection_cache::ConnectionCache, tpu_client::{TpuClient, TpuClientConfig}, }, solana_core::validator::ValidatorConfig, @@ -78,14 +78,24 @@ fn test_bench_tps_local_cluster(config: Config) { cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000); - let client = Arc::new(ThinClient::new( - cluster.entry_point_info.rpc().unwrap(), - cluster - .entry_point_info - .tpu(cluster.connection_cache.protocol()) - .unwrap(), - cluster.connection_cache.clone(), - )); + let ConnectionCache::Quic(cache) = &*cluster.connection_cache else { + panic!("Expected a Quic ConnectionCache."); + }; + + let rpc_pubsub_url = format!("ws://{}/", cluster.entry_point_info.rpc_pubsub().unwrap()); + let rpc_url = format!("http://{}", cluster.entry_point_info.rpc().unwrap()); + + let client = Arc::new( + TpuClient::new_with_connection_cache( + Arc::new(RpcClient::new(rpc_url)), + rpc_pubsub_url.as_str(), + TpuClientConfig::default(), + cache.clone(), + ) + .unwrap_or_else(|err| { + panic!("Could not create TpuClient {err:?}"); + }), + ); let lamports_per_account = 100; diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index 559b904c13f36a..61f24018e8c778 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -2,7 +2,7 @@ //! a server-side TPU. Client code should use this object instead of writing //! messages to the network directly. The binary encoding of its messages are //! unstable and may change in future releases. - +#[allow(deprecated)] use { crate::connection_cache::{dispatch, ConnectionCache}, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, @@ -32,11 +32,13 @@ use { /// A thin wrapper over thin-client/ThinClient to ease /// construction of the ThinClient for code dealing both with udp and quic. /// For the scenario only using udp or quic, use thin-client/ThinClient directly. +#[allow(deprecated)] pub enum ThinClient { Quic(BackendThinClient), Udp(BackendThinClient), } +#[allow(deprecated)] impl ThinClient { /// Create a new ThinClient that will interface with the Rpc at `rpc_addr` using TCP /// and the Tpu at `tpu_addr` over `transactions_socket` using Quic or UDP diff --git a/net/net.sh b/net/net.sh index 14b639a26dc1ea..a2d16cef20f417 100755 --- a/net/net.sh +++ b/net/net.sh @@ -118,7 +118,7 @@ Operate a configured testnet - Enable UDP for tpu transactions --client-type - - Specify backend client type for bench-tps. Valid options are (thin-client|rpc-client|tpu-client), tpu-client is default + - Specify backend client type for bench-tps. 
Valid options are (rpc-client|tpu-client), tpu-client is default sanity/start-specific options: -F - Discard validator nodes that didn't bootup successfully @@ -972,7 +972,7 @@ while [[ -n $1 ]]; do elif [[ $1 = --client-type ]]; then clientType=$2 case "$clientType" in - thin-client|tpu-client|rpc-client) + tpu-client|rpc-client) ;; *) echo "Unexpected client type: \"$clientType\"" diff --git a/net/remote/remote-client.sh b/net/remote/remote-client.sh index 8042bd19618083..c66f3c783885c0 100755 --- a/net/remote/remote-client.sh +++ b/net/remote/remote-client.sh @@ -43,19 +43,12 @@ skip) exit 1 esac -THIN_CLIENT=false RPC_CLIENT=false case "$clientType" in - thin-client) - THIN_CLIENT=true - RPC_CLIENT=false - ;; tpu-client) - THIN_CLIENT=false RPC_CLIENT=false ;; rpc-client) - THIN_CLIENT=false RPC_CLIENT=true ;; *) @@ -74,10 +67,7 @@ solana-bench-tps) args=() - if ${THIN_CLIENT}; then - args+=(--entrypoint "$entrypointIp:8001") - args+=(--use-thin-client) - elif ${RPC_CLIENT}; then + if ${RPC_CLIENT}; then args+=(--use-rpc-client) fi diff --git a/thin-client/README.md b/thin-client/README.md new file mode 100644 index 00000000000000..147b287b2d62b2 --- /dev/null +++ b/thin-client/README.md @@ -0,0 +1,4 @@ +# thin-client +This crate for `thin-client` is deprecated as of v1.19.0. It will receive no bugfixes or updates. + +Please use `tpu-client` or `rpc-client`. \ No newline at end of file diff --git a/thin-client/src/thin_client.rs b/thin-client/src/thin_client.rs index b1ae08fd7c01a3..cbf01e6561a16f 100644 --- a/thin-client/src/thin_client.rs +++ b/thin-client/src/thin_client.rs @@ -111,6 +111,7 @@ impl ClientOptimizer { } /// An object for querying and sending transactions to the network. +#[deprecated(since = "1.19.0", note = "Use [RpcClient] or [TpuClient] instead.")] pub struct ThinClient< P, // ConnectionPool M, // ConnectionManager @@ -122,6 +123,7 @@ pub struct ThinClient< connection_cache: Arc>, } +#[allow(deprecated)] impl ThinClient where P: ConnectionPool, @@ -323,6 +325,7 @@ where } } +#[allow(deprecated)] impl Client for ThinClient where P: ConnectionPool, @@ -334,6 +337,7 @@ where } } +#[allow(deprecated)] impl SyncClient for ThinClient where P: ConnectionPool, @@ -619,6 +623,7 @@ where } } +#[allow(deprecated)] impl AsyncClient for ThinClient where P: ConnectionPool, From cb260f10d1b05743391b9b442bf175c19d894931 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 1 Mar 2024 13:37:51 -0800 Subject: [PATCH 307/401] Remove unnecessary unwrap from `simulate_transaction_unchecked()` (#35375) Remove unnecessary unwrap from simulate_transaction_unchecked() --- runtime/src/bank.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 84c9e2093ebf50..200d43dc30b6c9 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4306,9 +4306,7 @@ impl Bank { let post_simulation_accounts = loaded_transactions .into_iter() .next() - .unwrap() - .0 - .ok() + .and_then(|(loaded_transactions_res, _)| loaded_transactions_res.ok()) .map(|loaded_transaction| { loaded_transaction .accounts @@ -4330,7 +4328,12 @@ impl Bank { debug!("simulate_transaction: {:?}", timings); - let execution_result = execution_results.pop().unwrap(); + let execution_result = + execution_results + .pop() + .unwrap_or(TransactionExecutionResult::NotExecuted( + TransactionError::InvalidProgramForExecution, + )); let flattened_result = execution_result.flattened_result(); let (logs, return_data, inner_instructions) = match execution_result { 
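+ // If execution_results came back empty, execution_result is the
+ // NotExecuted(InvalidProgramForExecution) fallback injected above, so
+ // flattened_result already carries that error and this match produces
+ // no logs, return data, or inner instructions.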
TransactionExecutionResult::Executed { details, .. } => ( From 608329b9740467da3c42e0c109573b7d261b89e4 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 1 Mar 2024 15:18:12 -0800 Subject: [PATCH 308/401] [TieredStorage] rent_epoch() returns 0 for zero-lamport accounts (#35344) #### Problem In TieredAccountMeta, RENT_EXEMPT_RENT_EPOCH will be used when its optional field rent_epoch is None. However, for legacy reasons, 0 should be used for zero-lamport accounts. #### Summary of Changes Return 0 for TieredAccountMeta::rent_epoch() for zero-lamport accounts. #### Test Plan accounts_db::tests::test_clean_zero_lamport_and_dead_slot --- accounts-db/src/tiered_storage/readable.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 12c4a8224d48ea..1801b04fcecd80 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -11,7 +11,10 @@ use { TieredStorageResult, }, }, - solana_sdk::{account::ReadableAccount, pubkey::Pubkey, stake_history::Epoch}, + solana_sdk::{ + account::ReadableAccount, pubkey::Pubkey, rent_collector::RENT_EXEMPT_RENT_EPOCH, + stake_history::Epoch, + }, std::path::Path, }; @@ -72,12 +75,23 @@ impl<'accounts_file, M: TieredAccountMeta> ReadableAccount } /// Returns the epoch that this account will next owe rent by parsing - /// the specified account block. Epoch::MAX will be returned if the account - /// is rent-exempt. + /// the specified account block. RENT_EXEMPT_RENT_EPOCH will be returned + /// if the account is rent-exempt. + /// + /// For a zero-lamport account, Epoch::default() will be returned to + /// default states of an AccountSharedData. fn rent_epoch(&self) -> Epoch { self.meta .rent_epoch(self.account_block) - .unwrap_or(Epoch::MAX) + .unwrap_or(if self.lamports() != 0 { + RENT_EXEMPT_RENT_EPOCH + } else { + // While there is no valid-values for any fields of a zero + // lamport account, here we return Epoch::default() to + // match the default states of AccountSharedData. Otherwise, + // a hash mismatch will occur. + Epoch::default() + }) } /// Returns the data associated to this account. From bfe44d95f4e57744ad95fb804ecbbeab44a5f408 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 1 Mar 2024 18:52:47 -0800 Subject: [PATCH 309/401] Wen restart aggregate last voted fork slots (#33892) * Push and aggregate RestartLastVotedForkSlots. * Fix API and lint errors. * Reduce clutter. * Put my own LastVotedForkSlots into the aggregate. * Write LastVotedForkSlots aggregate progress into local file. * Fix typo and name constants. * Fix flaky test. * Clarify the comments. * - Use constant for wait_for_supermajority - Avoid waiting after first shred when repair is in wen_restart * Fix delay_after_first_shred and remove loop in wen_restart. * Read wen_restart slots inside the loop instead. * Discard turbine shreds while in wen_restart in windows insert rather than shred_fetch_stage. * Use the new Gossip API. * Rename slots_to_repair_for_wen_restart and a few others. * Rename a few more and list all states. * Pipe exit down to aggregate loop so we can exit early. * Fix import of RestartLastVotedForkSlots. * Use the new method to generate test bank. * Make linter happy. * Use new bank constructor for tests. * Fix a bad merge. 
* - add new const for wen_restart - fix the test to cover more cases - add generate_repairs_for_slot_not_throtted_by_tick and generate_repairs_for_slot_throtted_by_tick to make it readable * Add initialize and put the main logic into a loop. * Change aggregate interface and other fixes. * Add failure tests and tests for state transition. * Add more tests and add ability to recover from written records in last_voted_fork_slots_aggregate. * Various name changes. * We don't really care what type of error is returned. * Wait on expected progress message in proto file instead of sleep. * Code reorganization and cleanup. * Make linter happy. * Add WenRestartError. * Split WenRestartErrors into separate erros per state. * Revert "Split WenRestartErrors into separate erros per state." This reverts commit 4c920cb8f8d492707560441912351cca779129f6. * Use individual functions when testing for failures. * Move initialization errors into initialize(). * Use anyhow instead of thiserror to generate backtrace for error. * Add missing Cargo.lock. * Add error log when last_vote is missing in the tower storage. * Change error log info. * Change test to match exact error. --- Cargo.lock | 5 + core/src/repair/ancestor_hashes_service.rs | 1 + core/src/repair/repair_generic_traversal.rs | 2 +- core/src/repair/repair_service.rs | 183 ++- core/src/repair/repair_weighted_traversal.rs | 2 +- core/src/tvu.rs | 3 + core/src/validator.rs | 15 + core/src/window_service.rs | 37 +- programs/sbf/Cargo.lock | 1 + wen-restart/Cargo.toml | 6 + wen-restart/proto/wen_restart.proto | 12 +- .../src/last_voted_fork_slots_aggregate.rs | 487 ++++++++ wen-restart/src/lib.rs | 1 + wen-restart/src/wen_restart.rs | 1055 +++++++++++++++-- 14 files changed, 1703 insertions(+), 107 deletions(-) create mode 100644 wen-restart/src/last_voted_fork_slots_aggregate.rs diff --git a/Cargo.lock b/Cargo.lock index cc42e6da02df20..72e7cfd226e9ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7608,13 +7608,17 @@ dependencies = [ name = "solana-wen-restart" version = "1.19.0" dependencies = [ + "anyhow", + "assert_matches", "log", "prost", "prost-build", "prost-types", "protobuf-src", + "rand 0.8.5", "rustc_version 0.4.0", "serial_test", + "solana-accounts-db", "solana-entry", "solana-gossip", "solana-ledger", @@ -7624,6 +7628,7 @@ dependencies = [ "solana-sdk", "solana-streamer", "solana-vote-program", + "tempfile", ] [[package]] diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 87b1f49bc9cfc1..8f455cbd6a1c19 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -1376,6 +1376,7 @@ mod test { ancestor_duplicate_slots_sender, repair_validators: None, repair_whitelist, + wen_restart_repair_slots: None, }; let (ancestor_hashes_replay_update_sender, ancestor_hashes_replay_update_receiver) = diff --git a/core/src/repair/repair_generic_traversal.rs b/core/src/repair/repair_generic_traversal.rs index f33a9b91e28bd8..c4b573620079c1 100644 --- a/core/src/repair/repair_generic_traversal.rs +++ b/core/src/repair/repair_generic_traversal.rs @@ -186,7 +186,7 @@ pub fn get_closest_completion( continue; } let slot_meta = slot_meta_cache.get(&path_slot).unwrap().as_ref().unwrap(); - let new_repairs = RepairService::generate_repairs_for_slot( + let new_repairs = RepairService::generate_repairs_for_slot_throttled_by_tick( blockstore, path_slot, slot_meta, diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 
f695f7b6035163..eb516fc74bb55b 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -224,6 +224,8 @@ pub struct RepairInfo { pub repair_validators: Option>, // Validators which should be given priority when serving pub repair_whitelist: Arc>>, + // A given list of slots to repair when in wen_restart + pub wen_restart_repair_slots: Option>>>, } pub struct RepairSlotRange { @@ -397,17 +399,24 @@ impl RepairService { ); add_votes_elapsed.stop(); - let repairs = repair_weight.get_best_weighted_repairs( - blockstore, - root_bank.epoch_stakes_map(), - root_bank.epoch_schedule(), - MAX_ORPHANS, - MAX_REPAIR_LENGTH, - MAX_UNKNOWN_LAST_INDEX_REPAIRS, - MAX_CLOSEST_COMPLETION_REPAIRS, - &mut repair_timing, - &mut best_repairs_stats, - ); + let repairs = match repair_info.wen_restart_repair_slots.clone() { + Some(slots_to_repair) => Self::generate_repairs_for_wen_restart( + blockstore, + MAX_REPAIR_LENGTH, + &slots_to_repair.read().unwrap(), + ), + None => repair_weight.get_best_weighted_repairs( + blockstore, + root_bank.epoch_stakes_map(), + root_bank.epoch_schedule(), + MAX_ORPHANS, + MAX_REPAIR_LENGTH, + MAX_UNKNOWN_LAST_INDEX_REPAIRS, + MAX_CLOSEST_COMPLETION_REPAIRS, + &mut repair_timing, + &mut best_repairs_stats, + ), + }; let mut popular_pruned_forks = repair_weight.get_popular_pruned_forks( root_bank.epoch_stakes_map(), @@ -618,32 +627,58 @@ impl RepairService { } } + pub fn generate_repairs_for_slot_throttled_by_tick( + blockstore: &Blockstore, + slot: Slot, + slot_meta: &SlotMeta, + max_repairs: usize, + ) -> Vec { + Self::generate_repairs_for_slot(blockstore, slot, slot_meta, max_repairs, true) + } + + pub fn generate_repairs_for_slot_not_throttled_by_tick( + blockstore: &Blockstore, + slot: Slot, + slot_meta: &SlotMeta, + max_repairs: usize, + ) -> Vec { + Self::generate_repairs_for_slot(blockstore, slot, slot_meta, max_repairs, false) + } + /// If this slot is missing shreds generate repairs - pub fn generate_repairs_for_slot( + fn generate_repairs_for_slot( blockstore: &Blockstore, slot: Slot, slot_meta: &SlotMeta, max_repairs: usize, + throttle_requests_by_shred_tick: bool, ) -> Vec { + let defer_repair_threshold_ticks = if throttle_requests_by_shred_tick { + DEFER_REPAIR_THRESHOLD_TICKS + } else { + 0 + }; if max_repairs == 0 || slot_meta.is_full() { vec![] } else if slot_meta.consumed == slot_meta.received { - // check delay time of last shred - if let Some(reference_tick) = slot_meta - .received - .checked_sub(1) - .and_then(|index| blockstore.get_data_shred(slot, index).ok()?) - .and_then(|shred| shred::layout::get_reference_tick(&shred).ok()) - .map(u64::from) - { - // System time is not monotonic - let ticks_since_first_insert = DEFAULT_TICKS_PER_SECOND - * timestamp().saturating_sub(slot_meta.first_shred_timestamp) - / 1_000; - if ticks_since_first_insert - < reference_tick.saturating_add(DEFER_REPAIR_THRESHOLD_TICKS) + if throttle_requests_by_shred_tick { + // check delay time of last shred + if let Some(reference_tick) = slot_meta + .received + .checked_sub(1) + .and_then(|index| blockstore.get_data_shred(slot, index).ok()?) 
+ .and_then(|shred| shred::layout::get_reference_tick(&shred).ok()) + .map(u64::from) { - return vec![]; + // System time is not monotonic + let ticks_since_first_insert = DEFAULT_TICKS_PER_SECOND + * timestamp().saturating_sub(slot_meta.first_shred_timestamp) + / 1_000; + if ticks_since_first_insert + < reference_tick.saturating_add(defer_repair_threshold_ticks) + { + return vec![]; + } } } vec![ShredRepairType::HighestShred(slot, slot_meta.received)] @@ -652,7 +687,7 @@ impl RepairService { .find_missing_data_indexes( slot, slot_meta.first_shred_timestamp, - DEFER_REPAIR_THRESHOLD_TICKS, + defer_repair_threshold_ticks, slot_meta.consumed, slot_meta.received, max_repairs, @@ -674,7 +709,7 @@ impl RepairService { while repairs.len() < max_repairs && !pending_slots.is_empty() { let slot = pending_slots.pop().unwrap(); if let Some(slot_meta) = blockstore.meta(slot).unwrap() { - let new_repairs = Self::generate_repairs_for_slot( + let new_repairs = Self::generate_repairs_for_slot_throttled_by_tick( blockstore, slot, &slot_meta, @@ -689,6 +724,33 @@ impl RepairService { } } + pub(crate) fn generate_repairs_for_wen_restart( + blockstore: &Blockstore, + max_repairs: usize, + slots: &Vec, + ) -> Vec { + let mut repairs: Vec = Vec::new(); + for slot in slots { + if let Some(slot_meta) = blockstore.meta(*slot).unwrap() { + // When in wen_restart, turbine is not running, so there is + // no need to wait after first shred. + let new_repairs = Self::generate_repairs_for_slot_not_throttled_by_tick( + blockstore, + *slot, + &slot_meta, + max_repairs - repairs.len(), + ); + repairs.extend(new_repairs); + } else { + repairs.push(ShredRepairType::HighestShred(*slot, 0)); + } + if repairs.len() >= max_repairs { + break; + } + } + repairs + } + fn get_repair_peers( cluster_info: Arc, cluster_slots: Arc, @@ -845,7 +907,7 @@ impl RepairService { ..SlotMeta::default() }); - let new_repairs = Self::generate_repairs_for_slot( + let new_repairs = Self::generate_repairs_for_slot_throttled_by_tick( blockstore, slot, &meta, @@ -867,7 +929,7 @@ impl RepairService { // If the slot is full, no further need to repair this slot None } else { - Some(Self::generate_repairs_for_slot( + Some(Self::generate_repairs_for_slot_throttled_by_tick( blockstore, slot, &slot_meta, @@ -1548,4 +1610,63 @@ mod test { ); assert_ne!(duplicate_status.repair_pubkey_and_addr, dummy_addr); } + + #[test] + fn test_generate_repairs_for_wen_restart() { + solana_logger::setup(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + let max_repairs = 3; + + let slots: Vec = vec![2, 3, 5, 7]; + let num_entries_per_slot = max_ticks_per_n_shreds(3, None) + 1; + + let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot); + for (i, (mut slot_shreds, _)) in shreds.into_iter().enumerate() { + slot_shreds.remove(i); + blockstore.insert_shreds(slot_shreds, None, false).unwrap(); + } + + let mut slots_to_repair: Vec = vec![]; + + // When slots_to_repair is empty, ignore all and return empty result. + let result = RepairService::generate_repairs_for_wen_restart( + &blockstore, + max_repairs, + &slots_to_repair, + ); + assert!(result.is_empty()); + + // When asked to repair slot with missing shreds and some unknown slot, return correct results. 
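+ // (Slot 3 is present but is missing data shred index 1, which yields
+ // Shred(3, 1); slot 81 has no SlotMeta at all, so the whole slot is
+ // requested from the start via HighestShred(81, 0).)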
+ slots_to_repair = vec![3, 81];
+ let result = RepairService::generate_repairs_for_wen_restart(
+ &blockstore,
+ max_repairs,
+ &slots_to_repair,
+ );
+ assert_eq!(
+ result,
+ vec![
+ ShredRepairType::Shred(3, 1),
+ ShredRepairType::HighestShred(81, 0),
+ ],
+ );
+
+ // Test that it will not generate more than max_repairs.
+ slots_to_repair = vec![2, 82, 7, 83, 84];
+ let result = RepairService::generate_repairs_for_wen_restart(
+ &blockstore,
+ max_repairs,
+ &slots_to_repair,
+ );
+ assert_eq!(result.len(), max_repairs);
+ assert_eq!(
+ result,
+ vec![
+ ShredRepairType::Shred(2, 0),
+ ShredRepairType::HighestShred(82, 0),
+ ShredRepairType::HighestShred(7, 3),
+ ],
+ );
+ }
 }
diff --git a/core/src/repair/repair_weighted_traversal.rs b/core/src/repair/repair_weighted_traversal.rs
index 38682a3fd1326b..175b3268ebafc2 100644
--- a/core/src/repair/repair_weighted_traversal.rs
+++ b/core/src/repair/repair_weighted_traversal.rs
@@ -98,7 +98,7 @@ pub fn get_best_repair_shreds(
 if let Some(slot_meta) = slot_meta {
 match next {
 Visit::Unvisited(slot) => {
- let new_repairs = RepairService::generate_repairs_for_slot(
+ let new_repairs = RepairService::generate_repairs_for_slot_throttled_by_tick(
 blockstore,
 slot,
 slot_meta,
diff --git a/core/src/tvu.rs b/core/src/tvu.rs
index b0fe93890761b4..47bc9a7905da5f 100644
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -143,6 +143,7 @@ impl Tvu {
 repair_quic_endpoint_sender: AsyncSender,
 outstanding_repair_requests: Arc>,
 cluster_slots: Arc,
+ wen_restart_repair_slots: Option>>>,
 ) -> Result {
 let TvuSockets {
 repair: repair_socket,
@@ -214,6 +215,7 @@ impl Tvu {
 repair_whitelist: tvu_config.repair_whitelist,
 cluster_info: cluster_info.clone(),
 cluster_slots: cluster_slots.clone(),
+ wen_restart_repair_slots,
 };
 WindowService::new(
 blockstore.clone(),
@@ -499,6 +501,7 @@ pub mod tests {
 repair_quic_endpoint_sender,
 outstanding_repair_requests,
 cluster_slots,
+ None,
 )
 .expect("assume success");
 exit.store(true, Ordering::Relaxed);
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 97ef0a01ef87ad..a6d5921bcef5c9 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -138,6 +138,11 @@ use {
 const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
 const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80;
+// Right now, since we reuse the wait for supermajority code, the
+// following threshold should always be greater than or equal to
+// WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT.
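+// With the current value of 80, the aggregation loop in
+// aggregate_restart_last_voted_fork_slots() only exits once active stake
+// strictly exceeds 80% of total stake and no slots remain to repair.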
+const WAIT_FOR_WEN_RESTART_SUPERMAJORITY_THRESHOLD_PERCENT: u64 =
+ WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT;
 #[derive(Clone, EnumString, EnumVariantNames, Default, IntoStaticStr, Display)]
 #[strum(serialize_all = "kebab-case")]
@@ -1236,6 +1241,11 @@ impl Validator {
 };
 let in_wen_restart = config.wen_restart_proto_path.is_some() && !waited_for_supermajority;
+ let wen_restart_repair_slots = if in_wen_restart {
+ Some(Arc::new(RwLock::new(Vec::new())))
+ } else {
+ None
+ };
 let tower = match process_blockstore.process_to_create_tower() {
 Ok(tower) => {
 info!("Tower state: {:?}", tower);
@@ -1310,6 +1320,7 @@ impl Validator {
 repair_quic_endpoint_sender,
 outstanding_repair_requests.clone(),
 cluster_slots.clone(),
+ wen_restart_repair_slots.clone(),
 )?;
 if in_wen_restart {
@@ -1319,6 +1330,10 @@ impl Validator {
 last_vote,
 blockstore.clone(),
 cluster_info.clone(),
+ bank_forks.clone(),
+ wen_restart_repair_slots.clone(),
+ WAIT_FOR_WEN_RESTART_SUPERMAJORITY_THRESHOLD_PERCENT,
+ exit.clone(),
 ) {
 Ok(()) => {
 return Err("wen_restart phase one completed".to_string());
diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index 504776db1e1a25..7d939eeea8de44 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -248,10 +248,11 @@ fn verify_repair(
 .unwrap_or(true)
 }
-fn prune_shreds_invalid_repair(
+fn prune_shreds_by_repair_status(
 shreds: &mut Vec,
 repair_infos: &mut Vec>,
 outstanding_requests: &RwLock,
+ accept_repairs_only: bool,
 ) {
 assert_eq!(shreds.len(), repair_infos.len());
 let mut i = 0;
@@ -260,7 +261,8 @@ fn prune_shreds_invalid_repair(
 let mut outstanding_requests = outstanding_requests.write().unwrap();
 shreds.retain(|shred| {
 let should_keep = (
- verify_repair(&mut outstanding_requests, shred, &repair_infos[i]),
+ (!accept_repairs_only || repair_infos[i].is_some())
+ && verify_repair(&mut outstanding_requests, shred, &repair_infos[i]),
 i += 1,
 )
 .0;
@@ -288,6 +290,7 @@ fn run_insert(
 retransmit_sender: &Sender>,
 outstanding_requests: &RwLock,
 reed_solomon_cache: &ReedSolomonCache,
+ accept_repairs_only: bool,
 ) -> Result<()>
 where
 F: Fn(PossibleDuplicateShred),
@@ -333,7 +336,12 @@ where
 let mut prune_shreds_elapsed = Measure::start("prune_shreds_elapsed");
 let num_shreds = shreds.len();
- prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, outstanding_requests);
+ prune_shreds_by_repair_status(
+ &mut shreds,
+ &mut repair_infos,
+ outstanding_requests,
+ accept_repairs_only,
+ );
 ws_metrics.num_shreds_pruned_invalid_repair = num_shreds - shreds.len();
 let repairs: Vec<_> = repair_infos
 .iter()
@@ -391,6 +399,10 @@ impl WindowService {
 let cluster_info = repair_info.cluster_info.clone();
 let bank_forks = repair_info.bank_forks.clone();
+ // In wen_restart, we discard all shreds from Turbine and keep only those from repair to
+ // avoid new shreds making the validator OOM before wen_restart is over.
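+ // `wen_restart_repair_slots` is `Some` only when the validator was
+ // started in wen_restart mode (see validator.rs above), so its presence
+ // doubles as the repair-only switch threaded down into run_insert().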
+ let accept_repairs_only = repair_info.wen_restart_repair_slots.is_some(); + let repair_service = RepairService::new( blockstore.clone(), exit.clone(), @@ -426,6 +438,7 @@ impl WindowService { completed_data_sets_sender, retransmit_sender, outstanding_repair_requests, + accept_repairs_only, ); WindowService { @@ -475,6 +488,7 @@ impl WindowService { completed_data_sets_sender: CompletedDataSetsSender, retransmit_sender: Sender>, outstanding_requests: Arc>, + accept_repairs_only: bool, ) -> JoinHandle<()> { let handle_error = || { inc_new_counter_error!("solana-window-insert-error", 1, 1); @@ -507,6 +521,7 @@ impl WindowService { &retransmit_sender, &outstanding_requests, &reed_solomon_cache, + accept_repairs_only, ) { ws_metrics.record_error(&e); if Self::should_exit_on_error(e, &handle_error) { @@ -743,7 +758,7 @@ mod test { 4, // position 0, // version ); - let mut shreds = vec![shred.clone(), shred.clone(), shred]; + let mut shreds = vec![shred.clone(), shred.clone(), shred.clone()]; let repair_meta = RepairMeta { nonce: 0 }; let outstanding_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default())); let repair_type = ShredRepairType::Orphan(9); @@ -753,9 +768,21 @@ mod test { .add_request(repair_type, timestamp()); let repair_meta1 = RepairMeta { nonce }; let mut repair_infos = vec![None, Some(repair_meta), Some(repair_meta1)]; - prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, &outstanding_requests); + prune_shreds_by_repair_status(&mut shreds, &mut repair_infos, &outstanding_requests, false); + assert_eq!(shreds.len(), 2); assert_eq!(repair_infos.len(), 2); assert!(repair_infos[0].is_none()); assert_eq!(repair_infos[1].as_ref().unwrap().nonce, nonce); + + shreds = vec![shred.clone(), shred.clone(), shred]; + let repair_meta2 = RepairMeta { nonce: 0 }; + let repair_meta3 = RepairMeta { nonce }; + repair_infos = vec![None, Some(repair_meta2), Some(repair_meta3)]; + // In wen_restart, we discard all Turbine shreds and only keep valid repair shreds. 
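+ // Of the three clones below, the first arrived via Turbine (no repair
+ // metadata) and the second carries a non-matching nonce, so only the
+ // third shred survives the accept_repairs_only pass.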
+ prune_shreds_by_repair_status(&mut shreds, &mut repair_infos, &outstanding_requests, true); + assert_eq!(shreds.len(), 1); + assert_eq!(repair_infos.len(), 1); + assert!(repair_infos[0].is_some()); + assert_eq!(repair_infos[0].as_ref().unwrap().nonce, nonce); } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 93e2a243e2004d..0929453a9e27bf 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6581,6 +6581,7 @@ dependencies = [ name = "solana-wen-restart" version = "1.19.0" dependencies = [ + "anyhow", "log", "prost", "prost-build", diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml index a2e6e5c1ac885d..add4340cdc05c3 100644 --- a/wen-restart/Cargo.toml +++ b/wen-restart/Cargo.toml @@ -11,6 +11,7 @@ edition = { workspace = true } publish = true [dependencies] +anyhow = { workspace = true } log = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } @@ -23,9 +24,14 @@ solana-sdk = { workspace = true } solana-vote-program = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } +rand = { workspace = true } serial_test = { workspace = true } +solana-accounts-db = { workspace = true } solana-entry = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-streamer = { workspace = true } +tempfile = { workspace = true } [build-dependencies] prost-build = { workspace = true } diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto index 1f6423462b55b0..b25c2f17764bfd 100644 --- a/wen-restart/proto/wen_restart.proto +++ b/wen-restart/proto/wen_restart.proto @@ -11,13 +11,19 @@ enum State { DONE = 6; } -message MyLastVotedForkSlots { - uint64 last_vote_slot = 1; +message LastVotedForkSlotsRecord { + repeated uint64 last_voted_fork_slots = 1; string last_vote_bankhash = 2; uint32 shred_version = 3; + uint64 wallclock = 4; +} + +message LastVotedForkSlotsAggregateRecord { + map received = 1; } message WenRestartProgress { State state = 1; - optional MyLastVotedForkSlots my_last_voted_fork_slots = 2; + optional LastVotedForkSlotsRecord my_last_voted_fork_slots = 2; + optional LastVotedForkSlotsAggregateRecord last_voted_fork_slots_aggregate = 3; } \ No newline at end of file diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs new file mode 100644 index 00000000000000..8a26c4d315f419 --- /dev/null +++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs @@ -0,0 +1,487 @@ +use { + crate::solana::wen_restart_proto::LastVotedForkSlotsRecord, + anyhow::Result, + log::*, + solana_gossip::restart_crds_values::RestartLastVotedForkSlots, + solana_runtime::epoch_stakes::EpochStakes, + solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + std::{ + collections::{HashMap, HashSet}, + str::FromStr, + }, +}; + +pub struct LastVotedForkSlotsAggregate { + root_slot: Slot, + repair_threshold: f64, + // TODO(wen): using local root's EpochStakes, need to fix if crossing Epoch boundary. 
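+ // Stake lookups go through this epoch's node-id -> vote-accounts map;
+ // a pubkey missing from the map counts as zero stake, and its gossip
+ // messages are rejected in aggregate().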
+ epoch_stakes: EpochStakes, + last_voted_fork_slots: HashMap, + slots_stake_map: HashMap, + active_peers: HashSet, + slots_to_repair: HashSet, +} + +pub struct LastVotedForkSlotsAggregateResult { + pub slots_to_repair: Vec, + pub active_percent: f64, /* 0 ~ 100.0 */ +} + +impl LastVotedForkSlotsAggregate { + pub(crate) fn new( + root_slot: Slot, + repair_threshold: f64, + epoch_stakes: &EpochStakes, + last_voted_fork_slots: &Vec, + my_pubkey: &Pubkey, + ) -> Self { + let mut active_peers = HashSet::new(); + let sender_stake = Self::validator_stake(epoch_stakes, my_pubkey); + active_peers.insert(*my_pubkey); + let mut slots_stake_map = HashMap::new(); + for slot in last_voted_fork_slots { + if slot > &root_slot { + slots_stake_map.insert(*slot, sender_stake); + } + } + Self { + root_slot, + repair_threshold, + epoch_stakes: epoch_stakes.clone(), + last_voted_fork_slots: HashMap::new(), + slots_stake_map, + active_peers, + slots_to_repair: HashSet::new(), + } + } + + fn validator_stake(epoch_stakes: &EpochStakes, pubkey: &Pubkey) -> u64 { + epoch_stakes + .node_id_to_vote_accounts() + .get(pubkey) + .map(|x| x.total_stake) + .unwrap_or_default() + } + + pub(crate) fn aggregate_from_record( + &mut self, + key_string: &str, + record: &LastVotedForkSlotsRecord, + ) -> Result> { + let from = Pubkey::from_str(key_string)?; + let last_voted_hash = Hash::from_str(&record.last_vote_bankhash)?; + let converted_record = RestartLastVotedForkSlots::new( + from, + record.wallclock, + &record.last_voted_fork_slots, + last_voted_hash, + record.shred_version as u16, + )?; + Ok(self.aggregate(converted_record)) + } + + pub(crate) fn aggregate( + &mut self, + new_slots: RestartLastVotedForkSlots, + ) -> Option { + let total_stake = self.epoch_stakes.total_stake(); + let threshold_stake = (total_stake as f64 * self.repair_threshold) as u64; + let from = &new_slots.from; + let sender_stake = Self::validator_stake(&self.epoch_stakes, from); + if sender_stake == 0 { + warn!( + "Gossip should not accept zero-stake RestartLastVotedFork from {:?}", + from + ); + return None; + } + self.active_peers.insert(*from); + let new_slots_vec = new_slots.to_slots(self.root_slot); + let record = LastVotedForkSlotsRecord { + last_voted_fork_slots: new_slots_vec.clone(), + last_vote_bankhash: new_slots.last_voted_hash.to_string(), + shred_version: new_slots.shred_version as u32, + wallclock: new_slots.wallclock, + }; + let new_slots_set: HashSet = HashSet::from_iter(new_slots_vec); + let old_slots_set = match self.last_voted_fork_slots.insert(*from, new_slots.clone()) { + Some(old_slots) => { + if old_slots == new_slots { + return None; + } else { + HashSet::from_iter(old_slots.to_slots(self.root_slot)) + } + } + None => HashSet::new(), + }; + for slot in old_slots_set.difference(&new_slots_set) { + let entry = self.slots_stake_map.get_mut(slot).unwrap(); + *entry = entry.saturating_sub(sender_stake); + if *entry < threshold_stake { + self.slots_to_repair.remove(slot); + } + } + for slot in new_slots_set.difference(&old_slots_set) { + let entry = self.slots_stake_map.entry(*slot).or_insert(0); + *entry = entry.saturating_add(sender_stake); + if *entry >= threshold_stake { + self.slots_to_repair.insert(*slot); + } + } + Some(record) + } + + pub(crate) fn get_aggregate_result(&self) -> LastVotedForkSlotsAggregateResult { + let total_stake = self.epoch_stakes.total_stake(); + let total_active_stake = self.active_peers.iter().fold(0, |sum: u64, pubkey| { + sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey)) + }); 
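+ // The percentage is stake-weighted rather than a peer count: with ten
+ // equally staked validators, each active peer contributes 10.0.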
+ let active_percent = total_active_stake as f64 / total_stake as f64 * 100.0; + LastVotedForkSlotsAggregateResult { + slots_to_repair: self.slots_to_repair.iter().cloned().collect(), + active_percent, + } + } +} + +#[cfg(test)] +mod tests { + use { + crate::{ + last_voted_fork_slots_aggregate::LastVotedForkSlotsAggregate, + solana::wen_restart_proto::LastVotedForkSlotsRecord, + }, + solana_gossip::restart_crds_values::RestartLastVotedForkSlots, + solana_program::{clock::Slot, pubkey::Pubkey}, + solana_runtime::{ + bank::Bank, + genesis_utils::{ + create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, + }, + }, + solana_sdk::{hash::Hash, signature::Signer, timing::timestamp}, + }; + + const TOTAL_VALIDATOR_COUNT: u16 = 10; + const MY_INDEX: usize = 9; + const REPAIR_THRESHOLD: f64 = 0.42; + const SHRED_VERSION: u16 = 52; + + struct TestAggregateInitResult { + pub slots_aggregate: LastVotedForkSlotsAggregate, + pub validator_voting_keypairs: Vec, + pub root_slot: Slot, + pub last_voted_fork_slots: Vec, + } + + fn test_aggregate_init() -> TestAggregateInitResult { + solana_logger::setup(); + let validator_voting_keypairs: Vec<_> = (0..TOTAL_VALIDATOR_COUNT) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect(); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + 10_000, + &validator_voting_keypairs, + vec![100; validator_voting_keypairs.len()], + ); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let root_bank = bank_forks.read().unwrap().root_bank(); + let root_slot = root_bank.slot(); + let last_voted_fork_slots = vec![ + root_slot.saturating_add(1), + root_slot.saturating_add(2), + root_slot.saturating_add(3), + ]; + TestAggregateInitResult { + slots_aggregate: LastVotedForkSlotsAggregate::new( + root_slot, + REPAIR_THRESHOLD, + root_bank.epoch_stakes(root_bank.epoch()).unwrap(), + &last_voted_fork_slots, + &validator_voting_keypairs[MY_INDEX].node_keypair.pubkey(), + ), + validator_voting_keypairs, + root_slot, + last_voted_fork_slots, + } + } + + #[test] + fn test_aggregate() { + let mut test_state = test_aggregate_init(); + let root_slot = test_state.root_slot; + let initial_num_active_validators = 3; + for validator_voting_keypair in test_state + .validator_voting_keypairs + .iter() + .take(initial_num_active_validators) + { + let pubkey = validator_voting_keypair.node_keypair.pubkey(); + let now = timestamp(); + assert_eq!( + test_state.slots_aggregate.aggregate( + RestartLastVotedForkSlots::new( + pubkey, + now, + &test_state.last_voted_fork_slots, + Hash::default(), + SHRED_VERSION, + ) + .unwrap(), + ), + Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: Hash::default().to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }), + ); + } + let result = test_state.slots_aggregate.get_aggregate_result(); + let mut expected_active_percent = + (initial_num_active_validators + 1) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; + assert_eq!(result.active_percent, expected_active_percent); + assert!(result.slots_to_repair.is_empty()); + + let new_active_validator = test_state.validator_voting_keypairs + [initial_num_active_validators + 1] + .node_keypair + .pubkey(); + let now = timestamp(); + let new_active_validator_last_voted_slots = RestartLastVotedForkSlots::new( + new_active_validator, + now, + &test_state.last_voted_fork_slots, + Hash::default(), + SHRED_VERSION, + ) + .unwrap(); + 
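+ // Aggregating a fifth active validator (self included) lifts each of
+ // the three fork slots to 50% of total stake, crossing the 42%
+ // REPAIR_THRESHOLD, so all of them appear in slots_to_repair below.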
assert_eq!( + test_state + .slots_aggregate + .aggregate(new_active_validator_last_voted_slots), + Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: Hash::default().to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }), + ); + let result = test_state.slots_aggregate.get_aggregate_result(); + expected_active_percent = + (initial_num_active_validators + 2) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; + assert_eq!(result.active_percent, expected_active_percent); + let mut actual_slots = Vec::from_iter(result.slots_to_repair); + actual_slots.sort(); + assert_eq!(actual_slots, test_state.last_voted_fork_slots); + + let replace_message_validator = test_state.validator_voting_keypairs[2] + .node_keypair + .pubkey(); + // Allow specific validator to replace message. + let now = timestamp(); + let replace_message_validator_last_fork = RestartLastVotedForkSlots::new( + replace_message_validator, + now, + &[root_slot + 1, root_slot + 4, root_slot + 5], + Hash::default(), + SHRED_VERSION, + ) + .unwrap(); + assert_eq!( + test_state + .slots_aggregate + .aggregate(replace_message_validator_last_fork), + Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: vec![root_slot + 1, root_slot + 4, root_slot + 5], + last_vote_bankhash: Hash::default().to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }), + ); + let result = test_state.slots_aggregate.get_aggregate_result(); + assert_eq!(result.active_percent, expected_active_percent); + let mut actual_slots = Vec::from_iter(result.slots_to_repair); + actual_slots.sort(); + assert_eq!(actual_slots, vec![root_slot + 1]); + + // test that zero stake validator is ignored. + let random_pubkey = Pubkey::new_unique(); + assert_eq!( + test_state.slots_aggregate.aggregate( + RestartLastVotedForkSlots::new( + random_pubkey, + timestamp(), + &[root_slot + 1, root_slot + 4, root_slot + 5], + Hash::default(), + SHRED_VERSION, + ) + .unwrap(), + ), + None, + ); + let result = test_state.slots_aggregate.get_aggregate_result(); + assert_eq!(result.active_percent, expected_active_percent); + let mut actual_slots = Vec::from_iter(result.slots_to_repair); + actual_slots.sort(); + assert_eq!(actual_slots, vec![root_slot + 1]); + } + + #[test] + fn test_aggregate_from_record() { + let mut test_state = test_aggregate_init(); + let root_slot = test_state.root_slot; + let last_vote_bankhash = Hash::new_unique(); + let time1 = timestamp(); + let record = LastVotedForkSlotsRecord { + wallclock: time1, + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + }; + let result = test_state.slots_aggregate.get_aggregate_result(); + assert_eq!(result.active_percent, 10.0); + assert_eq!( + test_state + .slots_aggregate + .aggregate_from_record( + &test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey() + .to_string(), + &record, + ) + .unwrap(), + Some(record.clone()), + ); + let result = test_state.slots_aggregate.get_aggregate_result(); + assert_eq!(result.active_percent, 20.0); + // Now if you get the same result from Gossip again, it should be ignored. 
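+ // aggregate() returns None for an exact duplicate, so no new record is
+ // produced and active_percent stays at 20.0.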
+ assert_eq!( + test_state.slots_aggregate.aggregate( + RestartLastVotedForkSlots::new( + test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey(), + time1, + &test_state.last_voted_fork_slots, + last_vote_bankhash, + SHRED_VERSION, + ) + .unwrap(), + ), + None, + ); + + // But if it's a new record from the same validator, it will be replaced. + let time2 = timestamp(); + let last_voted_fork_slots2 = + vec![root_slot + 1, root_slot + 2, root_slot + 3, root_slot + 4]; + let last_vote_bankhash2 = Hash::new_unique(); + assert_eq!( + test_state.slots_aggregate.aggregate( + RestartLastVotedForkSlots::new( + test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey(), + time2, + &last_voted_fork_slots2, + last_vote_bankhash2, + SHRED_VERSION, + ) + .unwrap(), + ), + Some(LastVotedForkSlotsRecord { + wallclock: time2, + last_voted_fork_slots: last_voted_fork_slots2.clone(), + last_vote_bankhash: last_vote_bankhash2.to_string(), + shred_version: SHRED_VERSION as u32, + }), + ); + // percentage doesn't change since it's a replace. + let result = test_state.slots_aggregate.get_aggregate_result(); + assert_eq!(result.active_percent, 20.0); + + // Record from validator with zero stake should be ignored. + assert_eq!( + test_state + .slots_aggregate + .aggregate_from_record( + &Pubkey::new_unique().to_string(), + &LastVotedForkSlotsRecord { + wallclock: timestamp(), + last_voted_fork_slots: vec![root_slot + 10, root_slot + 300], + last_vote_bankhash: Hash::new_unique().to_string(), + shred_version: SHRED_VERSION as u32, + } + ) + .unwrap(), + None, + ); + // percentage doesn't change since the previous aggregate is ignored. + let result = test_state.slots_aggregate.get_aggregate_result(); + assert_eq!(result.active_percent, 20.0); + } + + #[test] + fn test_aggregate_from_record_failures() { + solana_logger::setup(); + let mut test_state = test_aggregate_init(); + let last_vote_bankhash = Hash::new_unique(); + let mut last_voted_fork_slots_record = LastVotedForkSlotsRecord { + wallclock: timestamp(), + last_voted_fork_slots: test_state.last_voted_fork_slots, + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + }; + // First test that this is a valid record. + assert_eq!( + test_state + .slots_aggregate + .aggregate_from_record( + &test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey() + .to_string(), + &last_voted_fork_slots_record, + ) + .unwrap(), + Some(last_voted_fork_slots_record.clone()), + ); + // Then test that it fails if the record is invalid. + + // Invalid pubkey. + assert!(test_state + .slots_aggregate + .aggregate_from_record("invalid_pubkey", &last_voted_fork_slots_record,) + .is_err()); + + // Invalid hash. + last_voted_fork_slots_record.last_vote_bankhash.clear(); + assert!(test_state + .slots_aggregate + .aggregate_from_record( + &test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey() + .to_string(), + &last_voted_fork_slots_record, + ) + .is_err()); + last_voted_fork_slots_record.last_vote_bankhash.pop(); + + // Empty last voted fork. 
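+ // RestartLastVotedForkSlots cannot encode an empty slot list, so
+ // aggregate_from_record surfaces the construction error here.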
+ last_voted_fork_slots_record.last_vote_bankhash = last_vote_bankhash.to_string(); + last_voted_fork_slots_record.last_voted_fork_slots.clear(); + assert!(test_state + .slots_aggregate + .aggregate_from_record( + &test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey() + .to_string(), + &last_voted_fork_slots_record, + ) + .is_err()); + } +} diff --git a/wen-restart/src/lib.rs b/wen-restart/src/lib.rs index e58a6d04bf831f..d389136bb13bcd 100644 --- a/wen-restart/src/lib.rs +++ b/wen-restart/src/lib.rs @@ -4,4 +4,5 @@ pub(crate) mod solana { } } +pub(crate) mod last_voted_fork_slots_aggregate; pub mod wen_restart; diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 75e4e21ce9431a..b14b7e4e840c61 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -1,59 +1,370 @@ //! The `wen-restart` module handles automatic repair during a cluster restart use { - crate::solana::wen_restart_proto::{ - MyLastVotedForkSlots, State as RestartState, WenRestartProgress, + crate::{ + last_voted_fork_slots_aggregate::LastVotedForkSlotsAggregate, + solana::wen_restart_proto::{ + self, LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsRecord, + State as RestartState, WenRestartProgress, + }, }, + anyhow::Result, log::*, prost::Message, - solana_gossip::{cluster_info::ClusterInfo, epoch_slots::MAX_SLOTS_PER_ENTRY}, + solana_gossip::{ + cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS}, + restart_crds_values::RestartLastVotedForkSlots, + }, solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore}, + solana_program::{clock::Slot, hash::Hash}, + solana_runtime::bank_forks::BankForks, + solana_sdk::timing::timestamp, solana_vote_program::vote_state::VoteTransaction, std::{ - fs::File, - io::{Error, Write}, + collections::{HashMap, HashSet}, + fs::{read, File}, + io::{Cursor, Write}, path::PathBuf, - sync::Arc, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + thread::sleep, + time::Duration, }, }; +// If >42% of the validators have this block, repair this block locally. +const REPAIR_THRESHOLD: f64 = 0.42; + +#[derive(Debug, PartialEq)] +pub enum WenRestartError { + Exiting, + InvalidLastVoteType(VoteTransaction), + MalformedLastVotedForkSlotsProtobuf(Option), + MissingLastVotedForkSlots, + UnexpectedState(wen_restart_proto::State), +} + +impl std::fmt::Display for WenRestartError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + WenRestartError::Exiting => write!(f, "Exiting"), + WenRestartError::InvalidLastVoteType(vote) => { + write!(f, "Invalid last vote type: {:?}", vote) + } + WenRestartError::MalformedLastVotedForkSlotsProtobuf(record) => { + write!(f, "Malformed last voted fork slots protobuf: {:?}", record) + } + WenRestartError::MissingLastVotedForkSlots => { + write!(f, "Missing last voted fork slots") + } + WenRestartError::UnexpectedState(state) => { + write!(f, "Unexpected state: {:?}", state) + } + } + } +} + +impl std::error::Error for WenRestartError {} + +// We need a WenRestartProgressInternalState so we can convert the protobuf written in file +// into internal data structure in the initialize function. It should be easily +// convertable to and from WenRestartProgress protobuf. 
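+//
+// The states advance linearly and each transition is persisted by
+// increment_and_write_wen_restart_records(). A minimal sketch of the
+// intended sequence (hypothetical `path` and `progress` values, for
+// illustration only):
+//
+//   let mut state = WenRestartProgressInternalState::Init {
+//       last_voted_fork_slots,
+//       last_vote_bankhash,
+//   };
+//   // push RestartLastVotedForkSlots to gossip, then:
+//   state = increment_and_write_wen_restart_records(&path, state, &mut progress)?;
+//   // now LastVotedForkSlots: aggregate peer messages until done, then:
+//   state = increment_and_write_wen_restart_records(&path, state, &mut progress)?;
+//   // now Done; a further call fails with UnexpectedState(RestartState::Done).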
+#[derive(Debug, PartialEq)] +pub(crate) enum WenRestartProgressInternalState { + Init { + last_voted_fork_slots: Vec, + last_vote_bankhash: Hash, + }, + LastVotedForkSlots { + last_voted_fork_slots: Vec, + }, + Done, +} + +pub(crate) fn send_restart_last_voted_fork_slots( + cluster_info: Arc, + last_voted_fork_slots: &[Slot], + last_vote_bankhash: Hash, +) -> Result { + cluster_info.push_restart_last_voted_fork_slots(last_voted_fork_slots, last_vote_bankhash)?; + Ok(LastVotedForkSlotsRecord { + last_voted_fork_slots: last_voted_fork_slots.to_vec(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: cluster_info.my_shred_version() as u32, + wallclock: timestamp(), + }) +} + +pub(crate) fn aggregate_restart_last_voted_fork_slots( + wen_restart_path: &PathBuf, + wait_for_supermajority_threshold_percent: u64, + cluster_info: Arc, + last_voted_fork_slots: &Vec, + bank_forks: Arc>, + wen_restart_repair_slots: Arc>>, + exit: Arc, + progress: &mut WenRestartProgress, +) -> Result<()> { + let root_bank; + { + root_bank = bank_forks.read().unwrap().root_bank().clone(); + } + let root_slot = root_bank.slot(); + let mut last_voted_fork_slots_aggregate = LastVotedForkSlotsAggregate::new( + root_slot, + REPAIR_THRESHOLD, + root_bank.epoch_stakes(root_bank.epoch()).unwrap(), + last_voted_fork_slots, + &cluster_info.id(), + ); + if let Some(aggregate_record) = &progress.last_voted_fork_slots_aggregate { + for (key_string, message) in &aggregate_record.received { + if let Err(e) = + last_voted_fork_slots_aggregate.aggregate_from_record(key_string, message) + { + error!("Failed to aggregate from record: {:?}", e); + } + } + } else { + progress.last_voted_fork_slots_aggregate = Some(LastVotedForkSlotsAggregateRecord { + received: HashMap::new(), + }); + } + let mut cursor = solana_gossip::crds::Cursor::default(); + let mut is_full_slots = HashSet::new(); + loop { + if exit.load(Ordering::Relaxed) { + return Err(WenRestartError::Exiting.into()); + } + let start = timestamp(); + for new_last_voted_fork_slots in cluster_info.get_restart_last_voted_fork_slots(&mut cursor) + { + let from = new_last_voted_fork_slots.from.to_string(); + if let Some(record) = + last_voted_fork_slots_aggregate.aggregate(new_last_voted_fork_slots) + { + progress + .last_voted_fork_slots_aggregate + .as_mut() + .unwrap() + .received + .insert(from, record); + } + } + let result = last_voted_fork_slots_aggregate.get_aggregate_result(); + let mut filtered_slots: Vec; + { + let my_bank_forks = bank_forks.read().unwrap(); + filtered_slots = result + .slots_to_repair + .into_iter() + .filter(|slot| { + if slot <= &root_slot || is_full_slots.contains(slot) { + return false; + } + let is_full = my_bank_forks + .get(*slot) + .map_or(false, |bank| bank.is_frozen()); + if is_full { + is_full_slots.insert(*slot); + } + !is_full + }) + .collect(); + } + filtered_slots.sort(); + info!( + "Active peers: {} Slots to repair: {:?}", + result.active_percent, &filtered_slots + ); + if filtered_slots.is_empty() + && result.active_percent > wait_for_supermajority_threshold_percent as f64 + { + *wen_restart_repair_slots.write().unwrap() = vec![]; + break; + } + { + *wen_restart_repair_slots.write().unwrap() = filtered_slots; + } + write_wen_restart_records(wen_restart_path, progress)?; + let elapsed = timestamp().saturating_sub(start); + let time_left = GOSSIP_SLEEP_MILLIS.saturating_sub(elapsed); + if time_left > 0 { + sleep(Duration::from_millis(time_left)); + } + } + Ok(()) +} + pub fn wait_for_wen_restart( wen_restart_path: 
&PathBuf, last_vote: VoteTransaction, blockstore: Arc, cluster_info: Arc, -) -> Result<(), Box> { - // repair and restart option does not work without last voted slot. - let last_vote_slot = last_vote - .last_voted_slot() - .expect("wen_restart doesn't work if local tower is wiped"); - let mut last_vote_fork: Vec = AncestorIterator::new_inclusive(last_vote_slot, &blockstore) - .take(MAX_SLOTS_PER_ENTRY) - .collect(); - info!( - "wen_restart last voted fork {} {:?}", - last_vote_slot, last_vote_fork - ); - last_vote_fork.reverse(); - // Todo(wen): add the following back in after Gossip code is checked in. - // cluster_info.push_last_voted_fork_slots(&last_voted_fork, last_vote.hash()); - // The rest of the protocol will be in another PR. - let current_progress = WenRestartProgress { - state: RestartState::Init.into(), - my_last_voted_fork_slots: Some(MyLastVotedForkSlots { - last_vote_slot, - last_vote_bankhash: last_vote.hash().to_string(), - shred_version: cluster_info.my_shred_version() as u32, - }), + bank_forks: Arc>, + wen_restart_repair_slots: Option>>>, + wait_for_supermajority_threshold_percent: u64, + exit: Arc, +) -> Result<()> { + let (mut state, mut progress) = + initialize(wen_restart_path, last_vote.clone(), blockstore.clone())?; + loop { + match &state { + WenRestartProgressInternalState::Init { + last_voted_fork_slots, + last_vote_bankhash, + } => { + progress.my_last_voted_fork_slots = Some(send_restart_last_voted_fork_slots( + cluster_info.clone(), + last_voted_fork_slots, + *last_vote_bankhash, + )?) + } + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots, + } => aggregate_restart_last_voted_fork_slots( + wen_restart_path, + wait_for_supermajority_threshold_percent, + cluster_info.clone(), + last_voted_fork_slots, + bank_forks.clone(), + wen_restart_repair_slots.clone().unwrap(), + exit.clone(), + &mut progress, + )?, + WenRestartProgressInternalState::Done => return Ok(()), + }; + state = increment_and_write_wen_restart_records(wen_restart_path, state, &mut progress)?; + } +} + +pub(crate) fn increment_and_write_wen_restart_records( + records_path: &PathBuf, + current_state: WenRestartProgressInternalState, + progress: &mut WenRestartProgress, +) -> Result { + let new_state = match current_state { + WenRestartProgressInternalState::Init { + last_voted_fork_slots, + last_vote_bankhash: _, + } => { + progress.set_state(RestartState::LastVotedForkSlots); + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots, + } + } + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots: _, + } => { + progress.set_state(RestartState::Done); + WenRestartProgressInternalState::Done + } + WenRestartProgressInternalState::Done => { + return Err(WenRestartError::UnexpectedState(RestartState::Done).into()) + } }; - write_wen_restart_records(wen_restart_path, current_progress)?; - Ok(()) + write_wen_restart_records(records_path, progress)?; + Ok(new_state) +} + +pub(crate) fn initialize( + records_path: &PathBuf, + last_vote: VoteTransaction, + blockstore: Arc, +) -> Result<(WenRestartProgressInternalState, WenRestartProgress)> { + let progress = match read_wen_restart_records(records_path) { + Ok(progress) => progress, + Err(e) => { + let stdio_err = e.downcast_ref::(); + if stdio_err.is_some_and(|e| e.kind() == std::io::ErrorKind::NotFound) { + info!( + "wen restart proto file not found at {:?}, write init state", + records_path + ); + let progress = WenRestartProgress { + state: RestartState::Init.into(), + 
my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }; + write_wen_restart_records(records_path, &progress)?; + progress + } else { + return Err(e); + } + } + }; + match progress.state() { + RestartState::Done => Ok((WenRestartProgressInternalState::Done, progress)), + RestartState::Init => { + let last_voted_fork_slots; + let last_vote_bankhash; + match &progress.my_last_voted_fork_slots { + Some(my_last_voted_fork_slots) => { + last_voted_fork_slots = my_last_voted_fork_slots.last_voted_fork_slots.clone(); + last_vote_bankhash = + Hash::from_str(&my_last_voted_fork_slots.last_vote_bankhash).unwrap(); + } + None => { + // repair and restart option does not work without last voted slot. + if let VoteTransaction::Vote(ref vote) = last_vote { + if let Some(last_vote_slot) = vote.last_voted_slot() { + last_vote_bankhash = vote.hash; + last_voted_fork_slots = + AncestorIterator::new_inclusive(last_vote_slot, &blockstore) + .take(RestartLastVotedForkSlots::MAX_SLOTS) + .collect(); + } else { + error!(" + Cannot find last voted slot in the tower storage, it either means that this node has never \ + voted or the tower storage is corrupted. Unfortunately, since WenRestart is a consensus protocol \ + depending on each participant to send their last voted fork slots, your validator cannot participate.\ + Please check discord for the conclusion of the WenRestart protocol, then generate a snapshot and use \ + --wait-for-supermajority to restart the validator."); + return Err(WenRestartError::MissingLastVotedForkSlots.into()); + } + } else { + return Err(WenRestartError::InvalidLastVoteType(last_vote).into()); + } + } + } + Ok(( + WenRestartProgressInternalState::Init { + last_voted_fork_slots, + last_vote_bankhash, + }, + progress, + )) + } + RestartState::LastVotedForkSlots => { + if let Some(record) = progress.my_last_voted_fork_slots.as_ref() { + Ok(( + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots: record.last_voted_fork_slots.clone(), + }, + progress, + )) + } else { + Err(WenRestartError::MalformedLastVotedForkSlotsProtobuf(None).into()) + } + } + _ => Err(WenRestartError::UnexpectedState(progress.state()).into()), + } +} + +fn read_wen_restart_records(records_path: &PathBuf) -> Result { + let buffer = read(records_path)?; + let progress = WenRestartProgress::decode(&mut Cursor::new(buffer))?; + info!("read record {:?}", progress); + Ok(progress) } -fn write_wen_restart_records( +pub(crate) fn write_wen_restart_records( records_path: &PathBuf, - new_progress: WenRestartProgress, -) -> Result<(), Error> { + new_progress: &WenRestartProgress, +) -> Result<()> { // overwrite anything if exists let mut file = File::create(records_path)?; info!("writing new record {:?}", new_progress); @@ -62,61 +373,133 @@ fn write_wen_restart_records( file.write_all(&buf)?; Ok(()) } + #[cfg(test)] mod tests { use { crate::wen_restart::*, + assert_matches::assert_matches, solana_entry::entry, - solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_gossip::{ + cluster_info::ClusterInfo, + contact_info::ContactInfo, + crds::GossipRoute, + crds_value::{CrdsData, CrdsValue}, + legacy_contact_info::LegacyContactInfo, + restart_crds_values::RestartLastVotedForkSlots, + }, solana_ledger::{blockstore, get_tmp_ledger_path_auto_delete}, - solana_program::{hash::Hash, vote::state::Vote}, + solana_program::{ + hash::Hash, + vote::state::{Vote, VoteStateUpdate}, + }, + solana_runtime::{ + bank::Bank, + genesis_utils::{ + 
create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, + }, + }, solana_sdk::{ + pubkey::Pubkey, signature::{Keypair, Signer}, timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, - std::{fs::read, sync::Arc}, + std::{fs::remove_file, sync::Arc, thread::Builder}, + tempfile::TempDir, }; - #[test] - fn test_wen_restart_normal_flow() { - solana_logger::setup(); - let node_keypair = Arc::new(Keypair::new()); + const SHRED_VERSION: u16 = 2; + const EXPECTED_SLOTS: usize = 400; + + fn push_restart_last_voted_fork_slots( + cluster_info: Arc, + node: &LegacyContactInfo, + expected_slots_to_repair: &[Slot], + last_vote_hash: &Hash, + node_keypair: &Keypair, + wallclock: u64, + ) { + let slots = RestartLastVotedForkSlots::new( + *node.pubkey(), + wallclock, + expected_slots_to_repair, + *last_vote_hash, + SHRED_VERSION, + ) + .unwrap(); + let entries = vec![ + CrdsValue::new_signed(CrdsData::LegacyContactInfo(node.clone()), node_keypair), + CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(slots), node_keypair), + ]; + { + let mut gossip_crds = cluster_info.gossip.crds.write().unwrap(); + for entry in entries { + assert!(gossip_crds + .insert(entry, /*now=*/ 0, GossipRoute::LocalMessage) + .is_ok()); + } + } + } + + struct WenRestartTestInitResult { + pub validator_voting_keypairs: Vec, + pub blockstore: Arc, + pub cluster_info: Arc, + pub bank_forks: Arc>, + pub last_voted_fork_slots: Vec, + pub wen_restart_proto_path: PathBuf, + } + + fn wen_restart_test_init(ledger_path: &TempDir) -> WenRestartTestInitResult { + let validator_voting_keypairs: Vec<_> = + (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); + let node_keypair = Arc::new(validator_voting_keypairs[0].node_keypair.insecure_clone()); let cluster_info = Arc::new(ClusterInfo::new( { let mut contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()); - contact_info.set_shred_version(2); + contact_info.set_shred_version(SHRED_VERSION); contact_info }, - node_keypair, + node_keypair.clone(), SocketAddrSpace::Unspecified, )); - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let mut wen_restart_proto_path = ledger_path.path().to_path_buf(); - wen_restart_proto_path.push("wen_restart_status.proto"); let blockstore = Arc::new(blockstore::Blockstore::open(ledger_path.path()).unwrap()); - let expected_slots = 400; - let last_vote_slot = (MAX_SLOTS_PER_ENTRY + expected_slots).try_into().unwrap(); - let last_parent = (MAX_SLOTS_PER_ENTRY >> 1).try_into().unwrap(); - for i in 0..expected_slots { + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( + 10_000, + &validator_voting_keypairs, + vec![100; validator_voting_keypairs.len()], + ); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let last_parent = (RestartLastVotedForkSlots::MAX_SLOTS >> 1) + .try_into() + .unwrap(); + let mut last_voted_fork_slots = Vec::new(); + for i in 0..EXPECTED_SLOTS { let entries = entry::create_ticks(1, 0, Hash::default()); let parent_slot = if i > 0 { - (MAX_SLOTS_PER_ENTRY + i).try_into().unwrap() + (RestartLastVotedForkSlots::MAX_SLOTS.saturating_add(i)) + .try_into() + .unwrap() } else { last_parent }; + let slot = (RestartLastVotedForkSlots::MAX_SLOTS + .saturating_add(i) + .saturating_add(1)) as Slot; let shreds = blockstore::entries_to_test_shreds( &entries, - (MAX_SLOTS_PER_ENTRY + i + 1).try_into().unwrap(), + slot, parent_slot, false, 0, true, // merkle_variant ); blockstore.insert_shreds(shreds, None, false).unwrap(); + last_voted_fork_slots.push(slot); } - // link directly to slot 1 whose distance to last_vote > MAX_SLOTS_PER_ENTRY so it will not be included. + // link directly to slot 1 whose distance to last_vote > RestartLastVotedForkSlots::MAX_SLOTS so it will not be included. let entries = entry::create_ticks(1, 0, Hash::default()); let shreds = blockstore::entries_to_test_shreds( &entries, @@ -126,27 +509,567 @@ mod tests { 0, true, // merkle_variant ); + last_voted_fork_slots.extend([last_parent, 1]); blockstore.insert_shreds(shreds, None, false).unwrap(); - let last_vote_bankhash = Hash::new_unique(); - assert!(wait_for_wen_restart( - &wen_restart_proto_path, - VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), + last_voted_fork_slots.sort(); + last_voted_fork_slots.reverse(); + let mut wen_restart_proto_path = ledger_path.path().to_path_buf(); + wen_restart_proto_path.push("wen_restart_status.proto"); + let _ = remove_file(&wen_restart_proto_path); + WenRestartTestInitResult { + validator_voting_keypairs, blockstore, - cluster_info - ) - .is_ok()); - let buffer = read(wen_restart_proto_path).unwrap(); - let progress = WenRestartProgress::decode(&mut std::io::Cursor::new(buffer)).unwrap(); + cluster_info, + bank_forks, + last_voted_fork_slots, + wen_restart_proto_path, + } + } + + const WAIT_FOR_THREAD_TIMEOUT: u64 = 10_000; + + fn wait_on_expected_progress_with_timeout( + wen_restart_proto_path: PathBuf, + expected_progress: WenRestartProgress, + ) { + let start = timestamp(); + let mut progress = WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }; + loop { + if let Ok(new_progress) = read_wen_restart_records(&wen_restart_proto_path) { + progress = new_progress; + if let Some(my_last_voted_fork_slots) = &expected_progress.my_last_voted_fork_slots + { + if let Some(record) = progress.my_last_voted_fork_slots.as_mut() { + record.wallclock = my_last_voted_fork_slots.wallclock; + } + } + if progress == expected_progress { + return; + } + } + if timestamp().saturating_sub(start) > WAIT_FOR_THREAD_TIMEOUT { + panic!( + "wait_on_expected_progress_with_timeout failed to get expected progress {:?} expected {:?}", + &progress, + expected_progress + ); + } + sleep(Duration::from_millis(10)); + } + } + + fn wen_restart_test_succeed_after_failure( + test_state: WenRestartTestInitResult, + last_vote_bankhash: Hash, + expected_progress: WenRestartProgress, + ) { + let wen_restart_proto_path_clone = test_state.wen_restart_proto_path.clone(); + // continue normally 
after the error, we should be good. + let exit = Arc::new(AtomicBool::new(false)); + let exit_clone = exit.clone(); + let last_vote_slot: Slot = test_state.last_voted_fork_slots[0]; + let wen_restart_thread_handle = Builder::new() + .name("solana-wen-restart".to_string()) + .spawn(move || { + let _ = wait_for_wen_restart( + &wen_restart_proto_path_clone, + VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), + test_state.blockstore, + test_state.cluster_info, + test_state.bank_forks, + Some(Arc::new(RwLock::new(Vec::new()))), + 80, + exit_clone, + ); + }) + .unwrap(); + wait_on_expected_progress_with_timeout( + test_state.wen_restart_proto_path.clone(), + expected_progress, + ); + exit.store(true, Ordering::Relaxed); + let _ = wen_restart_thread_handle.join(); + let _ = remove_file(&test_state.wen_restart_proto_path); + } + + fn insert_and_freeze_slots( + bank_forks: Arc>, + expected_slots_to_repair: Vec, + ) { + let mut parent_bank = bank_forks.read().unwrap().root_bank(); + for slot in expected_slots_to_repair { + let mut bank_forks_rw = bank_forks.write().unwrap(); + bank_forks_rw.insert(Bank::new_from_parent( + parent_bank.clone(), + &Pubkey::default(), + slot, + )); + parent_bank = bank_forks_rw.get(slot).unwrap(); + parent_bank.freeze(); + } + } + + #[test] + fn test_wen_restart_normal_flow() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let wen_restart_repair_slots = Some(Arc::new(RwLock::new(Vec::new()))); + let test_state = wen_restart_test_init(&ledger_path); + let wen_restart_proto_path_clone = test_state.wen_restart_proto_path.clone(); + let cluster_info_clone = test_state.cluster_info.clone(); + let last_vote_slot = test_state.last_voted_fork_slots[0]; + let last_vote_bankhash = Hash::new_unique(); + let expected_slots_to_repair: Vec = + (last_vote_slot + 1..last_vote_slot + 3).collect(); + let blockstore_clone = test_state.blockstore.clone(); + let bank_forks_clone = test_state.bank_forks.clone(); + let wen_restart_thread_handle = Builder::new() + .name("solana-wen-restart".to_string()) + .spawn(move || { + assert!(wait_for_wen_restart( + &wen_restart_proto_path_clone, + VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), + blockstore_clone, + cluster_info_clone, + bank_forks_clone, + wen_restart_repair_slots.clone(), + 80, + Arc::new(AtomicBool::new(false)), + ) + .is_ok()); + }) + .unwrap(); + let mut rng = rand::thread_rng(); + let mut expected_messages = HashMap::new(); + // Skip the first 2 validators, because 0 is myself, we only need 8 more to reach > 80%. + for keypairs in test_state.validator_voting_keypairs.iter().skip(2) { + let node_pubkey = keypairs.node_keypair.pubkey(); + let node = LegacyContactInfo::new_rand(&mut rng, Some(node_pubkey)); + let last_vote_hash = Hash::new_unique(); + let now = timestamp(); + push_restart_last_voted_fork_slots( + test_state.cluster_info.clone(), + &node, + &expected_slots_to_repair, + &last_vote_hash, + &keypairs.node_keypair, + now, + ); + expected_messages.insert( + node_pubkey.to_string(), + LastVotedForkSlotsRecord { + last_voted_fork_slots: expected_slots_to_repair.clone(), + last_vote_bankhash: last_vote_hash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }, + ); + } + + // Simulating successful repair of missing blocks. 
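For the aggregation loop to exit, two conditions in aggregate_restart_last_voted_fork_slots must hold at once: filtered_slots is empty, meaning every slot to repair is already frozen locally, and active_percent exceeds the threshold passed in (80 here). Freezing a bank is what removes its slot from the repair set; roughly, per the insert_and_freeze_slots helper above and assuming its Bank/BankForks API:

    // Create the missing child bank, insert it, and freeze it so that
    // bank.is_frozen() becomes true and the slot is filtered out of repairs.
    let child = Bank::new_from_parent(parent.clone(), &Pubkey::default(), slot);
    bank_forks.write().unwrap().insert(child);
    bank_forks.read().unwrap().get(slot).unwrap().freeze();

The helper call below does exactly this for every expected repair slot.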
+ insert_and_freeze_slots(test_state.bank_forks.clone(), expected_slots_to_repair); + + let _ = wen_restart_thread_handle.join(); + let progress = read_wen_restart_records(&test_state.wen_restart_proto_path).unwrap(); + let progress_start_time = progress + .my_last_voted_fork_slots + .as_ref() + .unwrap() + .wallclock; assert_eq!( progress, WenRestartProgress { - state: RestartState::Init.into(), - my_last_voted_fork_slots: Some(MyLastVotedForkSlots { - last_vote_slot, + state: RestartState::Done.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots, + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: progress_start_time, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: expected_messages + }), + } + ) + } + + fn change_proto_file_readonly(wen_restart_proto_path: &PathBuf, readonly: bool) { + let mut perms = std::fs::metadata(wen_restart_proto_path) + .unwrap() + .permissions(); + perms.set_readonly(readonly); + std::fs::set_permissions(wen_restart_proto_path, perms).unwrap(); + } + + #[test] + fn test_wen_restart_initialize_failures() { + solana_logger::setup(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let last_vote_bankhash = Hash::new_unique(); + let mut last_voted_fork_slots = test_state.last_voted_fork_slots.clone(); + last_voted_fork_slots.reverse(); + let mut file = File::create(&test_state.wen_restart_proto_path).unwrap(); + file.write_all(b"garbage").unwrap(); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)), + test_state.blockstore.clone() + ) + .unwrap_err() + .downcast::() + .unwrap(), + prost::DecodeError::new("invalid wire type value: 7") + ); + remove_file(&test_state.wen_restart_proto_path).unwrap(); + let invalid_last_vote = VoteTransaction::from(VoteStateUpdate::from(vec![(0, 8), (1, 1)])); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + invalid_last_vote.clone(), + test_state.blockstore.clone() + ) + .unwrap_err() + .downcast::() + .unwrap(), + WenRestartError::InvalidLastVoteType(invalid_last_vote) + ); + let empty_last_vote = VoteTransaction::from(Vote::new(vec![], last_vote_bankhash)); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + empty_last_vote.clone(), + test_state.blockstore.clone() + ) + .unwrap_err() + .downcast::() + .unwrap(), + WenRestartError::MissingLastVotedForkSlots, + ); + // Test the case where the file is not found. 
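initialize treats a missing progress file as a fresh start: it downcasts the read error to std::io::Error, checks for ErrorKind::NotFound, writes a brand-new record in the Init state, and continues; any other read or decode error is propagated. The shape of that fallback, condensed from the initialize function earlier in this diff, not a new API:

    let progress = match read_wen_restart_records(records_path) {
        Ok(progress) => progress,
        Err(e) if e.downcast_ref::<std::io::Error>()
            .is_some_and(|io| io.kind() == std::io::ErrorKind::NotFound) =>
        {
            let progress = WenRestartProgress {
                state: RestartState::Init.into(),
                my_last_voted_fork_slots: None,
                last_voted_fork_slots_aggregate: None,
            };
            write_wen_restart_records(records_path, &progress)?;
            progress
        }
        Err(e) => return Err(e),
    };

The test below removes the file and asserts that this path yields the Init internal state.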
+ let _ = remove_file(&test_state.wen_restart_proto_path); + assert_matches!( + initialize(&test_state.wen_restart_proto_path, VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)), test_state.blockstore.clone()), + Ok((WenRestartProgressInternalState::Init { last_voted_fork_slots, last_vote_bankhash: bankhash }, progress)) => { + assert_eq!(last_voted_fork_slots, test_state.last_voted_fork_slots); + assert_eq!(bankhash, last_vote_bankhash); + assert_eq!(progress, WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }); + } + ); + let _ = write_wen_restart_records( + &test_state.wen_restart_proto_path, + &WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }, + ); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)), + test_state.blockstore.clone() + ) + .err() + .unwrap() + .to_string(), + "Malformed last voted fork slots protobuf: None" + ); + let _ = write_wen_restart_records( + &test_state.wen_restart_proto_path, + &WenRestartProgress { + state: RestartState::WaitingForSupermajority.into(), + my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }, + ); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + VoteTransaction::from(Vote::new(last_voted_fork_slots, last_vote_bankhash)), + test_state.blockstore.clone() + ) + .err() + .unwrap() + .to_string(), + "Unexpected state: WaitingForSupermajority" + ); + } + + #[test] + fn test_wen_restart_send_last_voted_fork_failures() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let progress = wen_restart_proto::WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }; + let original_progress = progress.clone(); + assert_eq!( + send_restart_last_voted_fork_slots( + test_state.cluster_info.clone(), + &[], + Hash::new_unique(), + ) + .err() + .unwrap() + .to_string(), + "Last voted fork cannot be empty" + ); + assert_eq!(progress, original_progress); + let last_vote_bankhash = Hash::new_unique(); + let last_voted_fork_slots = test_state.last_voted_fork_slots.clone(); + wen_restart_test_succeed_after_failure( + test_state, + last_vote_bankhash, + WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots, + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: HashMap::new(), + }), + }, + ); + } + + #[test] + fn test_write_wen_restart_records_failure() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let progress = wen_restart_proto::WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: None, + last_voted_fork_slots_aggregate: None, + }; + assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress).is_ok()); + change_proto_file_readonly(&test_state.wen_restart_proto_path, true); + assert_eq!( + write_wen_restart_records(&test_state.wen_restart_proto_path, &progress) + .unwrap_err() + .downcast::() + .unwrap() + 
.kind(), + std::io::ErrorKind::PermissionDenied, + ); + change_proto_file_readonly(&test_state.wen_restart_proto_path, false); + assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress).is_ok()); + let last_voted_fork_slots = test_state.last_voted_fork_slots.clone(); + let last_vote_bankhash = Hash::new_unique(); + wen_restart_test_succeed_after_failure( + test_state, + last_vote_bankhash, + WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots, last_vote_bankhash: last_vote_bankhash.to_string(), - shred_version: 2, + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: HashMap::new(), + }), + }, + ); + } + + #[test] + fn test_wen_restart_aggregate_last_voted_fork_failures() { + solana_logger::setup(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let last_vote_slot: Slot = test_state.last_voted_fork_slots[0]; + let last_vote_bankhash = Hash::new_unique(); + let start_time = timestamp(); + assert!(write_wen_restart_records( + &test_state.wen_restart_proto_path, + &WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: start_time, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: HashMap::new() }), } ) + .is_ok()); + let mut rng = rand::thread_rng(); + let mut expected_messages = HashMap::new(); + let expected_slots_to_repair: Vec = + (last_vote_slot + 1..last_vote_slot + 3).collect(); + // Skip the first 2 validators, because 0 is myself, we need 8 so it hits 80%. 
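The arithmetic behind that comment: genesis gives all ten validators an equal stake of 100, and the node's own last-voted-fork slots count as active from the start. Skipping validators 0 (self) and 1 and pushing records from the remaining eight therefore makes 9 of 10 validators active, the first point at which active_percent clears the 80% threshold:

    // 1 (self) + 8 peers, each holding 100 of the 1_000 total stake:
    let active_percent = (9 * 100) as f64 / 1_000.0 * 100.0;
    assert!(active_percent > 80.0); // 90.0; with only 7 peers it would be exactly 80.0

The assertion below pins the validator count at ten so this margin stays meaningful.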
+ assert_eq!(test_state.validator_voting_keypairs.len(), 10); + let progress = WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: start_time, + }), + last_voted_fork_slots_aggregate: None, + }; + for keypairs in test_state.validator_voting_keypairs.iter().skip(2) { + let wen_restart_proto_path_clone = test_state.wen_restart_proto_path.clone(); + let cluster_info_clone = test_state.cluster_info.clone(); + let bank_forks_clone = test_state.bank_forks.clone(); + let exit = Arc::new(AtomicBool::new(false)); + let exit_clone = exit.clone(); + let mut progress_clone = progress.clone(); + let last_voted_fork_slots = test_state.last_voted_fork_slots.clone(); + let wen_restart_thread_handle = Builder::new() + .name("solana-wen-restart".to_string()) + .spawn(move || { + let _ = aggregate_restart_last_voted_fork_slots( + &wen_restart_proto_path_clone, + 80, + cluster_info_clone, + &last_voted_fork_slots, + bank_forks_clone, + Arc::new(RwLock::new(Vec::new())), + exit_clone, + &mut progress_clone, + ); + }) + .unwrap(); + let node_pubkey = keypairs.node_keypair.pubkey(); + let node = LegacyContactInfo::new_rand(&mut rng, Some(node_pubkey)); + let last_vote_hash = Hash::new_unique(); + let now = timestamp(); + push_restart_last_voted_fork_slots( + test_state.cluster_info.clone(), + &node, + &expected_slots_to_repair, + &last_vote_hash, + &keypairs.node_keypair, + now, + ); + expected_messages.insert( + node_pubkey.to_string(), + LastVotedForkSlotsRecord { + last_voted_fork_slots: expected_slots_to_repair.clone(), + last_vote_bankhash: last_vote_hash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }, + ); + // Wait for the newly pushed message to be in written proto file. + wait_on_expected_progress_with_timeout( + test_state.wen_restart_proto_path.clone(), + WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: start_time, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: expected_messages.clone(), + }), + }, + ); + exit.store(true, Ordering::Relaxed); + let _ = wen_restart_thread_handle.join(); + } + + // Simulating successful repair of missing blocks. 
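Note what the loop above demonstrated before this point: the aggregation thread was started and stopped once per pushed peer message, and after every stop the proto file already contained that peer's record. A restart is therefore lossless, because the persisted records are replayed into the aggregate before any new gossip is read, condensed from aggregate_restart_last_voted_fork_slots earlier in this diff (the local variable name is shortened here):

    // Replay previously persisted per-validator records on startup.
    if let Some(aggregate_record) = &progress.last_voted_fork_slots_aggregate {
        for (key_string, message) in &aggregate_record.received {
            if let Err(e) = aggregate.aggregate_from_record(key_string, message) {
                error!("Failed to aggregate from record: {:?}", e);
            }
        }
    }

With every record accounted for, the only thing keeping the loop alive is the unrepaired slots, which the simulation below clears by freezing them.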
+ insert_and_freeze_slots(test_state.bank_forks.clone(), expected_slots_to_repair); + + let last_voted_fork_slots = test_state.last_voted_fork_slots.clone(); + wen_restart_test_succeed_after_failure( + test_state, + last_vote_bankhash, + WenRestartProgress { + state: RestartState::Done.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots, + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: start_time, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: expected_messages, + }), + }, + ); + } + + #[test] + fn test_increment_and_write_wen_restart_records() { + solana_logger::setup(); + let my_dir = TempDir::new().unwrap(); + let mut wen_restart_proto_path = my_dir.path().to_path_buf(); + wen_restart_proto_path.push("wen_restart_status.proto"); + let last_vote_bankhash = Hash::new_unique(); + let mut state = WenRestartProgressInternalState::Init { + last_voted_fork_slots: vec![0, 1], + last_vote_bankhash, + }; + let my_last_voted_fork_slots = Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: vec![0, 1], + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: 0, + wallclock: 0, + }); + let mut progress = WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + last_voted_fork_slots_aggregate: None, + }; + for (expected_state, expected_progress) in [ + ( + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots: vec![0, 1], + }, + WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + last_voted_fork_slots_aggregate: None, + }, + ), + ( + WenRestartProgressInternalState::Done, + WenRestartProgress { + state: RestartState::Done.into(), + my_last_voted_fork_slots, + last_voted_fork_slots_aggregate: None, + }, + ), + ] { + state = increment_and_write_wen_restart_records( + &wen_restart_proto_path, + state, + &mut progress, + ) + .unwrap(); + assert_eq!(&state, &expected_state); + assert_eq!(&progress, &expected_progress); + } + assert_eq!( + increment_and_write_wen_restart_records(&wen_restart_proto_path, state, &mut progress) + .unwrap_err() + .downcast::() + .unwrap(), + WenRestartError::UnexpectedState(RestartState::Done), + ); } } From 9f60168a56fdaf4d5a31599ace18193f60b5ae7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 13:27:27 +0800 Subject: [PATCH 310/401] build(deps): bump rayon from 1.8.1 to 1.9.0 (#35348) * build(deps): bump rayon from 1.8.1 to 1.9.0 Bumps [rayon](https://github.com/rayon-rs/rayon) from 1.8.1 to 1.9.0. - [Changelog](https://github.com/rayon-rs/rayon/blob/main/RELEASES.md) - [Commits](https://github.com/rayon-rs/rayon/compare/rayon-core-v1.8.1...rayon-core-v1.9.0) --- updated-dependencies: - dependency-name: rayon dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72e7cfd226e9ca..0680de2c01a8fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4276,9 +4276,9 @@ checksum = "6c9cf9270cc5903afdef387f06ef1cd89fb77f45c357c2a425bae78b839fd866" [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", diff --git a/Cargo.toml b/Cargo.toml index 8cc38b69144d3d..31500fd62c8216 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" raptorq = "1.8.0" -rayon = "1.8.1" +rayon = "1.9.0" reed-solomon-erasure = "6.0.0" regex = "1.10.3" reqwest = { version = "0.11.23", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 0929453a9e27bf..3213461576fe65 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3801,9 +3801,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", From f31ec1f2d64615e2c8dc7944466fc4d85e416d16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 13:27:37 +0800 Subject: [PATCH 311/401] build(deps): bump indexmap from 2.2.3 to 2.2.5 (#35389) * build(deps): bump indexmap from 2.2.3 to 2.2.5 Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.2.3 to 2.2.5. - [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.2.3...2.2.5) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0680de2c01a8fb..650b369d205c5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2292,7 +2292,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util 0.7.1", @@ -2668,9 +2668,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4842,7 +4842,7 @@ version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -5186,7 +5186,7 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools", "lazy_static", "libsecp256k1", @@ -5666,7 +5666,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "log", "quinn", @@ -5747,7 +5747,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "log", "rand 0.8.5", @@ -6085,7 +6085,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools", "log", "lru", @@ -7178,7 +7178,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools", "libc", "log", @@ -7288,7 +7288,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "pickledb", "serde", @@ -7317,7 +7317,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "log", "rayon", @@ -8534,7 +8534,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow", ] @@ -8545,7 +8545,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index 31500fd62c8216..66436c9cfb3fd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -222,7 +222,7 @@ hyper = "0.14.28" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.11" -indexmap = "2.2.3" +indexmap = "2.2.5" indicatif = "0.17.8" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3213461576fe65..2829cf27b6da6f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1930,7 +1930,7 @@ dependencies = [ "futures-sink", "futures-util", 
"http", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util 0.7.1", @@ -2281,9 +2281,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -4291,7 +4291,7 @@ version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -4562,7 +4562,7 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools", "lazy_static", "log", @@ -4789,7 +4789,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "log", "quinn", @@ -4839,7 +4839,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.3", + "indexmap 2.2.5", "log", "rand 0.8.5", "rayon", @@ -5089,7 +5089,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools", "log", "lru", @@ -6253,7 +6253,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.2.3", + "indexmap 2.2.5", "itertools", "libc", "log", @@ -6354,7 +6354,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.3", + "indexmap 2.2.5", "indicatif", "log", "rayon", @@ -7412,7 +7412,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow", ] From bd932850257865b4b2c2d17d0cb865e1c4d93c24 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Sat, 2 Mar 2024 06:12:24 -0300 Subject: [PATCH 312/401] Proposal: Use SanitizedMessage when possible in `svm/account_loader.rs` (#35390) --- runtime/src/bank.rs | 4 +- svm/src/account_loader.rs | 79 ++++++++++---------------------- svm/src/transaction_processor.rs | 2 +- 3 files changed, 27 insertions(+), 58 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 200d43dc30b6c9..3ea316f857a2bc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7538,13 +7538,13 @@ impl TransactionProcessingCallback for Bank { fn check_account_access( &self, - tx: &SanitizedTransaction, + message: &SanitizedMessage, account_index: usize, account: &AccountSharedData, error_counters: &mut TransactionErrorMetrics, ) -> Result<()> { if self.get_reward_interval() == RewardInterval::InsideInterval - && tx.message().is_writable(account_index) + && message.is_writable(account_index) && solana_stake_program::check_id(account.owner()) { error_counters.program_execution_temporarily_restricted += 1; diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 126be625e9cf15..1c02ded24665ff 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -66,15 +66,14 @@ pub fn load_accounts( .zip(lock_results) .map(|etx| match etx { (tx, (Ok(()), nonce, lamports_per_signature)) => { + let message = tx.message(); let fee = if let Some(lamports_per_signature) = lamports_per_signature { fee_structure.calculate_fee( - tx.message(), + message, 
*lamports_per_signature, - &process_compute_budget_instructions( - tx.message().program_instructions_iter(), - ) - .unwrap_or_default() - .into(), + &process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default() + .into(), feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), feature_set.is_active(&remove_rounding_in_fee_calculation::id()), @@ -86,7 +85,7 @@ pub fn load_accounts( // load transactions let loaded_transaction = match load_transaction_accounts( callbacks, - tx, + message, fee, error_counters, account_overrides, @@ -101,7 +100,7 @@ pub fn load_accounts( let nonce = if let Some(nonce) = nonce { match NonceFull::from_partial( nonce, - tx.message(), + message, &loaded_transaction.accounts, &loaded_transaction.rent_debits, ) { @@ -123,25 +122,19 @@ pub fn load_accounts( fn load_transaction_accounts( callbacks: &CB, - tx: &SanitizedTransaction, + message: &SanitizedMessage, fee: u64, error_counters: &mut TransactionErrorMetrics, account_overrides: Option<&AccountOverrides>, program_accounts: &HashMap, loaded_programs: &LoadedProgramsForTxBatch, ) -> Result { - // NOTE: this check will never fail because `tx` is sanitized - if tx.signatures().is_empty() && fee != 0 { - return Err(TransactionError::MissingSignatureForFee); - } - let feature_set = callbacks.get_feature_set(); // There is no way to predict what program will execute without an error // If a fee can pay for execution then the program will be scheduled let mut validated_fee_payer = false; let mut tx_rent: TransactionRent = 0; - let message = tx.message(); let account_keys = message.account_keys(); let mut accounts_found = Vec::with_capacity(account_keys.len()); let mut account_deps = Vec::with_capacity(account_keys.len()); @@ -149,7 +142,7 @@ fn load_transaction_accounts( let rent_collector = callbacks.get_rent_collector(); let requested_loaded_accounts_data_size_limit = - get_requested_loaded_accounts_data_size_limit(tx)?; + get_requested_loaded_accounts_data_size_limit(message)?; let mut accumulated_accounts_data_size: usize = 0; let instruction_accounts = message @@ -231,7 +224,7 @@ fn load_transaction_accounts( if !validated_fee_payer && message.is_non_loader_key(i) { if i != 0 { - warn!("Payer index should be 0! {:?}", tx); + warn!("Payer index should be 0! {:?}", message); } validate_fee_payer( @@ -246,7 +239,7 @@ fn load_transaction_accounts( validated_fee_payer = true; } - callbacks.check_account_access(tx, i, &account, error_counters)?; + callbacks.check_account_access(message, i, &account, error_counters)?; tx_rent += rent; rent_debits.insert(key, rent, account.lamports()); @@ -351,10 +344,10 @@ fn load_transaction_accounts( /// user requested loaded accounts size. 
/// Note, requesting zero bytes will result transaction error fn get_requested_loaded_accounts_data_size_limit( - tx: &SanitizedTransaction, + sanitized_message: &SanitizedMessage, ) -> Result> { let compute_budget_limits = - process_compute_budget_instructions(tx.message().program_instructions_iter()) + process_compute_budget_instructions(sanitized_message.program_instructions_iter()) .unwrap_or_default(); // sanitize against setting size limit to zero NonZeroUsize::new( @@ -1149,7 +1142,7 @@ mod tests { )); assert_eq!( *expected_result, - get_requested_loaded_accounts_data_size_limit(&tx) + get_requested_loaded_accounts_data_size_limit(tx.message()) ); } @@ -1429,30 +1422,6 @@ mod tests { assert_eq!(result.unwrap(), expected); } - #[test] - fn test_load_transaction_accounts_failure() { - let message = Message::default(); - let legacy = LegacyMessage::new(message); - let sanitized_message = SanitizedMessage::Legacy(legacy); - let mock_bank = TestCallbacks::default(); - let mut error_counter = TransactionErrorMetrics::default(); - let loaded_programs = LoadedProgramsForTxBatch::default(); - - let sanitized_transaction = - SanitizedTransaction::new_for_tests(sanitized_message, vec![], false); - let result = load_transaction_accounts( - &mock_bank, - &sanitized_transaction, - 32, - &mut error_counter, - None, - &HashMap::new(), - &loaded_programs, - ); - - assert_eq!(result.err(), Some(TransactionError::MissingSignatureForFee)); - } - #[test] fn test_load_transaction_accounts_fail_to_validate_fee_payer() { let message = Message { @@ -1479,7 +1448,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1524,7 +1493,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1591,7 +1560,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1635,7 +1604,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1679,7 +1648,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1730,7 +1699,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1799,7 +1768,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1857,7 +1826,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -1920,7 +1889,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, @@ -2009,7 +1978,7 @@ mod tests { ); let result = load_transaction_accounts( &mock_bank, - &sanitized_transaction, + sanitized_transaction.message(), 32, &mut error_counter, None, diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index d319e2103838b8..38c5c23affd4de 100644 --- a/svm/src/transaction_processor.rs +++ 
b/svm/src/transaction_processor.rs @@ -78,7 +78,7 @@ pub trait TransactionProcessingCallback { fn check_account_access( &self, - _tx: &SanitizedTransaction, + _message: &SanitizedMessage, _account_index: usize, _account: &AccountSharedData, _error_counters: &mut TransactionErrorMetrics, From ccc6a6bf6fab3dccb12677153e41f28b486d0ea3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Sat, 2 Mar 2024 15:55:53 +0100 Subject: [PATCH 313/401] Fix - `test_feature_activation_loaded_programs_recompilation_phase()` (#35299) * Fixes test_feature_activation_loaded_programs_recompilation_phase() to trigger the recompilation phase before the epoch boundary. * Adds a direct check of the cached entries around recompilation. --- program-runtime/src/loaded_programs.rs | 8 +++ runtime/src/bank/tests.rs | 88 ++++++++++++++++++-------- 2 files changed, 69 insertions(+), 27 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 8e3e670469c45c..926d1179837380 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -1079,6 +1079,14 @@ impl LoadedPrograms { } } + /// Returns the `slot_versions` of the second level for the given program id. + pub fn get_slot_versions_for_tests(&self, key: &Pubkey) -> &[Arc] { + self.entries + .get(key) + .map(|second_level| second_level.slot_versions.as_ref()) + .unwrap_or(&[]) + } + /// This function removes the given entry for the given program from the cache. /// The function expects that the program and entry exists in the cache. Otherwise it'll panic. fn unload_program_entry(&mut self, program: &Pubkey, remove_entry: &Arc) { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 2283899d3ca30d..753116ff878e18 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11884,12 +11884,6 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { .remove(&feature_set::reject_callx_r10::id()); let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - // Test a basic transfer - let amount = genesis_config.rent.minimum_balance(0); - let pubkey = solana_sdk::pubkey::new_rand(); - root_bank.transfer(amount, &mint_keypair, &pubkey).unwrap(); - assert_eq!(root_bank.get_balance(&pubkey), amount); - // Program Setup let program_keypair = Keypair::new(); let program_data = @@ -11903,26 +11897,19 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { }); root_bank.store_account(&program_keypair.pubkey(), &program_account); - // Compose instruction using the desired program - let instruction1 = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); - let message1 = Message::new(&[instruction1], Some(&mint_keypair.pubkey())); - let binding1 = mint_keypair.insecure_clone(); - let signers1 = vec![&binding1]; - let transaction1 = Transaction::new(&signers1, message1, root_bank.last_blockhash()); + // Compose message using the desired program. + let instruction = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let binding = mint_keypair.insecure_clone(); + let signers = vec![&binding]; - // Advance the bank so the next transaction can be submitted. + // Advance the bank so that the program becomes effective. 
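This test walks the program cache through a full environment change: load the program under the old environment, schedule a feature activation, cross the middle of the epoch so the recompilation phase starts, and finally cross the epoch boundary. The key invariant, asserted further down with the get_slot_versions_for_tests helper added above, is that recompilation leaves two cached builds of the same program, one per environment:

    // Condensed from the assertions below: after the recompilation slot,
    // the cache holds the old build plus one for the upcoming environment.
    let cache = bank.loaded_programs_cache.read().unwrap();
    let versions = cache.get_slot_versions_for_tests(&program_keypair.pubkey());
    assert_eq!(versions.len(), 2);

First, though, the bank advances one slot so the freshly stored program account becomes visible to execution, as the comment above notes.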
goto_end_of_slot(root_bank.clone()); let bank = new_from_parent_with_fork_next_slot(root_bank, bank_forks.as_ref()); - // Compose second instruction using the same program with a different block hash - let instruction2 = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); - let message2 = Message::new(&[instruction2], Some(&mint_keypair.pubkey())); - let binding2 = mint_keypair.insecure_clone(); - let signers2 = vec![&binding2]; - let transaction2 = Transaction::new(&signers2, message2, bank.last_blockhash()); - - // Execute before feature is enabled to get program into the cache. - let result_without_feature_enabled = bank.process_transaction(&transaction1); + // Load the program with the old environment. + let transaction = Transaction::new(&signers, message.clone(), bank.last_blockhash()); + let result_without_feature_enabled = bank.process_transaction(&transaction); assert_eq!( result_without_feature_enabled, Err(TransactionError::InstructionError( @@ -11931,7 +11918,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { )) ); - // Activate feature + // Schedule feature activation to trigger a change of environment at the epoch boundary. let feature_account_balance = std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); bank.store_account( @@ -11939,12 +11926,59 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); + // Advance the bank to middle of epoch to start the recompilation phase. + goto_end_of_slot(bank.clone()); + let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 16); + let current_env = bank + .loaded_programs_cache + .read() + .unwrap() + .get_environments_for_epoch(0) + .program_runtime_v1 + .clone(); + let upcoming_env = bank + .loaded_programs_cache + .read() + .unwrap() + .get_environments_for_epoch(1) + .program_runtime_v1 + .clone(); + + // Advance the bank to recompile the program. + { + let loaded_programs_cache = bank.loaded_programs_cache.read().unwrap(); + let slot_versions = + loaded_programs_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); + assert_eq!(slot_versions.len(), 1); + assert!(Arc::ptr_eq( + slot_versions[0].program.get_environment().unwrap(), + ¤t_env + )); + } + goto_end_of_slot(bank.clone()); + let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); + { + let loaded_programs_cache = bank.loaded_programs_cache.read().unwrap(); + let slot_versions = + loaded_programs_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); + assert_eq!(slot_versions.len(), 2); + assert!(Arc::ptr_eq( + slot_versions[0].program.get_environment().unwrap(), + ¤t_env + )); + assert!(Arc::ptr_eq( + slot_versions[1].program.get_environment().unwrap(), + &upcoming_env + )); + } + + // Advance the bank to cross the epoch boundary and activate the feature. goto_end_of_slot(bank.clone()); - // Advance to next epoch, which starts the recompilation phase - let bank = new_from_parent_next_epoch(bank, bank_forks.as_ref(), 1); + let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 33); - // Execute after feature is enabled to check it was filtered out and reverified. - let result_with_feature_enabled = bank.process_transaction(&transaction2); + // Load the program with the new environment. 
+ let transaction = Transaction::new(&signers, message, bank.last_blockhash()); + let result_with_feature_enabled = bank.process_transaction(&transaction); assert_eq!( result_with_feature_enabled, Err(TransactionError::InstructionError( From cc4072bce8aefb5bb7612cf47988b5f0b4fba014 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Sat, 2 Mar 2024 20:23:55 -0800 Subject: [PATCH 314/401] blockstore: atomize slot clearing, relax parent slot meta check (#35124) * blockstore: atomize slot clearing, relax parent slot meta check clear_unconfirmed_slot can leave blockstore in an irrecoverable state if it panics in the middle. write batch this function, so that any errors can be recovered after restart. additionally relax the constraint that the parent slot meta must exist, as it could have been cleaned up if outdated. * pr feedback: use PurgeType, don't pass slot_meta * pr feedback: add unit test * pr feedback: refactor into separate function * pr feedback: add special columns to helper, err msg, comments * pr feedback: reword comments and write batch error message * pr feedback: bubble write_batch error to caller * pr feedback: reword comments Co-authored-by: steviez --------- Co-authored-by: steviez --- ledger/src/blockstore.rs | 48 ++--- ledger/src/blockstore/blockstore_purge.rs | 214 ++++++++++++++++------ 2 files changed, 173 insertions(+), 89 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 867761639d95d3..f8c8330843dfce 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -1154,9 +1154,8 @@ impl Blockstore { self.completed_slots_senders.lock().unwrap().clear(); } - /// Range-delete all entries which prefix matches the specified `slot`, - /// remove `slot` its' parents SlotMeta next_slots list, and - /// clear `slot`'s SlotMeta (except for next_slots). + /// Clear `slot` from the Blockstore, see ``Blockstore::purge_slot_cleanup_chaining` + /// for more details. /// /// This function currently requires `insert_shreds_lock`, as both /// `clear_unconfirmed_slot()` and `insert_shreds_handle_duplicate()` @@ -1164,40 +1163,19 @@ impl Blockstore { /// family. pub fn clear_unconfirmed_slot(&self, slot: Slot) { let _lock = self.insert_shreds_lock.lock().unwrap(); - if let Some(mut slot_meta) = self - .meta(slot) - .expect("Couldn't fetch from SlotMeta column family") - { - // Clear all slot related information - self.run_purge(slot, slot, PurgeType::Exact) - .expect("Purge database operations failed"); - - // Clear this slot as a next slot from parent - if let Some(parent_slot) = slot_meta.parent_slot { - let mut parent_slot_meta = self - .meta(parent_slot) - .expect("Couldn't fetch from SlotMeta column family") - .expect("Unconfirmed slot should have had parent slot set"); - // .retain() is a linear scan; however, next_slots should - // only contain several elements so this isn't so bad - parent_slot_meta - .next_slots - .retain(|&next_slot| next_slot != slot); - self.meta_cf - .put(parent_slot, &parent_slot_meta) - .expect("Couldn't insert into SlotMeta column family"); - } - // Reinsert parts of `slot_meta` that are important to retain, like the `next_slots` - // field. - slot_meta.clear_unconfirmed_slot(); - self.meta_cf - .put(slot, &slot_meta) - .expect("Couldn't insert into SlotMeta column family"); - } else { - error!( + // Purge the slot and insert an empty `SlotMeta` with only the `next_slots` field preserved. 
+        // Shreds inherently know their parent slot, and a parent's SlotMeta `next_slots` list
+        // will be updated when the child is inserted (see `Blockstore::handle_chaining()`).
+        // However, we are only purging and repairing the parent slot here. Since the child will not be
+        // reinserted the chaining will be lost. In order for bank forks discovery to ingest the child,
+        // we must retain the chain by preserving `next_slots`.
+        match self.purge_slot_cleanup_chaining(slot) {
+            Ok(_) => {}
+            Err(BlockstoreError::SlotUnavailable) => error!(
                 "clear_unconfirmed_slot() called on slot {} with no SlotMeta",
                 slot
-            );
+            ),
+            Err(e) => panic!("Purge database operations failed {}", e),
         }
     }

diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs
index 4b599a353d569c..15a5c4890e9f05 100644
--- a/ledger/src/blockstore/blockstore_purge.rs
+++ b/ledger/src/blockstore/blockstore_purge.rs
@@ -135,6 +135,7 @@ impl Blockstore {
         }
     }

+    #[cfg(test)]
     pub(crate) fn run_purge(
         &self,
         from_slot: Slot,
@@ -144,11 +145,60 @@ impl Blockstore {
         self.run_purge_with_stats(from_slot, to_slot, purge_type, &mut PurgeStats::default())
     }

+    /// Purges all columns relating to `slot`.
+    ///
+    /// Additionally, we cleanup the parent of `slot` by clearing `slot` from
+    /// the parent's `next_slots`. We reinsert an orphaned `slot_meta` for `slot`
+    /// that preserves `slot`'s `next_slots`. This ensures that `slot`'s fork is
+    /// replayable upon repair of `slot`.
+    pub(crate) fn purge_slot_cleanup_chaining(&self, slot: Slot) -> Result<bool> {
+        let Some(mut slot_meta) = self.meta(slot)? else {
+            return Err(BlockstoreError::SlotUnavailable);
+        };
+        let mut write_batch = self.db.batch()?;
+
+        let columns_purged = self.purge_range(&mut write_batch, slot, slot, PurgeType::Exact)?;
+
+        if let Some(parent_slot) = slot_meta.parent_slot {
+            let parent_slot_meta = self.meta(parent_slot)?;
+            if let Some(mut parent_slot_meta) = parent_slot_meta {
+                // .retain() is a linear scan; however, next_slots should
+                // only contain several elements so this isn't so bad
+                parent_slot_meta
+                    .next_slots
+                    .retain(|&next_slot| next_slot != slot);
+                write_batch.put::<cf::SlotMeta>(parent_slot, &parent_slot_meta)?;
+            } else {
+                error!(
+                    "Parent slot meta {} for child {} is missing or cleaned up.
+                    Falling back to orphan repair to remedy the situation",
+                    parent_slot, slot
+                );
+            }
+        }
+
+        // Retain a SlotMeta for `slot` with the `next_slots` field retained
+        slot_meta.clear_unconfirmed_slot();
+        write_batch.put::<cf::SlotMeta>(slot, &slot_meta)?;
+
+        self.db.write(write_batch).inspect_err(|e| {
+            error!(
+                "Error: {:?} while submitting write batch for slot {:?}",
+                e, slot
+            )
+        })?;
+        Ok(columns_purged)
+    }
+
     /// A helper function to `purge_slots` that executes the ledger clean up.
     /// The cleanup applies to \[`from_slot`, `to_slot`\].
     ///
     /// When `from_slot` is 0, any sst-file with a key-range completely older
     /// than `to_slot` will also be deleted.
+    ///
+    /// Note: slots > `to_slot` that chained to a purged slot are not properly
+    /// cleaned up. This function is not intended to be used if such slots need
+    /// to be replayed.
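The motivation for routing everything through a single write batch — both in `purge_slot_cleanup_chaining` above and in `run_purge_with_stats` below — is easier to see in isolation. The following is a minimal, self-contained sketch of that stage-then-commit pattern, not the Blockstore API: `Store`, `WriteBatch`, and the trimmed-down `SlotMeta` are stand-ins, and the real code batches many more column families. The point is that the column deletes, the parent fix-up, and the orphaned-meta reinsert become visible together, so a panic partway through leaves nothing half-applied.

```rust
use std::collections::HashMap;

type Slot = u64;

#[derive(Clone, Default)]
struct SlotMeta {
    parent_slot: Option<Slot>,
    next_slots: Vec<Slot>,
}

/// Staged operations; nothing touches the store until `commit`.
#[derive(Default)]
struct WriteBatch {
    deletes: Vec<Slot>,
    puts: Vec<(Slot, SlotMeta)>,
}

#[derive(Default)]
struct Store {
    meta: HashMap<Slot, SlotMeta>, // stands in for the SlotMeta column family
}

impl Store {
    /// All staged operations land together; a crash before this point
    /// leaves the store exactly as it was.
    fn commit(&mut self, batch: WriteBatch) {
        for slot in batch.deletes {
            self.meta.remove(&slot);
        }
        for (slot, meta) in batch.puts {
            self.meta.insert(slot, meta);
        }
    }

    fn purge_slot_cleanup_chaining(&mut self, slot: Slot) -> bool {
        let Some(old) = self.meta.get(&slot).cloned() else {
            return false; // analogous to BlockstoreError::SlotUnavailable
        };
        let mut batch = WriteBatch::default();
        // 1) Purge the slot's own data.
        batch.deletes.push(slot);
        // 2) Clear `slot` from its parent's next_slots, tolerating a
        //    missing parent meta (it may have been cleaned up already).
        if let Some(parent) = old.parent_slot {
            if let Some(mut parent_meta) = self.meta.get(&parent).cloned() {
                parent_meta.next_slots.retain(|&s| s != slot);
                batch.puts.push((parent, parent_meta));
            }
        }
        // 3) Reinsert an orphaned meta that keeps only next_slots, so the
        //    fork below `slot` is rediscovered once `slot` is repaired.
        batch.puts.push((
            slot,
            SlotMeta {
                parent_slot: None,
                next_slots: old.next_slots,
            },
        ));
        self.commit(batch); // a panic anywhere above changes nothing
        true
    }
}

fn main() {
    let mut store = Store::default();
    store.meta.insert(4, SlotMeta { parent_slot: None, next_slots: vec![5] });
    store.meta.insert(5, SlotMeta { parent_slot: Some(4), next_slots: vec![6] });
    assert!(store.purge_slot_cleanup_chaining(5));
    assert!(store.meta[&4].next_slots.is_empty()); // parent no longer chains to 5
    assert_eq!(store.meta[&5].next_slots, vec![6]); // but 5's own chain survives
}
```

This is the same shape the unit tests added later in this patch pin down: after purging a slot, the parent's `next_slots` drops it, while the purged slot's own `next_slots` is preserved for fork discovery.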
     pub(crate) fn run_purge_with_stats(
         &self,
         from_slot: Slot,
@@ -156,78 +206,120 @@ impl Blockstore {
         purge_type: PurgeType,
         purge_stats: &mut PurgeStats,
     ) -> Result<bool> {
-        let mut write_batch = self
-            .db
-            .batch()
-            .expect("Database Error: Failed to get write batch");
+        let mut write_batch = self.db.batch()?;
+        let mut delete_range_timer = Measure::start("delete_range");
+        let columns_purged = self.purge_range(&mut write_batch, from_slot, to_slot, purge_type)?;
+        delete_range_timer.stop();
+
+        let mut write_timer = Measure::start("write_batch");
+        self.db.write(write_batch).inspect_err(|e| {
+            error!(
+                "Error: {:?} while submitting write batch for purge from_slot {} to_slot {}",
+                e, from_slot, to_slot
+            )
+        })?;
+        write_timer.stop();
+
+        let mut purge_files_in_range_timer = Measure::start("delete_file_in_range");
+        // purge_files_in_range delete any files whose slot range is within
+        // [from_slot, to_slot]. When from_slot is 0, it is safe to run
+        // purge_files_in_range because if purge_files_in_range deletes any
+        // sst file that contains any range-deletion tombstone, the deletion
+        // range of that tombstone will be completely covered by the new
+        // range-delete tombstone (0, to_slot) issued above.
+        //
+        // On the other hand, purge_files_in_range is more effective and
+        // efficient than the compaction filter (which runs key-by-key)
+        // because all the sst files that have key range below to_slot
+        // can be deleted immediately.
+        if columns_purged && from_slot == 0 {
+            self.purge_files_in_range(from_slot, to_slot);
+        }
+        purge_files_in_range_timer.stop();
+
+        purge_stats.delete_range += delete_range_timer.as_us();
+        purge_stats.write_batch += write_timer.as_us();
+        purge_stats.delete_files_in_range += purge_files_in_range_timer.as_us();
+
+        Ok(columns_purged)
+    }
+
+    fn purge_range(
+        &self,
+        write_batch: &mut WriteBatch,
+        from_slot: Slot,
+        to_slot: Slot,
+        purge_type: PurgeType,
+    ) -> Result<bool> {
         let columns_purged = self
             .db
-            .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+            .delete_range_cf::(write_batch, from_slot, to_slot)
             .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
             & self
                 .db
-                .delete_range_cf::(&mut write_batch, from_slot, to_slot)
+                .delete_range_cf::(write_batch, from_slot, to_slot)
                 .is_ok()
& self .db - .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .delete_range_cf::(write_batch, from_slot, to_slot) .is_ok() & self .db - .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .delete_range_cf::(write_batch, from_slot, to_slot) .is_ok() & self .db - .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .delete_range_cf::(write_batch, from_slot, to_slot) .is_ok() & self .db - .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .delete_range_cf::(write_batch, from_slot, to_slot) .is_ok(); + match purge_type { PurgeType::Exact => { - self.purge_special_columns_exact(&mut write_batch, from_slot, to_slot)?; + self.purge_special_columns_exact(write_batch, from_slot, to_slot)?; } PurgeType::CompactionFilter => { // No explicit action is required here because this purge type completely and @@ -237,39 +329,6 @@ impl Blockstore { // in no spiky periodic huge delete_range for them. } } - delete_range_timer.stop(); - - let mut write_timer = Measure::start("write_batch"); - if let Err(e) = self.db.write(write_batch) { - error!( - "Error: {:?} while submitting write batch for slot {:?} retrying...", - e, from_slot - ); - return Err(e); - } - write_timer.stop(); - - let mut purge_files_in_range_timer = Measure::start("delete_file_in_range"); - // purge_files_in_range delete any files whose slot range is within - // [from_slot, to_slot]. When from_slot is 0, it is safe to run - // purge_files_in_range because if purge_files_in_range deletes any - // sst file that contains any range-deletion tombstone, the deletion - // range of that tombstone will be completely covered by the new - // range-delete tombstone (0, to_slot) issued above. - // - // On the other hand, purge_files_in_range is more effective and - // efficient than the compaction filter (which runs key-by-key) - // because all the sst files that have key range below to_slot - // can be deleted immediately. 
- if columns_purged && from_slot == 0 { - self.purge_files_in_range(from_slot, to_slot); - } - purge_files_in_range_timer.stop(); - - purge_stats.delete_range += delete_range_timer.as_us(); - purge_stats.write_batch += write_timer.as_us(); - purge_stats.delete_files_in_range += purge_files_in_range_timer.as_us(); - Ok(columns_purged) } @@ -1103,4 +1162,51 @@ pub mod tests { } assert_eq!(count, 1); } + + #[test] + fn test_purge_slot_cleanup_chaining_missing_slot_meta() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let (shreds, _) = make_many_slot_entries(0, 10, 5); + blockstore.insert_shreds(shreds, None, false).unwrap(); + + assert!(matches!( + blockstore.purge_slot_cleanup_chaining(11).unwrap_err(), + BlockstoreError::SlotUnavailable + )); + } + + #[test] + fn test_purge_slot_cleanup_chaining() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let (shreds, _) = make_many_slot_entries(0, 10, 5); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (slot_11, _) = make_slot_entries(11, 4, 5, true); + blockstore.insert_shreds(slot_11, None, false).unwrap(); + let (slot_12, _) = make_slot_entries(12, 5, 5, true); + blockstore.insert_shreds(slot_12, None, false).unwrap(); + + blockstore.purge_slot_cleanup_chaining(5).unwrap(); + + let slot_meta = blockstore.meta(5).unwrap().unwrap(); + let expected_slot_meta = SlotMeta { + slot: 5, + // Only the next_slots should be preserved + next_slots: vec![6, 12], + ..SlotMeta::default() + }; + assert_eq!(slot_meta, expected_slot_meta); + + let parent_slot_meta = blockstore.meta(4).unwrap().unwrap(); + assert_eq!(parent_slot_meta.next_slots, vec![11]); + + let child_slot_meta = blockstore.meta(6).unwrap().unwrap(); + assert_eq!(child_slot_meta.parent_slot.unwrap(), 5); + + let child_slot_meta = blockstore.meta(12).unwrap().unwrap(); + assert_eq!(child_slot_meta.parent_slot.unwrap(), 5); + } } From 935551842450258df90ccd8c747cfb4f37722221 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 12 Feb 2024 15:17:51 +0800 Subject: [PATCH 315/401] [anza migration] replace binaries backend (#6) * ci: add upload-gcs-artifact * ci: publish release binaries to GCS * ci: redirect github repo to anza-xyz * ci: publish windows binaries to GCS * replace release.solana.com with release.anza.xyz * use a explicit name for credential --- .github/workflows/release-artifacts-auto.yml | 6 ++-- .../workflows/release-artifacts-manually.yml | 4 +-- .github/workflows/release-artifacts.yml | 33 +++++++++++-------- ci/publish-installer.sh | 12 +++---- ci/publish-tarball.sh | 6 ++-- ci/upload-ci-artifact.sh | 10 ++++++ ci/upload-github-release-asset.sh | 2 +- install/solana-install-init.sh | 4 +-- install/src/command.rs | 8 ++--- 9 files changed, 48 insertions(+), 37 deletions(-) diff --git a/.github/workflows/release-artifacts-auto.yml b/.github/workflows/release-artifacts-auto.yml index a8309cdffc8a72..0cdd176e04396c 100644 --- a/.github/workflows/release-artifacts-auto.yml +++ b/.github/workflows/release-artifacts-auto.yml @@ -14,14 +14,12 @@ concurrency: jobs: release-artifacts: - if: github.repository == 'solana-labs/solana' + if: github.repository == 'anza-xyz/agave' uses: ./.github/workflows/release-artifacts.yml with: commit: ${{ github.sha }} secrets: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET 
}} + GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL: ${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }} error_reporting: needs: diff --git a/.github/workflows/release-artifacts-manually.yml b/.github/workflows/release-artifacts-manually.yml index 35de72922c32c8..fe5c1b03b638b3 100644 --- a/.github/workflows/release-artifacts-manually.yml +++ b/.github/workflows/release-artifacts-manually.yml @@ -14,6 +14,4 @@ jobs: with: commit: ${{ github.event.inputs.commit }} secrets: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL: ${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }} diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 98dc697920262c..14760837ba0dbd 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -7,11 +7,7 @@ on: required: false type: string secrets: - AWS_ACCESS_KEY_ID: - required: true - AWS_SECRET_ACCESS_KEY: - required: true - AWS_S3_BUCKET: + GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL: required: true jobs: @@ -71,17 +67,17 @@ jobs: shell: bash run: | FOLDER_NAME=${{ steps.build.outputs.tag || steps.build.outputs.channel }} - mkdir -p "github-action-s3-upload/$FOLDER_NAME" - cp -v "solana-release-x86_64-pc-windows-msvc.tar.bz2" "github-action-s3-upload/$FOLDER_NAME/" - cp -v "solana-release-x86_64-pc-windows-msvc.yml" "github-action-s3-upload/$FOLDER_NAME/" - cp -v "solana-install-init-x86_64-pc-windows-msvc"* "github-action-s3-upload/$FOLDER_NAME" + mkdir -p "windows-release/$FOLDER_NAME" + cp -v "solana-release-x86_64-pc-windows-msvc.tar.bz2" "windows-release/$FOLDER_NAME/" + cp -v "solana-release-x86_64-pc-windows-msvc.yml" "windows-release/$FOLDER_NAME/" + cp -v "solana-install-init-x86_64-pc-windows-msvc"* "windows-release/$FOLDER_NAME" - name: Upload Artifacts if: ${{ steps.build.outputs.channel != '' || steps.build.outputs.tag != '' }} uses: actions/upload-artifact@v3 with: name: windows-artifact - path: github-action-s3-upload/ + path: windows-release/ windows-s3-upload: if: ${{ needs.windows-build.outputs.channel != '' || needs.windows-build.outputs.tag != '' }} @@ -92,7 +88,16 @@ jobs: uses: actions/download-artifact@v3 with: name: windows-artifact - path: ./github-action-s3-upload + path: .windows-release/ + + - name: Setup crediential + uses: "google-github-actions/auth@v2" + with: + credentials_json: "${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }}" + + - name: Upload files to GCS + run: | + gcloud storage cp --recursive windows-release/* gs://anza-release/ - name: Upload uses: jakejarvis/s3-sync-action@master @@ -103,7 +108,7 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} AWS_REGION: "us-west-1" - SOURCE_DIR: "github-action-s3-upload" + SOURCE_DIR: "windows-release" windows-gh-release: if: ${{ needs.windows-build.outputs.tag != '' }} @@ -114,7 +119,7 @@ jobs: uses: actions/download-artifact@v3 with: name: windows-artifact - path: ./github-action-s3-upload + path: .windows-release/ - name: Release uses: softprops/action-gh-release@v1 @@ -122,4 +127,4 @@ jobs: tag_name: ${{ needs.windows-build.outputs.tag }} draft: true files: | - github-action-s3-upload/${{ needs.windows-build.outputs.tag }}/* + windows-release/${{ needs.windows-build.outputs.tag }}/* diff --git a/ci/publish-installer.sh b/ci/publish-installer.sh index 4b5345ae0d26fe..e58fd939dd1a40 100755 
--- a/ci/publish-installer.sh
+++ b/ci/publish-installer.sh
@@ -26,14 +26,14 @@ fi
 # upload install script
 source ci/upload-ci-artifact.sh

-cat >release.solana.com-install <<EOF
+cat >release.anza.xyz-install <<EOF
 SOLANA_RELEASE=$CHANNEL_OR_TAG
 SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
-SOLANA_DOWNLOAD_ROOT=https://release.solana.com
+SOLANA_DOWNLOAD_ROOT=https://release.anza.xyz
 EOF
-cat install/solana-install-init.sh >>release.solana.com-install
+cat install/solana-install-init.sh >>release.anza.xyz-install

-echo --- AWS S3 Store: "install"
-upload-s3-artifact "/solana/release.solana.com-install" "s3://release.solana.com/$CHANNEL_OR_TAG/install"
+echo --- GCS: "install"
+upload-gcs-artifact "/solana/release.anza.xyz-install" "gs://anza-release/$CHANNEL_OR_TAG/install"

 echo Published to:
-ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
+ci/format-url.sh https://release.anza.xyz/"$CHANNEL_OR_TAG"/install
diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh
index ff72bb7da2d066..5c64f09564fe9f 100755
--- a/ci/publish-tarball.sh
+++ b/ci/publish-tarball.sh
@@ -118,11 +118,11 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
   fi

   if [[ -n $BUILDKITE ]]; then
-    echo --- AWS S3 Store: "$file"
-    upload-s3-artifact "/solana/$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
+    echo --- GCS Store: "$file"
+    upload-gcs-artifact "/solana/$file" gs://anza-release/"$CHANNEL_OR_TAG"/"$file"

     echo Published to:
-    $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
+    $DRYRUN ci/format-url.sh https://release.anza.xyz/"$CHANNEL_OR_TAG"/"$file"

     if [[ -n $TAG ]]; then
       ci/upload-github-release-asset.sh "$file"
diff --git a/ci/upload-ci-artifact.sh b/ci/upload-ci-artifact.sh
index 1236da9f27114a..e7cc34ab2b2d8c 100644
--- a/ci/upload-ci-artifact.sh
+++ b/ci/upload-ci-artifact.sh
@@ -40,3 +40,13 @@ upload-s3-artifact() {
     docker run "${args[@]}"
   )
 }
+
+upload-gcs-artifact() {
+  echo "--- artifact: $1 to $2"
+  docker run --rm \
+    -v "$GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL:/application_default_credentials.json" \
+    -v "$PWD:/solana" \
+    -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=/application_default_credentials.json \
+    gcr.io/google.com/cloudsdktool/google-cloud-cli:latest \
+    gcloud storage cp "$1" "$2"
+}
diff --git a/ci/upload-github-release-asset.sh b/ci/upload-github-release-asset.sh
index ca2ae2a8f60443..229fb8993edafd 100755
--- a/ci/upload-github-release-asset.sh
+++ b/ci/upload-github-release-asset.sh
@@ -26,7 +26,7 @@ fi
 # Force CI_REPO_SLUG since sometimes
 # BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
 # artifact upload to fail
-CI_REPO_SLUG=solana-labs/solana
+CI_REPO_SLUG=anza-xyz/agave
 #if [[ -z $CI_REPO_SLUG ]]; then
 #  echo Error: CI_REPO_SLUG not defined
 #  exit 1
diff --git a/install/solana-install-init.sh b/install/solana-install-init.sh
index db36dc61e2ff30..4f28e300be52ab 100755
--- a/install/solana-install-init.sh
+++ b/install/solana-install-init.sh
@@ -16,9 +16,9 @@
 { # this ensures the entire script is downloaded #

 if [ -z "$SOLANA_DOWNLOAD_ROOT" ]; then
-    SOLANA_DOWNLOAD_ROOT="https://github.com/solana-labs/solana/releases/download/"
+    SOLANA_DOWNLOAD_ROOT="https://github.com/anza-xyz/agave/releases/download/"
 fi
-GH_LATEST_RELEASE="https://api.github.com/repos/solana-labs/solana/releases/latest"
+GH_LATEST_RELEASE="https://api.github.com/repos/anza-xyz/agave/releases/latest"

 set -e

diff --git a/install/src/command.rs b/install/src/command.rs
index d7b92c17690bda..218e815467e9a9 100644
--- a/install/src/command.rs
+++ b/install/src/command.rs
@@ -572,7 +572,7 @@ pub fn init(

 fn github_release_download_url(release_semver: &str) -> String {
     format!(
-
"https://github.com/solana-labs/solana/releases/download/v{}/solana-release-{}.tar.bz2", + "https://github.com/anza-xyz/agave/releases/download/v{}/solana-release-{}.tar.bz2", release_semver, crate::build_env::TARGET ) @@ -580,7 +580,7 @@ fn github_release_download_url(release_semver: &str) -> String { fn release_channel_download_url(release_channel: &str) -> String { format!( - "https://release.solana.com/{}/solana-release-{}.tar.bz2", + "https://release.anza.xyz/{}/solana-release-{}.tar.bz2", release_channel, crate::build_env::TARGET ) @@ -588,7 +588,7 @@ fn release_channel_download_url(release_channel: &str) -> String { fn release_channel_version_url(release_channel: &str) -> String { format!( - "https://release.solana.com/{}/solana-release-{}.yml", + "https://release.anza.xyz/{}/solana-release-{}.yml", release_channel, crate::build_env::TARGET ) @@ -905,7 +905,7 @@ fn check_for_newer_github_release( while page == 1 || releases.len() == PER_PAGE { let url = reqwest::Url::parse_with_params( - "https://api.github.com/repos/solana-labs/solana/releases", + "https://api.github.com/repos/anza-xyz/agave/releases", &[ ("per_page", &format!("{PER_PAGE}")), ("page", &format!("{page}")), From b0022d73eadacf9097afffb80aeda6c10596b310 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 12 Feb 2024 15:18:11 +0800 Subject: [PATCH 316/401] [anza migration] ci (#5) * Update README.md * ci: update CodeCov report link * ci: update github pr link * ci: rename secondary pipeline * replace org name in .mergify * update channel info link * update dependabot pr link * use anza docker image * delete travis --------- Co-authored-by: Will Hickey --- .mergify.yml | 4 +- .travis.yml | 94 -------------------------------- README.md | 34 +----------- ci/buildkite-pipeline-in-disk.sh | 4 +- ci/buildkite-pipeline.sh | 8 +-- ci/buildkite-solana-private.sh | 2 +- ci/channel-info.sh | 2 +- ci/dependabot-pr.sh | 2 +- ci/rust-version.sh | 2 +- ci/test-coverage.sh | 2 +- 10 files changed, 14 insertions(+), 140 deletions(-) delete mode 100644 .travis.yml diff --git a/.mergify.yml b/.mergify.yml index ef576943d5d635..166f59a5f365d1 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -33,7 +33,7 @@ pull_request_rules: actions: request_reviews: teams: - - "@solana-labs/community-pr-subscribers" + - "@anza-xyz/community-pr-subscribers" - name: label changes from monorepo-triage conditions: - author≠@core-contributors @@ -102,7 +102,7 @@ pull_request_rules: actions: backport: assignees: &BackportAssignee - - "{{ merged_by|replace('mergify[bot]', label|select('equalto', 'community')|first|default(author)|replace('community', '@solana-labs/community-pr-subscribers')) }}" + - "{{ merged_by|replace('mergify[bot]', label|select('equalto', 'community')|first|default(author)|replace('community', '@anza-xyz/community-pr-subscribers')) }}" title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true labels: diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index c2dd13e32551ba..00000000000000 --- a/.travis.yml +++ /dev/null @@ -1,94 +0,0 @@ -branches: - only: - - master - - /^v\d+\.\d+/ - -notifications: - email: false - slack: - on_success: change - if: NOT type = pull_request - secure: 
F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU= - -os: linux -dist: bionic -language: minimal - -jobs: - include: - - &release-artifacts - if: type IN (api, cron) OR tag IS present - name: "macOS release artifacts" - os: osx - osx_image: xcode12 - language: rust - rust: - - stable - install: - - source ci/rust-version.sh - - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" - - readlink -f . - - brew install gnu-tar - - PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH" - - tar --version - script: - - source ci/env.sh - - rustup set profile default - - ci/publish-tarball.sh - deploy: - - provider: s3 - access_key_id: $AWS_ACCESS_KEY_ID - secret_access_key: $AWS_SECRET_ACCESS_KEY - bucket: release.solana.com - region: us-west-1 - skip_cleanup: true - acl: public_read - local_dir: travis-s3-upload - on: - all_branches: true - - provider: releases - token: $GITHUB_TOKEN - skip_cleanup: true - file_glob: true - file: travis-release-upload/* - on: - tags: true - - <<: *release-artifacts - name: "Windows release artifacts" - os: windows - install: - - choco install openssl - - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" - - source ci/rust-version.sh - - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" - - readlink -f . - # Linux release artifacts are still built by ci/buildkite-secondary.yml - #- <<: *release-artifacts - # name: "Linux release artifacts" - # os: linux - # before_install: - # - sudo apt-get install libssl-dev libudev-dev - - # docs pull request - - name: "docs" - if: type IN (push, pull_request) OR tag IS present - language: node_js - node_js: - - "lts/*" - - services: - - docker - - cache: - directories: - - ~/.npm - - before_install: - - source ci/env.sh - - .travis/channel_restriction.sh edge beta || travis_terminate 0 - - .travis/affects.sh docs/ .travis || travis_terminate 0 - - cd docs/ - - source .travis/before_install.sh - - script: - - source .travis/script.sh diff --git a/README.md b/README.md index c6183f6ab6183e..bbaeb3d019a658 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

 [README banner: the surrounding HTML (a centered paragraph wrapping a link and an image) was stripped during extraction; this hunk only changes URLs inside those tags, while the visible alt text "Solana" is unchanged]
diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index ad12e1fc000a89..2ce8af0432106b 100755 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -289,7 +289,7 @@ if [[ -n $BUILDKITE_TAG ]]; then start_pipeline "Tag pipeline for $BUILDKITE_TAG" annotate --style info --context release-tag \ - "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" + "https://github.com/anza-xyz/agave/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly trigger_secondary_step @@ -307,7 +307,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/anza-xyz/agave/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index d40273863cc7a3..4ae00c9feab586 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -121,8 +121,8 @@ EOF trigger_secondary_step() { cat >> "$output_file" <<"EOF" - - name: "Trigger Build on solana-secondary" - trigger: "solana-secondary" + - name: "Trigger Build on agave-secondary" + trigger: "agave-secondary" branches: "!pull/*" async: true soft_fail: true @@ -315,7 +315,7 @@ if [[ -n $BUILDKITE_TAG ]]; then start_pipeline "Tag pipeline for $BUILDKITE_TAG" annotate --style info --context release-tag \ - "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" + "https://github.com/anza-xyz/agave/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly trigger_secondary_step @@ -333,7 +333,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/anza-xyz/agave/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 70d8e4bfe4f59f..e5886a314eb27c 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -287,7 +287,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then # Add helpful link back to the corresponding Github Pull Request annotate --style info --context pr-backlink \ - "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH" + "Github Pull Request: https://github.com/anza-xyz/agave/$BUILDKITE_BRANCH" if [[ $GITHUB_USER = "dependabot[bot]" ]]; then command_step dependabot "ci/dependabot-pr.sh" 5 diff --git a/ci/channel-info.sh b/ci/channel-info.sh index c82806454d012c..2bb808365653c6 100755 --- a/ci/channel-info.sh +++ b/ci/channel-info.sh @@ -11,7 +11,7 @@ here="$(dirname "$0")" # shellcheck source=ci/semver_bash/semver.sh source "$here"/semver_bash/semver.sh -remote=https://github.com/solana-labs/solana.git +remote=https://github.com/anza-xyz/agave.git # Fetch all vX.Y.Z tags # diff --git a/ci/dependabot-pr.sh b/ci/dependabot-pr.sh index 91ecd5948c9a43..bb019001a0bcfa 100755 --- a/ci/dependabot-pr.sh +++ b/ci/dependabot-pr.sh @@ -21,7 +21,7 @@ fi echo --- "(FAILING) Backpropagating dependabot-triggered Cargo.lock updates" name="dependabot-buildkite" 
-api_base="https://api.github.com/repos/solana-labs/solana/pulls" +api_base="https://api.github.com/repos/anza-xyz/agave/pulls" pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+') branch=$(curl -s "$api_base/$pr_num" | python3 -c 'import json,sys;print(json.load(sys.stdin)["head"]["ref"])') diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 3321f1d5ecb6a1..97ebb1c7935006 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -37,7 +37,7 @@ export rust_stable="$stable_version" export rust_nightly=nightly-"$nightly_version" -export ci_docker_image="solanalabs/ci:rust_${rust_stable}_${rust_nightly}" +export ci_docker_image="anzaxyz/ci:rust_${rust_stable}_${rust_nightly}" [[ -z $1 ]] || ( diff --git a/ci/test-coverage.sh b/ci/test-coverage.sh index 44231cd338a13e..ffd362acd287b8 100755 --- a/ci/test-coverage.sh +++ b/ci/test-coverage.sh @@ -32,5 +32,5 @@ else codecov -t "${CODECOV_TOKEN}" annotate --style success --context codecov.io \ - "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}" + "CodeCov report: https://codecov.io/github/anza-xyz/agave/commit/${CI_COMMIT:0:9}" fi From 58e9a19f11822c93fdece4567d5250cd0b671ad1 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 13 Feb 2024 00:00:58 +0800 Subject: [PATCH 317/401] [anza migration] ci: fix path (#8) * ci: fix windows build * ci: publish sdk docker image with the new name * update automerge status --- .github/workflows/release-artifacts.yml | 4 ++-- .mergify.yml | 2 +- sdk/docker-solana/build.sh | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 14760837ba0dbd..a77fd672d8b4b6 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -79,7 +79,7 @@ jobs: name: windows-artifact path: windows-release/ - windows-s3-upload: + windows-gcs-upload: if: ${{ needs.windows-build.outputs.channel != '' || needs.windows-build.outputs.tag != '' }} needs: [windows-build] runs-on: ubuntu-20.04 @@ -88,7 +88,7 @@ jobs: uses: actions/download-artifact@v3 with: name: windows-artifact - path: .windows-release/ + path: ./windows-release - name: Setup crediential uses: "google-github-actions/auth@v2" diff --git a/.mergify.yml b/.mergify.yml index 166f59a5f365d1..19f9b8f116a78a 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -50,7 +50,7 @@ pull_request_rules: - name: automatic merge (squash) on CI success conditions: - and: - - status-success=buildkite/solana + - status-success=buildkite/agave - status-success=ci-gate - label=automerge - label!=no-automerge diff --git a/sdk/docker-solana/build.sh b/sdk/docker-solana/build.sh index f1c8ee265d6d56..70e3d0d23e44de 100755 --- a/sdk/docker-solana/build.sh +++ b/sdk/docker-solana/build.sh @@ -29,7 +29,7 @@ cp -f ../../fetch-spl.sh usr/bin/ ./fetch-spl.sh ) -docker build -t solanalabs/solana:"$CHANNEL_OR_TAG" . +docker build -t anzaxyz/agave:"$CHANNEL_OR_TAG" . 
maybeEcho= if [[ -z $CI ]]; then @@ -43,4 +43,4 @@ else fi ) fi -$maybeEcho docker push solanalabs/solana:"$CHANNEL_OR_TAG" +$maybeEcho docker push anzaxyz/agave:"$CHANNEL_OR_TAG" From 91e3dd225004927f84638410f32cb98717ffb1de Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 15 Feb 2024 22:06:51 +0800 Subject: [PATCH 318/401] [anza migration] ci: removed unused s3 upload in Windows build (#9) ci: removed unused s3 upload in Windows build --- .github/workflows/release-artifacts.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index a77fd672d8b4b6..d8e22c42ce5da8 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -99,17 +99,6 @@ jobs: run: | gcloud storage cp --recursive windows-release/* gs://anza-release/ - - name: Upload - uses: jakejarvis/s3-sync-action@master - with: - args: --acl public-read --follow-symlinks - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} - AWS_REGION: "us-west-1" - SOURCE_DIR: "windows-release" - windows-gh-release: if: ${{ needs.windows-build.outputs.tag != '' }} needs: [windows-build] From 3f9a7a52eac0da3a8feb48fd83c939b222c66bc4 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 22 Feb 2024 11:44:01 +0800 Subject: [PATCH 319/401] [anza migration] rename crates (#10) * rename geyser-plugin-interface * rename cargo registry * rename watchtower * rename ledger tool * rename validator * rename install * rename geyser plugin interface when patch --- .../scripts/downstream-project-spl-common.sh | 3 + .github/workflows/release-artifacts.yml | 2 +- Cargo.lock | 424 +++++++++--------- Cargo.toml | 4 +- cargo-registry/Cargo.toml | 4 +- ci/localnet-sanity.sh | 4 +- ci/publish-installer.sh | 2 +- ci/publish-tarball.sh | 4 +- ci/run-sanity.sh | 2 +- docs/src/backwards-compatibility.md | 2 +- docs/src/cli/install.md | 12 +- docs/src/clusters/available.md | 12 +- docs/src/clusters/benchmark.md | 2 +- docs/src/implemented-proposals/installer.md | 58 +-- .../rpc-transaction-history.md | 2 +- docs/src/operations/best-practices/general.md | 28 +- .../operations/best-practices/monitoring.md | 28 +- docs/src/operations/guides/restart-cluster.md | 14 +- .../operations/guides/validator-failover.md | 12 +- docs/src/operations/guides/validator-start.md | 32 +- docs/src/operations/guides/vote-accounts.md | 8 +- docs/src/operations/setup-a-validator.md | 24 +- docs/src/operations/setup-an-rpc-node.md | 4 +- docs/src/validator/geyser.md | 8 +- geyser-plugin-interface/Cargo.toml | 4 +- .../src/geyser_plugin_interface.rs | 2 +- geyser-plugin-manager/Cargo.toml | 2 +- .../src/accounts_update_notifier.rs | 6 +- .../src/block_metadata_notifier.rs | 4 +- geyser-plugin-manager/src/entry_notifier.rs | 6 +- .../src/geyser_plugin_manager.rs | 4 +- .../src/slot_status_notifier.rs | 2 +- .../src/transaction_notifier.rs | 4 +- install/Cargo.toml | 4 +- ...-install-init.sh => agave-install-init.sh} | 16 +- install/install-help.sh | 6 +- ...-install-init.rs => agave-install-init.rs} | 2 +- install/src/command.rs | 4 +- install/src/lib.rs | 2 +- install/src/main.rs | 2 +- ledger-tool/Cargo.toml | 4 +- ledger-tool/src/blockstore.rs | 2 +- ledger-tool/src/ledger_utils.rs | 8 +- ledger/src/blockstore_db.rs | 2 +- local-cluster/tests/local_cluster.rs | 4 +- multinode-demo/bootstrap-validator.sh | 4 +- multinode-demo/common.sh | 6 +- 
multinode-demo/validator.sh | 6 +- net/net.sh | 8 +- net/remote/remote-deploy-update.sh | 2 +- net/remote/remote-node.sh | 8 +- net/remote/remote-sanity.sh | 4 +- notifier/src/lib.rs | 4 +- programs/sbf/Cargo.lock | 152 +++---- programs/sbf/Cargo.toml | 2 +- programs/sbf/rust/simulation/Cargo.toml | 2 +- .../sbf/rust/simulation/tests/validator.rs | 2 +- .../src/nonblocking/pubsub_client.rs | 6 +- pubsub-client/src/pubsub_client.rs | 6 +- rbpf-cli/src/main.rs | 4 +- rpc/src/rpc.rs | 8 +- ...tall-deploy.sh => agave-install-deploy.sh} | 4 +- scripts/cargo-install-all.sh | 14 +- scripts/check-dev-context-only-utils.sh | 2 +- scripts/run.sh | 6 +- .../abi-testcases/mixed-validator-test.sh | 6 +- .../stability-testcases/gossip-dos-test.sh | 6 +- validator/Cargo.toml | 8 +- validator/src/bin/solana-test-validator.rs | 8 +- validator/src/bootstrap.rs | 2 +- validator/src/main.rs | 20 +- watchtower/Cargo.toml | 4 +- watchtower/README.md | 2 +- watchtower/src/main.rs | 10 +- 74 files changed, 554 insertions(+), 547 deletions(-) rename install/{solana-install-init.sh => agave-install-init.sh} (89%) rename install/src/bin/{solana-install-init.rs => agave-install-init.rs} (92%) rename scripts/{solana-install-deploy.sh => agave-install-deploy.sh} (90%) diff --git a/.github/scripts/downstream-project-spl-common.sh b/.github/scripts/downstream-project-spl-common.sh index c6dcfaca007867..861be12c7d1a45 100644 --- a/.github/scripts/downstream-project-spl-common.sh +++ b/.github/scripts/downstream-project-spl-common.sh @@ -22,3 +22,6 @@ if semverGT "$project_used_solana_version" "$SOLANA_VER"; then fi ./patch.crates-io.sh "$SOLANA_DIR" + +# anza migration stopgap. can be removed when agave is fully recommended for public usage. +sed -i 's/solana-geyser-plugin-interface/agave-geyser-plugin-interface/g' ./Cargo.toml diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index d8e22c42ce5da8..45be181c3ce9e1 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -70,7 +70,7 @@ jobs: mkdir -p "windows-release/$FOLDER_NAME" cp -v "solana-release-x86_64-pc-windows-msvc.tar.bz2" "windows-release/$FOLDER_NAME/" cp -v "solana-release-x86_64-pc-windows-msvc.yml" "windows-release/$FOLDER_NAME/" - cp -v "solana-install-init-x86_64-pc-windows-msvc"* "windows-release/$FOLDER_NAME" + cp -v "agave-install-init-x86_64-pc-windows-msvc"* "windows-release/$FOLDER_NAME" - name: Upload Artifacts if: ${{ steps.build.outputs.channel != '' || steps.build.outputs.tag != '' }} diff --git a/Cargo.lock b/Cargo.lock index 650b369d205c5c..c72b90930d7cf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -62,6 +62,217 @@ dependencies = [ "zeroize", ] +[[package]] +name = "agave-cargo-registry" +version = "1.19.0" +dependencies = [ + "clap 2.33.3", + "flate2", + "hex", + "hyper", + "log", + "rustc_version 0.4.0", + "serde", + "serde_json", + "sha2 0.10.8", + "solana-clap-utils", + "solana-cli", + "solana-cli-config", + "solana-cli-output", + "solana-logger", + "solana-remote-wallet", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-version", + "tar", + "tempfile", + "tokio", + "toml 0.8.10", +] + +[[package]] +name = "agave-geyser-plugin-interface" +version = "1.19.0" +dependencies = [ + "log", + "solana-sdk", + "solana-transaction-status", + "thiserror", +] + +[[package]] +name = "agave-install" +version = "1.19.0" +dependencies = [ + "atty", + "bincode", + "bzip2", + "chrono", + "clap 2.33.3", + "console", + "crossbeam-channel", + 
"ctrlc", + "dirs-next", + "indicatif", + "lazy_static", + "nix 0.26.4", + "reqwest", + "scopeguard", + "semver 1.0.22", + "serde", + "serde_yaml 0.8.26", + "serde_yaml 0.9.32", + "solana-clap-utils", + "solana-config-program", + "solana-logger", + "solana-rpc-client", + "solana-sdk", + "solana-version", + "tar", + "tempfile", + "url 2.5.0", + "winapi 0.3.9", + "winreg", +] + +[[package]] +name = "agave-ledger-tool" +version = "1.19.0" +dependencies = [ + "assert_cmd", + "bs58", + "bytecount", + "chrono", + "clap 2.33.3", + "crossbeam-channel", + "csv", + "dashmap", + "futures 0.3.30", + "histogram", + "itertools", + "log", + "num_cpus", + "regex", + "serde", + "serde_json", + "signal-hook", + "solana-account-decoder", + "solana-accounts-db", + "solana-bpf-loader-program", + "solana-clap-utils", + "solana-cli-output", + "solana-core", + "solana-cost-model", + "solana-entry", + "solana-geyser-plugin-manager", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-measure", + "solana-program-runtime", + "solana-rpc", + "solana-runtime", + "solana-sdk", + "solana-stake-program", + "solana-storage-bigtable", + "solana-streamer", + "solana-svm", + "solana-transaction-status", + "solana-unified-scheduler-pool", + "solana-version", + "solana-vote-program", + "solana_rbpf", + "thiserror", + "tikv-jemallocator", + "tokio", +] + +[[package]] +name = "agave-validator" +version = "1.19.0" +dependencies = [ + "agave-geyser-plugin-interface", + "chrono", + "clap 2.33.3", + "console", + "core_affinity", + "crossbeam-channel", + "fd-lock", + "indicatif", + "itertools", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "jsonrpc-server-utils", + "lazy_static", + "libc", + "libloading", + "log", + "num_cpus", + "rand 0.8.5", + "rayon", + "serde", + "serde_json", + "serde_yaml 0.9.32", + "signal-hook", + "solana-account-decoder", + "solana-accounts-db", + "solana-clap-utils", + "solana-cli-config", + "solana-core", + "solana-download-utils", + "solana-entry", + "solana-faucet", + "solana-genesis-utils", + "solana-geyser-plugin-manager", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-metrics", + "solana-net-utils", + "solana-perf", + "solana-poh", + "solana-rpc", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-runtime", + "solana-sdk", + "solana-send-transaction-service", + "solana-storage-bigtable", + "solana-streamer", + "solana-svm", + "solana-test-validator", + "solana-tpu-client", + "solana-unified-scheduler-pool", + "solana-version", + "solana-vote-program", + "spl-token-2022", + "symlink", + "thiserror", + "tikv-jemallocator", +] + +[[package]] +name = "agave-watchtower" +version = "1.19.0" +dependencies = [ + "clap 2.33.3", + "humantime", + "log", + "solana-clap-utils", + "solana-cli-config", + "solana-cli-output", + "solana-logger", + "solana-metrics", + "solana-notifier", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-version", +] + [[package]] name = "ahash" version = "0.7.6" @@ -5482,35 +5693,6 @@ dependencies = [ "tar", ] -[[package]] -name = "solana-cargo-registry" -version = "1.19.0" -dependencies = [ - "clap 2.33.3", - "flate2", - "hex", - "hyper", - "log", - "rustc_version 0.4.0", - "serde", - "serde_json", - "sha2 0.10.8", - "solana-clap-utils", - "solana-cli", - "solana-cli-config", - "solana-cli-output", - "solana-logger", - "solana-remote-wallet", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-sdk", - "solana-version", - "tar", - "tempfile", - "tokio", - "toml 
0.8.10", -] - [[package]] name = "solana-cargo-test-bpf" version = "1.19.0" @@ -6040,20 +6222,11 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "solana-geyser-plugin-interface" -version = "1.19.0" -dependencies = [ - "log", - "solana-sdk", - "solana-transaction-status", - "thiserror", -] - [[package]] name = "solana-geyser-plugin-manager" version = "1.19.0" dependencies = [ + "agave-geyser-plugin-interface", "bs58", "crossbeam-channel", "json5", @@ -6064,7 +6237,6 @@ dependencies = [ "serde_json", "solana-accounts-db", "solana-entry", - "solana-geyser-plugin-interface", "solana-ledger", "solana-measure", "solana-metrics", @@ -6126,41 +6298,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-install" -version = "1.19.0" -dependencies = [ - "atty", - "bincode", - "bzip2", - "chrono", - "clap 2.33.3", - "console", - "crossbeam-channel", - "ctrlc", - "dirs-next", - "indicatif", - "lazy_static", - "nix 0.26.4", - "reqwest", - "scopeguard", - "semver 1.0.22", - "serde", - "serde_yaml 0.8.26", - "serde_yaml 0.9.32", - "solana-clap-utils", - "solana-config-program", - "solana-logger", - "solana-rpc-client", - "solana-sdk", - "solana-version", - "tar", - "tempfile", - "url 2.5.0", - "winapi 0.3.9", - "winreg", -] - [[package]] name = "solana-keygen" version = "1.19.0" @@ -6248,58 +6385,6 @@ dependencies = [ "trees", ] -[[package]] -name = "solana-ledger-tool" -version = "1.19.0" -dependencies = [ - "assert_cmd", - "bs58", - "bytecount", - "chrono", - "clap 2.33.3", - "crossbeam-channel", - "csv", - "dashmap", - "futures 0.3.30", - "histogram", - "itertools", - "log", - "num_cpus", - "regex", - "serde", - "serde_json", - "signal-hook", - "solana-account-decoder", - "solana-accounts-db", - "solana-bpf-loader-program", - "solana-clap-utils", - "solana-cli-output", - "solana-core", - "solana-cost-model", - "solana-entry", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-ledger", - "solana-logger", - "solana-measure", - "solana-program-runtime", - "solana-rpc", - "solana-runtime", - "solana-sdk", - "solana-stake-program", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - "solana-transaction-status", - "solana-unified-scheduler-pool", - "solana-version", - "solana-vote-program", - "solana_rbpf", - "thiserror", - "tikv-jemallocator", - "tokio", -] - [[package]] name = "solana-loader-v4-program" version = "1.19.0" @@ -7463,72 +7548,6 @@ dependencies = [ "solana-metrics", ] -[[package]] -name = "solana-validator" -version = "1.19.0" -dependencies = [ - "chrono", - "clap 2.33.3", - "console", - "core_affinity", - "crossbeam-channel", - "fd-lock", - "indicatif", - "itertools", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-ipc-server", - "jsonrpc-server-utils", - "lazy_static", - "libc", - "libloading", - "log", - "num_cpus", - "rand 0.8.5", - "rayon", - "serde", - "serde_json", - "serde_yaml 0.9.32", - "signal-hook", - "solana-account-decoder", - "solana-accounts-db", - "solana-clap-utils", - "solana-cli-config", - "solana-core", - "solana-download-utils", - "solana-entry", - "solana-faucet", - "solana-genesis-utils", - "solana-geyser-plugin-interface", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-ledger", - "solana-logger", - "solana-metrics", - "solana-net-utils", - "solana-perf", - "solana-poh", - "solana-rpc", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-runtime", - "solana-sdk", - "solana-send-transaction-service", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - 
"solana-test-validator", - "solana-tpu-client", - "solana-unified-scheduler-pool", - "solana-version", - "solana-vote-program", - "spl-token-2022", - "symlink", - "thiserror", - "tikv-jemallocator", -] - [[package]] name = "solana-version" version = "1.19.0" @@ -7585,25 +7604,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-watchtower" -version = "1.19.0" -dependencies = [ - "clap 2.33.3", - "humantime", - "log", - "solana-clap-utils", - "solana-cli-config", - "solana-cli-output", - "solana-logger", - "solana-metrics", - "solana-notifier", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-sdk", - "solana-version", -] - [[package]] name = "solana-wen-restart" version = "1.19.0" diff --git a/Cargo.toml b/Cargo.toml index 66436c9cfb3fd8..27376370297e26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -317,7 +317,7 @@ solana-bench-tps = { path = "bench-tps", version = "=1.19.0" } solana-bloom = { path = "bloom", version = "=1.19.0" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.19.0" } solana-bucket-map = { path = "bucket_map", version = "=1.19.0" } -solana-cargo-registry = { path = "cargo-registry", version = "=1.19.0" } +agave-cargo-registry = { path = "cargo-registry", version = "=1.19.0" } solana-clap-utils = { path = "clap-utils", version = "=1.19.0" } solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.19.0" } solana-cli = { path = "cli", version = "=1.19.0" } @@ -336,7 +336,7 @@ solana-frozen-abi = { path = "frozen-abi", version = "=1.19.0" } solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=1.19.0" } solana-genesis = { path = "genesis", version = "=1.19.0" } solana-genesis-utils = { path = "genesis-utils", version = "=1.19.0" } -solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.19.0" } +agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.19.0" } solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.19.0" } solana-gossip = { path = "gossip", version = "=1.19.0" } solana-ledger = { path = "ledger", version = "=1.19.0" } diff --git a/cargo-registry/Cargo.toml b/cargo-registry/Cargo.toml index 4e13f477ee5e2d..395493a8e85f00 100644 --- a/cargo-registry/Cargo.toml +++ b/cargo-registry/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-cargo-registry" +name = "agave-cargo-registry" description = "Solana cargo registry" -documentation = "https://docs.rs/solana-cargo-registry" +documentation = "https://docs.rs/agave-cargo-registry" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/ci/localnet-sanity.sh b/ci/localnet-sanity.sh index e6734e180aa2da..b01eca31d50d81 100755 --- a/ci/localnet-sanity.sh +++ b/ci/localnet-sanity.sh @@ -202,8 +202,8 @@ killNodes() { # Try to use the RPC exit API to cleanly exit the first two nodes # (dynamic nodes, -x, are just killed) echo "--- RPC exit" - $solana_validator --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator exit --force || true - $solana_validator --ledger "$SOLANA_CONFIG_DIR"/validator exit --force || true + $agave_validator --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator exit --force || true + $agave_validator --ledger "$SOLANA_CONFIG_DIR"/validator exit --force || true # Give the nodes a splash of time to cleanly exit before killing them sleep 2 diff --git a/ci/publish-installer.sh b/ci/publish-installer.sh index e58fd939dd1a40..f7d98ffd5ddcf9 100755 --- a/ci/publish-installer.sh +++ b/ci/publish-installer.sh @@ -31,7 +31,7 
@@ SOLANA_RELEASE=$CHANNEL_OR_TAG SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG SOLANA_DOWNLOAD_ROOT=https://release.anza.xyz EOF -cat install/solana-install-init.sh >>release.anza.xyz-install +cat install/agave-install-init.sh >>release.anza.xyz-install echo --- GCS: "install" upload-gcs-artifact "/solana/release.anza.xyz-install" "gs://anza-release/$CHANNEL_OR_TAG/install" diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index 5c64f09564fe9f..da5862fb3de1d2 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -93,7 +93,7 @@ echo --- Creating release tarball tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}" bzip2 "${TARBALL_BASENAME}"-$TARGET.tar - cp "${RELEASE_BASENAME}"/bin/solana-install-init solana-install-init-$TARGET + cp "${RELEASE_BASENAME}"/bin/agave-install-init agave-install-init-$TARGET cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml ) @@ -110,7 +110,7 @@ fi source ci/upload-ci-artifact.sh -for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do +for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.yml agave-install-init-"$TARGET"* $MAYBE_TARBALLS; do if [[ -n $DO_NOT_PUBLISH_TAR ]]; then upload-ci-artifact "$file" echo "Skipped $file due to DO_NOT_PUBLISH_TAR" diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh index 8108d13a061fd5..88a6f40b1adf28 100755 --- a/ci/run-sanity.sh +++ b/ci/run-sanity.sh @@ -31,7 +31,7 @@ while [[ $latest_slot -le $((snapshot_slot + 1)) ]]; do latest_slot=$($solana_cli --url http://localhost:8899 slot --commitment processed) done -$solana_validator --ledger config/ledger exit --force || true +$agave_validator --ledger config/ledger exit --force || true wait $pid diff --git a/docs/src/backwards-compatibility.md b/docs/src/backwards-compatibility.md index 4a3c60b8e129bd..0fdc388ea2dbae 100644 --- a/docs/src/backwards-compatibility.md +++ b/docs/src/backwards-compatibility.md @@ -76,7 +76,7 @@ Major releases: - [`solana-program`](https://docs.rs/solana-program/) - Rust SDK for writing programs - [`solana-client`](https://docs.rs/solana-client/) - Rust client for connecting to RPC API - [`solana-cli-config`](https://docs.rs/solana-cli-config/) - Rust client for managing Solana CLI config files -- [`solana-geyser-plugin-interface`](https://docs.rs/solana-geyser-plugin-interface/) - Rust interface for developing Solana Geyser plugins. +- [`agave-geyser-plugin-interface`](https://docs.rs/agave-geyser-plugin-interface/) - Rust interface for developing Solana Geyser plugins. Patch releases: diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index 3667c733e3f4d4..20f6516314fb02 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -56,7 +56,7 @@ Please update your PATH environment variable to include the solana programs: solana --version ``` -- After a successful install, `solana-install update` may be used to easily +- After a successful install, `agave-install update` may be used to easily update the Solana software to a newer version at any time. 
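One practical consequence of the crate renames in this patch worth spelling out: a downstream Geyser plugin keeps the same module path and trait, and only its dependency line changes from `solana-geyser-plugin-interface` to `agave-geyser-plugin-interface` (this is what the `sed` stopgap in `downstream-project-spl-common.sh` automates). A minimal sketch of a plugin against the renamed crate — assuming, as in current releases, that `name()` is the trait's only method without a default implementation:

```rust
use agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin;

#[derive(Debug, Default)]
struct NoopPlugin;

impl GeyserPlugin for NoopPlugin {
    // Every notification hook defaults to a no-op / disabled, so a
    // do-nothing plugin only has to provide its name.
    fn name(&self) -> &'static str {
        "noop-plugin"
    }
}

/// Entry point the validator's plugin manager resolves when loading the
/// dylib; the symbol name is unchanged by the rename.
#[no_mangle]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn GeyserPlugin {
    let plugin: Box<dyn GeyserPlugin> = Box::<NoopPlugin>::default();
    Box::into_raw(plugin)
}
```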
--- @@ -74,7 +74,7 @@ solana --version installer into a temporary directory: ```bash -cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs" +cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/agave-install-init-x86_64-pc-windows-msvc.exe --output C:\agave-install-tmp\agave-install-init.exe --create-dirs" ``` - Copy and paste the following command, then press Enter to install the latest @@ -82,7 +82,7 @@ cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-ins to allow the program to run. ```bash -C:\solana-install-tmp\solana-install-init.exe LATEST_SOLANA_RELEASE_VERSION +C:\agave-install-tmp\agave-install-init.exe LATEST_SOLANA_RELEASE_VERSION ``` - When the installer is finished, press Enter. @@ -97,12 +97,12 @@ C:\solana-install-tmp\solana-install-init.exe LATEST_SOLANA_RELEASE_VERSION solana --version ``` -- After a successful install, `solana-install update` may be used to easily +- After a successful install, `agave-install update` may be used to easily update the Solana software to a newer version at any time. ## Download Prebuilt Binaries -If you would rather not use `solana-install` to manage the install, you can +If you would rather not use `agave-install` to manage the install, you can manually download and install the binaries. ### Linux @@ -255,7 +255,7 @@ You can then run the following command to obtain the same result as with prebuilt binaries: ```bash -solana-install init +agave-install init ``` ## Use Homebrew diff --git a/docs/src/clusters/available.md b/docs/src/clusters/available.md index dfbca41672b499..52a7d469ad0cc5 100644 --- a/docs/src/clusters/available.md +++ b/docs/src/clusters/available.md @@ -41,10 +41,10 @@ export SOLANA_METRICS_CONFIG="host=https://metrics.solana.com:8086,db=devnet,u=s solana config set --url https://api.devnet.solana.com ``` -##### Example `solana-validator` command-line +##### Example `agave-validator` command-line ```bash -$ solana-validator \ +$ agave-validator \ --identity validator-keypair.json \ --vote-account vote-account-keypair.json \ --known-validator dv1ZAGvdsz5hHLwWXsVnM94hWf1pjbKVau1QVkaMJ92 \ @@ -93,10 +93,10 @@ export SOLANA_METRICS_CONFIG="host=https://metrics.solana.com:8086,db=tds,u=test solana config set --url https://api.testnet.solana.com ``` -##### Example `solana-validator` command-line +##### Example `agave-validator` command-line ```bash -$ solana-validator \ +$ agave-validator \ --identity validator-keypair.json \ --vote-account vote-account-keypair.json \ --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \ @@ -145,10 +145,10 @@ export SOLANA_METRICS_CONFIG="host=https://metrics.solana.com:8086,db=mainnet-be solana config set --url https://api.mainnet-beta.solana.com ``` -##### Example `solana-validator` command-line +##### Example `agave-validator` command-line ```bash -$ solana-validator \ +$ agave-validator \ --identity ~/validator-keypair.json \ --vote-account ~/vote-account-keypair.json \ --known-validator 7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2 \ diff --git a/docs/src/clusters/benchmark.md b/docs/src/clusters/benchmark.md index d913f9e5f16392..35978cdd0967dd 100644 --- a/docs/src/clusters/benchmark.md +++ b/docs/src/clusters/benchmark.md @@ -108,7 +108,7 @@ For example Generally we are using `debug` for infrequent debug messages, `trace` for potentially frequent messages and `info` for performance-related logging. 
-You can also attach to a running process with GDB. The leader's process is named _solana-validator_: +You can also attach to a running process with GDB. The leader's process is named _agave-validator_: ```bash sudo gdb diff --git a/docs/src/implemented-proposals/installer.md b/docs/src/implemented-proposals/installer.md index a3ad797171c5b8..c052aa7b4e54e5 100644 --- a/docs/src/implemented-proposals/installer.md +++ b/docs/src/implemented-proposals/installer.md @@ -13,16 +13,16 @@ This document proposes an easy to use software install and updater that can be u The easiest install method for supported platforms: ```bash -$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/solana-install-init.sh | sh +$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/agave-install-init.sh | sh ``` -This script will check github for the latest tagged release and download and run the `solana-install-init` binary from there. +This script will check github for the latest tagged release and download and run the `agave-install-init` binary from there. If additional arguments need to be specified during the installation, the following shell syntax is used: ```bash -$ init_args=.... # arguments for `solana-install-init ...` -$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/solana-install-init.sh | sh -s - ${init_args} +$ init_args=.... # arguments for `agave-install-init ...` +$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/agave-install-init.sh | sh -s - ${init_args} ``` ### Fetch and run a pre-built installer from a Github release @@ -30,9 +30,9 @@ $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/ With a well-known release URL, a pre-built binary can be obtained for supported platforms: ```bash -$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v1.0.0/solana-install-init-x86_64-apple-darwin -$ chmod +x ./solana-install-init -$ ./solana-install-init --help +$ curl -o agave-install-init https://github.com/solana-labs/solana/releases/download/v1.0.0/agave-install-init-x86_64-apple-darwin +$ chmod +x ./agave-install-init +$ ./agave-install-init --help ``` ### Build and run the installer from source @@ -51,16 +51,16 @@ Given a solana release tarball \(as created by `ci/publish-tarball.sh`\) that ha ```bash $ solana-keygen new -o update-manifest.json # <-- only generated once, the public key is shared with users -$ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json +$ agave-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json ``` ### Run a validator node that auto updates itself ```bash -$ solana-install init --pubkey 92DMonmBYXwEMHJ99c9ceRSpAmk9v6i3RdvDdXaVcrfj # <-- pubkey is obtained from whoever is deploying the updates -$ export PATH=~/.local/share/solana-install/bin:$PATH +$ agave-install init --pubkey 92DMonmBYXwEMHJ99c9ceRSpAmk9v6i3RdvDdXaVcrfj # <-- pubkey is obtained from whoever is deploying the updates +$ export PATH=~/.local/share/agave-install/bin:$PATH $ solana-keygen ... # <-- runs the latest solana-keygen -$ solana-install run solana-validator ... # <-- runs a validator, restarting it as necessary when an update is applied +$ agave-install run agave-validator ... 
# <-- runs a validator, restarting it as necessary when an update is applied ``` ## On-chain Update Manifest @@ -87,9 +87,9 @@ pub struct SignedUpdateManifest { } ``` -Note that the `manifest` field itself contains a corresponding signature \(`manifest_signature`\) to guard against man-in-the-middle attacks between the `solana-install` tool and the solana cluster RPC API. +Note that the `manifest` field itself contains a corresponding signature \(`manifest_signature`\) to guard against man-in-the-middle attacks between the `agave-install` tool and the solana cluster RPC API. -To guard against rollback attacks, `solana-install` will refuse to install an update with an older `timestamp_secs` than what is currently installed. +To guard against rollback attacks, `agave-install` will refuse to install an update with an older `timestamp_secs` than what is currently installed. ## Release Archive Contents @@ -101,17 +101,17 @@ A release archive is expected to be a tar file compressed with bzip2 with the fo - `/bin/` -- directory containing available programs in the release. - `solana-install` will symlink this directory to + `agave-install` will symlink this directory to - `~/.local/share/solana-install/bin` for use by the `PATH` environment + `~/.local/share/agave-install/bin` for use by the `PATH` environment variable. - `...` -- any additional files and directories are permitted -## solana-install Tool +## agave-install Tool -The `solana-install` tool is used by the user to install and update their cluster software. +The `agave-install` tool is used by the user to install and update their cluster software. It manages the following files and directories in the user's home directory: @@ -122,11 +122,11 @@ It manages the following files and directories in the user's home directory: ### Command-line Interface ```text -solana-install 0.16.0 +agave-install 0.16.0 The solana cluster software installer USAGE: - solana-install [OPTIONS] + agave-install [OPTIONS] FLAGS: -h, --help Prints help information @@ -145,11 +145,11 @@ SUBCOMMANDS: ``` ```text -solana-install-init +agave-install-init initializes a new installation USAGE: - solana-install init [OPTIONS] + agave-install init [OPTIONS] FLAGS: -h, --help Prints help information @@ -161,11 +161,11 @@ OPTIONS: ``` ```text -solana-install info +agave-install info displays information about the current installation USAGE: - solana-install info [FLAGS] + agave-install info [FLAGS] FLAGS: -h, --help Prints help information @@ -173,11 +173,11 @@ FLAGS: ``` ```text -solana-install deploy +agave-install deploy deploys a new update USAGE: - solana-install deploy + agave-install deploy FLAGS: -h, --help Prints help information @@ -188,22 +188,22 @@ ARGS: ``` ```text -solana-install update +agave-install update checks for an update, and if available downloads and applies it USAGE: - solana-install update + agave-install update FLAGS: -h, --help Prints help information ``` ```text -solana-install run +agave-install run Runs a program while periodically checking and applying software updates USAGE: - solana-install run [program_arguments]... + agave-install run [program_arguments]... 
FLAGS: -h, --help Prints help information diff --git a/docs/src/implemented-proposals/rpc-transaction-history.md b/docs/src/implemented-proposals/rpc-transaction-history.md index 54288ad9659bd7..607a79ce658b98 100644 --- a/docs/src/implemented-proposals/rpc-transaction-history.md +++ b/docs/src/implemented-proposals/rpc-transaction-history.md @@ -68,7 +68,7 @@ the results of BigTable queries more complicated but is not a significant issue. ## Data Population The ongoing population of instance data will occur on an epoch cadence through -the use of a new `solana-ledger-tool` command that will convert rocksdb data for +the use of a new `agave-ledger-tool` command that will convert rocksdb data for a given slot range into the instance schema. The same process will be run once, manually, to backfill the existing ledger diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md index 29ef42c81b7f5f..3e531b0160c571 100644 --- a/docs/src/operations/best-practices/general.md +++ b/docs/src/operations/best-practices/general.md @@ -23,12 +23,12 @@ watch past workshops through the ## Help with the validator command line -From within the Solana CLI, you can execute the `solana-validator` command with +From within the Solana CLI, you can execute the `agave-validator` command with the `--help` flag to get a better understanding of the flags and sub commands available. ``` -solana-validator --help +agave-validator --help ``` ## Restarting your validator @@ -49,14 +49,14 @@ solana leader-schedule Based on the current slot and the leader schedule, you can calculate open time windows where your validator is not expected to produce blocks. -Assuming you are ready to restart, you may use the `solana-validator exit` +Assuming you are ready to restart, you may use the `agave-validator exit` command. The command exits your validator process when an appropriate idle time window is reached. Assuming that you have systemd implemented for your validator process, the validator should restart automatically after the exit. See the below help command for details: ``` -solana-validator exit --help +agave-validator exit --help ``` ## Upgrading @@ -75,28 +75,28 @@ process. It is a best practice to always build your Solana binaries from source. If you build from source, you are certain that the code you are building has not been tampered with before the binary was created. You may also be able to optimize -your `solana-validator` binary to your specific hardware. +your `agave-validator` binary to your specific hardware. If you build from source on the validator machine (or a machine with the same CPU), you can target your specific architecture using the `-march` flag. Refer to the following doc for [instructions on building from source](../../cli/install.md#build-from-source). -### solana-install +### agave-install If you are not comfortable building from source, or you need to quickly install a new version to test something out, you could instead try using the -`solana-install` command. +`agave-install` command. Assuming you want to install Solana version `1.14.17`, you would execute the following: ``` -solana-install init 1.14.17 +agave-install init 1.14.17 ``` This command downloads the executable for `1.14.17` and installs it into a -`.local` directory. You can also look at `solana-install --help` for more +`.local` directory. You can also look at `agave-install --help` for more options. > **Note** this command only works if you already have the solana cli installed. 
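Putting the commands from this section together, a hedged upgrade sketch might look like the following (the ledger path is illustrative, and a process supervisor such as systemd is assumed to restart the validator after it exits):

```bash
agave-install init 1.14.17               # download and install the target release
agave-validator --ledger ~/ledger exit   # exit so the supervisor restarts on the new binary
solana --version                         # verify the version once the node is back up
```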
@@ -106,7 +106,7 @@ options. ### Restart For all install methods, the validator process will need to be restarted before -the newly installed version is in use. Use `solana-validator exit` to restart +the newly installed version is in use. Use `agave-validator exit` to restart your validator process. ### Verifying version @@ -132,13 +132,13 @@ have state locally. In other cases such as restarts for upgrades, a snapshot download should be avoided. To avoid downloading a snapshot on restart, add the following flag to the -`solana-validator` command: +`agave-validator` command: ``` --no-snapshot-fetch ``` -If you use this flag with the `solana-validator` command, make sure that you run +If you use this flag with the `agave-validator` command, make sure that you run `solana catchup ` after your validator starts to make sure that the validator is catching up in a reasonable time. After some time (potentially a few hours), if it appears that your validator continues to fall behind, then you @@ -199,7 +199,7 @@ It is important that you do not accidentally run out of funds in your identity account, as your node will stop voting. It is also important to note that this account keypair is the most vulnerable of the three keypairs in a vote account because the keypair for the identity account is stored on your validator when -running the `solana-validator` software. How much SOL you should store there is +running the `agave-validator` software. How much SOL you should store there is up to you. As a best practice, make sure to check the account regularly and refill or deduct from it as needed. To check the account balance do: @@ -207,7 +207,7 @@ refill or deduct from it as needed. To check the account balance do: solana balance validator-keypair.json ``` -> **Note** `solana-watchtower` can monitor for a minimum validator identity +> **Note** `agave-watchtower` can monitor for a minimum validator identity > balance. See [monitoring best practices](./monitoring.md) for details. ## Withdrawing From The Vote Account diff --git a/docs/src/operations/best-practices/monitoring.md b/docs/src/operations/best-practices/monitoring.md index 6d04fc38487be7..a0f2ef9df9fa22 100644 --- a/docs/src/operations/best-practices/monitoring.md +++ b/docs/src/operations/best-practices/monitoring.md @@ -4,34 +4,34 @@ sidebar_label: Monitoring pagination_label: "Best Practices: Validator Monitoring" --- -It is essential that you have monitoring in place on your validator. In the event that your validator is delinquent (behind the rest of the network) you want to respond immediately to fix the issue. One very useful tool to monitor your validator is [`solana-watchtower`](#solana-watchtower). +It is essential that you have monitoring in place on your validator. In the event that your validator is delinquent (behind the rest of the network) you want to respond immediately to fix the issue. One very useful tool to monitor your validator is [`agave-watchtower`](#agave-watchtower). ## Solana Watchtower -Solana Watchtower is an extremely useful monitoring tool that will regularly monitor the health of your validator. It can monitor your validator for delinquency then notify you on your application of choice: Slack, Discord, Telegram or Twilio. Additionally, `solana-watchtower` has the ability to monitor the health of the entire cluster so that you can be aware of any cluster wide problems. +Solana Watchtower is an extremely useful monitoring tool that will regularly monitor the health of your validator. 
It can monitor your validator for delinquency and then notify you on your application of choice: Slack, Discord, Telegram or Twilio. Additionally, `agave-watchtower` has the ability to monitor the health of the entire cluster so that you can be aware of any cluster-wide problems.

### Getting Started

-To get started with Solana Watchtower, run `solana-watchtower --help`. From the help menu, you can see the optional flags and an explanation of the command.
+To get started with Solana Watchtower, run `agave-watchtower --help`. From the help menu, you can see the optional flags and an explanation of the command.

Here is a sample command that will monitor a validator node with an identity public key of `2uTk98rqqwENevkPH2AHHzGHXgeGc1h6ku8hQUqWeXZp`:

```
-solana-watchtower --monitor-active-stake --validator-identity \
+agave-watchtower --monitor-active-stake --validator-identity \
  2uTk98rqqwENevkPH2AHHzGHXgeGc1h6ku8hQUqWeXZp
```

-The command will monitor your validator, but you will not get notifications unless you added the environment variables mentioned in `solana-watchtower --help`. Since getting each of these services setup for notifications is not straight forward, the next section will walk through [setting up watchtower notifications on Telegram](#setup-telegram-notifications).
+The command will monitor your validator, but you will not get notifications unless you have added the environment variables mentioned in `agave-watchtower --help`. Since getting each of these services set up for notifications is not straightforward, the next section will walk through [setting up watchtower notifications on Telegram](#setup-telegram-notifications).

### Best Practices

-It is a best practice to run the `solana-watchtower` command on a separate server from your validator.
+It is a best practice to run the `agave-watchtower` command on a separate server from your validator.

-In the case that you run `solana-watchtower` on the same computer as your `solana-validator` process, then during catastrophic events like a power outage, you will not be aware of the issue, because your `solana-watchtower` process will stop at the same time as your `solana-validator` process.
+In the case that you run `agave-watchtower` on the same computer as your `agave-validator` process, then during catastrophic events like a power outage, you will not be aware of the issue, because your `agave-watchtower` process will stop at the same time as your `agave-validator` process.

-Additionally, while running the `solana-watchtower` process manually with environment variables set in the terminal is a good way to test out the command, it is not operationally sound because the process will not be restarted when the terminal closes or during a system restart.
+Additionally, while running the `agave-watchtower` process manually with environment variables set in the terminal is a good way to test out the command, it is not operationally sound because the process will not be restarted when the terminal closes or during a system restart.

-Instead, you could run your `solana-watchtower` command as a system process similar to `solana-validator`. In the system process file, you can specify the environment variables for your bot.
+Instead, you could run your `agave-watchtower` command as a system process similar to `agave-validator`. In the system process file, you can specify the environment variables for your bot.

### Setup Telegram Notifications

To send validator health notifications to your Telegram account, we are going to:

1. Create a bot using BotFather
2.
Send a message to the bot 3. Create a Telegram group that will get the watchtower notifications 4. Add the environment variables to your command line environment -5. Restart the `solana-watchtower` command +5. Restart the `agave-watchtower` command #### Create a Bot Using BotFather @@ -61,7 +61,7 @@ In Telegram, click on the new message icon and then select new group. Find your Now that you have a bot setup, you will need to set the environment variables for the bot so that watchtower can send notifications. -First, recall the chat message that you got from _@BotFather_. In the message, there was an HTTP API token for your bot. The token will have this format: `389178471:MMTKMrnZB4ErUzJmuFIXTKE6DupLSgoa7h4o`. You will use that token to set the `TELEGRAM_BOT_TOKEN` environment variable. In the terminal where you plan to run `solana-watchtower`, run the following: +First, recall the chat message that you got from _@BotFather_. In the message, there was an HTTP API token for your bot. The token will have this format: `389178471:MMTKMrnZB4ErUzJmuFIXTKE6DupLSgoa7h4o`. You will use that token to set the `TELEGRAM_BOT_TOKEN` environment variable. In the terminal where you plan to run `agave-watchtower`, run the following: ``` export TELEGRAM_BOT_TOKEN= @@ -73,14 +73,14 @@ Next, in your browser, go to `https://api.telegram.org/bot/getUp The response should be in JSON. Search for the string `"chat":` in the JSON. The `id` value of that chat is your `TELEGRAM_CHAT_ID`. It will be a negative number like: `-781559558`. Remember to include the negative sign! If you cannot find `"chat":` in the JSON, then you may have to remove the bot from your chat group and add it again. -With your Telegram chat id in hand, export the environment variable where you plan to run `solana-watchtower`: +With your Telegram chat id in hand, export the environment variable where you plan to run `agave-watchtower`: ``` export TELEGRAM_CHAT_ID= ``` -#### Restart solana-watchtower +#### Restart agave-watchtower -Once your environment variables are set, restart `solana-watchtower`. You should see output about your validator. +Once your environment variables are set, restart `agave-watchtower`. You should see output about your validator. To test that your Telegram configuration is working properly, you could stop your validator briefly until it is labeled as delinquent. Up to a minute after the validator is delinquent, you should receive a message in the Telegram group from your bot. Start the validator again and verify that you get another message in your Telegram group from the bot. The message should say `all clear`. \ No newline at end of file diff --git a/docs/src/operations/guides/restart-cluster.md b/docs/src/operations/guides/restart-cluster.md index 85d4731d604c65..cda3f30a5a016d 100644 --- a/docs/src/operations/guides/restart-cluster.md +++ b/docs/src/operations/guides/restart-cluster.md @@ -11,7 +11,7 @@ pagination_label: "Validator Guides: Restart a Cluster" In Solana 1.14 or greater, run the following command to output the latest optimistically confirmed slot your validator observed: ```bash -solana-ledger-tool -l ledger latest-optimistic-slots +agave-ledger-tool -l ledger latest-optimistic-slots ``` In Solana 1.13 or less, the latest optimistically confirmed can be found by looking for the more recent occurrence of @@ -34,11 +34,11 @@ instead. ### Step 4. 
Create a new snapshot for slot `SLOT_X` with a hard fork at slot `SLOT_X` ```bash -$ solana-ledger-tool -l --snapshot-archive-path --incremental-snapshot-archive-path create-snapshot SLOT_X --hard-fork SLOT_X +$ agave-ledger-tool -l --snapshot-archive-path --incremental-snapshot-archive-path create-snapshot SLOT_X --hard-fork SLOT_X ``` The snapshots directory should now contain the new snapshot. -`solana-ledger-tool create-snapshot` will also output the new shred version, and bank hash value, +`agave-ledger-tool create-snapshot` will also output the new shred version, and bank hash value, call this NEW_SHRED_VERSION and NEW_BANK_HASH respectively. Adjust your validator's arguments: @@ -68,7 +68,7 @@ Post something like the following to #announcements (adjusting the text as appro > 2. a. Preferred method, start from your local ledger with: > > ```bash -> solana-validator +> agave-validator > --wait-for-supermajority SLOT_X # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --expected-bank-hash NEW_BANK_HASH # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --hard-fork SLOT_X # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART @@ -84,7 +84,7 @@ Post something like the following to #announcements (adjusting the text as appro > b. If your validator doesn't have ledger up to slot SLOT_X or if you have deleted your ledger, have it instead download a snapshot with: > > ```bash -> solana-validator +> agave-validator > --wait-for-supermajority SLOT_X # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --expected-bank-hash NEW_BANK_HASH # <-- NEW! IMPORTANT! REMOVE AFTER THIS RESTART > --entrypoint entrypoint.testnet.solana.com:8001 @@ -95,7 +95,7 @@ Post something like the following to #announcements (adjusting the text as appro > ... # <-- your other --identity/--vote-account/etc arguments > ``` > -> You can check for which slots your ledger has with: `solana-ledger-tool -l path/to/ledger bounds` +> You can check for which slots your ledger has with: `agave-ledger-tool -l path/to/ledger bounds` > > 3. Wait until 80% of the stake comes online > @@ -122,7 +122,7 @@ and create a new snapshot with additional `--destake-vote-account ` arguments for each of the non-responsive validator's vote account address ```bash -$ solana-ledger-tool -l ledger create-snapshot SLOT_X ledger --hard-fork SLOT_X \ +$ agave-ledger-tool -l ledger create-snapshot SLOT_X ledger --hard-fork SLOT_X \ --destake-vote-account \ --destake-vote-account \ . diff --git a/docs/src/operations/guides/validator-failover.md b/docs/src/operations/guides/validator-failover.md index 168a1a4312cec0..b7b3fea568194b 100644 --- a/docs/src/operations/guides/validator-failover.md +++ b/docs/src/operations/guides/validator-failover.md @@ -85,11 +85,11 @@ For more information on etcd TLS setup, please refer to https://etcd.io/docs/v3.5/op-guide/security/#example-2-client-to-server-authentication-with-https-client-certificates ### Primary Validator -The following additional `solana-validator` parameters are required to enable +The following additional `agave-validator` parameters are required to enable tower storage into etcd: ``` -solana-validator ... \ +agave-validator ... \ --tower-storage etcd \ --etcd-cacert-file certs/etcd-ca.pem \ --etcd-cert-file certs/validator.pem \ @@ -103,7 +103,7 @@ that your etcd endpoint remain accessible at all times. 
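Because the guide stresses that the etcd endpoint must stay reachable, a hedged health-check sketch can reuse the client certificates shown above; the `etcdctl` invocation and the endpoint address are assumptions, not part of this patch:

```bash
# Illustrative etcd v3 health probe using the same TLS material as the validator.
etcdctl --endpoints=https://etcd.example.net:2379 \
  --cacert=certs/etcd-ca.pem \
  --cert=certs/validator.pem \
  --key=certs/validator-key.pem \
  endpoint health
```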
### Secondary Validator Configure the secondary validator like the primary with the exception of the -following `solana-validator` command-line argument changes: +following `agave-validator` command-line argument changes: * Generate and use a secondary validator identity: `--identity secondary-validator-keypair.json` * Add `--no-check-vote-account` * Add `--authorized-voter validator-keypair.json` (where @@ -114,8 +114,8 @@ When both validators are running normally and caught up to the cluster, a failover from primary to secondary can be triggered by running the following command on the secondary validator: ```bash -$ solana-validator wait-for-restart-window --identity validator-keypair.json \ - && solana-validator set-identity validator-keypair.json +$ agave-validator wait-for-restart-window --identity validator-keypair.json \ + && agave-validator set-identity validator-keypair.json ``` The secondary validator will acquire a lock on the tower in etcd to ensure @@ -131,7 +131,7 @@ exit. However if/when the secondary validator restarts, it will do so using the secondary validator identity and thus the restart cycle is broken. ## Triggering a failover via monitoring -Monitoring of your choosing can invoke the `solana-validator set-identity +Monitoring of your choosing can invoke the `agave-validator set-identity validator-keypair.json` command mentioned in the previous section. It is not necessary to guarantee the primary validator has halted before failing diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md index 378783798b3ce8..d86c714be4e6a6 100644 --- a/docs/src/operations/guides/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -32,7 +32,7 @@ detail on cluster activity. ## Enabling CUDA If your machine has a GPU with CUDA installed \(Linux-only currently\), include -the `--cuda` argument to `solana-validator`. +the `--cuda` argument to `agave-validator`. When your validator is started look for the following log message to indicate that CUDA is enabled: `"[ solana::validator] CUDA is enabled"` @@ -47,7 +47,7 @@ the following commands. #### **Optimize sysctl knobs** ```bash -sudo bash -c "cat >/etc/sysctl.d/21-solana-validator.conf </etc/sysctl.d/21-agave-validator.conf <` -argument to `solana-validator`. You can specify multiple ones by repeating the argument `--known-validator --known-validator `. +argument to `agave-validator`. You can specify multiple ones by repeating the argument `--known-validator --known-validator `. This has two effects, one is when the validator is booting with `--only-known-rpc`, it will only ask that set of known nodes for downloading genesis and snapshot data. Another is that in combination with the `--halt-on-known-validators-accounts-hash-mismatch` option, it will monitor the merkle root hash of the entire accounts state of other known nodes on gossip and if the hashes produce any mismatch, @@ -277,13 +277,13 @@ account state divergence. 
Connect to the cluster by running: ```bash -solana-validator \ +agave-validator \ --identity ~/validator-keypair.json \ --vote-account ~/vote-account-keypair.json \ --rpc-port 8899 \ --entrypoint entrypoint.devnet.solana.com:8001 \ --limit-ledger-size \ - --log ~/solana-validator.log + --log ~/agave-validator.log ``` To force validator logging to the console add a `--log -` argument, otherwise @@ -296,7 +296,7 @@ The ledger will be placed in the `ledger/` directory by default, use the > [paper wallet seed phrase](../../cli/wallets/paper.md) > for your `--identity` and/or > `--authorized-voter` keypairs. To use these, pass the respective argument as -> `solana-validator --identity ASK ... --authorized-voter ASK ...` +> `agave-validator --identity ASK ... --authorized-voter ASK ...` > and you will be prompted to enter your seed phrases and optional passphrase. Confirm your validator is connected to the network by opening a new terminal and @@ -312,7 +312,7 @@ If your validator is connected, its public key and IP address will appear in the By default the validator will dynamically select available network ports in the 8000-10000 range, and may be overridden with `--dynamic-port-range`. For -example, `solana-validator --dynamic-port-range 11000-11020 ...` will restrict +example, `agave-validator --dynamic-port-range 11000-11020 ...` will restrict the validator to ports 11000-11020. ### Limiting ledger size to conserve disk space @@ -366,8 +366,8 @@ WantedBy=multi-user.target ``` Now create `/home/sol/bin/validator.sh` to include the desired -`solana-validator` command-line. Ensure that the 'exec' command is used to -start the validator process (i.e. "exec solana-validator ..."). This is +`agave-validator` command-line. Ensure that the 'exec' command is used to +start the validator process (i.e. "exec agave-validator ..."). This is important because without it, logrotate will end up killing the validator every time the logs are rotated. @@ -394,14 +394,14 @@ to be reverted and the issue reproduced before help can be provided. #### Log rotation -The validator log file, as specified by `--log ~/solana-validator.log`, can get +The validator log file, as specified by `--log ~/agave-validator.log`, can get very large over time and it's recommended that log rotation be configured. The validator will re-open its log file when it receives the `USR1` signal, which is the basic primitive that enables log rotation. If the validator is being started by a wrapper shell script, it is important to -launch the process with `exec` (`exec solana-validator ...`) when using logrotate. +launch the process with `exec` (`exec agave-validator ...`) when using logrotate. This will prevent the `USR1` signal from being sent to the script's process instead of the validator's, which will kill them both. @@ -409,13 +409,13 @@ instead of the validator's, which will kill them both. 
An example setup for the `logrotate`, which assumes that the validator is running as a systemd service called `sol.service` and writes a log file at -/home/sol/solana-validator.log: +/home/sol/agave-validator.log: ```bash # Setup log rotation cat > logrotate.sol </etc/sysctl.d/21-solana-validator.conf </etc/sysctl.d/21-agave-validator.conf < For more explanation on the flags used in the command, refer to the `solana-validator --help` command +> For more explanation on the flags used in the command, refer to the `agave-validator --help` command ``` #!/bin/bash -exec solana-validator \ +exec agave-validator \ --identity /home/sol/validator-keypair.json \ --known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \ --known-validator dDzy5SR3AXdYWVqbDEkVFdvSPCtS9ihF5kJkHCtXoFs \ diff --git a/docs/src/validator/geyser.md b/docs/src/validator/geyser.md index 769856303767d6..efea2e18e30269 100644 --- a/docs/src/validator/geyser.md +++ b/docs/src/validator/geyser.md @@ -24,20 +24,20 @@ implementation for the PostgreSQL database. ### Important Crates: -- [`solana-geyser-plugin-interface`] — This crate defines the plugin +- [`agave-geyser-plugin-interface`] — This crate defines the plugin interfaces. - [`solana-accountsdb-plugin-postgres`] — The crate for the referential plugin implementation for the PostgreSQL database. -[`solana-geyser-plugin-interface`]: https://docs.rs/solana-geyser-plugin-interface +[`agave-geyser-plugin-interface`]: https://docs.rs/agave-geyser-plugin-interface [`solana-accountsdb-plugin-postgres`]: https://docs.rs/solana-accountsdb-plugin-postgres [`solana-sdk`]: https://docs.rs/solana-sdk [`solana-transaction-status`]: https://docs.rs/solana-transaction-status ## The Plugin Interface -The Plugin interface is declared in [`solana-geyser-plugin-interface`]. It +The Plugin interface is declared in [`agave-geyser-plugin-interface`]. It is defined by the trait `GeyserPlugin`. The plugin should implement the trait and expose a "C" function `_create_plugin` to return the pointer to this trait. For example, in the referential implementation, the following code @@ -166,7 +166,7 @@ please refer to [`solana-sdk`] and [`solana-transaction-status`] The `slot` points to the slot the transaction is executed at. For more details, please refer to the Rust documentation in -[`solana-geyser-plugin-interface`]. +[`agave-geyser-plugin-interface`]. ## Example PostgreSQL Plugin diff --git a/geyser-plugin-interface/Cargo.toml b/geyser-plugin-interface/Cargo.toml index af99758b47d630..56f42fd4612cec 100644 --- a/geyser-plugin-interface/Cargo.toml +++ b/geyser-plugin-interface/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-geyser-plugin-interface" +name = "agave-geyser-plugin-interface" description = "The Solana Geyser plugin interface." 
-documentation = "https://docs.rs/solana-geyser-plugin-interface" +documentation = "https://docs.rs/agave-geyser-plugin-interface" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 037aedf8b87e89..d9a3b00f8dc4c8 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -327,7 +327,7 @@ pub trait GeyserPlugin: Any + Send + Sync + std::fmt::Debug { /// # Examples /// /// ``` - /// use solana_geyser_plugin_interface::geyser_plugin_interface::{GeyserPlugin, + /// use agave_geyser_plugin_interface::geyser_plugin_interface::{GeyserPlugin, /// GeyserPluginError, Result}; /// /// #[derive(Debug)] diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index d905248150b717..ebef2f637f642d 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -10,6 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +agave-geyser-plugin-interface = { workspace = true } bs58 = { workspace = true } crossbeam-channel = { workspace = true } json5 = { workspace = true } @@ -20,7 +21,6 @@ log = { workspace = true } serde_json = { workspace = true } solana-accounts-db = { workspace = true } solana-entry = { workspace = true } -solana-geyser-plugin-interface = { workspace = true } solana-ledger = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } diff --git a/geyser-plugin-manager/src/accounts_update_notifier.rs b/geyser-plugin-manager/src/accounts_update_notifier.rs index 7c7e3370fc00eb..90ab0b7998a35c 100644 --- a/geyser-plugin-manager/src/accounts_update_notifier.rs +++ b/geyser-plugin-manager/src/accounts_update_notifier.rs @@ -1,14 +1,14 @@ /// Module responsible for notifying plugins of account updates use { crate::geyser_plugin_manager::GeyserPluginManager, + agave_geyser_plugin_interface::geyser_plugin_interface::{ + ReplicaAccountInfoV3, ReplicaAccountInfoVersions, + }, log::*, solana_accounts_db::{ account_storage::meta::StoredAccountMeta, accounts_update_notifier_interface::AccountsUpdateNotifierInterface, }, - solana_geyser_plugin_interface::geyser_plugin_interface::{ - ReplicaAccountInfoV3, ReplicaAccountInfoVersions, - }, solana_measure::measure::Measure, solana_metrics::*, solana_sdk::{ diff --git a/geyser-plugin-manager/src/block_metadata_notifier.rs b/geyser-plugin-manager/src/block_metadata_notifier.rs index 76d203c5e0ed44..87f15f41fc0ae0 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier.rs @@ -3,10 +3,10 @@ use { block_metadata_notifier_interface::BlockMetadataNotifier, geyser_plugin_manager::GeyserPluginManager, }, - log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::{ + agave_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaBlockInfoV3, ReplicaBlockInfoVersions, }, + log::*, solana_measure::measure::Measure, solana_metrics::*, solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo}, diff --git a/geyser-plugin-manager/src/entry_notifier.rs b/geyser-plugin-manager/src/entry_notifier.rs index ea14592b615db8..da9a9698ed1540 100644 --- a/geyser-plugin-manager/src/entry_notifier.rs +++ b/geyser-plugin-manager/src/entry_notifier.rs @@ -1,11 +1,11 @@ /// Module responsible for notifying plugins about entries use 
{ crate::geyser_plugin_manager::GeyserPluginManager, - log::*, - solana_entry::entry::EntrySummary, - solana_geyser_plugin_interface::geyser_plugin_interface::{ + agave_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaEntryInfoV2, ReplicaEntryInfoVersions, }, + log::*, + solana_entry::entry::EntrySummary, solana_ledger::entry_notifier_interface::EntryNotifier, solana_measure::measure::Measure, solana_metrics::*, diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index a15f9e1318075d..3d0abe16899637 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -1,9 +1,9 @@ use { + agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, jsonrpc_core::{ErrorCode, Result as JsonRpcResult}, jsonrpc_server_utils::tokio::sync::oneshot::Sender as OneShotSender, libloading::Library, log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, std::{ ops::{Deref, DerefMut}, path::Path, @@ -442,8 +442,8 @@ mod tests { crate::geyser_plugin_manager::{ GeyserPluginManager, LoadedGeyserPlugin, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, }, + agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, libloading::Library, - solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, std::sync::{Arc, RwLock}, }; diff --git a/geyser-plugin-manager/src/slot_status_notifier.rs b/geyser-plugin-manager/src/slot_status_notifier.rs index 587abe2f79d4de..1557bb2d4d8c36 100644 --- a/geyser-plugin-manager/src/slot_status_notifier.rs +++ b/geyser-plugin-manager/src/slot_status_notifier.rs @@ -1,7 +1,7 @@ use { crate::geyser_plugin_manager::GeyserPluginManager, + agave_geyser_plugin_interface::geyser_plugin_interface::SlotStatus, log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::SlotStatus, solana_measure::measure::Measure, solana_metrics::*, solana_sdk::clock::Slot, diff --git a/geyser-plugin-manager/src/transaction_notifier.rs b/geyser-plugin-manager/src/transaction_notifier.rs index ab821e811047d2..b757c1202b377d 100644 --- a/geyser-plugin-manager/src/transaction_notifier.rs +++ b/geyser-plugin-manager/src/transaction_notifier.rs @@ -1,10 +1,10 @@ /// Module responsible for notifying plugins of transactions use { crate::geyser_plugin_manager::GeyserPluginManager, - log::*, - solana_geyser_plugin_interface::geyser_plugin_interface::{ + agave_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaTransactionInfoV2, ReplicaTransactionInfoVersions, }, + log::*, solana_measure::measure::Measure, solana_metrics::*, solana_rpc::transaction_notifier_interface::TransactionNotifier, diff --git a/install/Cargo.toml b/install/Cargo.toml index 588d4315df5f35..c40a0ee6e9eee3 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-install" +name = "agave-install" description = "The solana cluster software installer" -documentation = "https://docs.rs/solana-install" +documentation = "https://docs.rs/agave-install" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/install/solana-install-init.sh b/install/agave-install-init.sh similarity index 89% rename from install/solana-install-init.sh rename to install/agave-install-init.sh index 4f28e300be52ab..cf2d1babf3c306 100755 --- a/install/solana-install-init.sh +++ b/install/agave-install-init.sh @@ -10,7 +10,7 @@ # except according to those terms. 
# This is just a little script that can be downloaded from the internet to -# install solana-install. It just does platform detection, downloads the installer +# install agave-install. It just does platform detection, downloads the installer # and runs it. { # this ensures the entire script is downloaded # @@ -24,11 +24,11 @@ set -e usage() { cat 1>&2 < --pubkey + agave-install-init [FLAGS] [OPTIONS] --data_dir --pubkey FLAGS: -h, --help Prints help information @@ -81,7 +81,7 @@ main() { esac TARGET="${_cputype}-${_ostype}" - temp_dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t solana-install-init)" + temp_dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t agave-install-init)" ensure mkdir -p "$temp_dir" # Check for SOLANA_RELEASE environment variable override. Otherwise fetch @@ -101,8 +101,8 @@ main() { fi fi - download_url="$SOLANA_DOWNLOAD_ROOT/$release/solana-install-init-$TARGET" - solana_install_init="$temp_dir/solana-install-init" + download_url="$SOLANA_DOWNLOAD_ROOT/$release/agave-install-init-$TARGET" + solana_install_init="$temp_dir/agave-install-init" printf 'downloading %s installer\n' "$release" 1>&2 @@ -111,7 +111,7 @@ main() { ensure chmod u+x "$solana_install_init" if [ ! -x "$solana_install_init" ]; then printf '%s\n' "Cannot execute $solana_install_init (likely because of mounting /tmp as noexec)." 1>&2 - printf '%s\n' "Please copy the file to a location where you can execute binaries and run ./solana-install-init." 1>&2 + printf '%s\n' "Please copy the file to a location where you can execute binaries and run ./agave-install-init." 1>&2 exit 1 fi @@ -130,7 +130,7 @@ main() { } err() { - printf 'solana-install-init: %s\n' "$1" >&2 + printf 'agave-install-init: %s\n' "$1" >&2 exit 1 } diff --git a/install/install-help.sh b/install/install-help.sh index 9fb08afa6d14c9..7604777e378677 100755 --- a/install/install-help.sh +++ b/install/install-help.sh @@ -4,11 +4,11 @@ set -e cd "$(dirname "$0")"/.. 
cargo="$(readlink -f "./cargo")" -"$cargo" build --package solana-install +"$cargo" build --package agave-install export PATH=$PWD/target/debug:$PATH echo "\`\`\`manpage" -solana-install --help +agave-install --help echo "\`\`\`" echo "" @@ -16,7 +16,7 @@ commands=(init info deploy update run) for x in "${commands[@]}"; do echo "\`\`\`manpage" - solana-install "${x}" --help + agave-install "${x}" --help echo "\`\`\`" echo "" done diff --git a/install/src/bin/solana-install-init.rs b/install/src/bin/agave-install-init.rs similarity index 92% rename from install/src/bin/solana-install-init.rs rename to install/src/bin/agave-install-init.rs index ec888d8f452090..84c154ac12b35e 100644 --- a/install/src/bin/solana-install-init.rs +++ b/install/src/bin/agave-install-init.rs @@ -16,7 +16,7 @@ fn press_enter() { } fn main() { - solana_install::main_init().unwrap_or_else(|err| { + agave_install::main_init().unwrap_or_else(|err| { println!("Error: {err}"); press_enter(); exit(1); diff --git a/install/src/command.rs b/install/src/command.rs index 218e815467e9a9..4ae9e7ee38cedd 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -540,7 +540,7 @@ pub fn init( explicit_release: Option, ) -> Result<(), String> { let config = { - // Write new config file only if different, so that running |solana-install init| + // Write new config file only if different, so that running |agave-install init| // repeatedly doesn't unnecessarily re-download let mut current_config = Config::load(config_file).unwrap_or_default(); current_config.current_update_manifest = None; @@ -870,7 +870,7 @@ fn check_for_newer_github_release( prerelease_allowed: bool, ) -> Result, String> { let client = reqwest::blocking::Client::builder() - .user_agent("solana-install") + .user_agent("agave-install") .build() .map_err(|err| err.to_string())?; diff --git a/install/src/lib.rs b/install/src/lib.rs index 159317edd2e5a8..a28b963d65f825 100644 --- a/install/src/lib.rs +++ b/install/src/lib.rs @@ -281,7 +281,7 @@ pub fn main() -> Result<(), String> { pub fn main_init() -> Result<(), String> { solana_logger::setup(); - let matches = App::new("solana-install-init") + let matches = App::new("agave-install-init") .about("Initializes a new installation") .version(solana_version::version!()) .arg({ diff --git a/install/src/main.rs b/install/src/main.rs index c7b15aa6a67206..245f09825ddc6a 100644 --- a/install/src/main.rs +++ b/install/src/main.rs @@ -1,3 +1,3 @@ fn main() -> Result<(), String> { - solana_install::main() + agave_install::main() } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 6da42940a4ba7f..cb87a0e16f4a36 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-ledger-tool" +name = "agave-ledger-tool" description = "Blockchain, Rebuilt for Scale" -documentation = "https://docs.rs/solana-ledger-tool" +documentation = "https://docs.rs/agave-ledger-tool" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs index 453a801702f864..fed6abde2f2d08 100644 --- a/ledger-tool/src/blockstore.rs +++ b/ledger-tool/src/blockstore.rs @@ -359,7 +359,7 @@ pub fn blockstore_subcommands<'a, 'b>(hidden: bool) -> Vec> { and timestamps.", ) // This command is important in cluster restart scenarios, so do not hide it ever - // such that the subcommand will be visible as the top level of solana-ledger-tool + // such that the subcommand will be visible as the 
top level of agave-ledger-tool .arg( Arg::with_name("num_slots") .long("num-slots") diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 116b21527ae4d8..c05cc6c2d64cd0 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -187,14 +187,14 @@ pub fn load_and_process_ledger( } let account_paths = if let Some(account_paths) = arg_matches.value_of("account_paths") { - // If this blockstore access is Primary, no other process (solana-validator) can hold + // If this blockstore access is Primary, no other process (agave-validator) can hold // Primary access. So, allow a custom accounts path without worry of wiping the accounts - // of solana-validator. + // of agave-validator. if !blockstore.is_primary_access() { // Attempt to open the Blockstore in Primary access; if successful, no other process // was holding Primary so allow things to proceed with custom accounts path. Release - // the Primary access instead of holding it to give priority to solana-validator over - // solana-ledger-tool should solana-validator start before we've finished. + // the Primary access instead of holding it to give priority to agave-validator over + // agave-ledger-tool should agave-validator start before we've finished. info!( "Checking if another process currently holding Primary access to {:?}", blockstore.ledger_path() diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 18ba491ea34bd1..8b6b44edae61f6 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -431,7 +431,7 @@ impl Rocks { info!( "Opening Rocks with secondary (read only) access at: {secondary_path:?}. \ This secondary access could temporarily degrade other accesses, such as \ - by solana-validator" + by agave-validator" ); DB::open_cf_descriptors_as_secondary( &db_options, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 3b18ba44bf2d03..20eef0bb0e3e2d 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2321,13 +2321,13 @@ fn test_hard_fork_with_gap_in_roots() { ); // create hard-forked snapshot only for validator a, emulating the manual cluster restart - // procedure with `solana-ledger-tool create-snapshot` + // procedure with `agave-ledger-tool create-snapshot` let genesis_slot = 0; { let blockstore_a = Blockstore::open(&val_a_ledger_path).unwrap(); create_snapshot_to_hard_fork(&blockstore_a, hard_fork_slot, vec![hard_fork_slot]); - // Intentionally make solana-validator unbootable by replaying blocks from the genesis to + // Intentionally make agave-validator unbootable by replaying blocks from the genesis to // ensure the hard-forked snapshot is used always. Otherwise, we couldn't create a gap // in the ledger roots column family reliably. 
// There was a bug which caused the hard-forked snapshot at an unrooted slot to forget diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 5afc543b2f0032..2872af5cc426af 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -14,9 +14,9 @@ if [[ "$SOLANA_GPU_MISSING" -eq 1 ]]; then fi if [[ -n $SOLANA_CUDA ]]; then - program=$solana_validator_cuda + program=$agave_validator_cuda else - program=$solana_validator + program=$agave_validator fi no_restart=0 diff --git a/multinode-demo/common.sh b/multinode-demo/common.sh index 9ae9331cb7a11d..1643208947b643 100644 --- a/multinode-demo/common.sh +++ b/multinode-demo/common.sh @@ -40,6 +40,8 @@ else if [[ -z $program ]]; then crate="cli" program="solana" + elif [[ $program == "validator" || $program == "ledger-tool" || $program == "watchtower" || $program == "install" ]]; then + program="agave-$program" else program="solana-$program" fi @@ -63,8 +65,8 @@ fi solana_bench_tps=$(solana_program bench-tps) solana_faucet=$(solana_program faucet) -solana_validator=$(solana_program validator) -solana_validator_cuda="$solana_validator --cuda" +agave_validator=$(solana_program validator) +agave_validator_cuda="$agave_validator --cuda" solana_genesis=$(solana_program genesis) solana_gossip=$(solana_program gossip) solana_keygen=$(solana_program keygen) diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 487154101ac979..efb7a6afd56ea0 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -64,7 +64,7 @@ while [[ -n $1 ]]; do elif [[ $1 = --no-airdrop ]]; then airdrops_enabled=0 shift - # solana-validator options + # agave-validator options elif [[ $1 = --expected-genesis-hash ]]; then args+=("$1" "$2") shift 2 @@ -270,9 +270,9 @@ if [[ $maybeRequireTower = true ]]; then fi if [[ -n $SOLANA_CUDA ]]; then - program=$solana_validator_cuda + program=$agave_validator_cuda else - program=$solana_validator + program=$agave_validator fi set -e diff --git a/net/net.sh b/net/net.sh index a2d16cef20f417..36bc48efdb7861 100755 --- a/net/net.sh +++ b/net/net.sh @@ -122,7 +122,7 @@ Operate a configured testnet sanity/start-specific options: -F - Discard validator nodes that didn't bootup successfully - -o noInstallCheck - Skip solana-install sanity + -o noInstallCheck - Skip agave-install sanity -o rejectExtraNodes - Require the exact number of nodes stop-specific options: @@ -138,7 +138,7 @@ Operate a configured testnet --netem-cmd - Optional command argument to netem. Default is "add". Use "cleanup" to remove rules. update-specific options: - --platform linux|osx|windows - Deploy the tarball using 'solana-install deploy ...' for the + --platform linux|osx|windows - Deploy the tarball using 'agave-install deploy ...' 
for the given platform (multiple platforms may be specified) (-t option must be supplied as well) @@ -514,11 +514,11 @@ deployUpdate() { declare bootstrapLeader=${validatorIpList[0]} for updatePlatform in $updatePlatforms; do - echo "--- Deploying solana-install update: $updatePlatform" + echo "--- Deploying agave-install update: $updatePlatform" ( set -x - scripts/solana-install-update-manifest-keypair.sh "$updatePlatform" + scripts/agave-install-update-manifest-keypair.sh "$updatePlatform" timeout 30s scp "${sshOptions[@]}" \ update_manifest_keypair.json "$bootstrapLeader:solana/update_manifest_keypair.json" diff --git a/net/remote/remote-deploy-update.sh b/net/remote/remote-deploy-update.sh index dd772927c0e119..3a71cf5725123e 100755 --- a/net/remote/remote-deploy-update.sh +++ b/net/remote/remote-deploy-update.sh @@ -35,6 +35,6 @@ loadConfigFile PATH="$HOME"/.cargo/bin:"$PATH" set -x -scripts/solana-install-deploy.sh \ +scripts/agave-install-deploy.sh \ --keypair config/faucet.json \ localhost "$releaseChannel" "$updatePlatform" diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index aeb920bd50bab0..b7d224088da9f9 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -121,7 +121,7 @@ cat >> ~/solana/on-reboot < system-stats.pid if ${GPU_CUDA_OK} && [[ -e /dev/nvidia0 ]]; then - echo Selecting solana-validator-cuda + echo Selecting agave-validator-cuda export SOLANA_CUDA=1 elif ${GPU_FAIL_IF_NONE} ; then echo "Expected GPU, found none!" @@ -257,13 +257,13 @@ EOF if [[ -n "$maybeWarpSlot" ]]; then # shellcheck disable=SC2086 # Do not want to quote $maybeWarSlot - solana-ledger-tool -l config/bootstrap-validator create-snapshot 0 config/bootstrap-validator $maybeWarpSlot + agave-ledger-tool -l config/bootstrap-validator create-snapshot 0 config/bootstrap-validator $maybeWarpSlot fi - solana-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version + agave-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version if [[ -n "$maybeWaitForSupermajority" ]]; then - bankHash=$(solana-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) + bankHash=$(agave-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash" echo "$bankHash" > config/bank-hash fi diff --git a/net/remote/remote-sanity.sh b/net/remote/remote-sanity.sh index 8c36e99ffdf936..91dae4b57336fa 100755 --- a/net/remote/remote-sanity.sh +++ b/net/remote/remote-sanity.sh @@ -65,7 +65,7 @@ local|tar|skip) export USE_INSTALL=1 solana_cli=solana solana_gossip=solana-gossip - solana_install=solana-install + solana_install=agave-install ;; *) echo "Unknown deployment method: $deployMethod" @@ -122,7 +122,7 @@ else fi if $installCheck && [[ -r update_manifest_keypair.json ]]; then - echo "--- $sanityTargetIp: solana-install test" + echo "--- $sanityTargetIp: agave-install test" ( set -x diff --git a/notifier/src/lib.rs b/notifier/src/lib.rs index a369225772492c..75406d2fbdae33 100644 --- a/notifier/src/lib.rs +++ b/notifier/src/lib.rs @@ -19,7 +19,7 @@ /// /// To receive a Twilio SMS notification on failure, having a Twilio account, /// and a sending number owned by that account, -/// define environment variable before running `solana-watchtower`: +/// define environment variable before running `agave-watchtower`: /// ```bash /// export 
TWILIO_CONFIG='ACCOUNT=,TOKEN=,TO=,FROM=' /// ``` @@ -208,7 +208,7 @@ impl Notifier { NotificationType::Resolve { ref incident } => incident.clone().to_string(), }; - let data = json!({"payload":{"summary":msg,"source":"solana-watchtower","severity":"critical"},"routing_key":routing_key,"event_action":event_action,"dedup_key":dedup_key}); + let data = json!({"payload":{"summary":msg,"source":"agave-watchtower","severity":"critical"},"routing_key":routing_key,"event_action":event_action,"dedup_key":dedup_key}); let url = "https://events.pagerduty.com/v2/enqueue"; if let Err(err) = self.client.post(url).json(&data).send() { diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2829cf27b6da6f..7546c56bd2b26a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -63,6 +63,80 @@ dependencies = [ "zeroize", ] +[[package]] +name = "agave-geyser-plugin-interface" +version = "1.19.0" +dependencies = [ + "log", + "solana-sdk", + "solana-transaction-status", + "thiserror", +] + +[[package]] +name = "agave-validator" +version = "1.19.0" +dependencies = [ + "agave-geyser-plugin-interface", + "chrono", + "clap 2.33.3", + "console", + "core_affinity", + "crossbeam-channel", + "fd-lock", + "indicatif", + "itertools", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "jsonrpc-server-utils", + "lazy_static", + "libc", + "libloading", + "log", + "num_cpus", + "rand 0.8.5", + "rayon", + "serde", + "serde_json", + "serde_yaml", + "signal-hook", + "solana-accounts-db", + "solana-clap-utils", + "solana-cli-config", + "solana-core", + "solana-download-utils", + "solana-entry", + "solana-faucet", + "solana-genesis-utils", + "solana-geyser-plugin-manager", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-metrics", + "solana-net-utils", + "solana-perf", + "solana-poh", + "solana-rpc", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-runtime", + "solana-sdk", + "solana-send-transaction-service", + "solana-storage-bigtable", + "solana-streamer", + "solana-svm", + "solana-test-validator", + "solana-tpu-client", + "solana-unified-scheduler-pool", + "solana-version", + "solana-vote-program", + "symlink", + "thiserror", + "tikv-jemallocator", +] + [[package]] name = "ahash" version = "0.7.6" @@ -5044,20 +5118,11 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "solana-geyser-plugin-interface" -version = "1.19.0" -dependencies = [ - "log", - "solana-sdk", - "solana-transaction-status", - "thiserror", -] - [[package]] name = "solana-geyser-plugin-manager" version = "1.19.0" dependencies = [ + "agave-geyser-plugin-interface", "bs58", "crossbeam-channel", "json5", @@ -5068,7 +5133,6 @@ dependencies = [ "serde_json", "solana-accounts-db", "solana-entry", - "solana-geyser-plugin-interface", "solana-ledger", "solana-measure", "solana-metrics", @@ -6055,11 +6119,11 @@ dependencies = [ name = "solana-sbf-rust-simulation" version = "1.19.0" dependencies = [ + "agave-validator", "solana-logger", "solana-program", "solana-program-test", "solana-sdk", - "solana-validator", ] [[package]] @@ -6462,70 +6526,6 @@ dependencies = [ "solana-vote", ] -[[package]] -name = "solana-validator" -version = "1.19.0" -dependencies = [ - "chrono", - "clap 2.33.3", - "console", - "core_affinity", - "crossbeam-channel", - "fd-lock", - "indicatif", - "itertools", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-ipc-server", - "jsonrpc-server-utils", - "lazy_static", - "libc", - "libloading", - "log", - "num_cpus", - "rand 
0.8.5", - "rayon", - "serde", - "serde_json", - "serde_yaml", - "signal-hook", - "solana-accounts-db", - "solana-clap-utils", - "solana-cli-config", - "solana-core", - "solana-download-utils", - "solana-entry", - "solana-faucet", - "solana-genesis-utils", - "solana-geyser-plugin-interface", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-ledger", - "solana-logger", - "solana-metrics", - "solana-net-utils", - "solana-perf", - "solana-poh", - "solana-rpc", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-runtime", - "solana-sdk", - "solana-send-transaction-service", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - "solana-test-validator", - "solana-tpu-client", - "solana-unified-scheduler-pool", - "solana-version", - "solana-vote-program", - "symlink", - "thiserror", - "tikv-jemallocator", -] - [[package]] name = "solana-version" version = "1.19.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 8a99a0f005471a..dee6a947b1965d 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -46,7 +46,7 @@ solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.19.0", default- solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.19.0" } solana-sdk = { path = "../../sdk", version = "=1.19.0" } solana-transaction-status = { path = "../../transaction-status", version = "=1.19.0" } -solana-validator = { path = "../../validator", version = "=1.19.0" } +agave-validator = { path = "../../validator", version = "=1.19.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.19.0" } solana-svm = { path = "../../svm", version = "=1.19.0" } solana_rbpf = "=0.8.0" diff --git a/programs/sbf/rust/simulation/Cargo.toml b/programs/sbf/rust/simulation/Cargo.toml index 7091ef9d5ade0c..e9728e5916b801 100644 --- a/programs/sbf/rust/simulation/Cargo.toml +++ b/programs/sbf/rust/simulation/Cargo.toml @@ -16,10 +16,10 @@ test-bpf = [] solana-program = { workspace = true } [dev-dependencies] +agave-validator = { workspace = true } solana-logger = { workspace = true } solana-program-test = { workspace = true } solana-sdk = { workspace = true } -solana-validator = { workspace = true } [lib] crate-type = ["cdylib", "lib"] diff --git a/programs/sbf/rust/simulation/tests/validator.rs b/programs/sbf/rust/simulation/tests/validator.rs index 3044ad9a642629..17de51e665e3ec 100644 --- a/programs/sbf/rust/simulation/tests/validator.rs +++ b/programs/sbf/rust/simulation/tests/validator.rs @@ -1,13 +1,13 @@ #![cfg(feature = "test-bpf")] use { + agave_validator::test_validator::*, solana_program::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, sysvar, }, solana_sdk::{signature::Signer, transaction::Transaction}, - solana_validator::test_validator::*, }; #[test] diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index 408df60454e4e1..b79e91f681b97f 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -33,7 +33,7 @@ //! By default the [`block_subscribe`] and [`vote_subscribe`] events are //! disabled on RPC nodes. They can be enabled by passing //! `--rpc-pubsub-enable-block-subscription` and -//! `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. When these +//! `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. When these //! methods are disabled, the RPC server will return a "Method not found" error //! message. //! 
@@ -381,7 +381,7 @@ impl PubsubClient { /// Receives messages of type [`RpcBlockUpdate`] when a block is confirmed or finalized. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-block-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-block-subscription` to `agave-validator`. /// /// # RPC Reference /// @@ -452,7 +452,7 @@ impl PubsubClient { /// votes are observed prior to confirmation and may never be confirmed. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. /// /// # RPC Reference /// diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index e1a2dd34546528..70769619db1f4d 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -32,7 +32,7 @@ //! By default the [`block_subscribe`] and [`vote_subscribe`] events are //! disabled on RPC nodes. They can be enabled by passing //! `--rpc-pubsub-enable-block-subscription` and -//! `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. When these +//! `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. When these //! methods are disabled, the RPC server will return a "Method not found" error //! message. //! @@ -416,7 +416,7 @@ impl PubsubClient { /// Receives messages of type [`RpcBlockUpdate`] when a block is confirmed or finalized. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-block-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-block-subscription` to `agave-validator`. /// /// # RPC Reference /// @@ -578,7 +578,7 @@ impl PubsubClient { /// votes are observed prior to confirmation and may never be confirmed. /// /// This method is disabled by default. It can be enabled by passing - /// `--rpc-pubsub-enable-vote-subscription` to `solana-validator`. + /// `--rpc-pubsub-enable-vote-subscription` to `agave-validator`. /// /// # RPC Reference /// diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index e7db982026f82a..9e243f0836aa0f 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -1,6 +1,6 @@ fn main() { println!( - r##"rbpf-cli is replaced by solana-ledger-tool program run subcommand. -Please, use 'solana-ledger-tool program run --help' for more information."## + r##"rbpf-cli is replaced by agave-ledger-tool program run subcommand. 
+Please, use 'agave-ledger-tool program run --help' for more information."## ); } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index caeb0953109fbb..01f623dccdc108 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -2561,7 +2561,7 @@ pub mod rpc_minimal { #[rpc(meta, name = "getVersion")] fn get_version(&self, meta: Self::Metadata) -> Result; - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getVoteAccounts")] fn get_vote_accounts( @@ -2570,7 +2570,7 @@ pub mod rpc_minimal { config: Option, ) -> Result; - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getLeaderSchedule")] fn get_leader_schedule( @@ -2696,7 +2696,7 @@ pub mod rpc_minimal { }) } - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_vote_accounts( &self, @@ -2707,7 +2707,7 @@ pub mod rpc_minimal { meta.get_vote_accounts(config) } - // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_leader_schedule( &self, diff --git a/scripts/solana-install-deploy.sh b/scripts/agave-install-deploy.sh similarity index 90% rename from scripts/solana-install-deploy.sh rename to scripts/agave-install-deploy.sh index ea77ca34bc9ea3..a8f8eeb65b3857 100755 --- a/scripts/solana-install-deploy.sh +++ b/scripts/agave-install-deploy.sh @@ -26,7 +26,7 @@ if [[ -z $URL || -z $TAG ]]; then fi if [[ ! 
-f update_manifest_keypair.json ]]; then - "$SOLANA_ROOT"/scripts/solana-install-update-manifest-keypair.sh "$OS" + "$SOLANA_ROOT"/scripts/agave-install-update-manifest-keypair.sh "$OS" fi case "$OS" in @@ -76,4 +76,4 @@ if [[ $balance = "0 lamports" ]]; then fi # shellcheck disable=SC2086 # Don't want to double quote $maybeKeypair -solana-install deploy $maybeKeypair --url "$URL" "$DOWNLOAD_URL" update_manifest_keypair.json +agave-install deploy $maybeKeypair --url "$URL" "$DOWNLOAD_URL" update_manifest_keypair.json diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 549aa15550b0eb..029b1fbf27943d 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -91,8 +91,8 @@ if [[ $CI_OS_NAME = windows ]]; then cargo-test-bpf cargo-test-sbf solana - solana-install - solana-install-init + agave-install + agave-install-init solana-keygen solana-stake-accounts solana-test-validator @@ -106,12 +106,12 @@ else solana-bench-tps solana-faucet solana-gossip - solana-install + agave-install solana-keygen - solana-ledger-tool + agave-ledger-tool solana-log-analyzer solana-net-shaper - solana-validator + agave-validator rbpf-cli ) @@ -123,11 +123,11 @@ else cargo-test-bpf cargo-test-sbf solana-dos - solana-install-init + agave-install-init solana-stake-accounts solana-test-validator solana-tokens - solana-watchtower + agave-watchtower ) fi diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 8719af96a212e4..6a4f798c633e26 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -31,7 +31,7 @@ source ci/rust-version.sh nightly declare tainted_packages=( solana-accounts-bench solana-banking-bench - solana-ledger-tool + agave-ledger-tool ) # convert to comma separeted (ref: https://stackoverflow.com/a/53839433) diff --git a/scripts/run.sh b/scripts/run.sh index 699bfce3e253e3..2d8e1ec88ac450 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -23,9 +23,11 @@ fi PATH=$PWD/target/$profile:$PATH ok=true -for program in solana-{faucet,genesis,keygen,validator}; do +for program in solana-{faucet,genesis,keygen}; do $program -V || ok=false done +agave-validator -V || ok=false + $ok || { echo echo "Unable to locate required programs. Try building them first with:" @@ -115,7 +117,7 @@ args=( --no-os-network-limits-test ) # shellcheck disable=SC2086 -solana-validator "${args[@]}" $SOLANA_RUN_SH_VALIDATOR_ARGS & +agave-validator "${args[@]}" $SOLANA_RUN_SH_VALIDATOR_ARGS & validator=$! wait "$validator" diff --git a/system-test/abi-testcases/mixed-validator-test.sh b/system-test/abi-testcases/mixed-validator-test.sh index 8ab673b26a3d21..c0400560dc519e 100755 --- a/system-test/abi-testcases/mixed-validator-test.sh +++ b/system-test/abi-testcases/mixed-validator-test.sh @@ -30,14 +30,14 @@ solanaInstallGlobalOpts=( bootstrapInstall() { declare v=$1 if [[ ! 
-h $solanaInstallDataDir/active_release ]]; then - sh "$SOLANA_ROOT"/install/solana-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}" + sh "$SOLANA_ROOT"/install/agave-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}" fi export PATH="$solanaInstallDataDir/active_release/bin/:$PATH" } bootstrapInstall "$baselineVersion" for v in "${otherVersions[@]}"; do - solana-install-init "${solanaInstallGlobalOpts[@]}" "$v" + agave-install-init "${solanaInstallGlobalOpts[@]}" "$v" solana -V done @@ -113,7 +113,7 @@ for v in "${otherVersions[@]}"; do ( set -x tmux new-window -t abi -n "$v" " \ - $SOLANA_BIN/solana-validator \ + $SOLANA_BIN/agave-validator \ --ledger $ledger \ --no-snapshot-fetch \ --entrypoint 127.0.0.1:8001 \ diff --git a/system-test/stability-testcases/gossip-dos-test.sh b/system-test/stability-testcases/gossip-dos-test.sh index f8afade75dc847..68c3c540d5948c 100755 --- a/system-test/stability-testcases/gossip-dos-test.sh +++ b/system-test/stability-testcases/gossip-dos-test.sh @@ -19,14 +19,14 @@ solanaInstallGlobalOpts=( bootstrapInstall() { declare v=$1 if [[ ! -h $solanaInstallDataDir/active_release ]]; then - sh "$SOLANA_ROOT"/install/solana-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}" + sh "$SOLANA_ROOT"/install/agave-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}" fi export PATH="$solanaInstallDataDir/active_release/bin/:$PATH" } bootstrapInstall "edge" -solana-install-init --version -solana-install-init edge +agave-install-init --version +agave-install-init edge solana-gossip --version solana-dos --version diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 5cc76a810116b3..362a07343b5e4a 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -1,8 +1,8 @@ [package] -name = "solana-validator" +name = "agave-validator" description = "Blockchain, Rebuilt for Scale" -documentation = "https://docs.rs/solana-validator" -default-run = "solana-validator" +documentation = "https://docs.rs/agave-validator" +default-run = "agave-validator" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } @@ -11,6 +11,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +agave-geyser-plugin-interface = { workspace = true } chrono = { workspace = true, features = ["default", "serde"] } clap = { workspace = true } console = { workspace = true } @@ -41,7 +42,6 @@ solana-download-utils = { workspace = true } solana-entry = { workspace = true } solana-faucet = { workspace = true } solana-genesis-utils = { workspace = true } -solana-geyser-plugin-interface = { workspace = true } solana-geyser-plugin-manager = { workspace = true } solana-gossip = { workspace = true } solana-ledger = { workspace = true } diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 42f5a0634c0cfa..68e6bcca4fd96f 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -1,4 +1,8 @@ use { + agave_validator::{ + admin_rpc_service, cli, dashboard::Dashboard, ledger_lockfile, lock_ledger, + println_name_value, redirect_stderr_to_file, + }, clap::{crate_name, value_t, value_t_or_exit, values_t_or_exit}, crossbeam_channel::unbounded, itertools::Itertools, @@ -28,10 +32,6 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::*, - solana_validator::{ - admin_rpc_service, cli, dashboard::Dashboard, ledger_lockfile, lock_ledger, - println_name_value, redirect_stderr_to_file, - }, std::{ collections::HashSet, fs, 
io, diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 8d5457744a23b8..12bbd0b21001c9 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -447,7 +447,7 @@ pub fn attempt_download_genesis_and_snapshot( ) .unwrap_or_else(|err| { // Consider failures here to be more likely due to user error (eg, - // incorrect `solana-validator` command-line arguments) rather than the + // incorrect `agave-validator` command-line arguments) rather than the // RPC node failing. // // Power users can always use the `--no-check-vote-account` option to diff --git a/validator/src/main.rs b/validator/src/main.rs index ec70796130e7d2..9741a2aecd68a8 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -2,6 +2,15 @@ #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; use { + agave_validator::{ + admin_rpc_service, + admin_rpc_service::{load_staked_nodes_overrides, StakedNodesOverrides}, + bootstrap, + cli::{app, warn_for_deprecated_arguments, DefaultArgs}, + dashboard::Dashboard, + ledger_lockfile, lock_ledger, new_spinner_progress_bar, println_name_value, + redirect_stderr_to_file, + }, clap::{crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, ArgMatches}, console::style, crossbeam_channel::unbounded, @@ -60,15 +69,6 @@ use { solana_streamer::socket::SocketAddrSpace, solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, - solana_validator::{ - admin_rpc_service, - admin_rpc_service::{load_staked_nodes_overrides, StakedNodesOverrides}, - bootstrap, - cli::{app, warn_for_deprecated_arguments, DefaultArgs}, - dashboard::Dashboard, - ledger_lockfile, lock_ledger, new_spinner_progress_bar, println_name_value, - redirect_stderr_to_file, - }, std::{ collections::{HashSet, VecDeque}, env, @@ -917,7 +917,7 @@ pub fn main() { let logfile = matches .value_of("logfile") .map(|s| s.into()) - .unwrap_or_else(|| format!("solana-validator-{}.log", identity_keypair.pubkey())); + .unwrap_or_else(|| format!("agave-validator-{}.log", identity_keypair.pubkey())); if logfile == "-" { None diff --git a/watchtower/Cargo.toml b/watchtower/Cargo.toml index d8bad3cf4d18f0..4088ee7d9b51ab 100644 --- a/watchtower/Cargo.toml +++ b/watchtower/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-watchtower" +name = "agave-watchtower" description = "Blockchain, Rebuilt for Scale" -documentation = "https://docs.rs/solana-watchtower" +documentation = "https://docs.rs/agave-watchtower" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/watchtower/README.md b/watchtower/README.md index 33a13939cd260c..ab219be67575eb 100644 --- a/watchtower/README.md +++ b/watchtower/README.md @@ -1,4 +1,4 @@ -The `solana-watchtower` program is used to monitor the health of a cluster. It +The `agave-watchtower` program is used to monitor the health of a cluster. It periodically polls the cluster over an RPC API to confirm that the transaction count is advancing, new blockhashes are available, and no validators are delinquent. 
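As a rough sketch of the transaction-count check this describes (assuming the `solana-rpc-client` crate and a local RPC endpoint; the polling interval and function shape are illustrative, not watchtower's actual code):

```rust
use {
    solana_rpc_client::rpc_client::RpcClient,
    std::{thread, time::Duration},
};

fn main() {
    let client = RpcClient::new("http://localhost:8899".to_string());
    // A healthy cluster advances the transaction count between samples.
    let before = client.get_transaction_count().expect("rpc request failed");
    thread::sleep(Duration::from_secs(10));
    let after = client.get_transaction_count().expect("rpc request failed");
    if after <= before {
        // This is the kind of condition on which agave-watchtower would notify.
        eprintln!("transaction count is not advancing: {before} -> {after}");
    }
}
```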
Results are reported as InfluxDB metrics, with an optional push diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs index f42acdaadaabc6..341b7903c0a33e 100644 --- a/watchtower/src/main.rs +++ b/watchtower/src/main.rs @@ -47,7 +47,7 @@ fn get_config() -> Config { .version(solana_version::version!()) .after_help("ADDITIONAL HELP: To receive a Slack, Discord, PagerDuty and/or Telegram notification on sanity failure, - define environment variables before running `solana-watchtower`: + define environment variables before running `agave-watchtower`: export SLACK_WEBHOOK=... export DISCORD_WEBHOOK=... @@ -63,7 +63,7 @@ fn get_config() -> Config { To receive a Twilio SMS notification on failure, having a Twilio account, and a sending number owned by that account, - define environment variable before running `solana-watchtower`: + define environment variable before running `agave-watchtower`: export TWILIO_CONFIG='ACCOUNT=,TOKEN=,TO=,FROM='") .arg({ @@ -166,7 +166,7 @@ fn get_config() -> Config { .value_name("SUFFIX") .takes_value(true) .default_value("") - .help("Add this string into all notification messages after \"solana-watchtower\"") + .help("Add this string into all notification messages after \"agave-watchtower\"") ) .get_matches(); @@ -381,7 +381,7 @@ fn main() -> Result<(), Box> { if let Some((failure_test_name, failure_error_message)) = &failure { let notification_msg = format!( - "solana-watchtower{}: Error: {}: {}", + "agave-watchtower{}: Error: {}: {}", config.name_suffix, failure_test_name, failure_error_message ); num_consecutive_failures += 1; @@ -415,7 +415,7 @@ fn main() -> Result<(), Box> { ); info!("{}", all_clear_msg); notifier.send( - &format!("solana-watchtower{}: {}", config.name_suffix, all_clear_msg), + &format!("agave-watchtower{}: {}", config.name_suffix, all_clear_msg), &NotificationType::Resolve { incident }, ); } From 570d1a919729741f062ae0ad0b1f9bf52fbb83f2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 4 Mar 2024 13:59:35 -0500 Subject: [PATCH 320/401] Adds a bench for hash_account() (#47) --- Cargo.lock | 1 + accounts-db/Cargo.toml | 5 ++++ accounts-db/benches/bench_hashing.rs | 43 ++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 accounts-db/benches/bench_hashing.rs diff --git a/Cargo.lock b/Cargo.lock index c72b90930d7cf9..a67b7aba0d6b64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5390,6 +5390,7 @@ dependencies = [ "bytemuck", "byteorder", "bzip2", + "criterion", "crossbeam-channel", "dashmap", "ed25519-dalek", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index b986c17de0636b..702f14f9f3b07d 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -70,6 +70,7 @@ name = "solana_accounts_db" [dev-dependencies] assert_matches = { workspace = true } +criterion = { workspace = true } ed25519-dalek = { workspace = true } libsecp256k1 = { workspace = true } memoffset = { workspace = true } @@ -89,3 +90,7 @@ rustc_version = { workspace = true } [features] dev-context-only-utils = [] + +[[bench]] +name = "bench_hashing" +harness = false diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs new file mode 100644 index 00000000000000..3158f78c7a938f --- /dev/null +++ b/accounts-db/benches/bench_hashing.rs @@ -0,0 +1,43 @@ +use { + criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}, + solana_accounts_db::accounts_db::AccountsDb, + solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, +}; + +const KB: usize = 1024; +const MB: usize = KB 
* KB; + +const DATA_SIZES: [usize; 6] = [ + 0, // the smallest account + 165, // the size of an spl token account + 200, // the size of a stake account + KB, // a medium sized account + MB, // a large sized account + 10 * MB, // the largest account +]; + +/// The number of bytes of *non account data* that are also hashed as +/// part of computing an account's hash. +/// +/// Ensure this constant stays in sync with the value of `META_SIZE` in +/// AccountsDb::hash_account_data(). +const META_SIZE: usize = 81; + +fn bench_hash_account(c: &mut Criterion) { + let lamports = 123_456_789; + let owner = Pubkey::default(); + let address = Pubkey::default(); + + let mut group = c.benchmark_group("hash_account"); + for data_size in DATA_SIZES { + let num_bytes = META_SIZE.checked_add(data_size).unwrap(); + group.throughput(Throughput::Bytes(num_bytes as u64)); + let account = AccountSharedData::new(lamports, data_size, &owner); + group.bench_function(BenchmarkId::new("data_size", data_size), |b| { + b.iter(|| AccountsDb::hash_account(&account, &address)); + }); + } +} + +criterion_group!(benches, bench_hash_account,); +criterion_main!(benches); From f5912104d0230b601e2b3696a94d376ad8f7c73c Mon Sep 17 00:00:00 2001 From: Brennan Date: Mon, 4 Mar 2024 13:17:34 -0800 Subject: [PATCH 321/401] update mio to 0.8.11 (#60) --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a67b7aba0d6b64..78ff40111ee0b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3394,9 +3394,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7546c56bd2b26a..cb0ad6f1ee448c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2892,9 +2892,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", From 93f5b514fa410b0c94a7ce134bed2fc871400890 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 4 Mar 2024 16:32:51 -0500 Subject: [PATCH 322/401] Adds StartingSnapshotStorages to AccountsHashVerifier (#58) --- accounts-db/src/lib.rs | 1 + accounts-db/src/starting_snapshot_storages.rs | 19 +++++ core/src/accounts_hash_verifier.rs | 8 +- core/src/validator.rs | 43 ++++++---- core/tests/epoch_accounts_hash.rs | 2 + core/tests/snapshots.rs | 2 + ledger-tool/src/ledger_utils.rs | 32 +++++--- ledger/src/bank_forks_utils.rs | 81 ++++++++++++++----- 8 files changed, 135 insertions(+), 53 deletions(-) create mode 100644 accounts-db/src/starting_snapshot_storages.rs diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 7883f852d1e3f2..b7994fe4354118 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -37,6 +37,7 @@ pub mod secondary_index; pub mod shared_buffer_reader; pub mod sorted_storages; pub mod stake_rewards; +pub mod starting_snapshot_storages; pub mod storable_accounts; pub mod tiered_storage; pub mod utils; diff 
--git a/accounts-db/src/starting_snapshot_storages.rs b/accounts-db/src/starting_snapshot_storages.rs new file mode 100644 index 00000000000000..cc5e26c61872b7 --- /dev/null +++ b/accounts-db/src/starting_snapshot_storages.rs @@ -0,0 +1,19 @@ +use {crate::accounts_db::AccountStorageEntry, std::sync::Arc}; + +/// Snapshot storages that the node loaded from +/// +/// This is used to support fastboot. Since fastboot reuses existing storages, we must carefully +/// handle the storages used to load at startup. If we do not handle these storages properly, +/// restarting from the same local state (i.e. bank snapshot) may fail. +#[derive(Debug)] +pub enum StartingSnapshotStorages { + /// Starting from genesis has no storages yet + Genesis, + /// Starting from a snapshot archive always extracts the storages from the archive, so no + /// special handling is necessary to preserve them. + Archive, + /// Starting from local state must preserve the loaded storages. These storages must *not* be + /// recycled or removed prior to taking the next snapshot, otherwise restarting from the same + /// bank snapshot may fail. + Fastboot(Vec<Arc<AccountStorageEntry>>), +} diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 0e427d0675a2b1..f5572d94a3c7d1 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -9,6 +9,7 @@ use { IncrementalAccountsHash, }, sorted_storages::SortedStorages, + starting_snapshot_storages::StartingSnapshotStorages, }, solana_measure::measure_us, solana_runtime::{ @@ -42,6 +43,7 @@ impl AccountsHashVerifier { accounts_package_sender: Sender<AccountsPackage>, accounts_package_receiver: Receiver<AccountsPackage>, snapshot_package_sender: Option<Sender<SnapshotPackage>>, + starting_snapshot_storages: StartingSnapshotStorages, exit: Arc<AtomicBool>, snapshot_config: SnapshotConfig, ) -> Self { @@ -54,7 +56,11 @@ impl AccountsHashVerifier { // To support fastboot, we must ensure the storages used in the latest POST snapshot are // not recycled nor removed early. Hold an Arc of their AppendVecs to prevent them from // expiring.
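The hunk just below wires the new enum into AccountsHashVerifier; as a standalone sketch of the mapping it performs (the helper name is hypothetical), only the `Fastboot` variant carries storages that must be kept alive:

```rust
use {
    solana_accounts_db::{
        accounts_db::AccountStorageEntry,
        starting_snapshot_storages::StartingSnapshotStorages,
    },
    std::sync::Arc,
};

// Genesis and archive starts need no special handling; a fastboot start
// must hold its storages so they are neither recycled nor removed early.
fn storages_to_hold(
    starting: StartingSnapshotStorages,
) -> Option<Vec<Arc<AccountStorageEntry>>> {
    match starting {
        StartingSnapshotStorages::Genesis | StartingSnapshotStorages::Archive => None,
        StartingSnapshotStorages::Fastboot(storages) => Some(storages),
    }
}
```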
- let mut fastboot_storages = None; + let mut fastboot_storages = match starting_snapshot_storages { + StartingSnapshotStorages::Genesis => None, + StartingSnapshotStorages::Archive => None, + StartingSnapshotStorages::Fastboot(storages) => Some(storages), + }; loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/validator.rs b/core/src/validator.rs index a6d5921bcef5c9..196dad5f25d17a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -35,6 +35,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + starting_snapshot_storages::StartingSnapshotStorages, utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, @@ -690,6 +691,7 @@ impl Validator { completed_slots_receiver, leader_schedule_cache, starting_snapshot_hashes, + starting_snapshot_storages, TransactionHistoryServices { transaction_status_sender, transaction_status_service, @@ -779,6 +781,7 @@ impl Validator { accounts_package_sender.clone(), accounts_package_receiver, snapshot_package_sender, + starting_snapshot_storages, exit.clone(), config.snapshot_config.clone(), ); @@ -1767,6 +1770,7 @@ fn load_blockstore( CompletedSlotsReceiver, LeaderScheduleCache, Option, + StartingSnapshotStorages, TransactionHistoryServices, blockstore_processor::ProcessOptions, BlockstoreRootScan, @@ -1856,23 +1860,27 @@ fn load_blockstore( let entry_notifier_service = entry_notifier .map(|entry_notifier| EntryNotifierService::new(entry_notifier, exit.clone())); - let (bank_forks, mut leader_schedule_cache, starting_snapshot_hashes) = - bank_forks_utils::load_bank_forks( - &genesis_config, - &blockstore, - config.account_paths.clone(), - Some(&config.snapshot_config), - &process_options, - transaction_history_services - .cache_block_meta_sender - .as_ref(), - entry_notifier_service - .as_ref() - .map(|service| service.sender()), - accounts_update_notifier, - exit, - ) - .map_err(|err| err.to_string())?; + let ( + bank_forks, + mut leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + ) = bank_forks_utils::load_bank_forks( + &genesis_config, + &blockstore, + config.account_paths.clone(), + Some(&config.snapshot_config), + &process_options, + transaction_history_services + .cache_block_meta_sender + .as_ref(), + entry_notifier_service + .as_ref() + .map(|service| service.sender()), + accounts_update_notifier, + exit, + ) + .map_err(|err| err.to_string())?; // Before replay starts, set the callbacks in each of the banks in BankForks so that // all dropped banks come through the `pruned_banks_receiver` channel. 
This way all bank @@ -1898,6 +1906,7 @@ fn load_blockstore( completed_slots_receiver, leader_schedule_cache, starting_snapshot_hashes, + starting_snapshot_storages, transaction_history_services, process_options, blockstore_root_scan, diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index b0dd111676af79..62e31f0a88b766 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -9,6 +9,7 @@ use { accounts_hash::CalcAccountsHashConfig, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, + starting_snapshot_storages::StartingSnapshotStorages, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, @@ -196,6 +197,7 @@ impl BackgroundServices { accounts_package_sender.clone(), accounts_package_receiver, Some(snapshot_package_sender), + StartingSnapshotStorages::Genesis, exit.clone(), snapshot_config.clone(), ); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 2694f7294a7217..e67c942f07ab0b 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -11,6 +11,7 @@ use { accounts_hash::AccountsHash, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, + starting_snapshot_storages::StartingSnapshotStorages, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, @@ -1043,6 +1044,7 @@ fn test_snapshots_with_background_services( accounts_package_sender, accounts_package_receiver, Some(snapshot_package_sender), + StartingSnapshotStorages::Genesis, exit.clone(), snapshot_test_config.snapshot_config.clone(), ); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index c05cc6c2d64cd0..8a8302d7e4e94b 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -268,19 +268,24 @@ pub fn load_and_process_ledger( }; let exit = Arc::new(AtomicBool::new(false)); - let (bank_forks, leader_schedule_cache, starting_snapshot_hashes, ..) 
= - bank_forks_utils::load_bank_forks( - genesis_config, - blockstore.as_ref(), - account_paths, - snapshot_config.as_ref(), - &process_options, - None, - None, // Maybe support this later, though - accounts_update_notifier, - exit.clone(), - ) - .map_err(LoadAndProcessLedgerError::LoadBankForks)?; + let ( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + .., + ) = bank_forks_utils::load_bank_forks( + genesis_config, + blockstore.as_ref(), + account_paths, + snapshot_config.as_ref(), + &process_options, + None, + None, // Maybe support this later, though + accounts_update_notifier, + exit.clone(), + ) + .map_err(LoadAndProcessLedgerError::LoadBankForks)?; let block_verification_method = value_t!( arg_matches, "block_verification_method", @@ -325,6 +330,7 @@ pub fn load_and_process_ledger( accounts_package_sender.clone(), accounts_package_receiver, None, + starting_snapshot_storages, exit.clone(), SnapshotConfig::new_load_only(), ); diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 17412c1801ac68..b30f90986bb9c2 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -10,7 +10,10 @@ use { use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, log::*, - solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier, + solana_accounts_db::{ + accounts_update_notifier_interface::AccountsUpdateNotifier, + starting_snapshot_storages::StartingSnapshotStorages, + }, solana_runtime::{ accounts_background_service::AbsRequestSender, bank_forks::BankForks, @@ -67,6 +70,7 @@ pub type LoadResult = result::Result< Arc<RwLock<BankForks>>, LeaderScheduleCache, Option<StartingSnapshotHashes>, + StartingSnapshotStorages, ), BankForksUtilsError, >; @@ -88,7 +92,13 @@ pub fn load( accounts_update_notifier: Option<AccountsUpdateNotifier>, exit: Arc<AtomicBool>, ) -> LoadResult { - let (bank_forks, leader_schedule_cache, starting_snapshot_hashes, ..) = load_bank_forks( + let ( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + .., + ) = load_bank_forks( genesis_config, blockstore, account_paths, @@ -111,7 +121,12 @@ pub fn load( ) .map_err(BankForksUtilsError::ProcessBlockstoreFromRoot)?; - Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) + Ok(( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + )) } #[allow(clippy::too_many_arguments)] @@ -161,7 +176,7 @@ pub fn load_bank_forks( )) } - let (bank_forks, starting_snapshot_hashes) = + let (bank_forks, starting_snapshot_hashes, starting_snapshot_storages) = if let Some((full_snapshot_archive_info, incremental_snapshot_archive_info)) = get_snapshots_to_load(snapshot_config) { @@ -173,17 +188,22 @@ pub fn load_bank_forks( ); std::fs::create_dir_all(&snapshot_config.bank_snapshots_dir) .expect("create bank snapshots dir"); - let (bank_forks, starting_snapshot_hashes) = bank_forks_from_snapshot( - full_snapshot_archive_info, - incremental_snapshot_archive_info, - genesis_config, - account_paths, - snapshot_config, - process_options, - accounts_update_notifier, - exit, - )?; - (bank_forks, Some(starting_snapshot_hashes)) + let (bank_forks, starting_snapshot_hashes, starting_snapshot_storages) = + bank_forks_from_snapshot( + full_snapshot_archive_info, + incremental_snapshot_archive_info, + genesis_config, + account_paths, + snapshot_config, + process_options, + accounts_update_notifier, + exit, + )?; + ( + bank_forks, + Some(starting_snapshot_hashes), + starting_snapshot_storages, + ) } else { info!("Processing ledger from genesis"); let bank_forks = blockstore_processor::process_blockstore_for_bank_0( @@ -202,7 +222,7 @@ pub fn load_bank_forks( .root_bank() .set_startup_verification_complete(); - (bank_forks, None) + (bank_forks, None, StartingSnapshotStorages::Genesis) }; let mut leader_schedule_cache = @@ -218,7 +238,12 @@ pub fn load_bank_forks( .for_each(|hard_fork_slot| root_bank.register_hard_fork(*hard_fork_slot)); } - Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) + Ok(( + bank_forks, + leader_schedule_cache, + starting_snapshot_hashes, + starting_snapshot_storages, + )) } #[allow(clippy::too_many_arguments)] @@ -231,7 +256,14 @@ fn bank_forks_from_snapshot( process_options: &ProcessOptions, accounts_update_notifier: Option<AccountsUpdateNotifier>, exit: Arc<AtomicBool>, -) -> Result<(Arc<RwLock<BankForks>>, StartingSnapshotHashes), BankForksUtilsError> { +) -> Result< + ( + Arc<RwLock<BankForks>>, + StartingSnapshotHashes, + StartingSnapshotStorages, + ), + BankForksUtilsError, +> { // Fail hard here if snapshot fails to load, don't silently continue if account_paths.is_empty() { return Err(BankForksUtilsError::AccountPathsNotPresent); } @@ -257,7 +289,7 @@ fn bank_forks_from_snapshot( .unwrap_or(true), }; - let bank = if will_startup_from_snapshot_archives { + let (bank, starting_snapshot_storages) = if will_startup_from_snapshot_archives { // Given that we are going to boot from an archive, the append vecs held in the snapshot dirs for fast-boot should // be released. They will be released by the account_background_service anyway.
But in the case of the account_paths // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from @@ -292,7 +324,7 @@ fn bank_forks_from_snapshot( .map(|archive| archive.path().display().to_string()) .unwrap_or("none".to_string()), })?; - bank + (bank, StartingSnapshotStorages::Archive) } else { let bank_snapshot = latest_bank_snapshot.ok_or_else(|| BankForksUtilsError::NoBankSnapshotDirectory { @@ -346,7 +378,8 @@ fn bank_forks_from_snapshot( // snapshot archive next time, which is safe. snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); - bank + let storages = bank.get_snapshot_storages(None); + (bank, StartingSnapshotStorages::Fastboot(storages)) }; let full_snapshot_hash = FullSnapshotHash(( @@ -365,5 +398,9 @@ fn bank_forks_from_snapshot( incremental: incremental_snapshot_hash, }; - Ok((BankForks::new_rw_arc(bank), starting_snapshot_hashes)) + Ok(( + BankForks::new_rw_arc(bank), + starting_snapshot_hashes, + starting_snapshot_storages, + )) } From 6263537bf0afacc08a29a9f6261aa44970190094 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Mon, 4 Mar 2024 17:16:31 -0800 Subject: [PATCH 323/401] blockstore_purge: fix inspect -> inspect_err (#66) --- ledger/src/blockstore/blockstore_purge.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 15a5c4890e9f05..d8b4c7424cd8c1 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -213,7 +213,7 @@ impl Blockstore { delete_range_timer.stop(); let mut write_timer = Measure::start("write_batch"); - self.db.write(write_batch).inspect(|e| { + self.db.write(write_batch).inspect_err(|e| { error!( "Error: {:?} while submitting write batch for purge from_slot {} to_slot {}", e, from_slot, to_slot From 661de5bb76851c60edd97ec8bb1c82937127e38d Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 4 Mar 2024 19:21:30 -0700 Subject: [PATCH 324/401] Rpc: deprecate `getStakeActivation` and make inactive_stake consistent (#69) * Make inactive_stake consistent * Add rpc_deprecated_v1_18 module * Move get_stake_activation to deprecated list * Fix typo --- rpc/src/rpc.rs | 74 +++++++++++++++++++++++------------------- rpc/src/rpc_service.rs | 6 ++-- 2 files changed, 45 insertions(+), 35 deletions(-) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 01f623dccdc108..41b26e5fa1e2c2 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1786,16 +1786,10 @@ impl JsonRpcRequestProcessor { } else { StakeActivationState::Inactive }; - let inactive_stake = match stake_activation_state { - StakeActivationState::Activating => activating, - StakeActivationState::Active => 0, - StakeActivationState::Deactivating => stake_account - .lamports() - .saturating_sub(effective + rent_exempt_reserve), - StakeActivationState::Inactive => { - stake_account.lamports().saturating_sub(rent_exempt_reserve) - } - }; + let inactive_stake = stake_account + .lamports() + .saturating_sub(effective) + .saturating_sub(rent_exempt_reserve); Ok(RpcStakeActivation { state: stake_activation_state, active: effective, @@ -2991,14 +2985,6 @@ pub mod rpc_accounts { block: Slot, ) -> Result>; - #[rpc(meta, name = "getStakeActivation")] - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result; - // SPL Token-specific RPC endpoints // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for 
// program details @@ -3071,20 +3057,6 @@ pub mod rpc_accounts { Ok(meta.get_block_commitment(block)) } - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result { - debug!( - "get_stake_activation rpc request received: {:?}", - pubkey_str - ); - let pubkey = verify_pubkey(&pubkey_str)?; - meta.get_stake_activation(&pubkey, config) - } - fn get_token_account_balance( &self, meta: Self::Metadata, @@ -4091,7 +4063,43 @@ fn rpc_perf_sample_from_perf_sample(slot: u64, sample: PerfSample) -> RpcPerfSam } } -// RPC methods deprecated in v1.8 +pub mod rpc_deprecated_v1_18 { + use super::*; + #[rpc] + pub trait DeprecatedV1_18 { + type Metadata; + + // DEPRECATED + #[rpc(meta, name = "getStakeActivation")] + fn get_stake_activation( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result; + } + + pub struct DeprecatedV1_18Impl; + impl DeprecatedV1_18 for DeprecatedV1_18Impl { + type Metadata = JsonRpcRequestProcessor; + + fn get_stake_activation( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result { + debug!( + "get_stake_activation rpc request received: {:?}", + pubkey_str + ); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_stake_activation(&pubkey, config) + } + } +} + +// RPC methods deprecated in v1.9 pub mod rpc_deprecated_v1_9 { #![allow(deprecated)] use super::*; diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 8597394f102325..d8791ab6c3bf6b 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -6,8 +6,9 @@ use { max_slots::MaxSlots, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, rpc::{ - rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_deprecated_v1_7::*, - rpc_deprecated_v1_9::*, rpc_full::*, rpc_minimal::*, rpc_obsolete_v1_7::*, *, + rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_deprecated_v1_18::*, + rpc_deprecated_v1_7::*, rpc_deprecated_v1_9::*, rpc_full::*, rpc_minimal::*, + rpc_obsolete_v1_7::*, *, }, rpc_cache::LargestAccountsCache, rpc_health::*, @@ -510,6 +511,7 @@ impl JsonRpcService { io.extend_with(rpc_full::FullImpl.to_delegate()); io.extend_with(rpc_deprecated_v1_7::DeprecatedV1_7Impl.to_delegate()); io.extend_with(rpc_deprecated_v1_9::DeprecatedV1_9Impl.to_delegate()); + io.extend_with(rpc_deprecated_v1_18::DeprecatedV1_18Impl.to_delegate()); } if obsolete_v1_7_api { io.extend_with(rpc_obsolete_v1_7::ObsoleteV1_7Impl.to_delegate()); From b78c0703ff97d78634c937b04083fd5272595f0c Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 5 Mar 2024 12:28:12 +0100 Subject: [PATCH 325/401] windows: Use vcpkg for openssl dep (#73) --- .github/workflows/release-artifacts.yml | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 45be181c3ce9e1..7aec77f0dac45f 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -43,17 +43,8 @@ jobs: id: build shell: bash run: | - choco install openssl --version=3.1.1 - if [[ -d "C:\Program Files\OpenSSL" ]]; then - echo "OPENSSL_DIR: C:\Program Files\OpenSSL" - export OPENSSL_DIR="C:\Program Files\OpenSSL" - elif [[ -d "C:\Program Files\OpenSSL-Win64" ]]; then - echo "OPENSSL_DIR: C:\Program Files\OpenSSL-Win64" - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" - else - echo "can't determine OPENSSL_DIR" - exit 1 - fi + vcpkg install openssl:x64-windows-static-md + vcpkg integrate install choco 
install protoc export PROTOC="C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe" source /tmp/env.sh From f5912104d0230b601e2b3696a94d376ad8f7c73c Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 5 Mar 2024 12:02:47 -0500 Subject: [PATCH 326/401] Increases account hash's stack buffer to hold 200 bytes of data (#56) --- accounts-db/Cargo.toml | 2 +- accounts-db/src/accounts_db.rs | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 702f14f9f3b07d..0fc5a381fbda5e 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -42,7 +42,7 @@ regex = { workspace = true } seqlock = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } -smallvec = { workspace = true } +smallvec = { workspace = true, features = ["const_generics"] } solana-bucket-map = { workspace = true } solana-config-program = { workspace = true } solana-frozen-abi = { workspace = true } diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1f3c36876f4531..cf4d17745b1b73 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6119,17 +6119,18 @@ impl AccountsDb { } let mut hasher = blake3::Hasher::new(); - // allocate 128 bytes buffer on the stack - const BUFFER_SIZE: usize = 128; - const METADATA_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */; - const REMAINING_SIZE: usize = BUFFER_SIZE - METADATA_SIZE; + // allocate a buffer on the stack that's big enough + // to hold a token account or a stake account + const META_SIZE: usize = 8 /* lamports */ + 8 /* rent_epoch */ + 1 /* executable */ + 32 /* owner */ + 32 /* pubkey */; + const DATA_SIZE: usize = 200; // stake accounts are 200 B and token accounts are 165-182ish B + const BUFFER_SIZE: usize = META_SIZE + DATA_SIZE; let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new(); // collect lamports, rent_epoch into buffer to hash buffer.extend_from_slice(&lamports.to_le_bytes()); buffer.extend_from_slice(&rent_epoch.to_le_bytes()); - if data.len() > REMAINING_SIZE { + if data.len() > DATA_SIZE { // For larger accounts whose data can't fit into the buffer, update the hash now.
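As a self-contained sketch of the stack-buffer pattern this hunk sets up (assuming the `blake3` and `smallvec` crates with smallvec's `const_generics` feature, as enabled above; the function name and byte slices are illustrative):

```rust
use smallvec::SmallVec;

const META_SIZE: usize = 81; // lamports + rent_epoch + executable + owner + pubkey
const DATA_SIZE: usize = 200; // big enough for stake and token accounts
const BUFFER_SIZE: usize = META_SIZE + DATA_SIZE;

fn hash_account_bytes(meta: &[u8], data: &[u8]) -> blake3::Hash {
    let mut hasher = blake3::Hasher::new();
    let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new();
    buffer.extend_from_slice(meta);
    if data.len() > DATA_SIZE {
        // Oversized data: flush the stack buffer, then stream the data.
        hasher.update(&buffer);
        hasher.update(data);
    } else {
        // Common case: everything fits in one stack-allocated buffer.
        buffer.extend_from_slice(data);
        hasher.update(&buffer);
    }
    hasher.finalize()
}
```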
hasher.update(&buffer); buffer.clear(); From ce34f3f01444e01fcc67630d2c77ba66aefa7539 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 5 Mar 2024 12:09:17 -0600 Subject: [PATCH 327/401] Rename and uniquify QUIC thread names (#28) When viewing in various tools such as gdb and perf, it is not easy to distinguish which threads are serving which function (TPU or TPU FWD) --- client/src/connection_cache.rs | 1 + core/src/tpu.rs | 2 ++ quic-client/src/quic_client.rs | 2 +- quic-client/tests/quic_client.rs | 3 +++ streamer/src/quic.rs | 26 +++++++++++++++----------- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index b53b66b155e719..a94bc7cd3d8ca8 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -260,6 +260,7 @@ mod tests { thread: response_recv_thread, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 548b299148d935..0f5431da4eb43c 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -153,6 +153,7 @@ impl Tpu { thread: tpu_quic_t, key_updater, } = spawn_server( + "solQuicTpu", "quic_streamer_tpu", transactions_quic_sockets, keypair, @@ -172,6 +173,7 @@ impl Tpu { thread: tpu_forwards_quic_t, key_updater: forwards_key_updater, } = spawn_server( + "solQuicTpuFwd", "quic_streamer_tpu_forwards", transactions_forwards_quic_sockets, keypair, diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs index f057980c79fe06..8c8e8e5338993f 100644 --- a/quic-client/src/quic_client.rs +++ b/quic-client/src/quic_client.rs @@ -69,7 +69,7 @@ lazy_static! { static ref ASYNC_TASK_SEMAPHORE: AsyncTaskSemaphore = AsyncTaskSemaphore::new(MAX_OUTSTANDING_TASK); static ref RUNTIME: Runtime = tokio::runtime::Builder::new_multi_thread() - .thread_name("quic-client") + .thread_name("solQuicClientRt") .enable_all() .build() .unwrap(); diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index 658ee6a57d672d..0237fc21d098dc 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -72,6 +72,7 @@ mod tests { thread: t, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", s.try_clone().unwrap(), &keypair, @@ -212,6 +213,7 @@ mod tests { thread: request_recv_thread, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", request_recv_socket.try_clone().unwrap(), &keypair, @@ -239,6 +241,7 @@ mod tests { thread: response_recv_thread, key_updater: _, } = solana_streamer::quic::spawn_server( + "solQuicTest", "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 69a75532b8ca68..a7a08c73f4833b 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -100,9 +100,9 @@ pub(crate) fn configure_server( Ok((server_config, cert_chain_pem)) } -fn rt() -> Runtime { +fn rt(name: String) -> Runtime { tokio::runtime::Builder::new_multi_thread() - .thread_name("quic-server") + .thread_name(name) .enable_all() .build() .unwrap() @@ -431,7 +431,8 @@ impl StreamStats { #[allow(clippy::too_many_arguments)] pub fn spawn_server( - name: &'static str, + thread_name: &'static str, + metrics_name: &'static str, sock: UdpSocket, keypair: &Keypair, packet_sender: Sender, @@ -443,11 +444,11 @@ pub fn spawn_server( wait_for_chunk_timeout: Duration, coalesce: Duration, ) -> Result { - 
let runtime = rt(); + let runtime = rt(format!("{thread_name}Rt")); let (endpoint, _stats, task) = { let _guard = runtime.enter(); crate::nonblocking::quic::spawn_server( - name, + metrics_name, sock, keypair, packet_sender, @@ -461,7 +462,7 @@ pub fn spawn_server( ) }?; let handle = thread::Builder::new() - .name("solQuicServer".into()) + .name(thread_name.into()) .spawn(move || { if let Err(e) = runtime.block_on(task) { warn!("error from runtime.block_on: {:?}", e); @@ -505,6 +506,7 @@ mod test { thread: t, key_updater: _, } = spawn_server( + "solQuicTest", "quic_streamer_test", s, &keypair, @@ -532,7 +534,7 @@ mod test { fn test_quic_timeout() { solana_logger::setup(); let (t, exit, receiver, server_address) = setup_quic_server(); - let runtime = rt(); + let runtime = rt("solQuicTestRt".to_string()); runtime.block_on(check_timeout(receiver, server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -543,7 +545,7 @@ mod test { solana_logger::setup(); let (t, exit, _receiver, server_address) = setup_quic_server(); - let runtime = rt(); + let runtime = rt("solQuicTestRt".to_string()); runtime.block_on(check_block_multiple_connections(server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -563,6 +565,7 @@ mod test { thread: t, key_updater: _, } = spawn_server( + "solQuicTest", "quic_streamer_test", s, &keypair, @@ -577,7 +580,7 @@ mod test { ) .unwrap(); - let runtime = rt(); + let runtime = rt("solQuicTestRt".to_string()); runtime.block_on(check_multiple_streams(receiver, server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -588,7 +591,7 @@ mod test { solana_logger::setup(); let (t, exit, receiver, server_address) = setup_quic_server(); - let runtime = rt(); + let runtime = rt("solQuicTestRt".to_string()); runtime.block_on(check_multiple_writes(receiver, server_address, None)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -608,6 +611,7 @@ mod test { thread: t, key_updater: _, } = spawn_server( + "solQuicTest", "quic_streamer_test", s, &keypair, @@ -622,7 +626,7 @@ mod test { ) .unwrap(); - let runtime = rt(); + let runtime = rt("solQuicTestRt".to_string()); runtime.block_on(check_unstaked_node_connect_failure(server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); From 1110fc93d75dbf294eb61534152fe2a891c98407 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 5 Mar 2024 22:02:04 -0600 Subject: [PATCH 328/401] Give SigVerify and ShredFetch threads unique names (#98) - solTvuFetchPmod ==> solTvuPktMod + solTvuRepPktMod - solSigVerifier ==> solSigVerTpu + solSigVerTpuVot --- core/benches/sigverify_stage.rs | 2 +- core/src/shred_fetch_stage.rs | 5 ++++- core/src/sigverify_stage.rs | 23 +++++++++-------------- core/src/tpu.rs | 9 +++++++-- 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index 70f33020dd3e70..7013f718e4ab2e 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -160,7 +160,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { let (packet_s, packet_r) = unbounded(); let (verified_s, verified_r) = BankingTracer::channel_for_test(); let verifier = TransactionSigVerifier::new(verified_s); - let stage = SigVerifyStage::new(packet_r, verifier, "bench"); + let stage = SigVerifyStage::new(packet_r, verifier, "solSigVerBench", "bench"); bencher.iter(move || { let now = Instant::now(); diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 
39cc193adad96e..84f1520e649822 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -148,6 +148,7 @@ impl ShredFetchStage { #[allow(clippy::too_many_arguments)] fn packet_modifier( receiver_thread_name: &'static str, + modifier_thread_name: &'static str, sockets: Vec>, exit: Arc, sender: Sender, @@ -178,7 +179,7 @@ impl ShredFetchStage { }) .collect(); let modifier_hdl = Builder::new() - .name("solTvuFetchPMod".to_string()) + .name(modifier_thread_name.to_string()) .spawn(move || { let repair_context = repair_context .as_ref() @@ -215,6 +216,7 @@ impl ShredFetchStage { let (mut tvu_threads, tvu_filter) = Self::packet_modifier( "solRcvrShred", + "solTvuPktMod", sockets, exit.clone(), sender.clone(), @@ -229,6 +231,7 @@ impl ShredFetchStage { let (repair_receiver, repair_handler) = Self::packet_modifier( "solRcvrShredRep", + "solTvuRepPktMod", vec![repair_socket.clone()], exit.clone(), sender.clone(), diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index e5e06a3bc701c9..cde1735611c0d0 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -238,9 +238,11 @@ impl SigVerifyStage { pub fn new( packet_receiver: Receiver, verifier: T, - name: &'static str, + thread_name: &'static str, + metrics_name: &'static str, ) -> Self { - let thread_hdl = Self::verifier_services(packet_receiver, verifier, name); + let thread_hdl = + Self::verifier_service(packet_receiver, verifier, thread_name, metrics_name); Self { thread_hdl } } @@ -407,7 +409,8 @@ impl SigVerifyStage { fn verifier_service( packet_receiver: Receiver, mut verifier: T, - name: &'static str, + thread_name: &'static str, + metrics_name: &'static str, ) -> JoinHandle<()> { let mut stats = SigVerifierStats::default(); let mut last_print = Instant::now(); @@ -415,7 +418,7 @@ impl SigVerifyStage { const DEDUPER_FALSE_POSITIVE_RATE: f64 = 0.001; const DEDUPER_NUM_BITS: u64 = 63_999_979; Builder::new() - .name("solSigVerifier".to_string()) + .name(thread_name.to_string()) .spawn(move || { let mut rng = rand::thread_rng(); let mut deduper = Deduper::<2, [u8]>::new(&mut rng, DEDUPER_NUM_BITS); @@ -440,7 +443,7 @@ impl SigVerifyStage { } } if last_print.elapsed().as_secs() > 2 { - stats.report(name); + stats.report(metrics_name); stats = SigVerifierStats::default(); last_print = Instant::now(); } @@ -449,14 +452,6 @@ impl SigVerifyStage { .unwrap() } - fn verifier_services( - packet_receiver: Receiver, - verifier: T, - name: &'static str, - ) -> JoinHandle<()> { - Self::verifier_service(packet_receiver, verifier, name) - } - pub fn join(self) -> thread::Result<()> { self.thread_hdl.join() } @@ -552,7 +547,7 @@ mod tests { let (packet_s, packet_r) = unbounded(); let (verified_s, verified_r) = BankingTracer::channel_for_test(); let verifier = TransactionSigVerifier::new(verified_s); - let stage = SigVerifyStage::new(packet_r, verifier, "test"); + let stage = SigVerifyStage::new(packet_r, verifier, "solSigVerTest", "test"); let now = Instant::now(); let packets_per_batch = 128; diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 0f5431da4eb43c..640caf64544d45 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -190,14 +190,19 @@ impl Tpu { let sigverify_stage = { let verifier = TransactionSigVerifier::new(non_vote_sender); - SigVerifyStage::new(packet_receiver, verifier, "tpu-verifier") + SigVerifyStage::new(packet_receiver, verifier, "solSigVerTpu", "tpu-verifier") }; let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); let vote_sigverify_stage = 
{ let verifier = TransactionSigVerifier::new_reject_non_vote(tpu_vote_sender); - SigVerifyStage::new(vote_packet_receiver, verifier, "tpu-vote-verifier") + SigVerifyStage::new( + vote_packet_receiver, + verifier, + "solSigVerTpuVot", + "tpu-vote-verifier", + ) }; let (gossip_vote_sender, gossip_vote_receiver) = From 3cf48347a984563595e2808dff7482af00888da8 Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 6 Mar 2024 00:23:32 -0700 Subject: [PATCH 329/401] Cli: improve vote-account vote-authority display (#95) * Simplify vote-authority display * Add handling for new vote authority * Add proper None handling, because unit test (shouldn't happen IRL, though) * Unwrap->expect --- cli-output/src/cli_output.rs | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 0eca9cde5c1a52..62b66eddf27eb0 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -1648,7 +1648,23 @@ impl VerboseDisplay for CliAuthorizedVoters {} impl fmt::Display for CliAuthorizedVoters { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.authorized_voters) + if let Some((_epoch, current_authorized_voter)) = self.authorized_voters.first_key_value() { + write!(f, "{current_authorized_voter}")?; + } else { + write!(f, "None")?; + } + if self.authorized_voters.len() > 1 { + let (epoch, upcoming_authorized_voter) = self + .authorized_voters + .last_key_value() + .expect("CliAuthorizedVoters::authorized_voters.len() > 1"); + writeln!(f)?; + write!( + f, + " New Vote Authority as of Epoch {epoch}: {upcoming_authorized_voter}" + )?; + } + Ok(()) } } @@ -3379,12 +3395,12 @@ mod tests { ..CliVoteAccount::default() }; let s = format!("{c}"); - assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: {}\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\n Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n 1 100 1970-01-01 00:00:00 UTC ◎0.000000010 ◎0.000000100 11.000% 10.00% 1%\n 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.000000100 11.000% 13.00% 1%\n"); + assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: None\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\n Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n 1 100 1970-01-01 00:00:00 UTC ◎0.000000010 ◎0.000000100 11.000% 10.00% 1%\n 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.000000100 11.000% 13.00% 1%\n"); println!("{s}"); c.use_csv = true; let s = format!("{c}"); - assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: {}\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\nEpoch,Reward Slot,Time,Amount,New Balance,Percent Change,APR,Commission\n1,100,1970-01-01 00:00:00 UTC,0.00000001,0.0000001,11%,10.00%,1%\n2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"); + assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: None\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\nEpoch,Reward Slot,Time,Amount,New 
Balance,Percent Change,APR,Commission\n1,100,1970-01-01 00:00:00 UTC,0.00000001,0.0000001,11%,10.00%,1%\n2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"); println!("{s}"); } } From b6f6fdbc9a01ce3b37823fdc10212d807c8ed8fe Mon Sep 17 00:00:00 2001 From: Jon C Date: Wed, 6 Mar 2024 15:32:05 +0100 Subject: [PATCH 330/401] frozen-abi: Remove proc_macro_hygiene featurization (#109) --- frozen-abi/build.rs | 5 ----- frozen-abi/src/lib.rs | 1 - perf/build.rs | 5 ----- programs/address-lookup-table/src/lib.rs | 1 - sdk/program/src/lib.rs | 1 - sdk/src/lib.rs | 1 - 6 files changed, 14 deletions(-) diff --git a/frozen-abi/build.rs b/frozen-abi/build.rs index c9550c1c5c4f22..e17ca70cb4718b 100644 --- a/frozen-abi/build.rs +++ b/frozen-abi/build.rs @@ -17,11 +17,6 @@ fn main() { } Channel::Dev => { println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. - println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); } } } diff --git a/frozen-abi/src/lib.rs b/frozen-abi/src/lib.rs index 189535ccddaa74..4747cf64b9e50f 100644 --- a/frozen-abi/src/lib.rs +++ b/frozen-abi/src/lib.rs @@ -1,6 +1,5 @@ #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] -#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] // Allows macro expansion of `use ::solana_frozen_abi::*` to work within this crate extern crate self as solana_frozen_abi; diff --git a/perf/build.rs b/perf/build.rs index 4925ee898eb612..eef20dd887bc42 100644 --- a/perf/build.rs +++ b/perf/build.rs @@ -27,11 +27,6 @@ fn main() { } Channel::Dev => { println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. 
- println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); } } } diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index 737ec32c8f6782..737c35e4c4b2f4 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -1,6 +1,5 @@ #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] -#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] #[cfg(not(target_os = "solana"))] pub mod processor; diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 54de9d817205a8..4d623524772ccb 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -465,7 +465,6 @@ #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] -#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] // Allows macro expansion of `use ::solana_program::*` to work within this crate extern crate self as solana_program; diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 7c6b643884e449..ecc186f0494191 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -31,7 +31,6 @@ #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] -#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] // Allows macro expansion of `use ::solana_sdk::*` to work within this crate extern crate self as solana_sdk; From c1613517bf43b2f67c1feb4db3fb7536116fa9ff Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Wed, 6 Mar 2024 11:08:49 -0600 Subject: [PATCH 331/401] assert simple vote tx const cost (#100) * assert simple vote tx const cost --- cost-model/src/transaction_cost.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index e765eee3bc7038..76865fff30fd57 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -18,8 +18,19 @@ pub enum TransactionCost { impl TransactionCost { pub fn sum(&self) -> u64 { + #![allow(clippy::assertions_on_constants)] match self { - Self::SimpleVote { .. } => SIMPLE_VOTE_USAGE_COST, + Self::SimpleVote { .. 
} => { + const _: () = assert!( + SIMPLE_VOTE_USAGE_COST + == solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS + + block_cost_limits::SIGNATURE_COST + + 2 * block_cost_limits::WRITE_LOCK_UNITS + + 8 + ); + + SIMPLE_VOTE_USAGE_COST + } Self::Transaction(usage_cost) => usage_cost.sum(), } } From b38ea4145ed60d882ae8fc348d28f9e91467d66c Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 6 Mar 2024 14:49:32 -0600 Subject: [PATCH 332/401] Use tokio directly instead of jsonrpc_server_utils's re-export (#116) --- Cargo.lock | 4 ++-- Cargo.toml | 1 - geyser-plugin-manager/Cargo.toml | 2 +- geyser-plugin-manager/src/geyser_plugin_manager.rs | 2 +- programs/sbf/Cargo.lock | 4 ++-- validator/Cargo.toml | 2 +- validator/src/admin_rpc_service.rs | 6 +++--- 7 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ff40111ee0b0..db5431da6ef62e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -205,7 +205,6 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-ipc-server", - "jsonrpc-server-utils", "lazy_static", "libc", "libloading", @@ -252,6 +251,7 @@ dependencies = [ "symlink", "thiserror", "tikv-jemallocator", + "tokio", ] [[package]] @@ -6232,7 +6232,6 @@ dependencies = [ "crossbeam-channel", "json5", "jsonrpc-core", - "jsonrpc-server-utils", "libloading", "log", "serde_json", @@ -6246,6 +6245,7 @@ dependencies = [ "solana-sdk", "solana-transaction-status", "thiserror", + "tokio", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 27376370297e26..4b8ae12dab0078 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -236,7 +236,6 @@ jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" -jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" libc = "0.2.153" libloading = "0.7.4" diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index ebef2f637f642d..a7b02f8d593a8d 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -15,7 +15,6 @@ bs58 = { workspace = true } crossbeam-channel = { workspace = true } json5 = { workspace = true } jsonrpc-core = { workspace = true } -jsonrpc-server-utils = { workspace = true } libloading = { workspace = true } log = { workspace = true } serde_json = { workspace = true } @@ -29,6 +28,7 @@ solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-transaction-status = { workspace = true } thiserror = { workspace = true } +tokio = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 3d0abe16899637..d88814d88e9470 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -1,13 +1,13 @@ use { agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, jsonrpc_core::{ErrorCode, Result as JsonRpcResult}, - jsonrpc_server_utils::tokio::sync::oneshot::Sender as OneShotSender, libloading::Library, log::*, std::{ ops::{Deref, DerefMut}, path::Path, }, + tokio::sync::oneshot::Sender as OneShotSender, }; #[derive(Debug)] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cb0ad6f1ee448c..11a4bcab04d7c0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -90,7 +90,6 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-ipc-server", - "jsonrpc-server-utils", "lazy_static", "libc", "libloading", @@ -135,6 +134,7 @@ 
dependencies = [
  "symlink",
  "thiserror",
  "tikv-jemallocator",
+ "tokio",
 ]

 [[package]]
@@ -5127,7 +5127,6 @@ dependencies = [
  "crossbeam-channel",
  "json5",
  "jsonrpc-core",
- "jsonrpc-server-utils",
  "libloading",
  "log",
  "serde_json",
@@ -5141,6 +5140,7 @@ dependencies = [
  "solana-sdk",
  "solana-transaction-status",
  "thiserror",
+ "tokio",
 ]

 [[package]]
diff --git a/validator/Cargo.toml b/validator/Cargo.toml
index 362a07343b5e4a..844a2bca9aa97f 100644
--- a/validator/Cargo.toml
+++ b/validator/Cargo.toml
@@ -24,7 +24,6 @@ jsonrpc-core = { workspace = true }
 jsonrpc-core-client = { workspace = true, features = ["ipc"] }
 jsonrpc-derive = { workspace = true }
 jsonrpc-ipc-server = { workspace = true }
-jsonrpc-server-utils = { workspace = true }
 lazy_static = { workspace = true }
 libloading = { workspace = true }
 log = { workspace = true }
@@ -66,6 +65,7 @@ solana-version = { workspace = true }
 solana-vote-program = { workspace = true }
 symlink = { workspace = true }
 thiserror = { workspace = true }
+tokio = { workspace = true }

 [dev-dependencies]
 solana-account-decoder = { workspace = true }
diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs
index 57be4cf488865d..3881487882dc2e 100644
--- a/validator/src/admin_rpc_service.rs
+++ b/validator/src/admin_rpc_service.rs
@@ -6,7 +6,6 @@ use {
     jsonrpc_ipc_server::{
         tokio::sync::oneshot::channel as oneshot_channel, RequestContext, ServerBuilder,
     },
-    jsonrpc_server_utils::tokio,
     log::*,
     serde::{de::Deserializer, Deserialize, Serialize},
     solana_accounts_db::accounts_index::AccountIndex,
@@ -35,6 +34,7 @@ use {
         thread::{self, Builder},
         time::{Duration, SystemTime},
     },
+    tokio::runtime::Runtime,
 };

 #[derive(Clone)]
@@ -815,8 +815,8 @@ pub async fn connect(ledger_path: &Path) -> std::result::Result<gen_client::Client, RpcError> {

-pub fn runtime() -> jsonrpc_server_utils::tokio::runtime::Runtime {
-    jsonrpc_server_utils::tokio::runtime::Runtime::new().expect("new tokio runtime")
+pub fn runtime() -> Runtime {
+    Runtime::new().expect("new tokio runtime")
 }

 #[derive(Default, Deserialize, Clone)]

From 184b31c6b7a653cecb1941c295479bacc225cb3a Mon Sep 17 00:00:00 2001
From: Brennan
Date: Wed, 6 Mar 2024 14:03:25 -0800
Subject: [PATCH 333/401] fix typo (#57)

---
 program-runtime/src/loaded_programs.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs
index 926d1179837380..e8a691c537934f 100644
--- a/program-runtime/src/loaded_programs.rs
+++ b/program-runtime/src/loaded_programs.rs
@@ -67,7 +67,7 @@ pub enum LoadedProgramType {
     ///
     /// These can potentially come back alive if the environment changes.
     FailedVerification(ProgramRuntimeEnvironment),
-    /// Tombstone for programs which were explicitly undeployoed / closed.
+    /// Tombstone for programs which were explicitly undeployed / closed.
     #[default]
     Closed,
     /// Tombstone for programs which have recently been modified but the new version is not visible yet.
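The next patch names previously anonymous rayon and tokio thread pools so they are identifiable in `ps -T`, `top -H`, gdb, and similar tools. As a minimal standalone sketch of the two builder APIs it relies on — assuming the `tokio` crate with the `rt-multi-thread` feature and the `rayon` crate; the function and pool names below are illustrative placeholders, not code from this series:

use std::io;

// Tokio: thread_name() gives every worker thread of the runtime the same
// fixed name (Linux truncates thread names to 15 bytes plus NUL).
fn named_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
    tokio::runtime::Builder::new_multi_thread()
        .thread_name("solExampleRt") // placeholder name
        .enable_all()
        .build()
}

// Rayon: thread_name() takes a closure, so each worker can embed its own
// index, e.g. solExample00, solExample01, ...
fn named_rayon_pool() -> Result<rayon::ThreadPool, rayon::ThreadPoolBuildError> {
    rayon::ThreadPoolBuilder::new()
        .thread_name(|i| format!("solExample{i:02}"))
        .build()
}

The 15-byte budget likely explains the terse "sol"-prefixed names in the diffs that follow: a short readable prefix plus a two-digit index still fits within the limit.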
From 8887cd19a1a86f61cfa44fb41a6e528e8857bc59 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 6 Mar 2024 17:03:02 -0600 Subject: [PATCH 334/401] Name previously unnamed thread pool threads (#104) Several rayon and tokio threadpools did not have names; give them names to make tracking them in debug tools easier --- net-utils/src/ip_echo_server.rs | 6 +++++- runtime/src/bank.rs | 5 ++++- runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs | 3 ++- validator/src/admin_rpc_service.rs | 6 +++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs index 7d4186ccb6a810..64fbedadc7acf9 100644 --- a/net-utils/src/ip_echo_server.rs +++ b/net-utils/src/ip_echo_server.rs @@ -173,7 +173,11 @@ pub fn ip_echo_server( ) -> IpEchoServer { tcp_listener.set_nonblocking(true).unwrap(); - let runtime = Runtime::new().expect("Failed to create Runtime"); + let runtime = tokio::runtime::Builder::new_multi_thread() + .thread_name("solIpEchoSrvrRt") + .enable_all() + .build() + .expect("new tokio runtime"); runtime.spawn(run_echo_server(tcp_listener, shred_version)); runtime } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 3ea316f857a2bc..1abf9403e3fef1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1483,7 +1483,10 @@ impl Bank { let epoch = self.epoch(); let slot = self.slot(); let (thread_pool, thread_pool_time) = measure!( - ThreadPoolBuilder::new().build().unwrap(), + ThreadPoolBuilder::new() + .thread_name(|i| format!("solBnkNewEpch{i:02}")) + .build() + .expect("new rayon threadpool"), "thread_pool_creation", ); diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 0c6116274b1cb1..30cbbd4afd5970 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -418,9 +418,10 @@ impl SnapshotStorageRebuilder { /// Builds thread pool to rebuild with fn build_thread_pool(&self) -> ThreadPool { ThreadPoolBuilder::default() + .thread_name(|i| format!("solRbuildSnap{i:02}")) .num_threads(self.num_threads) .build() - .unwrap() + .expect("new rayon threadpool") } } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 3881487882dc2e..b6d65e3ec4a4df 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -816,7 +816,11 @@ pub async fn connect(ledger_path: &Path) -> std::result::Result Runtime { - Runtime::new().expect("new tokio runtime") + tokio::runtime::Builder::new_multi_thread() + .thread_name("solAdminRpcRt") + .enable_all() + .build() + .expect("new tokio runtime") } #[derive(Default, Deserialize, Clone)] From 9cc55349f7c1d1e7d046cbeb9b503f4de02c0888 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 7 Mar 2024 09:52:23 +0800 Subject: [PATCH 335/401] Refactor transaction account unlocking (#103) refactor: unlock accounts --- accounts-db/src/accounts.rs | 64 ++++++++++++++++++++++---------- runtime/src/bank.rs | 12 +++--- runtime/src/transaction_batch.rs | 9 ++++- 3 files changed, 57 insertions(+), 28 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 371db9eb08c095..33a57d56461c78 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -80,11 +80,20 @@ impl AccountLocks { if *count == 0 { occupied_entry.remove_entry(); } + } else { + debug_assert!( + false, + "Attempted to remove a read-lock for a key that wasn't 
read-locked" + ); } } fn unlock_write(&mut self, key: &Pubkey) { - self.write_locks.remove(key); + let removed = self.write_locks.remove(key); + debug_assert!( + removed, + "Attempted to remove a write-lock for a key that wasn't write-locked" + ); } } @@ -618,14 +627,16 @@ impl Accounts { #[allow(clippy::needless_collect)] pub fn unlock_accounts<'a>( &self, - txs: impl Iterator, - results: &[Result<()>], + txs_and_results: impl Iterator)>, ) { - let keys: Vec<_> = txs - .zip(results) + let keys: Vec<_> = txs_and_results .filter(|(_, res)| res.is_ok()) .map(|(tx, _)| tx.get_account_locks_unchecked()) .collect(); + if keys.is_empty() { + return; + } + let mut account_locks = self.account_locks.lock().unwrap(); debug!("bank unlock accounts"); keys.into_iter().for_each(|keys| { @@ -812,6 +823,7 @@ mod tests { }, std::{ borrow::Cow, + iter, sync::atomic::{AtomicBool, AtomicU64, Ordering}, thread, time, }, @@ -1099,8 +1111,8 @@ mod tests { let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; let results = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); - assert_eq!(results[0], Ok(())); - accounts.unlock_accounts(txs.iter(), &results); + assert_eq!(results, vec![Ok(())]); + accounts.unlock_accounts(txs.iter().zip(&results)); } // Disallow over MAX_TX_ACCOUNT_LOCKS @@ -1156,7 +1168,7 @@ mod tests { let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); let results0 = accounts.lock_accounts([tx.clone()].iter(), MAX_TX_ACCOUNT_LOCKS); - assert!(results0[0].is_ok()); + assert_eq!(results0, vec![Ok(())]); assert_eq!( *accounts .account_locks @@ -1190,9 +1202,13 @@ mod tests { let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default()); let txs = vec![tx0, tx1]; let results1 = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); - - assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times - assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable + assert_eq!( + results1, + vec![ + Ok(()), // Read-only account (keypair1) can be referenced multiple times + Err(TransactionError::AccountInUse), // Read-only account (keypair1) cannot also be locked as writable + ], + ); assert_eq!( *accounts .account_locks @@ -1204,8 +1220,8 @@ mod tests { 2 ); - accounts.unlock_accounts([tx].iter(), &results0); - accounts.unlock_accounts(txs.iter(), &results1); + accounts.unlock_accounts(iter::once(&tx).zip(&results0)); + accounts.unlock_accounts(txs.iter().zip(&results1)); let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -1217,7 +1233,10 @@ mod tests { ); let tx = new_sanitized_tx(&[&keypair1], message, Hash::default()); let results2 = accounts.lock_accounts([tx].iter(), MAX_TX_ACCOUNT_LOCKS); - assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable + assert_eq!( + results2, + vec![Ok(())] // Now keypair1 account can be locked as writable + ); // Check that read-only lock with zero references is deleted assert!(accounts @@ -1285,7 +1304,7 @@ mod tests { counter_clone.clone().fetch_add(1, Ordering::SeqCst); } } - accounts_clone.unlock_accounts(txs.iter(), &results); + accounts_clone.unlock_accounts(txs.iter().zip(&results)); if exit_clone.clone().load(Ordering::Relaxed) { break; } @@ -1301,7 +1320,7 @@ mod tests { thread::sleep(time::Duration::from_millis(50)); assert_eq!(counter_value, counter_clone.clone().load(Ordering::SeqCst)); } - accounts_arc.unlock_accounts(txs.iter(), 
&results); + accounts_arc.unlock_accounts(txs.iter().zip(&results)); thread::sleep(time::Duration::from_millis(50)); } exit.store(true, Ordering::Relaxed); @@ -1442,9 +1461,14 @@ mod tests { MAX_TX_ACCOUNT_LOCKS, ); - assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times - assert!(results[1].is_err()); // is not locked due to !qos_results[1].is_ok() - assert!(results[2].is_ok()); // Read-only account (keypair0) can be referenced multiple times + assert_eq!( + results, + vec![ + Ok(()), // Read-only account (keypair0) can be referenced multiple times + Err(TransactionError::WouldExceedMaxBlockCostLimit), // is not locked due to !qos_results[1].is_ok() + Ok(()), // Read-only account (keypair0) can be referenced multiple times + ], + ); // verify that keypair0 read-only lock twice (for tx0 and tx2) assert_eq!( @@ -1466,7 +1490,7 @@ mod tests { .get(&keypair2.pubkey()) .is_none()); - accounts.unlock_accounts(txs.iter(), &results); + accounts.unlock_accounts(txs.iter().zip(&results)); // check all locks to be removed assert!(accounts diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1abf9403e3fef1..39df91c382feff 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4379,13 +4379,11 @@ impl Bank { account_overrides } - pub fn unlock_accounts(&self, batch: &mut TransactionBatch) { - if batch.needs_unlock() { - batch.set_needs_unlock(false); - self.rc - .accounts - .unlock_accounts(batch.sanitized_transactions().iter(), batch.lock_results()) - } + pub fn unlock_accounts<'a>( + &self, + txs_and_results: impl Iterator)>, + ) { + self.rc.accounts.unlock_accounts(txs_and_results) } pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) { diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs index 66711fd5a1acd5..9d0ff5fb7ce007 100644 --- a/runtime/src/transaction_batch.rs +++ b/runtime/src/transaction_batch.rs @@ -51,7 +51,14 @@ impl<'a, 'b> TransactionBatch<'a, 'b> { // Unlock all locked accounts in destructor. impl<'a, 'b> Drop for TransactionBatch<'a, 'b> { fn drop(&mut self) { - self.bank.unlock_accounts(self) + if self.needs_unlock() { + self.set_needs_unlock(false); + self.bank.unlock_accounts( + self.sanitized_transactions() + .iter() + .zip(self.lock_results()), + ) + } } } From f968532d7f4ed27e16d1e7c4a4a9a1824285e016 Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 6 Mar 2024 19:31:07 -0700 Subject: [PATCH 336/401] Prep Anchor downstream CI job for v2 bump (#123) * Add new script to patch spl in anchor downstream * Only specify major version for token-2022 * Add update for ahash * Patch spl in anchor * Remove dex and metadata features for now --- scripts/build-downstream-anchor-projects.sh | 15 +++++- scripts/patch-spl-crates-for-anchor.sh | 55 +++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 scripts/patch-spl-crates-for-anchor.sh diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index cdfa0bae10addb..7d75ccc08ab0e2 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -8,6 +8,7 @@ cd "$(dirname "$0")"/.. 
source ci/_ source scripts/patch-crates.sh source scripts/read-cargo-variable.sh +source scripts/patch-spl-crates-for-anchor.sh anchor_version=$1 solana_ver=$(readCargoVariable version Cargo.toml) @@ -43,6 +44,14 @@ EOF # NOTE This isn't run in a subshell to get $anchor_dir and $anchor_ver anchor() { set -x + + rm -rf spl + git clone https://github.com/solana-labs/solana-program-library.git spl + cd spl || exit 1 + spl_dir=$PWD + get_spl_versions "$spl_dir" + cd .. + rm -rf anchor git clone https://github.com/coral-xyz/anchor.git cd anchor || exit 1 @@ -57,9 +66,13 @@ anchor() { update_solana_dependencies . "$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" + patch_spl_crates . Cargo.toml "$spl_dir" $cargo test - (cd spl && $cargo_build_sbf --features dex metadata stake) + # serum_dex and mpl-token-metadata are using caret versions of solana and SPL dependencies + # rather pull and patch those as well, ignore for now + # (cd spl && $cargo_build_sbf --features dex metadata stake) + (cd spl && $cargo_build_sbf --features stake) (cd client && $cargo test --all-features) anchor_dir=$PWD diff --git a/scripts/patch-spl-crates-for-anchor.sh b/scripts/patch-spl-crates-for-anchor.sh new file mode 100644 index 00000000000000..93ea67b8fceb20 --- /dev/null +++ b/scripts/patch-spl-crates-for-anchor.sh @@ -0,0 +1,55 @@ +spl_memo_version= +spl_token_version= +spl_token_2022_version= +spl_tlv_account_resolution_verison= +spl_transfer_hook_interface_version= + +get_spl_versions() { + declare spl_dir="$1" + spl_memo_version=$(readCargoVariable version "$spl_dir/memo/program/Cargo.toml") + spl_token_version=$(readCargoVariable version "$spl_dir/token/program/Cargo.toml") + spl_token_2022_version=$(readCargoVariable version "$spl_dir/token/program-2022/Cargo.toml"| head -c1) # only use the major version for convenience + spl_tlv_account_resolution_verison=$(readCargoVariable version "$spl_dir/libraries/tlv-account-resolution/Cargo.toml") + spl_transfer_hook_interface_version=$(readCargoVariable version "$spl_dir/token/transfer-hook/interface/Cargo.toml") +} + +patch_spl_crates() { + declare project_root="$1" + declare Cargo_toml="$2" + declare spl_dir="$3" + update_spl_dependencies "$project_root" + patch_crates_io "$Cargo_toml" "$spl_dir" +} + +update_spl_dependencies() { + declare project_root="$1" + declare tomls=() + while IFS='' read -r line; do tomls+=("$line"); done < <(find "$project_root" -name Cargo.toml) + + sed -i -e "s#\(spl-memo = \"\)[^\"]*\(\"\)#\1$spl_memo_version\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-memo = { version = \"\)[^\"]*\(\"\)#\1$spl_memo_version\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-token = \"\)[^\"]*\(\"\)#\1$spl_token_version\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-token = { version = \"\)[^\"]*\(\"\)#\1$spl_token_version\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-token-2022 = \"\).*\(\"\)#\1$spl_token_2022_version\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-token-2022 = { version = \"\)[^\"]*\(\"\)#\1$spl_token_2022_version\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-tlv-account-resolution = \"\)[^\"]*\(\"\)#\1=$spl_tlv_account_resolution_verison\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-tlv-account-resolution = { version = \"\)[^\"]*\(\"\)#\1=$spl_tlv_account_resolution_verison\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(spl-transfer-hook-interface = \"\)[^\"]*\(\"\)#\1=$spl_transfer_hook_interface_version\2#g" "${tomls[@]}" || return $? 
+ sed -i -e "s#\(spl-transfer-hook-interface = { version = \"\)[^\"]*\(\"\)#\1=$spl_transfer_hook_interface_version\2#g" "${tomls[@]}" || return $? + + # patch ahash. This is super brittle; putting here for convenience, since we are already iterating through the tomls + ahash_minor_version="0.8" + sed -i -e "s#\(ahash = \"\)[^\"]*\(\"\)#\1$ahash_minor_version\2#g" "${tomls[@]}" || return $? +} + +patch_crates_io() { + declare Cargo_toml="$1" + declare spl_dir="$2" + cat >> "$Cargo_toml" < Date: Wed, 6 Mar 2024 18:51:50 -0800 Subject: [PATCH 337/401] [TieredStorage] Deprecate the use of account-hash in HotStorage (#93) #### Problem TieredStorage stores account hash as an optional field inside its HotStorage. However, the field isn't used and we have already decided to deprecate the account hash. #### Summary of Changes Remove account-hash from the tiered-storage. #### Test Plan Existing tiered-storage tests. Running validators w/ tiered-storage in mainnet-beta w/o storing account-hash. --- accounts-db/src/account_storage/meta.rs | 3 +- accounts-db/src/tiered_storage.rs | 8 +- accounts-db/src/tiered_storage/byte_block.rs | 25 +--- accounts-db/src/tiered_storage/hot.rs | 60 ++------- accounts-db/src/tiered_storage/meta.rs | 128 +++++-------------- accounts-db/src/tiered_storage/readable.rs | 6 - accounts-db/src/tiered_storage/test_utils.rs | 21 +-- 7 files changed, 58 insertions(+), 193 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 69c24d7be75f7d..b6c8d72042097a 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -128,7 +128,8 @@ impl<'storage> StoredAccountMeta<'storage> { pub fn hash(&self) -> &'storage AccountHash { match self { Self::AppendVec(av) => av.hash(), - Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH), + // tiered-storage has deprecated the use of AccountHash + Self::Hot(_) => &DEFAULT_ACCOUNT_HASH, } } diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index a6f4ea89428bf9..2f8ebac65e3b57 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -362,15 +362,15 @@ mod tests { let mut expected_accounts_map = HashMap::new(); for i in 0..num_accounts { - let (account, address, account_hash, _write_version) = storable_accounts.get(i); - expected_accounts_map.insert(address, (account, account_hash)); + let (account, address, _account_hash, _write_version) = storable_accounts.get(i); + expected_accounts_map.insert(address, account); } let mut index_offset = IndexOffset(0); let mut verified_accounts = HashSet::new(); while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() { - if let Some((account, account_hash)) = expected_accounts_map.get(stored_meta.pubkey()) { - verify_test_account(&stored_meta, *account, stored_meta.pubkey(), account_hash); + if let Some(account) = expected_accounts_map.get(stored_meta.pubkey()) { + verify_test_account(&stored_meta, *account, stored_meta.pubkey()); verified_accounts.insert(stored_meta.pubkey()); } index_offset = next; diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 1cd80add0c2307..6fc7dec611e9a9 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -95,9 +95,6 @@ impl ByteBlockWriter { if let Some(rent_epoch) = opt_fields.rent_epoch { size += self.write_pod(&rent_epoch)?; } - if let Some(hash) = 
opt_fields.account_hash { - size += self.write_pod(hash)?; - } debug_assert_eq!(size, opt_fields.size()); @@ -191,11 +188,7 @@ impl ByteBlockReader { #[cfg(test)] mod tests { - use { - super::*, - crate::accounts_hash::AccountHash, - solana_sdk::{hash::Hash, stake_history::Epoch}, - }; + use {super::*, solana_sdk::stake_history::Epoch}; fn read_type_unaligned(buffer: &[u8], offset: usize) -> (T, usize) { let size = std::mem::size_of::(); @@ -352,19 +345,13 @@ mod tests { let mut writer = ByteBlockWriter::new(format); let mut opt_fields_vec = vec![]; let mut some_count = 0; - let acc_hash = AccountHash(Hash::new_unique()); // prepare a vector of optional fields that contains all combinations // of Some and None. for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(&acc_hash)] { - some_count += rent_epoch.iter().count() + account_hash.iter().count(); + some_count += rent_epoch.iter().count(); - opt_fields_vec.push(AccountMetaOptionalFields { - rent_epoch, - account_hash, - }); - } + opt_fields_vec.push(AccountMetaOptionalFields { rent_epoch }); test_epoch += 1; } @@ -396,12 +383,6 @@ mod tests { verified_count += 1; offset += std::mem::size_of::(); } - if let Some(expected_hash) = opt_fields.account_hash { - let hash = read_pod::(&decoded_buffer, offset).unwrap(); - assert_eq!(hash, expected_hash); - verified_count += 1; - offset += std::mem::size_of::(); - } } // make sure the number of Some fields matches the number of fields we diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index f662c2e062ee11..34f7915186ba9b 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -242,19 +242,6 @@ impl TieredAccountMeta for HotAccountMeta { .flatten() } - /// Returns the account hash by parsing the specified account block. None - /// will be returned if this account does not persist this optional field. - fn account_hash<'a>(&self, account_block: &'a [u8]) -> Option<&'a AccountHash> { - self.flags() - .has_account_hash() - .then(|| { - let offset = self.optional_fields_offset(account_block) - + AccountMetaOptionalFields::account_hash_offset(self.flags()); - byte_block::read_pod::(account_block, offset) - }) - .flatten() - } - /// Returns the offset of the optional fields based on the specified account /// block. 
fn optional_fields_offset(&self, account_block: &[u8]) -> usize { @@ -488,9 +475,6 @@ fn write_optional_fields( if let Some(rent_epoch) = opt_fields.rent_epoch { size += file.write_pod(&rent_epoch)?; } - if let Some(hash) = opt_fields.account_hash { - size += file.write_pod(hash)?; - } debug_assert_eq!(size, opt_fields.size()); @@ -520,12 +504,8 @@ impl HotStorageWriter { account_data: &[u8], executable: bool, rent_epoch: Option, - account_hash: Option<&AccountHash>, ) -> TieredStorageResult { - let optional_fields = AccountMetaOptionalFields { - rent_epoch, - account_hash, - }; + let optional_fields = AccountMetaOptionalFields { rent_epoch }; let mut flags = AccountMetaFlags::new_from(&optional_fields); flags.set_executable(executable); @@ -574,7 +554,7 @@ impl HotStorageWriter { let total_input_accounts = len - skip; let mut stored_infos = Vec::with_capacity(total_input_accounts); for i in skip..len { - let (account, address, account_hash, _write_version) = accounts.get(i); + let (account, address, _account_hash, _write_version) = accounts.get(i); let index_entry = AccountIndexWriterEntry { address, offset: HotAccountOffset::new(cursor)?, @@ -582,7 +562,7 @@ impl HotStorageWriter { // Obtain necessary fields from the account, or default fields // for a zero-lamport account in the None case. - let (lamports, owner, data, executable, rent_epoch, account_hash) = account + let (lamports, owner, data, executable, rent_epoch) = account .map(|acc| { ( acc.lamports(), @@ -591,19 +571,12 @@ impl HotStorageWriter { acc.executable(), // only persist rent_epoch for those rent-paying accounts (acc.rent_epoch() != RENT_EXEMPT_RENT_EPOCH).then_some(acc.rent_epoch()), - Some(account_hash), ) }) - .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None, None)); + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); let owner_offset = owners_table.insert(owner); - let stored_size = self.write_account( - lamports, - owner_offset, - data, - executable, - rent_epoch, - account_hash, - )?; + let stored_size = + self.write_account(lamports, owner_offset, data, executable, rent_epoch)?; cursor += stored_size; stored_infos.push(StoredAccountInfo { @@ -755,11 +728,9 @@ pub mod tests { const TEST_PADDING: u8 = 5; const TEST_OWNER_OFFSET: OwnerOffset = OwnerOffset(0x1fef_1234); const TEST_RENT_EPOCH: Epoch = 7; - let acc_hash = AccountHash(Hash::new_unique()); let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), - account_hash: Some(&acc_hash), }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -779,7 +750,6 @@ pub mod tests { fn test_hot_account_meta_full() { let account_data = [11u8; 83]; let padding = [0u8; 5]; - let acc_hash = AccountHash(Hash::new_unique()); const TEST_LAMPORT: u64 = 2314232137; const OWNER_OFFSET: u32 = 0x1fef_1234; @@ -787,7 +757,6 @@ pub mod tests { let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), - account_hash: Some(&acc_hash), }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -810,7 +779,6 @@ pub mod tests { let meta = byte_block::read_pod::(&buffer, 0).unwrap(); assert_eq!(expected_meta, *meta); assert!(meta.flags().has_rent_epoch()); - assert!(meta.flags().has_account_hash()); assert_eq!(meta.account_data_padding() as usize, padding.len()); let account_block = &buffer[std::mem::size_of::()..]; @@ -823,10 +791,6 @@ pub mod tests { assert_eq!(account_data.len(), meta.account_data_size(account_block)); assert_eq!(account_data, meta.account_data(account_block)); assert_eq!(meta.rent_epoch(account_block), 
optional_fields.rent_epoch); - assert_eq!( - (meta.account_hash(account_block).unwrap()), - optional_fields.account_hash.unwrap() - ); } #[test] @@ -1334,8 +1298,8 @@ pub mod tests { .unwrap() .unwrap(); - let (account, address, account_hash, _write_version) = storable_accounts.get(i); - verify_test_account(&stored_meta, account, address, account_hash); + let (account, address, _account_hash, _write_version) = storable_accounts.get(i); + verify_test_account(&stored_meta, account, address); assert_eq!(i + 1, next.0 as usize); } @@ -1352,9 +1316,9 @@ pub mod tests { .unwrap() .unwrap(); - let (account, address, account_hash, _write_version) = + let (account, address, _account_hash, _write_version) = storable_accounts.get(stored_info.offset); - verify_test_account(&stored_meta, account, address, account_hash); + verify_test_account(&stored_meta, account, address); } // verify get_accounts @@ -1362,8 +1326,8 @@ pub mod tests { // first, we verify everything for (i, stored_meta) in accounts.iter().enumerate() { - let (account, address, account_hash, _write_version) = storable_accounts.get(i); - verify_test_account(stored_meta, account, address, account_hash); + let (account, address, _account_hash, _write_version) = storable_accounts.get(i); + verify_test_account(stored_meta, account, address); } // second, we verify various initial position diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 4e2bb0d95041ca..2aa53e5a4de1ed 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -1,7 +1,7 @@ //! The account meta and related structs for the tiered storage. use { - crate::{accounts_hash::AccountHash, tiered_storage::owners::OwnerOffset}, + crate::tiered_storage::owners::OwnerOffset, bytemuck::{Pod, Zeroable}, modular_bitfield::prelude::*, solana_sdk::stake_history::Epoch, @@ -14,12 +14,10 @@ use { pub struct AccountMetaFlags { /// whether the account meta has rent epoch pub has_rent_epoch: bool, - /// whether the account meta has account hash - pub has_account_hash: bool, /// whether the account is executable pub executable: bool, /// the reserved bits. - reserved: B29, + reserved: B30, } // Ensure there are no implicit padding bytes @@ -70,10 +68,6 @@ pub trait TieredAccountMeta: Sized { /// does not persist this optional field. fn rent_epoch(&self, _account_block: &[u8]) -> Option; - /// Returns the account hash by parsing the specified account block. None - /// will be returned if this account does not persist this optional field. - fn account_hash<'a>(&self, _account_block: &'a [u8]) -> Option<&'a AccountHash>; - /// Returns the offset of the optional fields based on the specified account /// block. fn optional_fields_offset(&self, _account_block: &[u8]) -> usize; @@ -91,7 +85,6 @@ impl AccountMetaFlags { pub fn new_from(optional_fields: &AccountMetaOptionalFields) -> Self { let mut flags = AccountMetaFlags::default(); flags.set_has_rent_epoch(optional_fields.rent_epoch.is_some()); - flags.set_has_account_hash(optional_fields.account_hash.is_some()); flags.set_executable(false); flags } @@ -102,20 +95,15 @@ impl AccountMetaFlags { /// Note that the storage representation of the optional fields might be /// different from its in-memory representation. 
#[derive(Debug, PartialEq, Eq, Clone)] -pub struct AccountMetaOptionalFields<'a> { +pub struct AccountMetaOptionalFields { /// the epoch at which its associated account will next owe rent pub rent_epoch: Option, - /// the hash of its associated account - pub account_hash: Option<&'a AccountHash>, } -impl<'a> AccountMetaOptionalFields<'a> { +impl AccountMetaOptionalFields { /// The size of the optional fields in bytes (excluding the boolean flags). pub fn size(&self) -> usize { self.rent_epoch.map_or(0, |_| std::mem::size_of::()) - + self - .account_hash - .map_or(0, |_| std::mem::size_of::()) } /// Given the specified AccountMetaFlags, returns the size of its @@ -125,9 +113,6 @@ impl<'a> AccountMetaOptionalFields<'a> { if flags.has_rent_epoch() { fields_size += std::mem::size_of::(); } - if flags.has_account_hash() { - fields_size += std::mem::size_of::(); - } fields_size } @@ -137,29 +122,17 @@ impl<'a> AccountMetaOptionalFields<'a> { pub fn rent_epoch_offset(_flags: &AccountMetaFlags) -> usize { 0 } - - /// Given the specified AccountMetaFlags, returns the relative offset - /// of its account_hash field to the offset of its optional fields entry. - pub fn account_hash_offset(flags: &AccountMetaFlags) -> usize { - let mut offset = Self::rent_epoch_offset(flags); - // rent_epoch is the previous field to account hash - if flags.has_rent_epoch() { - offset += std::mem::size_of::(); - } - offset - } } #[cfg(test)] pub mod tests { - use {super::*, solana_sdk::hash::Hash}; + use super::*; #[test] fn test_account_meta_flags_new() { let flags = AccountMetaFlags::new(); assert!(!flags.has_rent_epoch()); - assert!(!flags.has_account_hash()); assert_eq!(flags.reserved(), 0u32); assert_eq!( @@ -179,20 +152,11 @@ pub mod tests { flags.set_has_rent_epoch(true); assert!(flags.has_rent_epoch()); - assert!(!flags.has_account_hash()); - assert!(!flags.executable()); - verify_flags_serialization(&flags); - - flags.set_has_account_hash(true); - - assert!(flags.has_rent_epoch()); - assert!(flags.has_account_hash()); assert!(!flags.executable()); verify_flags_serialization(&flags); flags.set_executable(true); assert!(flags.has_rent_epoch()); - assert!(flags.has_account_hash()); assert!(flags.executable()); verify_flags_serialization(&flags); @@ -203,84 +167,58 @@ pub mod tests { fn update_and_verify_flags(opt_fields: &AccountMetaOptionalFields) { let flags: AccountMetaFlags = AccountMetaFlags::new_from(opt_fields); assert_eq!(flags.has_rent_epoch(), opt_fields.rent_epoch.is_some()); - assert_eq!(flags.has_account_hash(), opt_fields.account_hash.is_some()); assert_eq!(flags.reserved(), 0u32); } #[test] fn test_optional_fields_update_flags() { let test_epoch = 5432312; - let acc_hash = AccountHash(Hash::new_unique()); for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(&acc_hash)] { - update_and_verify_flags(&AccountMetaOptionalFields { - rent_epoch, - account_hash, - }); - } + update_and_verify_flags(&AccountMetaOptionalFields { rent_epoch }); } } #[test] fn test_optional_fields_size() { let test_epoch = 5432312; - let acc_hash = AccountHash(Hash::new_unique()); for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(&acc_hash)] { - let opt_fields = AccountMetaOptionalFields { - rent_epoch, - account_hash, - }; - assert_eq!( - opt_fields.size(), - rent_epoch.map_or(0, |_| std::mem::size_of::()) - + account_hash.map_or(0, |_| std::mem::size_of::()) - ); - assert_eq!( - opt_fields.size(), - AccountMetaOptionalFields::size_from_flags(&AccountMetaFlags::new_from( - 
&opt_fields - )) - ); - } + let opt_fields = AccountMetaOptionalFields { rent_epoch }; + assert_eq!( + opt_fields.size(), + rent_epoch.map_or(0, |_| std::mem::size_of::()), + ); + assert_eq!( + opt_fields.size(), + AccountMetaOptionalFields::size_from_flags(&AccountMetaFlags::new_from( + &opt_fields + )) + ); } } #[test] fn test_optional_fields_offset() { let test_epoch = 5432312; - let acc_hash = AccountHash(Hash::new_unique()); for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(&acc_hash)] { - let rent_epoch_offset = 0; - let account_hash_offset = - rent_epoch_offset + rent_epoch.as_ref().map(std::mem::size_of_val).unwrap_or(0); - let derived_size = account_hash_offset - + account_hash - .as_ref() - .map(|acc_hash| std::mem::size_of_val(*acc_hash)) - .unwrap_or(0); - let opt_fields = AccountMetaOptionalFields { - rent_epoch, - account_hash, - }; - let flags = AccountMetaFlags::new_from(&opt_fields); - assert_eq!( - AccountMetaOptionalFields::rent_epoch_offset(&flags), - rent_epoch_offset - ); - assert_eq!( - AccountMetaOptionalFields::account_hash_offset(&flags), - account_hash_offset - ); - assert_eq!( - AccountMetaOptionalFields::size_from_flags(&flags), - derived_size - ); - } + let rent_epoch_offset = 0; + let derived_size = if rent_epoch.is_some() { + std::mem::size_of::() + } else { + 0 + }; + let opt_fields = AccountMetaOptionalFields { rent_epoch }; + let flags = AccountMetaFlags::new_from(&opt_fields); + assert_eq!( + AccountMetaOptionalFields::rent_epoch_offset(&flags), + rent_epoch_offset + ); + assert_eq!( + AccountMetaOptionalFields::size_from_flags(&flags), + derived_size + ); } } } diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 1801b04fcecd80..8f1d2007182a5b 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -2,7 +2,6 @@ use { crate::{ account_storage::meta::StoredAccountMeta, accounts_file::MatchAccountOwnerError, - accounts_hash::AccountHash, tiered_storage::{ footer::{AccountMetaFormat, TieredStorageFooter}, hot::HotStorageReader, @@ -40,11 +39,6 @@ impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, self.address } - /// Returns the hash of this account. - pub fn hash(&self) -> Option<&'accounts_file AccountHash> { - self.meta.account_hash(self.account_block) - } - /// Returns the index to this account in its AccountsFile. 
pub fn index(&self) -> IndexOffset {
        self.index
diff --git a/accounts-db/src/tiered_storage/test_utils.rs b/accounts-db/src/tiered_storage/test_utils.rs
index 2ed2399f30fbaa..f44f20f77cc5dd 100644
--- a/accounts-db/src/tiered_storage/test_utils.rs
+++ b/accounts-db/src/tiered_storage/test_utils.rs
@@ -48,20 +48,10 @@
 pub(super) fn verify_test_account(
     stored_meta: &StoredAccountMeta<'_>,
     account: Option<&impl ReadableAccount>,
     address: &Pubkey,
-    account_hash: &AccountHash,
 ) {
-    let (lamports, owner, data, executable, account_hash) = account
-        .map(|acc| {
-            (
-                acc.lamports(),
-                acc.owner(),
-                acc.data(),
-                acc.executable(),
-                // only persist rent_epoch for those rent-paying accounts
-                Some(*account_hash),
-            )
-        })
-        .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None));
+    let (lamports, owner, data, executable) = account
+        .map(|acc| (acc.lamports(), acc.owner(), acc.data(), acc.executable()))
+        .unwrap_or((0, &OWNER_NO_OWNER, &[], false));

     assert_eq!(stored_meta.lamports(), lamports);
     assert_eq!(stored_meta.data().len(), data.len());
@@ -69,8 +59,5 @@
     assert_eq!(stored_meta.executable(), executable);
     assert_eq!(stored_meta.owner(), owner);
     assert_eq!(stored_meta.pubkey(), address);
-    assert_eq!(
-        *stored_meta.hash(),
-        account_hash.unwrap_or(AccountHash(Hash::default()))
-    );
+    assert_eq!(*stored_meta.hash(), AccountHash(Hash::default()));
 }

From adefcbbb43231cf3516ec976df7fe03843aff3f7 Mon Sep 17 00:00:00 2001
From: Justin Starry
Date: Thu, 7 Mar 2024 12:06:52 +0800
Subject: [PATCH 338/401] Add support for partial tx batch unlocking (#110)

* Add support for partial tx batch unlocking

* add assert

* fix build

* Add comments

---
 runtime/src/transaction_batch.rs | 96 ++++++++++++++++++++++++++------
 1 file changed, 78 insertions(+), 18 deletions(-)

diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs
index 9d0ff5fb7ce007..ecec27e02e93aa 100644
--- a/runtime/src/transaction_batch.rs
+++ b/runtime/src/transaction_batch.rs
@@ -46,6 +46,39 @@ impl<'a, 'b> TransactionBatch<'a, 'b> {
     pub fn needs_unlock(&self) -> bool {
         self.needs_unlock
     }
+
+    /// For every error result, if the corresponding transaction is
+    /// still locked, unlock the transaction and then record the new error.
+    pub fn unlock_failures(&mut self, transaction_results: Vec<Result<()>>) {
+        assert_eq!(self.lock_results.len(), transaction_results.len());
+        // Shouldn't happen but if a batch was marked as not needing an unlock,
+        // don't unlock failures.
+        if !self.needs_unlock() {
+            return;
+        }
+
+        let txs_and_results = transaction_results
+            .iter()
+            .enumerate()
+            .inspect(|(index, result)| {
+                // It's not valid to update a previously recorded lock error to
+                // become an "ok" result because this could lead to serious
+                // account lock violations where accounts are later unlocked
+                // when they were not currently locked.
+                assert!(!(result.is_ok() && self.lock_results[*index].is_err()))
+            })
+            .filter(|(index, result)| result.is_err() && self.lock_results[*index].is_ok())
+            .map(|(index, _)| (&self.sanitized_txs[index], &self.lock_results[index]));

+        // Unlock the accounts for all transactions which will be updated to a
+        // lock error below.
+        self.bank.unlock_accounts(txs_and_results);
+
+        // Record all new errors by overwriting lock results. Note that it's
+        // not valid to update from err -> ok and the assertion above enforces
+        // that validity constraint.
+        self.lock_results = transaction_results;
+    }
 }

 // Unlock all locked accounts in destructor.
@@ -67,12 +100,12 @@ mod tests { use { super::*, crate::genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - solana_sdk::{signature::Keypair, system_transaction}, + solana_sdk::{signature::Keypair, system_transaction, transaction::TransactionError}, }; #[test] fn test_transaction_batch() { - let (bank, txs) = setup(); + let (bank, txs) = setup(false); // Test getting locked accounts let batch = bank.prepare_sanitized_batch(&txs); @@ -94,7 +127,7 @@ mod tests { #[test] fn test_simulation_batch() { - let (bank, txs) = setup(); + let (bank, txs) = setup(false); // Prepare batch without locks let batch = bank.prepare_unlocked_batch_from_single_tx(&txs[0]); @@ -109,7 +142,37 @@ mod tests { assert!(batch3.lock_results().iter().all(|x| x.is_ok())); } - fn setup() -> (Bank, Vec) { + #[test] + fn test_unlock_failures() { + let (bank, txs) = setup(true); + + // Test getting locked accounts + let mut batch = bank.prepare_sanitized_batch(&txs); + assert_eq!( + batch.lock_results, + vec![Ok(()), Err(TransactionError::AccountInUse), Ok(())] + ); + + let qos_results = vec![ + Ok(()), + Err(TransactionError::AccountInUse), + Err(TransactionError::WouldExceedMaxBlockCostLimit), + ]; + batch.unlock_failures(qos_results.clone()); + assert_eq!(batch.lock_results, qos_results); + + // Dropping the batch should unlock remaining locked transactions + drop(batch); + + // The next batch should be able to lock all but the conflicting tx + let batch2 = bank.prepare_sanitized_batch(&txs); + assert_eq!( + batch2.lock_results, + vec![Ok(()), Err(TransactionError::AccountInUse), Ok(())] + ); + } + + fn setup(insert_conflicting_tx: bool) -> (Bank, Vec) { let dummy_leader_pubkey = solana_sdk::pubkey::new_rand(); let GenesisConfigInfo { genesis_config, @@ -122,20 +185,17 @@ mod tests { let keypair2 = Keypair::new(); let pubkey2 = solana_sdk::pubkey::new_rand(); - let txs = vec![ - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &pubkey, - 1, - genesis_config.hash(), - )), - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &keypair2, - &pubkey2, - 1, - genesis_config.hash(), - )), - ]; + let mut txs = vec![SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()), + )]; + if insert_conflicting_tx { + txs.push(SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()), + )); + } + txs.push(SanitizedTransaction::from_transaction_for_tests( + system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()), + )); (bank, txs) } From 8f3f06cc7f6eac156c18fd147f954af75fa403a7 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Thu, 7 Mar 2024 09:23:49 -0600 Subject: [PATCH 339/401] Combine builtin and BPF compute cost in cost model (#29) * Combine builtin and BPF execution cost into programs_execution_cost since VM has started to consume CUs uniformly * update tests * apply suggestions from code review --- core/src/banking_stage.rs | 3 +- core/src/banking_stage/consumer.rs | 15 ++-- core/src/banking_stage/qos_service.rs | 61 +++++---------- cost-model/src/cost_model.rs | 103 ++++++++++++++++---------- cost-model/src/cost_tracker.rs | 16 ++-- cost-model/src/transaction_cost.rs | 25 ++----- 6 files changed, 106 insertions(+), 117 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 
652f2569f8fd43..603ff55f0003b4 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -285,8 +285,7 @@ pub struct BatchedTransactionCostDetails {
     pub batched_signature_cost: u64,
     pub batched_write_lock_cost: u64,
     pub batched_data_bytes_cost: u64,
-    pub batched_builtins_execute_cost: u64,
-    pub batched_bpf_execute_cost: u64,
+    pub batched_programs_execute_cost: u64,
 }
 
 #[derive(Debug, Default)]
diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs
index f4ac6c6040eda8..957e190c873f64 100644
--- a/core/src/banking_stage/consumer.rs
+++ b/core/src/banking_stage/consumer.rs
@@ -1549,16 +1549,17 @@ mod tests {
         assert_eq!(retryable_transaction_indexes, vec![1]);
 
         let expected_block_cost = if !apply_cost_tracker_during_replay_enabled {
-            let actual_bpf_execution_cost = match commit_transactions_result.first().unwrap() {
-                CommitTransactionDetails::Committed { compute_units } => *compute_units,
-                CommitTransactionDetails::NotCommitted => {
-                    unreachable!()
-                }
-            };
+            let actual_programs_execution_cost =
+                match commit_transactions_result.first().unwrap() {
+                    CommitTransactionDetails::Committed { compute_units } => *compute_units,
+                    CommitTransactionDetails::NotCommitted => {
+                        unreachable!()
+                    }
+                };
 
             let mut cost = CostModel::calculate_cost(&transactions[0], &bank.feature_set);
             if let TransactionCost::Transaction(ref mut usage_cost) = cost {
-                usage_cost.bpf_execution_cost = actual_bpf_execution_cost;
+                usage_cost.programs_execution_cost = actual_programs_execution_cost;
             }
 
             block_cost + cost.sum()
diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs
index 77f05c73a3bc12..8c1507ae3fb91c 100644
--- a/core/src/banking_stage/qos_service.rs
+++ b/core/src/banking_stage/qos_service.rs
@@ -236,14 +236,10 @@ impl QosService {
             batched_transaction_details.costs.batched_data_bytes_cost,
             Ordering::Relaxed,
         );
-        self.metrics.stats.estimated_builtins_execute_cu.fetch_add(
+        self.metrics.stats.estimated_programs_execute_cu.fetch_add(
             batched_transaction_details
                 .costs
-                .batched_builtins_execute_cost,
-            Ordering::Relaxed,
-        );
-        self.metrics.stats.estimated_bpf_execute_cu.fetch_add(
-            batched_transaction_details.costs.batched_bpf_execute_cost,
+                .batched_programs_execute_cost,
             Ordering::Relaxed,
         );
 
@@ -297,7 +293,7 @@ impl QosService {
     pub fn accumulate_actual_execute_cu(&self, units: u64) {
         self.metrics
             .stats
-            .actual_bpf_execute_cu
+            .actual_programs_execute_cu
             .fetch_add(units, Ordering::Relaxed);
     }
 
@@ -331,12 +327,8 @@ impl QosService {
                     saturating_add_assign!(
                         batched_transaction_details
                             .costs
-                            .batched_builtins_execute_cost,
-                        cost.builtins_execution_cost()
-                    );
-                    saturating_add_assign!(
-                        batched_transaction_details.costs.batched_bpf_execute_cost,
-                        cost.bpf_execution_cost()
+                            .batched_programs_execute_cost,
+                        cost.programs_execution_cost()
                     );
                 }
                 Err(transaction_error) => match transaction_error {
@@ -427,14 +419,11 @@ struct QosServiceMetricsStats {
     /// accumulated estimated instruction data Compute Units to be packed into block
     estimated_data_bytes_cu: AtomicU64,
 
-    /// accumulated estimated builtin programs Compute Units to be packed into block
-    estimated_builtins_execute_cu: AtomicU64,
-
-    /// accumulated estimated SBF program Compute Units to be packed into block
-    estimated_bpf_execute_cu: AtomicU64,
+    /// accumulated estimated program Compute Units to be packed into block
+    estimated_programs_execute_cu: AtomicU64,
 
     /// accumulated actual program Compute Units that have been packed into block
-    actual_bpf_execute_cu: AtomicU64,
+    actual_programs_execute_cu: AtomicU64,
 
     /// accumulated actual program execute micro-sec that have been packed into block
     actual_execute_time_us: AtomicU64,
@@ -515,24 +504,19 @@ impl QosServiceMetrics {
                     i64
                 ),
                 (
-                    "estimated_builtins_execute_cu",
+                    "estimated_programs_execute_cu",
                     self.stats
-                        .estimated_builtins_execute_cu
+                        .estimated_programs_execute_cu
                        .swap(0, Ordering::Relaxed),
                     i64
                 ),
                 (
-                    "estimated_bpf_execute_cu",
+                    "actual_programs_execute_cu",
                     self.stats
-                        .estimated_bpf_execute_cu
+                        .actual_programs_execute_cu
                        .swap(0, Ordering::Relaxed),
                     i64
                 ),
-                (
-                    "actual_bpf_execute_cu",
-                    self.stats.actual_bpf_execute_cu.swap(0, Ordering::Relaxed),
-                    i64
-                ),
                 (
                     "actual_execute_time_us",
                     self.stats.actual_execute_time_us.swap(0, Ordering::Relaxed),
@@ -735,7 +719,7 @@ mod tests {
         let committed_status: Vec<_> = qos_cost_results
             .iter()
             .map(|tx_cost| CommitTransactionDetails::Committed {
-                compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost()
+                compute_units: tx_cost.as_ref().unwrap().programs_execution_cost()
                     + execute_units_adjustment,
             })
             .collect();
@@ -862,7 +846,7 @@ mod tests {
                     CommitTransactionDetails::NotCommitted
                 } else {
                     CommitTransactionDetails::Committed {
-                        compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost()
+                        compute_units: tx_cost.as_ref().unwrap().programs_execution_cost()
                             + execute_units_adjustment,
                     }
                 }
@@ -898,8 +882,7 @@ mod tests {
         let signature_cost = 1;
         let write_lock_cost = 2;
         let data_bytes_cost = 3;
-        let builtins_execution_cost = 4;
-        let bpf_execution_cost = 10;
+        let programs_execution_cost = 10;
         let num_txs = 4;
 
         let tx_cost_results: Vec<_> = (0..num_txs)
@@ -909,8 +892,7 @@ mod tests {
                     signature_cost,
                     write_lock_cost,
                     data_bytes_cost,
-                    builtins_execution_cost,
-                    bpf_execution_cost,
+                    programs_execution_cost,
                     ..UsageCostDetails::default()
                 }))
             } else {
@@ -922,8 +904,7 @@ mod tests {
         let expected_signatures = signature_cost * (num_txs / 2);
         let expected_write_locks = write_lock_cost * (num_txs / 2);
         let expected_data_bytes = data_bytes_cost * (num_txs / 2);
-        let expected_builtins_execution_costs = builtins_execution_cost * (num_txs / 2);
-        let expected_bpf_execution_costs = bpf_execution_cost * (num_txs / 2);
+        let expected_programs_execution_costs = programs_execution_cost * (num_txs / 2);
         let batched_transaction_details =
             QosService::accumulate_batched_transaction_costs(tx_cost_results.iter());
         assert_eq!(
@@ -939,14 +920,10 @@ mod tests {
             batched_transaction_details.costs.batched_data_bytes_cost
         );
         assert_eq!(
-            expected_builtins_execution_costs,
+            expected_programs_execution_costs,
             batched_transaction_details
                 .costs
-                .batched_builtins_execute_cost
-        );
-        assert_eq!(
-            expected_bpf_execution_costs,
-            batched_transaction_details.costs.batched_bpf_execute_cost
+                .batched_programs_execute_cost
         );
     }
 }
diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs
index 1e15735426737f..b81ea24402d4df 100644
--- a/cost-model/src/cost_model.rs
+++ b/cost-model/src/cost_model.rs
@@ -93,21 +93,25 @@ impl CostModel {
         transaction: &SanitizedTransaction,
         feature_set: &FeatureSet,
     ) {
-        let mut builtin_costs = 0u64;
-        let mut bpf_costs = 0u64;
+        let mut programs_execution_costs = 0u64;
         let mut loaded_accounts_data_size_cost = 0u64;
         let mut data_bytes_len_total = 0u64;
         let mut compute_unit_limit_is_set = false;
+        let mut has_user_space_instructions = false;
 
         for (program_id, instruction) in transaction.message().program_instructions_iter() {
-            // to keep the same behavior, look for builtin first
-            if let Some(builtin_cost) = BUILT_IN_INSTRUCTION_COSTS.get(program_id) {
-                builtin_costs = builtin_costs.saturating_add(*builtin_cost);
-            } else {
-                bpf_costs = bpf_costs
-                    .saturating_add(u64::from(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT))
-                    .min(u64::from(MAX_COMPUTE_UNIT_LIMIT));
-            }
+            let ix_execution_cost =
+                if let Some(builtin_cost) = BUILT_IN_INSTRUCTION_COSTS.get(program_id) {
+                    *builtin_cost
+                } else {
+                    has_user_space_instructions = true;
+                    u64::from(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT)
+                };
+
+            programs_execution_costs = programs_execution_costs
+                .saturating_add(ix_execution_cost)
+                .min(u64::from(MAX_COMPUTE_UNIT_LIMIT));
+
             data_bytes_len_total =
                 data_bytes_len_total.saturating_add(instruction.data.len() as u64);
@@ -120,8 +124,6 @@ impl CostModel {
             }
         }
 
-        // calculate bpf cost based on compute budget instructions
-
         // if failed to process compute_budget instructions, the transaction will not be executed
         // by `bank`, therefore it should be considered as no execution cost by cost model.
         match process_compute_budget_instructions(transaction.message().program_instructions_iter())
@@ -132,8 +134,8 @@
                 // 'compute_unit_limit_is_set' flag, because compute_budget does not distinguish
                 // builtin and bpf instructions when calculating default compute-unit-limit. (see
                 // compute_budget.rs test `test_process_mixed_instructions_without_compute_budget`)
-                if bpf_costs > 0 && compute_unit_limit_is_set {
-                    bpf_costs = u64::from(compute_budget_limits.compute_unit_limit);
+                if has_user_space_instructions && compute_unit_limit_is_set {
+                    programs_execution_costs = u64::from(compute_budget_limits.compute_unit_limit);
                 }
 
                 if feature_set
@@ -146,13 +148,11 @@
             }
             Err(_) => {
-                builtin_costs = 0;
-                bpf_costs = 0;
+                programs_execution_costs = 0;
             }
         }
 
-        tx_cost.builtins_execution_cost = builtin_costs;
-        tx_cost.bpf_execution_cost = bpf_costs;
+        tx_cost.programs_execution_cost = programs_execution_costs;
         tx_cost.loaded_accounts_data_size_cost = loaded_accounts_data_size_cost;
         tx_cost.data_bytes_cost = data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST;
     }
@@ -304,8 +304,7 @@ mod tests {
             &simple_transaction,
             &FeatureSet::all_enabled(),
         );
-        assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost);
-        assert_eq!(0, tx_cost.bpf_execution_cost);
+        assert_eq!(*expected_execution_cost, tx_cost.programs_execution_cost);
         assert_eq!(3, tx_cost.data_bytes_cost);
     }
 
@@ -333,8 +332,10 @@ mod tests {
             &token_transaction,
             &FeatureSet::all_enabled(),
         );
-        assert_eq!(0, tx_cost.builtins_execution_cost);
-        assert_eq!(200_000, tx_cost.bpf_execution_cost);
+        assert_eq!(
+            DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64,
+            tx_cost.programs_execution_cost
+        );
         assert_eq!(0, tx_cost.data_bytes_cost);
     }
 
@@ -396,13 +397,8 @@ mod tests {
             &token_transaction,
             &FeatureSet::all_enabled(),
         );
-        assert_eq!(
-            *BUILT_IN_INSTRUCTION_COSTS
-                .get(&compute_budget::id())
-                .unwrap(),
-            tx_cost.builtins_execution_cost
-        );
-        assert_eq!(12_345, tx_cost.bpf_execution_cost);
+        // If cu-limit is specified, that would be the cost for all programs
+        assert_eq!(12_345, tx_cost.programs_execution_cost);
         assert_eq!(1, tx_cost.data_bytes_cost);
     }
 
@@ -446,8 +442,7 @@ mod tests {
             &token_transaction,
             &FeatureSet::all_enabled(),
         );
-        assert_eq!(0, tx_cost.builtins_execution_cost);
-        assert_eq!(0, tx_cost.bpf_execution_cost);
+        assert_eq!(0, tx_cost.programs_execution_cost);
     }
 
     #[test]
@@ -474,8 +469,7 @@
         let mut tx_cost = UsageCostDetails::default();
         CostModel::get_transaction_cost(&mut tx_cost, &tx, &FeatureSet::all_enabled());
-        assert_eq!(expected_cost, tx_cost.builtins_execution_cost);
-        assert_eq!(0, tx_cost.bpf_execution_cost);
+        assert_eq!(expected_cost, tx_cost.programs_execution_cost);
         assert_eq!(6, tx_cost.data_bytes_cost);
     }
 
@@ -506,8 +500,7 @@
         let expected_cost = DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 2;
         let mut tx_cost = UsageCostDetails::default();
         CostModel::get_transaction_cost(&mut tx_cost, &tx, &FeatureSet::all_enabled());
-        assert_eq!(0, tx_cost.builtins_execution_cost);
-        assert_eq!(expected_cost, tx_cost.bpf_execution_cost);
+        assert_eq!(expected_cost, tx_cost.programs_execution_cost);
         assert_eq!(0, tx_cost.data_bytes_cost);
     }
 
@@ -567,7 +560,7 @@
         let tx_cost = CostModel::calculate_cost(&tx, &FeatureSet::all_enabled());
         assert_eq!(expected_account_cost, tx_cost.write_lock_cost());
-        assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost());
+        assert_eq!(*expected_execution_cost, tx_cost.programs_execution_cost());
         assert_eq!(2, tx_cost.writable_accounts().len());
         assert_eq!(
             expected_loaded_accounts_data_size_cost,
@@ -596,7 +589,7 @@
         let tx_cost = CostModel::calculate_cost(&tx, &feature_set);
         assert_eq!(expected_account_cost, tx_cost.write_lock_cost());
-        assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost());
+        assert_eq!(*expected_execution_cost, tx_cost.programs_execution_cost());
         assert_eq!(2, tx_cost.writable_accounts().len());
         assert_eq!(
             expected_loaded_accounts_data_size_cost,
@@ -635,7 +628,7 @@
         let tx_cost = CostModel::calculate_cost(&tx, &feature_set);
         assert_eq!(expected_account_cost, tx_cost.write_lock_cost());
-        assert_eq!(expected_execution_cost, tx_cost.builtins_execution_cost());
+        assert_eq!(expected_execution_cost, tx_cost.programs_execution_cost());
         assert_eq!(2, tx_cost.writable_accounts().len());
         assert_eq!(
             expected_loaded_accounts_data_size_cost,
@@ -666,7 +659,37 @@
         let mut tx_cost = UsageCostDetails::default();
         CostModel::get_transaction_cost(&mut tx_cost, &transaction, &FeatureSet::all_enabled());
-        assert_eq!(expected_builtin_cost, tx_cost.builtins_execution_cost);
-        assert_eq!(expected_bpf_cost as u64, tx_cost.bpf_execution_cost);
+        assert_eq!(
+            expected_builtin_cost + expected_bpf_cost as u64,
+            tx_cost.programs_execution_cost
+        );
+    }
+
+    #[test]
+    fn test_transaction_cost_with_mix_instruction_with_cu_limit() {
+        let (mint_keypair, start_hash) = test_setup();
+
+        let transaction =
+            SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer(
+                &[
+                    system_instruction::transfer(&mint_keypair.pubkey(), &Pubkey::new_unique(), 2),
+                    ComputeBudgetInstruction::set_compute_unit_limit(12_345),
+                ],
+                Some(&mint_keypair.pubkey()),
+                &[&mint_keypair],
+                start_hash,
+            ));
+        // transaction has one builtin instruction, and one ComputeBudget::compute_unit_limit
+        let expected_cost = *BUILT_IN_INSTRUCTION_COSTS
+            .get(&solana_system_program::id())
+            .unwrap()
+            + BUILT_IN_INSTRUCTION_COSTS
+                .get(&compute_budget::id())
+                .unwrap();
+
+        let mut tx_cost = UsageCostDetails::default();
+        CostModel::get_transaction_cost(&mut tx_cost, &transaction, &FeatureSet::all_enabled());
+
+        assert_eq!(expected_cost, tx_cost.programs_execution_cost);
+    }
 }
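The cost_model.rs hunk above folds the former builtin and user-space (BPF/SBF) accumulators into a single total that saturates and is clamped at the block-wide maximum. A minimal standalone sketch of that folding rule, with the limits passed in as plain numbers (names here are illustrative, not part of the patch):

// Per-instruction execution-cost folding as introduced above: builtins
// contribute their fixed table entry, everything else contributes the
// default per-instruction compute-unit limit; the running total saturates
// and is clamped to the maximum compute-unit limit.
fn fold_execution_cost(
    total: u64,
    builtin_cost: Option<u64>, // Some(cu) for a builtin, None for a user-space program
    default_cu: u64,
    max_cu: u64,
) -> u64 {
    total
        .saturating_add(builtin_cost.unwrap_or(default_cu))
        .min(max_cu)
}

With this shape, a transaction mixing builtin and user-space instructions yields one programs_execution_cost, which is what the new test_transaction_cost_with_mix_instruction_with_cu_limit test asserts.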
diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs
index 9d2b3b624afeb4..8fb092c36680a0 100644
--- a/cost-model/src/cost_tracker.rs
+++ b/cost-model/src/cost_tracker.rs
@@ -105,7 +105,7 @@ impl CostTracker {
         estimated_tx_cost: &TransactionCost,
         actual_execution_units: u64,
     ) {
-        let estimated_execution_units = estimated_tx_cost.bpf_execution_cost();
+        let estimated_execution_units = estimated_tx_cost.programs_execution_cost();
         match actual_execution_units.cmp(&estimated_execution_units) {
             Ordering::Equal => (),
             Ordering::Greater => {
@@ -307,7 +307,7 @@ mod tests {
             system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash),
         );
         let mut tx_cost = UsageCostDetails::new_with_capacity(1);
-        tx_cost.bpf_execution_cost = 5;
+        tx_cost.programs_execution_cost = 5;
         tx_cost.writable_accounts.push(mint_keypair.pubkey());
 
         (simple_transaction, TransactionCost::Transaction(tx_cost))
@@ -606,7 +606,7 @@
         {
             let tx_cost = TransactionCost::Transaction(UsageCostDetails {
                 writable_accounts: vec![acct1, acct2, acct3],
-                bpf_execution_cost: cost,
+                programs_execution_cost: cost,
                 ..UsageCostDetails::default()
             });
             assert!(testee.try_add(&tx_cost).is_ok());
@@ -624,7 +624,7 @@
         {
             let tx_cost = TransactionCost::Transaction(UsageCostDetails {
                 writable_accounts: vec![acct2],
-                bpf_execution_cost: cost,
+                programs_execution_cost: cost,
                 ..UsageCostDetails::default()
             });
             assert!(testee.try_add(&tx_cost).is_ok());
@@ -644,7 +644,7 @@
         {
             let tx_cost = TransactionCost::Transaction(UsageCostDetails {
                 writable_accounts: vec![acct1, acct2],
-                bpf_execution_cost: cost,
+                programs_execution_cost: cost,
                 ..UsageCostDetails::default()
             });
             assert!(testee.try_add(&tx_cost).is_err());
@@ -668,7 +668,7 @@
         let mut testee = CostTracker::new(account_max, block_max, block_max);
         let tx_cost = TransactionCost::Transaction(UsageCostDetails {
             writable_accounts: vec![acct1, acct2, acct3],
-            bpf_execution_cost: cost,
+            programs_execution_cost: cost,
             ..UsageCostDetails::default()
         });
         let mut expected_block_cost = tx_cost.sum();
@@ -755,7 +755,7 @@
         let tx_cost = TransactionCost::Transaction(UsageCostDetails {
             writable_accounts: vec![acct1, acct2, acct3],
-            bpf_execution_cost: cost,
+            programs_execution_cost: cost,
             ..UsageCostDetails::default()
         });
@@ -802,7 +802,7 @@
         let cost = 100u64;
         let tx_cost = TransactionCost::Transaction(UsageCostDetails {
             writable_accounts: vec![Pubkey::new_unique()],
-            bpf_execution_cost: cost,
+            programs_execution_cost: cost,
             ..UsageCostDetails::default()
         });
diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs
index 76865fff30fd57..c6e68bfe17b6f4 100644
--- a/cost-model/src/transaction_cost.rs
+++ b/cost-model/src/transaction_cost.rs
@@ -35,10 +35,10 @@ impl TransactionCost {
         }
     }
 
-    pub fn bpf_execution_cost(&self) -> u64 {
+    pub fn programs_execution_cost(&self) -> u64 {
         match self {
-            Self::SimpleVote { .. } => 0,
-            Self::Transaction(usage_cost) => usage_cost.bpf_execution_cost,
+            Self::SimpleVote { .. } => solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS,
+            Self::Transaction(usage_cost) => usage_cost.programs_execution_cost,
         }
     }
 
@@ -85,13 +85,6 @@
         }
     }
 
-    pub fn builtins_execution_cost(&self) -> u64 {
-        match self {
-            Self::SimpleVote { .. } => solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS,
-            Self::Transaction(usage_cost) => usage_cost.builtins_execution_cost,
-        }
-    }
-
     pub fn writable_accounts(&self) -> &[Pubkey] {
         match self {
             Self::SimpleVote { writable_accounts } => writable_accounts,
@@ -109,8 +102,7 @@ pub struct UsageCostDetails {
     pub signature_cost: u64,
     pub write_lock_cost: u64,
     pub data_bytes_cost: u64,
-    pub builtins_execution_cost: u64,
-    pub bpf_execution_cost: u64,
+    pub programs_execution_cost: u64,
     pub loaded_accounts_data_size_cost: u64,
     pub account_data_size: u64,
 }
@@ -122,8 +114,7 @@ impl Default for UsageCostDetails {
             signature_cost: 0u64,
             write_lock_cost: 0u64,
             data_bytes_cost: 0u64,
-            builtins_execution_cost: 0u64,
-            bpf_execution_cost: 0u64,
+            programs_execution_cost: 0u64,
             loaded_accounts_data_size_cost: 0u64,
             account_data_size: 0u64,
         }
@@ -140,8 +131,7 @@ impl PartialEq for UsageCostDetails {
         self.signature_cost == other.signature_cost
             && self.write_lock_cost == other.write_lock_cost
             && self.data_bytes_cost == other.data_bytes_cost
-            && self.builtins_execution_cost == other.builtins_execution_cost
-            && self.bpf_execution_cost == other.bpf_execution_cost
+            && self.programs_execution_cost == other.programs_execution_cost
             && self.loaded_accounts_data_size_cost == other.loaded_accounts_data_size_cost
             && self.account_data_size == other.account_data_size
             && to_hash_set(&self.writable_accounts) == to_hash_set(&other.writable_accounts)
@@ -168,8 +158,7 @@ impl UsageCostDetails {
         self.signature_cost
             .saturating_add(self.write_lock_cost)
             .saturating_add(self.data_bytes_cost)
-            .saturating_add(self.builtins_execution_cost)
-            .saturating_add(self.bpf_execution_cost)
+            .saturating_add(self.programs_execution_cost)
             .saturating_add(self.loaded_accounts_data_size_cost)
     }
 }
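Two behavioral points of this patch are easy to miss in the diff: a simple vote now reports the vote processor's default compute units rather than 0, and UsageCostDetails::sum() now adds a single consolidated execution term. A standalone mirror of the new sum over plain integers (illustrative only, not part of the patch):

// Mirror of UsageCostDetails::sum() after the change: one consolidated
// programs_execution_cost term instead of separate builtin and BPF terms.
fn usage_cost_sum(
    signature_cost: u64,
    write_lock_cost: u64,
    data_bytes_cost: u64,
    programs_execution_cost: u64,
    loaded_accounts_data_size_cost: u64,
) -> u64 {
    signature_cost
        .saturating_add(write_lock_cost)
        .saturating_add(data_bytes_cost)
        .saturating_add(programs_execution_cost)
        .saturating_add(loaded_accounts_data_size_cost)
}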
From 85cfe23b46d745a225436a09b88d316ee470371b Mon Sep 17 00:00:00 2001
From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com>
Date: Thu, 7 Mar 2024 12:26:31 -0300
Subject: [PATCH 340/401] Add tests for `svm/transaction_processor.rs` (#112)

---
 Cargo.lock                       |   1 +
 programs/bpf_loader/src/lib.rs   |   3 +-
 svm/Cargo.toml                   |   1 +
 svm/src/transaction_processor.rs | 529 ++++++++++++++++++++++++++++++-
 svm/tests/test_program.so        | Bin 0 -> 170136 bytes
 5 files changed, 531 insertions(+), 3 deletions(-)
 create mode 100755 svm/tests/test_program.so

diff --git a/Cargo.lock b/Cargo.lock
index db5431da6ef62e..afdb8b0a306578 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7288,6 +7288,7 @@ dependencies = [
 name = "solana-svm"
 version = "1.19.0"
 dependencies = [
+ "bincode",
 "itertools",
 "log",
 "percentage",
diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs
index 5ba8b26e086c69..a9c34fbabfc6f6 100644
--- a/programs/bpf_loader/src/lib.rs
+++ b/programs/bpf_loader/src/lib.rs
@@ -39,7 +39,7 @@ use {
         },
         instruction::{AccountMeta, InstructionError},
         loader_upgradeable_instruction::UpgradeableLoaderInstruction,
-        native_loader,
+        loader_v4, native_loader,
         program_utils::limited_deserialize,
         pubkey::Pubkey,
         saturating_add_assign,
@@ -193,6 +193,7 @@ pub fn check_loader_id(id: &Pubkey) -> bool {
     bpf_loader::check_id(id)
         || bpf_loader_deprecated::check_id(id)
         || bpf_loader_upgradeable::check_id(id)
+        || loader_v4::check_id(id)
 }
 
 /// Only used in macro, do not use directly!
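With the one-line change to check_loader_id above, all four loader ids are accepted. A unit-style sketch of the resulting contract (assuming the same solana_sdk loader modules that the diff already imports):

#[test]
fn check_loader_id_accepts_loader_v4() {
    // The three previously accepted loaders still pass ...
    assert!(check_loader_id(&bpf_loader::id()));
    assert!(check_loader_id(&bpf_loader_deprecated::id()));
    assert!(check_loader_id(&bpf_loader_upgradeable::id()));
    // ... and loader-v4 is now accepted as well.
    assert!(check_loader_id(&loader_v4::id()));
    // Non-loader program ids are still rejected.
    assert!(!check_loader_id(&native_loader::id()));
}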
diff --git a/svm/Cargo.toml b/svm/Cargo.toml
index ac672613c9c4fc..21da2f7105bd73 100644
--- a/svm/Cargo.toml
+++ b/svm/Cargo.toml
@@ -28,6 +28,7 @@ crate-type = ["lib"]
 name = "solana_svm"
 
 [dev-dependencies]
+bincode = { workspace = true }
 solana-logger = { workspace = true }
 solana-sdk = { workspace = true, features = ["dev-context-only-utils"] }
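The tests added in the next diff revolve around a MockBankCallback that implements TransactionProcessingCallback over a plain HashMap of accounts. The one subtle rule it encodes is that a zero-lamport account is treated as non-existent for owner matching; a standalone sketch of the same logic (hypothetical free function, not part of the patch):

use solana_sdk::pubkey::Pubkey;

// Owner matching as implemented by the mock callback below: dead accounts
// (zero lamports) never match; otherwise return the index of the matching owner.
fn matches_owners(lamports: u64, owner: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
    if lamports == 0 {
        None
    } else {
        owners.iter().position(|entry| owner == entry)
    }
}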
diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs
index 38c5c23affd4de..fa417850699372 100644
--- a/svm/src/transaction_processor.rs
+++ b/svm/src/transaction_processor.rs
@@ -87,6 +87,7 @@ pub trait TransactionProcessingCallback {
     }
 }
 
+#[derive(Debug)]
 enum ProgramAccountLoadResult {
     AccountNotFound,
     InvalidAccountData(ProgramRuntimeEnvironment),
@@ -921,8 +922,18 @@ mod tests {
     use {
         super::*,
-        solana_program_runtime::loaded_programs::BlockRelation,
-        solana_sdk::{sysvar::rent::Rent, transaction_context::TransactionContext},
+        solana_program_runtime::{
+            loaded_programs::BlockRelation, solana_rbpf::program::BuiltinProgram,
+        },
+        solana_sdk::{
+            account::WritableAccount, bpf_loader, sysvar::rent::Rent,
+            transaction_context::TransactionContext,
+        },
+        std::{
+            env,
+            fs::{self, File},
+            io::Read,
+        },
     };
 
     struct TestForkGraph {}
@@ -933,6 +944,43 @@ mod tests {
         }
     }
 
+    #[derive(Default)]
+    pub struct MockBankCallback {
+        rent_collector: RentCollector,
+        feature_set: Arc<FeatureSet>,
+        pub account_shared_data: HashMap<Pubkey, AccountSharedData>,
+    }
+
+    impl TransactionProcessingCallback for MockBankCallback {
+        fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
+            if let Some(data) = self.account_shared_data.get(account) {
+                if data.lamports() == 0 {
+                    None
+                } else {
+                    owners.iter().position(|entry| data.owner() == entry)
+                }
+            } else {
+                None
+            }
+        }
+
+        fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
+            self.account_shared_data.get(pubkey).cloned()
+        }
+
+        fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
+            todo!()
+        }
+
+        fn get_rent_collector(&self) -> &RentCollector {
+            &self.rent_collector
+        }
+
+        fn get_feature_set(&self) -> Arc<FeatureSet> {
+            self.feature_set.clone()
+        }
+    }
+
     #[test]
     fn test_inner_instructions_list_from_instruction_trace() {
         let instruction_trace = [1, 2, 1, 1, 2, 3, 2];
@@ -980,4 +1028,481 @@ mod tests {
             ]
         );
     }
+
+    #[test]
+    fn test_load_program_accounts_account_not_found() {
+        let mut mock_bank = MockBankCallback::default();
+        let key = Pubkey::new_unique();
+        let environment = ProgramRuntimeEnvironments::default();
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+
+        assert!(matches!(result, ProgramAccountLoadResult::AccountNotFound));
+
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(bpf_loader_upgradeable::id());
+        let state = UpgradeableLoaderState::Program {
+            programdata_address: Pubkey::new_unique(),
+        };
+        account_data.set_data(bincode::serialize(&state).unwrap());
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+        assert!(matches!(result, ProgramAccountLoadResult::AccountNotFound));
+
+        account_data.set_data(Vec::new());
+        mock_bank.account_shared_data.insert(key, account_data);
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+
+        assert!(matches!(
+            result,
+            ProgramAccountLoadResult::InvalidAccountData(_)
+        ));
+    }
+
+    #[test]
+    fn test_load_program_accounts_loader_v4() {
+        let key = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(loader_v4::id());
+        let environment = ProgramRuntimeEnvironments::default();
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+        assert!(matches!(
+            result,
+            ProgramAccountLoadResult::InvalidAccountData(_)
+        ));
+
+        account_data.set_data(vec![0; 64]);
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+        assert!(matches!(
+            result,
+            ProgramAccountLoadResult::InvalidAccountData(_)
+        ));
+
+        let loader_data = LoaderV4State {
+            slot: 25,
+            authority_address: Pubkey::new_unique(),
+            status: LoaderV4Status::Deployed,
+        };
+        let encoded = unsafe {
+            std::mem::transmute::<&LoaderV4State, &[u8; LoaderV4State::program_data_offset()]>(
+                &loader_data,
+            )
+        };
+        account_data.set_data(encoded.to_vec());
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+
+        match result {
+            ProgramAccountLoadResult::ProgramOfLoaderV4(data, slot) => {
+                assert_eq!(data, account_data);
+                assert_eq!(slot, 25);
+            }
+
+            _ => panic!("Invalid result"),
+        }
+    }
+
+    #[test]
+    fn test_load_program_accounts_loader_v1_or_v2() {
+        let key = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(bpf_loader::id());
+        let environment = ProgramRuntimeEnvironments::default();
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key, &environment);
+        match result {
+            ProgramAccountLoadResult::ProgramOfLoaderV1orV2(data) => {
+                assert_eq!(data, account_data);
+            }
+            _ => panic!("Invalid result"),
+        }
+    }
+
+    #[test]
+    fn test_load_program_accounts_success() {
+        let key1 = Pubkey::new_unique();
+        let key2 = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let environment = ProgramRuntimeEnvironments::default();
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(bpf_loader_upgradeable::id());
+
+        let state = UpgradeableLoaderState::Program {
+            programdata_address: key2,
+        };
+        account_data.set_data(bincode::serialize(&state).unwrap());
+        mock_bank
+            .account_shared_data
+            .insert(key1, account_data.clone());
+
+        let state = UpgradeableLoaderState::ProgramData {
+            slot: 25,
+            upgrade_authority_address: None,
+        };
+        let mut account_data2 = AccountSharedData::default();
+        account_data2.set_data(bincode::serialize(&state).unwrap());
+        mock_bank
+            .account_shared_data
+            .insert(key2, account_data2.clone());
+
+        let result = batch_processor.load_program_accounts(&mock_bank, &key1, &environment);
+
+        match result {
+            ProgramAccountLoadResult::ProgramOfLoaderV3(data1, data2, slot) => {
+                assert_eq!(data1, account_data);
+                assert_eq!(data2, account_data2);
+                assert_eq!(slot, 25);
+            }
+
+            _ => panic!("Invalid result"),
+        }
+    }
+
+    #[test]
+    fn test_load_program_from_bytes() {
+        let mut dir = env::current_dir().unwrap();
+        dir.push("tests");
+        dir.push("test_program.so");
+        let mut file = File::open(dir.clone()).expect("file not found");
+        let metadata = fs::metadata(dir).expect("Unable to read metadata");
+        let mut buffer = vec![0; metadata.len() as usize];
+        file.read_exact(&mut buffer).expect("Buffer overflow");
+
+        let mut metrics = LoadProgramMetrics::default();
+        let loader = bpf_loader_upgradeable::id();
+        let size = metadata.len() as usize;
+        let slot = 2;
+        let environment = ProgramRuntimeEnvironment::new(BuiltinProgram::new_mock());
+
+        let result = TransactionBatchProcessor::<TestForkGraph>::load_program_from_bytes(
+            &mut metrics,
+            &buffer,
+            &loader,
+            size,
+            slot,
+            environment.clone(),
+            false,
+        );
+
+        assert!(result.is_ok());
+
+        let result = TransactionBatchProcessor::<TestForkGraph>::load_program_from_bytes(
+            &mut metrics,
+            &buffer,
+            &loader,
+            size,
+            slot,
+            environment,
+            true,
+        );
+
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn test_load_program_not_found() {
+        let mock_bank = MockBankCallback::default();
+        let key = Pubkey::new_unique();
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+
+        let result = batch_processor.load_program(&mock_bank, &key, false, 50);
+
+        let loaded_program = LoadedProgram::new_tombstone(0, LoadedProgramType::Closed);
+        assert_eq!(result, Arc::new(loaded_program));
+    }
+
+    #[test]
+    fn test_load_program_invalid_account_data() {
+        let key = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(loader_v4::id());
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program(&mock_bank, &key, false, 20);
+
+        let loaded_program = LoadedProgram::new_tombstone(
+            0,
+            LoadedProgramType::FailedVerification(
+                batch_processor
+                    .loaded_programs_cache
+                    .read()
+                    .unwrap()
+                    .get_environments_for_epoch(20)
+                    .clone()
+                    .program_runtime_v1,
+            ),
+        );
+        assert_eq!(result, Arc::new(loaded_program));
+    }
+
+    #[test]
+    fn test_load_program_program_loader_v1_or_v2() {
+        let key = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(bpf_loader::id());
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        // This should return an error
+        let result = batch_processor.load_program(&mock_bank, &key, false, 20);
+        let loaded_program = LoadedProgram::new_tombstone(
+            0,
+            LoadedProgramType::FailedVerification(
+                batch_processor
+                    .loaded_programs_cache
+                    .read()
+                    .unwrap()
+                    .get_environments_for_epoch(20)
+                    .clone()
+                    .program_runtime_v1,
+            ),
+        );
+        assert_eq!(result, Arc::new(loaded_program));
+
+        let mut dir = env::current_dir().unwrap();
+        dir.push("tests");
+        dir.push("test_program.so");
+        let mut file = File::open(dir.clone()).expect("file not found");
+        let metadata = fs::metadata(dir).expect("Unable to read metadata");
+        let mut buffer = vec![0; metadata.len() as usize];
+        file.read_exact(&mut buffer).expect("buffer overflow");
+        account_data.set_data(buffer);
+
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program(&mock_bank, &key, false, 20);
+
+        let environments = ProgramRuntimeEnvironments::default();
+        let expected = TransactionBatchProcessor::<TestForkGraph>::load_program_from_bytes(
+            &mut LoadProgramMetrics::default(),
+            account_data.data(),
+            account_data.owner(),
+            account_data.data().len(),
+            0,
+            environments.program_runtime_v1.clone(),
+            false,
+        );
+
+        assert_eq!(result, Arc::new(expected.unwrap()));
+    }
+
+    #[test]
+    fn test_load_program_program_loader_v3() {
+        let key1 = Pubkey::new_unique();
+        let key2 = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(bpf_loader_upgradeable::id());
+
+        let state = UpgradeableLoaderState::Program {
+            programdata_address: key2,
+        };
+        account_data.set_data(bincode::serialize(&state).unwrap());
+        mock_bank
+            .account_shared_data
+            .insert(key1, account_data.clone());
+
+        let state = UpgradeableLoaderState::ProgramData {
+            slot: 0,
+            upgrade_authority_address: None,
+        };
+        let mut account_data2 = AccountSharedData::default();
+        account_data2.set_data(bincode::serialize(&state).unwrap());
+        mock_bank
+            .account_shared_data
+            .insert(key2, account_data2.clone());
+
+        // This should return an error
+        let result = batch_processor.load_program(&mock_bank, &key1, false, 0);
+        let loaded_program = LoadedProgram::new_tombstone(
+            0,
+            LoadedProgramType::FailedVerification(
+                batch_processor
+                    .loaded_programs_cache
+                    .read()
+                    .unwrap()
+                    .get_environments_for_epoch(0)
+                    .clone()
+                    .program_runtime_v1,
+            ),
+        );
+        assert_eq!(result, Arc::new(loaded_program));
+
+        let mut dir = env::current_dir().unwrap();
+        dir.push("tests");
+        dir.push("test_program.so");
+        let mut file = File::open(dir.clone()).expect("file not found");
+        let metadata = fs::metadata(dir).expect("Unable to read metadata");
+        let mut buffer = vec![0; metadata.len() as usize];
+        file.read_exact(&mut buffer).expect("buffer overflow");
+        let mut header = bincode::serialize(&state).unwrap();
+        let mut complement = vec![
+            0;
+            std::cmp::max(
+                0,
+                UpgradeableLoaderState::size_of_programdata_metadata() - header.len()
+            )
+        ];
+        header.append(&mut complement);
+        header.append(&mut buffer);
+        account_data.set_data(header);
+
+        mock_bank
+            .account_shared_data
+            .insert(key2, account_data.clone());
+
+        let result = batch_processor.load_program(&mock_bank, &key1, false, 20);
+
+        let data = account_data.data();
+        account_data
+            .set_data(data[UpgradeableLoaderState::size_of_programdata_metadata()..].to_vec());
+
+        let environments = ProgramRuntimeEnvironments::default();
+        let expected = TransactionBatchProcessor::<TestForkGraph>::load_program_from_bytes(
+            &mut LoadProgramMetrics::default(),
+            account_data.data(),
+            account_data.owner(),
+            account_data.data().len(),
+            0,
+            environments.program_runtime_v1.clone(),
+            false,
+        );
+        assert_eq!(result, Arc::new(expected.unwrap()));
+    }
+
+    #[test]
+    fn test_load_program_of_loader_v4() {
+        let key = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(loader_v4::id());
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+
+        let loader_data = LoaderV4State {
+            slot: 0,
+            authority_address: Pubkey::new_unique(),
+            status: LoaderV4Status::Deployed,
+        };
+        let encoded = unsafe {
+            std::mem::transmute::<&LoaderV4State, &[u8; LoaderV4State::program_data_offset()]>(
+                &loader_data,
+            )
+        };
+        account_data.set_data(encoded.to_vec());
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program(&mock_bank, &key, false, 0);
+        let loaded_program = LoadedProgram::new_tombstone(
+            0,
+            LoadedProgramType::FailedVerification(
+                batch_processor
+                    .loaded_programs_cache
+                    .read()
+                    .unwrap()
+                    .get_environments_for_epoch(0)
+                    .clone()
+                    .program_runtime_v1,
+            ),
+        );
+        assert_eq!(result, Arc::new(loaded_program));
+
+        let mut header = account_data.data().to_vec();
+        let mut complement =
+            vec![0; std::cmp::max(0, LoaderV4State::program_data_offset() - header.len())];
+        header.append(&mut complement);
+
+        let mut dir = env::current_dir().unwrap();
+        dir.push("tests");
+        dir.push("test_program.so");
+        let mut file = File::open(dir.clone()).expect("file not found");
+        let metadata = fs::metadata(dir).expect("Unable to read metadata");
+        let mut buffer = vec![0; metadata.len() as usize];
+        file.read_exact(&mut buffer).expect("buffer overflow");
+        header.append(&mut buffer);
+
+        account_data.set_data(header);
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program(&mock_bank, &key, false, 20);
+
+        let data = account_data.data()[LoaderV4State::program_data_offset()..].to_vec();
+        account_data.set_data(data);
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let environments = ProgramRuntimeEnvironments::default();
+        let expected = TransactionBatchProcessor::<TestForkGraph>::load_program_from_bytes(
+            &mut LoadProgramMetrics::default(),
+            account_data.data(),
+            account_data.owner(),
+            account_data.data().len(),
+            0,
+            environments.program_runtime_v1.clone(),
+            false,
+        );
+        assert_eq!(result, Arc::new(expected.unwrap()));
+    }
+
+    #[test]
+    fn test_load_program_effective_slot() {
+        let key = Pubkey::new_unique();
+        let mut mock_bank = MockBankCallback::default();
+        let mut account_data = AccountSharedData::default();
+        account_data.set_owner(loader_v4::id());
+        let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+
+        batch_processor
+            .loaded_programs_cache
+            .write()
+            .unwrap()
+            .upcoming_environments = Some(ProgramRuntimeEnvironments::default());
+        mock_bank
+            .account_shared_data
+            .insert(key, account_data.clone());
+
+        let result = batch_processor.load_program(&mock_bank, &key, false, 20);
+
+        let slot = batch_processor.epoch_schedule.get_first_slot_in_epoch(20);
+        assert_eq!(result.effective_slot, slot);
+    }
 }
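The tests above read their program bytes from the svm/tests/test_program.so fixture added in the next diff, resolved relative to the crate root via env::current_dir(). The fixture itself arrives as an opaque binary patch; a reader who wants to sanity-check a local copy can verify the ELF magic, since compiled SBF objects are ELF files (a sketch, path assumed relative to the svm crate root):

use std::{fs::File, io::Read};

// Returns true if the fixture starts with the ELF magic bytes 0x7f 'E' 'L' 'F'.
fn fixture_is_elf() -> std::io::Result<bool> {
    let mut magic = [0u8; 4];
    File::open("tests/test_program.so")?.read_exact(&mut magic)?;
    Ok(&magic == b"\x7fELF")
}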
diff --git a/svm/tests/test_program.so b/svm/tests/test_program.so
new file mode 100755
index 0000000000000000000000000000000000000000..9336ced24df6b4eec7b37c9530c99003016c28d6
GIT binary patch
literal 170136
zcmeFa51d_9buWHql1l>`XiP$gAVw2luKdwBxtWaNkCEI=
z_;WbcOaKM_{rcpOAhotK6Clv)gV&~pzRvTemNsbH_ptU4eYQ2Vwm)8LYg((OR+!)S
zyZ)TB@45Gep!jP0aYJU`z4zK{ueJ8tYp?z1oU>>BhBtS1bVMGeMBj~=TJtK+Sz`qc
z+}CNpP!_F-rsD66=oEo1prm9d^6`@OTHtXsQ_(Q@B>da-W-ZUhM_#RP9*6a&qUgx5
zLgIM*h^7}mY4Ot%7dtz|UKgT78mdxQ2nWy}NQmeMkUpjv1}wjVdJKi={Ss5o==w|! z0Uu9*Zs8YkJm&N$9bL1eI`}vw^msa1tn^hnZx+9(c=B5XupZzQ`P{1YiT_<7!0E0A za_I(0d-~561TG(!TYRedCG!;jG$<`OPL^q!?VStVtE(7f9@G;4Ev;es&RluVRomXc z(ywm(lme{tDY=(GTCQ(o=}mOG}i)7cdSOLp>(h z9{VdE*YP}L`cZ%{&rluzBhiz1pXzt~km_~(@N=5~pw>$^%@O!y4G<$FOSC_SO4QOQ z8q(~{E-6`Jk62Fox+s_rNZ)Sw6;3DZz_k}zCBRevUu*Ra(7e_ux*$B(F;iT7K7 zjGKKj`x)<3c_k~={`-8H$Cu8JE$sXvwCCffYWQh)#V%RD9F*P($EW(u@tD%*evI2m z_p{i z>{Pt#n2xu3!~%H8lJIUHD^rkx5YN+bmzA4cwSJ*LD)ssVJgRhlQ}NomFe}%3h3N04 zoc->BKVuF)1dfAfl)u!^@O5lm&3sh9Ov`#t)7aO$<;#>$rWw3HT5YzYMQa*KA7ggwZAk^VoFx)}p2!pTFnDjL8S*6zpxob7~EqqqxXH@2d zgHT_u>AK~6KX4o_#d@{AMaxq;SpVel`g*mgpN+42(b~C0<6i5pw4X0gdYby#`L}g% z%D?yX0>#(NzX~!>XX1QX?EVM&J+}qFN%v0CYu7iQ4~X~Kz`iBjccpOTBr(4?>3&r5 z!|{HI6f}b_x>LMFXX@SGVEX7216X~U+p}TX=2y;{+Ez;dSi7fqWU9J zEOzoa%BQMn^7}{NFZqbt$NGMQgPO=bhr!i%X+G`P1Mkwdn8W&C!Md=%*Xp5&rEdSd zo#WO2a;`pA_@&hUyk8y<1hqNCJwj?bsJbnvHyWaXwbf8Z@){)#kz$-BdR-Iod5Edi77S}bG$u-agdI; z({=Wb&k!_GIDgO%5`H=Kx_FZMugNWD2geki?uFRPaZ7^^+Qn9Oa3$>s>?Wzq{LybW z%}?I$F9JTLGq4v+f^pVL56Dq_J;?Ukp4^C5zQBX!aX)opaFqp5)pmMXt%U&GSFkOI+VFuw8Dw zv7M(ubNr~{i666icL-S2^>K}TU&H+$)<4hc&&K{Y*F_8?XD}E1e?tA0`aP12=>N05 zc%SkSFV%ead$_L9{?l^GaV0xEo$2R zv1wRWJPkP<&}`s+jr>}*RT(HmpOLiF*|kmqYv*cQJPDH-g8R93lT&R(%eNaJUA8{) z^~!l@m;6jzsc`e5Buvk}SJNXZwrrdr8)Qxvgx8uMqu{#H$u+h59Hjry_PgjCxB<)ccFEFA?t3P~P;@zjvXw#fCUus39DmTLS-%>z2_VO?;2R5HKd5 zE?akHdi6GaruKz zQr)vaX)i=-U#a&fipo{f+p1KHZbCjG>nhHu>sHP8b-L42Tdw)FevL;iv-tc*c#dD< zV|S8&U&j`r?+U-ZFB8r)&j7A&?be4BALaM6;K}#HD;=uP@tCfck}3cYlA6w6$zHAJ z^F?*+ucW;pTNhQwzHaH?li2rNYqoz!Iy-^F?b-kmXwbGzpxom_iF z<6E>m`N%VpuG&7&P`B2vj%m9?-Txxxp}vv-i_fwFs>#6(cgu5uG&;JWswGKx;rTrjWl!NO&e=2|v z;g;m!P73IJsebCqxvsoW{82Kl@+e^K7_KXqAnp3sF82EoZjao53GLC7&u8oH6M_HL z06tlKO!d+Abfun$B^Rr|LOpvjkgxlf;eNnTj>m_D&Fnn?VNLt|&i)VaP(L0H`qTZd zWbsj>$HvRWN)PEc8t8F&-hJ#v=o5c4pik&+LEp0h9@2MrNZ$#_X-+Wiei%80cu2=+ z105z8+uuztw*Fgba=R*%*YrVJBn>Y`UPp5M*-l=e+)3wk4RpS^UafeUm0{ewurraEZFa;yPvg4>-m0UaXOG9#A|gPD|TRU z!}R%R5TVkk<;7_*J_zoA`+P~d=3x96BV8aBB6T>D8@0asyNRvy3PK&*6LgW!@Aci9 zCcd);pE`&I+`3~ZPSbtiR_#w0c-75(et zmL%psxjl8c(LW(RKZoJ@3xnII0c)?)p#qPqinnh5(oNM=Iw3CO4`a@^Ymf8;K-nw5RX;Vz(6-$+j2ttB{zq5HP|oNAK1TSWwu}PYy*7rehGWP_eawC z)#mZgzTOw$r+vk7u&l$zgM9MGCAr&)_;lmn#(Ow!e4P9l;9rh?t*^rWekuD}=fk8r zom}stdK8~!^v>6~GF8W^_b1m?Ea5%~^*gcoAz7;Mw1?7ql?T_gUfv zC`727#9GbvDY;hl{vgO8CCNv0o#Xw|wO6X8`W5d}yB0t6fcksO6>q%H{F~**-#%L} zEm!_{uE1Z;XV-0tH(6eiAIa=R5_@~;eIm0j&KLd32K!vi?AH2ld~8?z3t=~w;HUF< z5Wh)BviJ$1KR@4b-NSO~+2Zfh{wZ8VW($>{@UaU(26ti5uFEqyMfuOq2fn{W{boq6 zeN^ZuL^xbTsIQeMohRzbUgYwq+P#61S&Id*YWJ^3W}R*6rv=~0ER|<+X-V>vW!g{4 zJ=#CXR&s&%jgRWsze&BJS+9}&+SsJwIY-jG?<@B5V(WjuZ(;X&()|FSCFXcc`6@4E zE&2n$F8K0#Ra18ccIjCuPcD8&;$p`eBK|1P_WGX^JU)NsBUZ-)kuFUj|%I*b?CgPr5!A z(C6da^_F&?^tnCqeyaGr5VI$44-Np9@M$@IGx&a0ns4J^=#)d!FGDNUe$=Z9PrgZC zVtU1WVxgPJnSVx3erK?h`4Xk);az>2CcZ1A-0acEOH$DFfR&^AAR4?Jv7EbGJ)ak1 zT^_-{sJ?EVt?jkb)1j+WPbuFUpr6%;HJ!;(^)hdFU2p3qSKoJb`Hh^Gv%jb5cpJG4 z#3Lm4mLyK^d7Wnbj;Or+ydpVG=}(3PT=roo+QjT^A-YV*i;X+3Pwsj*AvBP$D}ciF zx^Ckso~7ln;g4rqyQdl*wqKfDto@#>d{Wx+d6Dz%FNNoseotk;;-w#S7wy5grJ=`< zGF{8)+K>5@c(&-2A_YnJo!ZVVqY|g%(C*O?el*}O>E4vWk?W-U3a#ILwZvh+kD~pg zdyD1=bZsJC)r_v+fMYoUJ!bIxs_8zR?K_A2bn7sFC$d)o(Z|d6ZQQh>jIQ{KX z!PmcBR#Wl|(N{|l&q4bjOK3&3ryyP*=qZ-89+^d;eURAn(ESJRZ_4Ybiw)S|(KV?) z#CJ>eG@bgdpoetOFM9?A6sNBcK;l~y^gG+9-|Fpof3>5lenVq>#CKngz7SvSKGn+? 
z!|Ux6--9`PG+|yI->r7R(}aIG2Y*TfypI3a)=}m3JVnQ|`>XCxxE~(dc#h9ie$(e7 z&>llO`Gj;mo};UXdR`vieN1#CqiaIa7ayDypI^(N0o11 z7rCDLxmlNu;~I%VXxBgCeiHTB^=tEu-w}H6)9P~1a8r)%BLUs{am;;+!dp$4Y#;jn z(}>B(EAZR*8EWHdNBp^l=KY52g@SnO=Y<~J16H|?-EHZ}(jT_;F}cGZkL|VeQAxL= zZ)qF)z6;v(^tIo&QXUJ@_8gsyj4r3Ml%sQwrJc^1l5R!k<~DSG?FXWBFh}Q3qr>S` zyBv?XJk=g&@>KgBk12mi_eEA;`AfRjSX%i@x>s0Q`AfQ&NZRFloy`x@?-1p6-8V}) z^_6yk`cDZ{Z+3nU^MKj?%1N0Y#&W{B9r-yF`FJ5-=8v4N@X~JQl&M3uU&(eZ$h9-X zc4&kVLVwTOAw2Pq2JNKsI78>>c+BKs{#86?@-Y7@9y58EpB0Z?A$%uioF(nXV=DLL z4E0muG3}S+jAd4?{g#};Q$E#K3hKHu&QH^%_qsDml0Jy}Lgb0eyj<6lU(6#6mFYSW z79xG#qh{l1#GhZ-r*O2VoOfui+MT~Y5uAhYJoK(Dq(}qh_ZyJ0_tVSS&ds@YKK&zV zXG^Y~PlfGNRo_N<5dooERlbK*J>dUN*Zwv?E*^_4eMsVXOzoKa?a3z|)pEP1lYHa> zNmGBJ`tYmnvif&wZ2nn1ruNV8HOJ$c?&)K#%+C39KHje1$Jjhe_Z6JJcwFh*JnNXW zBjXl$;&J6C-uJZRZ=R>;=e{q`b9-q|n{Dwt+fpfy$CUmG_74c^_Xt>@>4J<_DF88;VV0`B9FSvhCwLFxY&o70)@8huv@0j65H$FkUK|4ot z_j1H80RN*o{5YyOA^dl?pqu{0k>L4H(tlBI{MAkn|6M1De@PDiaDd^GE(g zdq^`3c>WaA!;$kP-|4CNdnOG30Aj^sdN{J~1mWq8>-@_&S^SL9AL_>yt>lgVdp4&Z zoq>FPeO>9;C-ar>N74>({whSDW^=F)4@i4eTQAp617d{j;J<`;qfVDmb%?pRhqcYh z5tWeMsBA_(^HhF*pV0f4e#HfV3)hqJze4}_B9r+jm-!U!5d+814J@xpam(@=%g3~w z`oncE_4~tZ{Je(&oFvAbUxy}U}hy(YXiyEQY`DYMfe%g@hZ~M!cTQN2+#Ik6VT!Pn3!Lfd^|^o`K!rx zVgVi7B}(N#()~Trm+S1g6yl?Y>gTW_pB(r8d}MOV5vdoJbAIx9EBVN?s)u`R-nH?} zex@E4qIU@)ex4fl8QfVV$***tF8Wsem?WdBt$TgnwP(Ewy4gOW!w?*w%%Z4i9vInv5emCwVH9r4~X?Lz%tO>^If zXYxSoo^$O+CkFn@-emUEDCf5%461^RcZ-!*C z=|do2pC_Kn_1^1&9#T6QQBTMKOK{HQ-92c)&OGutaPZs z=DK-BZlAA&a-I*Y$hC7m6yN#E+WA-I&;7pRw9^?pS491qXYE{_YvdC2bu93c5RkA4!mY0}T7DjhmM#kRi7$~C{z0R=$_ z{kTO)hxpe3f8F-iDHqxomQ$2$|BWd3{ZIEVhdWP}n|8yUT>?McIbYVL#m;k4OV0)5 z2mSyh)_YaK$1&~!O`z_(B{|$VBjQvy+*yin%?0qwIRTp(B8H!8^6Kok$#59{-uO(D{CbC}QmuG+exIOR1+L2q6s5#Z03A0a&NjTWK> zP5JD;r5tzyz!jX?(yHE&liUNuFNwJE4}`lo!>|F=i-QW7eL(3SI2E0gS;pS z<}+mT$$Uw>9d$ZGIqd^qem|)lz1jSDVssNf1N~BxM7rOJ_{8Z}MhaqG(7)q4Z+gF! zcWHUOkFB5`^b2!1pO+AOJJ-=@ArvB&gX9A+%U1+1hM)TiCBef{272~NgmukxGS9|) zRWbbiF7aN~&!pSdEg#nP$npAN7y2Q&Ul+iB?=(4`qYLf%`-x7cwPjk{O-=_h2;_@l zB;{8PUtSK!Ykw=+zfaMm`s-=;59xn&KKh-z3HJ6o$KJPjnbBwaRxE%0Re!aJGb`pLZp6)-(U87Xxv9x5%@dB4sDqEYlD31FDYZcGcZj%?e|Ei&yN7W+pS+D zU{SrKu|L0CoDxfh>q*`EscWgi`MNm1RL50(iP;BRKi5tj6~H0Wqx9aPiVb?rSp{?! 
z)Gn@CP?5Cvb5~W8(a0ql`*WbYuRv1B6Jz&>H_tTty8np!k5iUN#j4$d_Iq`OXrbiS zjGu|3B%^JpPECKxVqfR_du_I(!4k?3Tf7}`lD2rq<%*E?7_xP<&dV$&Vevz157Or+ z*9$o0aE_F(+6sgSzRyN|imHm6`Ru>tn1~qDZ!Dhlr^2tp*VT$!Y-X13#7$MLGIC8^-_k)P+bH1g9O z@KfHZWd-r(Ko8{29G^%0`RbH@G{bV9+lf4;=<|++=huo>X;m8@XeVBsm z!k}OLp76Z}2O#1dn#Wp*&5tTXYUg}ji5B+RrJ(vo`vMU~LwyRLRF%*BG%5PT z9M>zxgb&li7s4~0FVCkrFFzdGC(!XThWFc6pZ-Hw58Hof+V8p0&dteH@d&!`JVR$`7VFz zJN4ZCgmhkICc*$>{;YJ&lXTnXdKPMh@cAo_gCED|dN^bLDCEv5-N&ErLo>ZP$WPZF z=BE`q%|BYJcBI&Wmv10s&kb@F1UXuN9K>(_EKYkhvm5p=n9ugQFfQFsF3N5J$DsP3 z{yd-K^XCbfW_^F|FkVaS>h~}|ya)E`2K+2#J+A7oFBv?q$P&lz>qc)kmT-KjofPkS1L-(1$MPiJTs~9#B6< zfp4OQKM3X1v@O?*ted*Uly`h!*d}>y|Pw=PpM87(Y)1K1d`)W=f z=PlaZ3lNVY9_fBT`q#^MBcB$FDUcFnI885VyTaKl;yWPulj*BNh(B(@E3Zt z6#`^ze&FU=LU6k7pj>A`FETg*kNkOw5tFCS!^r~0SBT02==3)8qx3kv7l5uqs4;Sj z^1*o*DgZjwZ*GrA@2Gn@Hk(f4s}m-`+JPABnx1bpT5{W;+R ztY`@D_5Aq}e}0~L>Trk-m|f27lQQo1G@M7D4D82=%%fKXcFE__c&m+{YKde<`EzK} z^~wf&sdDBz7Nn+rkDvb%{t@so!9s+IQHfG{(Jo*5cVGM*bB}GmW>M0ofXx4!UEimQ zMi1Zp^;45R{>tPW_m!B$bG=6Ie4RH~?=t~k+#lk($Isy(^*WWe`FXU@&s~iI3+``( z^9Q;!y$=z-pY%!U??YOV`wV>U+5IBY@pRB1qytSw2O%!Tl*{K)$KgYHeFgw5XS?m} zzT1VmPT$dX;3@&{VT*_HTqEtM+w;thE8}j*eV%QGdpl}p{s!T&Ol*N~V-p?`J4e=H_@&51VEMbSFe*euL4io~r6a$S*znW;i{^ z3gOOb38#emdCo-OPmw~>O^EjEHFPZT5(u;&mwyo8=Wroi3t7L3&&5P5`O-f6i#KnY z;gsM3)5nxvzgNi02m^@u)6ZjF->8>2<6mZv?hyE}{2LH(R;~yOa+;6*nH!;?2yAy0 zakeks&+HSE3sk5O={oC#` z)_u_s4=uFAhxyd(G(Vt=`2%R2>-&ij{6W|a{iA)N9!mZVk{#`aeI)(?#FD?$@)42F z0sNEvn=K!}G=T$%nLh_cYDw@M9sTSu-F3Uxn*;cU{6&@zIvet>|GBT*kbjxMgU*J0 zq#5X6k)AF+W{3{p_s`((3=#Dwe)V{l^*?2?!(Rhu6-s^kThtT)WlN zx^AqU?`geX@(`3k<`3(C${+5|_hyBg| z@O;+mLYR-Z1A*=E+@AaGY?tdE&co$doAH-=f4Rgz(chowFBRcGj{fQZr#{njk@Krz zD7gjBkG}=m;{IE>-odugq`yyu>+l41M*MjLq~8S_c%9jsYp^&W%Ip078sln*6FNBv zw9{;VWC2GNV)LVLA4lrdb-Ih<1tlqu7pj4a?S=gHs$|Q1t^eq48U6HAX_x6Q)8F%X zoc4=Q6IcT=R62D0#&+*F+mE?R+Nb~fY~cSUi+4&s;m`!`N!~2!WbvI!@3^3qeX$22 zr}Q4B*(a{UY3Gy0hf@5D{!Z)guYdn4vF~;H_dr;W^H2JGJ<`X%8P
9FrUgzKH1 zpu^=<-=%ffuRFj%UM`i6a|OWf7Zf{4E!wr`<{b~^awI2jWVMxdr9=I?`lEs_;`tZl zoOPSpHkdtN(!IpL}}!x8i+Uv|P76qK)%d0zK9!eLsxv zS3CVLU0$4(z{iO3V{|({*X#SNU5-QQ^>y}M`~3Sf$;Wg%vt<=uCo{is{rs%(7s~Bf z$c^&e6+9o~_imDlpH}#Xln|~Tc~0o>h4B64CZH@YwzvJzn3(m;}H9A znYKfE{>tb=H0k%{viF`p*u;m7M|-ax`9Agy#l^q=UN_%=4cp~CbC*}RZhnI827Di3 zx~4eU--`lyQ?EHbxNc@l`oDr74j-_k_rY^&Y%S zaq_Q!Hz&D8^+fn;!25vI3-N6dd^#_Ld^|%w0zJ5k=|B%|&glVEU-W>n?<=$Ye+lH8 z-d{xicY^#--qa(v%W)qP2$ha6H_@%~%jD1~zb6IWJf4@y5DmHcMCc&-Va{r!X3hbMD< zvp+-oaCsZP&ky;2y|4|wm0tQ&49Q}p)9;D-eT!uAvqHb$xAW&E<5MAM*0c8u`?*a_TTK6zEb)g7G+wQ0KtXm=J-gOd1UFU1eam3*7 z&mF*T=?{BPBA;(#+VS~!01I;K3Hc=yB6Ml@MKzPB>uvF*Z>yXY4DIOmHXL8inf+4I zLD(0V@3d+shthM!;8%wKmr?f0#9otfUx$=NCTZF_^~uIQIgv@d4ADjg~} zmsfg!)b!EUEv~1elcDFLcPO7rHSOoNwb{DAmhv<8676TJSBN%BJ@@}77HIw0p5yg< zh@1~tCI7sAe5MXS|DF@=tiaiO#J$~#t%^6_uOl`-<9ka2;Qo%oQ(n|8f6gQRm{xGO zmn`QK?q9VRE*HNq=6YYV_m&>OrU3bMKHJSJm5vVxf3fLpb?YkmCi*{p4@V$>Vfqy<+J$hmHRs7Rg9fn*5lF;Qf(4|7`fYAH2VPUQOlH zg6?y2bh8Pk+x?kJhdN@eA0wZrOMk|)@yJKiUJjk2_SC;0!+G`Dz|VC1nq2j)w&&~al}}sy zha?Wy0qPH{pXd80@jjI|{qM)IF`mi6`e{talJy(w%X&B`M|KJ8?{BDYeA;^y(|w$A z!-q z7p4CI{2`9scJ{^X$pC-|SLslPF1|+FsorgNM%^?yM{2M$1=h;!jO%r3Usk$(nJsYS z`@DefBJRO4Fh8^>9GApgoUZtNoW^&T2p+{BTcmN`jwm|#WoqAiJXbp3pzuC^?~!CM zeqSdU-d;K$y}f&^y}*vF2>5XN(*9GjAa zW2UF~*?7EbiQujMyp6|CytMJyZoIwNIQ92a{a+s^=I@W#eO!N@A^DJqA^qN6I!;t9 z>HX8%km`BUIB9!+&!_vKf7d8}@7D=izj2>R#s~gT1yX;9nFzHZn}@x8>gkg2<1?Kn zOn-y+=vrw|5c)9j}t8v63;d<$I)A`Np)fzz*-==udd%R8Kgii%wyc_)~ zy~dyKdt}cuDI+H`p5BlfPps*5XLPZ54BlIgc zkFzL++oW)0k93x&C>`Zqk`IRiy@}y4-l1p?vBx`{f!SS}bFZgJ9K4P_V*d{S4u;qTm$+0a&Z3P_v7RFCda;#0Q&pw3D4S2H^(hZ zR5W7W?c#nDYxsM*eZMZ1&EZh1!ivRl+Ii&wA~>aBc2@+;GOB^@tSc@;aZ*YvoS z$9CU$JBXpF@b!0>f=BV$0v_>sP43%#ZLc56_=4|HS7Rw=0#-w@AIPocuZ+>AsvmK@Rp@da_v8 zXZ{}X0hE*Oit$m9w_L*R+4qKgKC5)ve%_gj1Y>-9N#c4{qA0yDzuwZjHKu(&grCj+ zn4WX`^XIXB53l?om8ZWKWdP%pn(Xs$(cVAEG=s|}+h*TnA^(1j z+4*Z^Pb}=pFda1(|yznB^ibD(_do@7NS0d z`vK(l0BSbN?nk$UgXQVxUPSN)jtNWgn ze7p}-;@WJjzYR)12S1;(n0&Ck=KJmcFj+_YI=M^OYTP$qA_X5_l+G-TRoz15h@*@)q2)nRhzFz z`cxiOI|p<*QeF&&=y9p%|K~zaaGa`yCwK_0;t+e#rd9ht;mI|K*t;-;mPtB69KdlJ652JJlaxK7*fo z`gkaxJ|gnV`n#t6{SJk9`%ypJ(j2@9X*e|^I2z&6C>;3 zydy`Sw?6=Kq#tcxvc65o!MI^=c26L&bC)##%?g)XtK-J~`1HB#y(IyNZ`FCj_hHuMh z+TLECC-6Ks<#;Q|>J0sQzhpPf`|cMcpU`!u+p+40>D{2+JFGwNEJ>Q>v=gacG@|)# z$93DQLe7^Si z+wW<)AK9~2J2rk1K54&%KMEb8?fi{0!g)L8Sxmvc$Y0nu#LP+O$=M|-iBGkDD4Lws zO1Ai)FbOHVzqiQ!xA;`8&uR0+_+Gi=_4|dXpP*#Qe4aF zMXe+NjwjWNTVA4G?ALbaPm5mY_CVy%`Lmz-z0eoc3#&)?Ul8~eM~OKeTBGr{i!>hI zB(cvAgum}6dIWv{5$Vx|+Q0SJTYQ3c#>Y##4`t&y)ibj{Zg;Zt5uL}v^ACCc-~4g+ zx7hDrL%+Ly$o!dh_F;)IKzqQD-m9Fa>C{eOP?FU4=T{4*g8Mo2p9o(Yvvtla!-q`4 z)8>A#-#3h_C8?0UA0jyax-Gb8@An{lytwR%ZdyzgJCtphwjFK9iCVn_T;>_-8)8NH;@q+N-48-}j!}@~FW1 zzQ2DTjdGIp3gQy#9kw{>en9cU?lbtlw!d#GvG1}Kr|z$F zUCw%rw;%9dMcUWv@wn1m?`Li7AEEP!D!EGcw`wKUR{w7`V2zfOPC?8E`I&o9e!JY9(QH&Z{^bZz|Wlca$5XB08>8Aum@Ryv-L^4gd_e^49O z@6OM){-Hh)!0{0e>ow!qFL=s#=snQlG+l4TV>*7yHSLd{GhT(-?ANzQ?BmSGm){Sk zUWoo$?Cozyx`djCEspo;c-t|LRJEe(jgnuq_q3<<>@7=v4=)-Z`1b;Qy!t*@dLP{O zxBNTb>3RE@+7X|Bd>*KD+$?zGF_pjT8TCZu3HsSG?d&LGAAi+-`u@2ge?HXCfqh*1 zJYVSq0@j0BA4Ob<^!{G$F?(Obetl2Fs*hhru=G7099C@Czt7Y^ZN5Br9qJYR)qH=X z_we^UBmM05)#;(1OS~EXSFl;orTUci^Ipl0UTQxNL+|ReQ)~ob05N}7I=&$EIR1EC z^{sCGIrgR@tBiUaC#%sHjDdD?s`banVwGElpH>BS-28CvpK=GB&*9I>J%^81 zQD@J&jY&1RSNl@r>GE^ARyxiRM85yfYCMhtqFF9F&X`ZGYvVb+Sg5jSuJ=l9Zr zxSyD3X&5i^UtVteoQekhtnbaPj_dn5eO~qVTTxFLhU~s&A^NKHo6|@AA4Pe2StDIM zw_wfy{xFUodr<0CI_5Oh=Rt3)`Wz`9qY`END|c%;9rtR!BhF)Dhb<;vQobF3N{HR>8OFYx zj86(1AL zSn6M%q2=j&D*DvH@^ipYehUFgIa6dlE`2?e?e}dGgjE|ac{?ieG8iTu19%~a>wC@4 
zH@WXJ3JlC=$n;bNnc4S#DQ~v#t0dpw!_;iwXP%IKcfOt9W_zv#79w3Ixg7lcYA=WV z{674av|q=Bt+w_vPp|h+r9A3uBf6}2bU&xJ_s=ZV|PztVA1Rk?5+hWeT6w_ZoX zJrlK){+@+UFZDeMx}Qb&=S1JH@F3<%zxNnESN{a^!~6M1m^M4d_2JdQ`1R)i!sn+S z=Xb>oK0F8cM5qrxhJ4-mB7A+U4PSeH%=vosMfi%_@U<=A%b!Pgdpr$#_D|3YuHQHW z%1ikdc2B1*`92WSm4fE``FC;38zm!LUt;QxE=N5+=iRGjQQjlX`n02mke{EQ+z;XV z+C~uw%h=D^cu$NC`8|Zn$qS`jzsFRU8kh@n^~+NP4D-0c`?|bKvm<-GbTqM-En@wM z{D%1y?gBymzQcAfS3>%*#d$tismX(!ShYka(0qO0fB50~Q-yH;3gV?H512xu^3>3jDLD<3P;whUyhp+rpJ|2x%TM)j`X5Wa>xn$M2TLke`$dQ^$d6MvaKlbDK zIgz`Po%&x+cctSI!Q=h39rTotbJ${kZ~KnB6+yoLD%00W{X+DMibscEa%qMF20foCUv(VjQ-w~r^FTY~a( zoBm4iPS<;Gh3Gwk*Yzgf|E?FqxL5p9&TnXkVZhr1pX8u2eY4~fKZAe&3&URYZ`IN( zHGhFbQI{UOMzyM@o9wFLZ5p@MZn_>bdPqO@-07-Jf2$x)=`lFk6WXg)D=w7s6SgDm z=s`b_|CAo28AuP?@p-p$vL2-2{t&xO^65vq++81dZuTJaiIi}N`7`ae4(~VJH}U#S z{bu#pZ=X6YKJAcJa-uvlyW2-{vUzo{B-`8FA3$Exea{oQ@!}kk%2Q;7S z5vqQ9-G7mcZr@c2yGCi+L?`7Sq`J|65WaCc< zLj16{=kI+fc6LjJ`WmfwIU?c5_~>$fS&Jb*oiJ<0Z_~X?>(~6fx^#Yrzg?&P+qm?5 zQtqd7o?vi4y;As_N#;;r7z)u}OFwsQk!1S*r0l-F5_}V!C60H$r^fHV^1Z#R#n`_m zTqDB>P4_zqkNoYBV+pa(8^d^ylF*NG2KNi&MVhYPq47G2QTKE0nGynV{Qg!-Fj{iXDe zf3MR0toSk0a~&2D^_=}vUx7aey{5O$zd!%T_d!m|%&PI=g$jQY*{h>85=+gt)wSKK>#N zVh$np=aGCrFk4ru|LyhseU-j0%PI6mVo3h&RGa6HR+Lbi+t#9%ktxCVa^237r|u976v=QrtJh&_XR8M60DC|ztf z)UUe14bD>dn-Bp-6MmsrI5z#V_6aZgWqNg=qLKYr$|v1#e1{e!7q5_hNmee={y~4! zN|pQ7pQshJktREf>cp3~v%_v*6}`MSkbsp;DuHp-ujRCgAJ|06X((T+ zaLf;%7w7p7hids!g=Lx{x#}YA-|jUU-*SbBlYwgHOyZj`G|U=o{)OszU(OS z!+lxVr`2VWycgi<+&&-e8TVxw`#sU}Jmuf*XR>mqwlk)*(axwGl%HlfJS*@{e@M5= zp&h?ZHnp?!2jue#E}ksV-=m!)pNu*Fzlwd!R{PIi{FmT7!t6=cbk7zH7m^+o|FdC1lk+-H)pm6|U=Cjcc1U-iC-{;wg*W zpW`~_5b)*c+YY!A{vEb>2RjzQ{k{Rni#h53>^dpNeXA|nPxCdsYM#zxKL7JQmbjiR z-=|{z@^Ztg;A#)?+n*m!pKDyA02DK8tlGrc26DMXqR8h3p2Oc6^e^R9M1w9bzvtU^ zxz(GkaTn-f=-sVxUB#NtpFFn{IUz0q#P3V_eNxf|wkF9R;{jxl&I%nb1b?rizt^QW z)xNXA$qQi{V(A}?ef`S$w}kS;7Vo(2pGCgD{`B`Sx;-pTMaLkN&)54Op5MD(vZJ2! 
zNfpPlW)ur5*sir&?)NYz`Ze8T&%r-2w3>g&H|4~@do6^kOj#vq=VziM$*3QtU&T-7 zqgGV9s8_3z&vW5k#1r!rp7*iX;~Os1^gt?KtyhSaN`bdSI*ua8_v5`C@9ms# z^A+GXn#&4KIqjtjcZ2#OX;tA&|i5y$@=d%C)tAAdtWby_UKvM*Yxq_ z{l)W0_6+wAMiILo&3uNTmG2e2^qUDs`7rkP0)+MH;N|PH9#LiWbO1A-J)!O39*5l@ zN&WMSb-r?aPr7eb{>D%VA)CLBN`CYFbxiX8xy$DH>quJf$?b}he_PG_I)7#J%Hi&@O|oy=d{i9;}(H`LhG69vOf%FLGFR70)UP_3M(}lO){Yi9 z9)C`@yj$;0IzIn?Y@AxzvY zk?iBKV`>*^`hZ61^PD|%^!c-lE?r;c<>Y+y%u+e{y#(T8j^Fz$c9@*EYD1py@7Iz2 zb`Vq>((<;?0fSG5CjSHCEg}D~#eUy+$K^_w-}7}lG!1Yp$?oT}_t1Z~j`MlCr(X>- z<+%stoL4Bxn(6$8b*zQ{$0R`vb9Ea!m7}+te-5>NpHyp_7k6+%KybNd{iyUkgfc>G zUZDG=1C};)at_&QUZ9`zU@%T;e{cU*W{3Sep{~m(xu5uC4*s9;nBX_8V)a=ZZ)(xE~nSd-qbU$D6KcyW5-FWzGf)fbnvT{rLj*8$!QA$K{0iTT?;o zuYcqF@_YpGkUp@Itz*a(LgttJA-| z9nq0TI@9~&S-bkatgs&3w1#@GezH|PeP7m85!*M@bsF0{+VT8kd6xPa2gnpcA=3A@ z_B0UC;id-m z`2VK-&jUUGndN`!3CjPEaPQ6MPnW-a&Rf}y9u~}D*X4Dt z1ABa~MDjiw>aowqzVF90!vJEM$OyHQ&F_%<=Rmfb;kG{l^_w zXGf#pYRgBrCKp&ui3 z8ed7b`Y+r!DPqMpMy>Yj_d6?y(Hf1sZy--Br?jgOOoN^-p}XH z106F_?%!{rtR=l(>(7Os$uO}=;{inc5)Z|`jrs5o8RjEqdhW0!8~jJ%-~30yh&-p| z`X6bAYj$dU-AVK02iG%%zZ@|&f_wQ5dSi55V{~5kj((}fc^;1=P4-Xy+T`SsZx@&b zoA5t7q9MfoUT4=|_Pg+fm^4nT7c|j);AML5C7N!~W732226{}t*dEsf?eyT9isHM@ z@cR7)`Zd(k@@zf-P4y4?4ejXgi(a&y3#^^B8vA|?>w&z6{mO7WO@+{xpo)*Dc6|IO z^!b%7^x5BgEPfw4@+bQI6Md#e|3}d0U&Hy*eLDQ`9cKOE&wu#yr|!@2xj+o{?B3;k zgSYP-_4yS5No<=x8|c>fd?tCEXz7sQ+@ zik;I0f7hF}!1wX0L(~Gmp`OB_VFbVL>F0_(FD5;H4qja^V6v{jQ(Niz{eA#ZZr(q| zDa4x1Yn;cn*%<)y{hlD@%|N@$;P(OiyORB#uZsQzxP3p|^ZmPsGZtt)mwWkiN;i}H z0O~Tw-?PATex}Ro)c@*Qg9L*2=Y5(?{UiFXlq>zS=QufA;JEnI(QJL|{uJT+07Lj^ za_~PN!k?~){C>qvpbJ&fdxi_q)W1@m=D+oK%#nRH`gIH(XFUHXpUc@?(~Rp|_!sg8 zeGL8{o~|=A9p9vJvQy(}z(+iO4pScZF4Z>rS^GYSzkjo9w!*t!bZPfSwTkACP=E-; zM-q3Orsu6rPb$}{fQes8eJMoxJ_Emh?RM%WAQ5?3y9*&7k;iha-#^vlVS1CvgS*HW zxBUt?06CE%`dPdO$$fm%$H4fU_3|^-zk`~L{QH?*7ivawxyIBVkr$GzU+(yx%4@l< zpZ$4eKc^n)%jyw+$2#TLpNFG<5`ovFUQsT2xg7uu>vJ5j96uTF@%#0IW=ClUvv71z zc}NB5=l_EPT3X(w4*W)wC-E~Br>gw?J&l7lpA1haNV|k%`#buSU#3Y{PoFmI^JNM* zOFJloBQ)*0#>{7M{UHA=@wC(Z_-p}_aizcH@95xiO(k}&E%%yJ`E*4oAL|s(-$yn0 z7EAN61%da-iHi4UCoxyJKJq=}l8;#Ow+Xns|EB{NV5xVVh|B9UJ_+IPD-nBx^8Ts! 
zM1-dur~C+mXpi6f8U!8&-mlIA`tJtQtNEHcsLkNL6fQ>{|Ge*zh3U~oUS2`)c?I$o z2%cAE0dReqa()YP8yj?FK=^fZPmev%TM+F>d4rxE+tV}k+fsveF9@T9Hs3#P^!UDg z&(wbruwvn;##28p@d2yc$j{8EY-ewUNDhGaQ`Lp&} zj{h^hgM5m%6+Fj#t9+n}eE=Tv$={f@uzupG!Mue0HHeE-teJmY-<$dqBPBm*$@n^yf_c`MyTIDMjU3>Nk)dq07cw zyzhv#dyetJd$G$wPwyfjOYZATg64R?-lrSJI_m}SR*>FMyP}Z%dvs6yh_1H^vYU!} zhTqpysT|P`?Nr##3utE;-;pPNw!`4>ZzMkupU_JCHP|P`vK}M78(l8I%bNb3?SrWI z`>3~Lt3B_xRqL_8Snqlp7ldQIUMt^Y_?zfwM(>%lv#^#3427XY$tfzc%@2<|w$IL4T=)dj33>`{Dkba`AWJ_e|Kh z_4^#$KPSolK7Xbe4810zqpJrDt}te2&R37to8g>~UlP3ZOR{iu zbzRj_*3nnuAi(>^Ugd}-KYKJ-^kHWbf;rq-O+>Q(& z$sAmDlaZO;Q-mF2zUQ+T zk7@nNv~j`b^^A_Sj(4I@3Ed4Msc}QyIv)>!KGN;aEyeqd?~d<@JgZ|0$8p3kbd~no ziH|E{Xa9UGrH@N`$2-^dg-Ay)^4(s){|44VTFprG-dNMlkIR#3wy?QPe`fO0f#>6t z^~m5{+gC~PfluNy`?K2l_Ij0q?gu;nshu+UWN^-Rj}Y$fk)vOS6UajwZk6m*Zxl}} zeg8F;8_*FW?fm5AH&?rUxjwmGRyzJhTJU|O;m%*7xtMGmztL#?ppK8nxi~>6M2|>2 z^-)Q}AJg*Tj(siaWBeQa90z}f=cEeJFDad z9Nl~T`RM|l{v#po$5DTNANK$c2o~&b+@tg|zXJ!${@qx&3yuEvnsw~=KZVY_9nRzN z?~vStHcM>R^ugPixC@yG6DI$s5YsLV;2-_p^gP7)rMy1+z!dm{s+Z~WSwcVTiG4q5 z#&Ti>UW{*s3nYf$W_Z%)gv^g7-djHViuBwhk2kBQggRXp2=*hZdGDE$^Y(@=R(qV7 zo%HXI`FdtMfJ=b)b=Sr-1yQQcTz_$bOBSo$mHvDSThQR^bbrrUa_u4Q*Sodc{d4xu zH-RfTPvKa#^^awpvV&;$P-)^6Y=wR|pd z7UCJUe&GAFuybAV=MuwqX<)ov-xqTI@$&<=^I__n^AWWFhxlE${Xnl5kE>i*?|-t~ z+j8|Lf93kG zu)WXj_=?oyC$U@6`Q}``De1oDUrW93LpQuX()z(TcvDb6^lRV1{Jo?AG33TTx%@Nn zS--@On7*{=pVzX!Zl9-gH`>wZYl!bT;$fk^5OjYcX0zfJw24)t(lzuAdpk4zd=2o!RF-?^8Y(8fq&nBC*Pae z@b@<62jk%RDJ%b+)Si6!A4N}|hb?zLL%Dvt2t(?jBqXV;&IT+etO~} zYXo)TN7at|dm#OuPbl9<$tV6XgzX}Egn3Ti|3SNMXPuAG-hay4KNPf|w(I*OE$DuL z?PF{)RL6AwBA<6*B1-$GrsbiW?+)s{n0|)!<_7%`_V<}VzJDi-bezuosOWeE#&qD>*ef1 zSpPk&kADn)4$SvPm5>(y4Dp1GBl-)>KWy=^tUo`xhHwl6i21Xk$8YeXS4zIu*Y(KG z`Ud06@wZ=(C}RIUQu2{wI$!P;v@*ZEaO-P^nYys}@i(%cHiYu1DqN@!{~IV(u z`{49UfIo&-{4zh!Z~UmzmGS*%g2BG&`jvM0lQ%?nGVhIFKkoyi^Jjzp%$FCU*9zlY zFYrgYe_{JIyr<6m=Jmw?$0pK!8Gfh3(~PtA$ql?VoXO!Aiz=y}nH)&(V)*I! zF2?hO^Zum2ii@FpTyC-t@OyV1M<3>~dRkxfVfpo|^ZHO}yB}lc^;JQWo`aIUPm%7u zmLJf4_Ycvpm5x^mLhnc1YmzkAA81aVGvxZ%-yh3-hRlyA6xWkqkeK`84E~9Kn5DQ(>-qFe5U#9imHA`b3kM38o-?yNm+c)1w2*>*uIq#c&8?x`zaC;`{}_n>o3H<#jKT8_|G$Cv z)Q$&qy#Bq9@<=+WzN;kuz;_6@mpO<7{GTD8`FY@0g`=DvW4X<{KJSO~&)*YoZk`~W z&ph-wvHsVb_ntQq&)*U*;NzdJ`@`R1D{KG9Zwh^%WIK0jyCHpF&DHzqzwCdUP}jcW zRFUs}ZR$Ont9S4J_?-0Cdw=Uab<*=++SL0}uHKu{`v|`y?cLF)-WOTV=g~mEr-A2G zK3*R5`){+J&!fhFs7GK`{N$V2uJvyypATSvB#+;pb0?oP<=*erM|Iyctj~!tZQtqJ zN(rm}Jx=;swELbma`-mtwUWa>Gas}tH1c;;7qP&H`wJ@h%OMg&@Nyf zyk3azlKIZc?;jR; z-(SV8wC@x00OZQ{?Hq13(+srN%=h`TAg96^{KSxeHOZOZ(_?7H^PJcy_VbnBI`(%Z(CknWrO zHQRAJV)T-q1*q4k-$xqu@!ki1oPQr*tk=u-e12)n`nBNZx*Hb((c6#uA zJ{dm~3j}>SZYQW<2=x(-^L9_ZZwb1a$KzR2f%cLb&hk$9<*v7fkuD+M@540N>6c z^-!OI{uXm4wn{SnPHy_1GUyS@`=@n8D{!CF{m@2z+A@Xx``2jO?SbQ?zxf5UF<^Kq z(^U|y>VGAupMTHJFxuaVi(I@v&zSGuxCeco+8M*|{!CsT9Cy42)0+Mxdq{t{KO54| z^NEapm0$Qi6qOwRDxS{=e6W9>$3?3=J$e3i?6Q6VUxX)ryk{HI!Fxl*Lsy&h{yKj9 zd2s$7>f_6lD#8(S2uJ;8DWW|-zxusmzo$8PuGVkFJ9E^Z^Y{1l@*a&+PWYLl<&#vg z>T9&V!VEfkw?e&;C`+E)JvMxvwVr9@xDUMhx{3eQ_Gs3 zr|I&5BqP5sm!|u)JbX@q{mpgNB=r|!V$G)eBIt*#UvUrI`hRe%;WIiop9wz1#OL#( zKPR8wH%2>a08{L=d#wFPAo%@V*PD$4nqOb9G0UNrQLB8g&@!J>RV#Rs+$z5mNi@H{4^je9CDB>T~@p_l-@CQ<2Z1cRAwKh_AmCe-P-mkPm;~DANq-cpcWX z`$3gyssIO}7i&;dELcCjL*aODjre?AZQP>yzJ6f2^ka*B^4$tv`mqJP^ka+sQh;Ya zeoH^1-G+WNKGS~O4nTz7ofLq8fm<~Q`C z#uo48QZb4_jskYqn|-dlsXd^1x>%U|rz*t|*gQ~mmXq-5h(1>pBj*e^ccxnJk! 
z!j1EI)RF3Azt*2vqj9mYN`CNp6Sl*3AmfdDwcO9O{d>l}w`%@DjbMj9D8Cw?2ilW9 zx907XFI^&V{@hk-ulf|x#!EDII#YYqB46~pMZV~Hi+s`Z7WpMK>Fw?Z)1v3-=LS7z zWd!Og&pGmO`tc-9#qx9Uw{r-*fwV=M%V)IvSLc3xofROlD%6(eb`@!`JPjk5( z|2CRF(e0pVaCz|k4W!BW+_(e@gp6NR0AFtu9iEL74l!BqbB2PvMM|HSVSZN$e-PY$ zOzhP3?HbAb8|+BpGcqIlKKUMAwK6$Qx_ir(jXNq(9tW z!t-7NH)wM7@&cZ=B&7GJ?{lZ`1LEL9aCF*)-_jyjRvk+L$CR^pRzgp!}bglAd2Kf(xKI%)@4(VWj(;m%ThXjI`GoN;6 zH|mid_BYcrjLr#zXF2KeeAc6VnU9$Eg#6HkIXw6Ksae;XT-e_|fSJ1nKM-7RU4M3i zNXm6P;?;;Zk|~6=pMC#+tLAfjl2fc-M%yr7P=DC3B*pPMysw*Geg_P$y!Jaf z&P5-HU%&gl8~kJ09XUMZ&HZTBV@T<5+h68?%~yD>KRvCN?V zd#>V{SZerCD0apA|GgA8!tF*M<-qdyzfQ8FiK@mM`Yiosi_Pv)p4`{)_q)3sozK+n zktl@ZDm`zY-3!kH2T)Fs3ciHJ;Cy-eyHQBGHdwptH^RRd+yRk)N#<3-YxQU+81TJ4 ziRbQ;D4Jn%Nbzpb{N0G8ebaAme>d=x4yS)&iNaSp&=5kh((aur-cjE-qI?*Jy7j!j z5Y3bH#9E1>8Sej?AClkKt1nVGzn6FzI$lE7j@y(#f1lQj^$O?fydAe`!xLv%eI!}$ zu*L20y8iCiYxUO|UL={*KlM*189QHv=#M7}k;>8Zjq(yXp3hQ!j|S_xJX1N|AjQy! zM!(DNRp$4-{5c(e4#MxP9>#Dg0af}PP0L1oPUw>Hc(PRHot4Hs$@jLfYg-`X^ z`n_uR;b++RcYT|y>KRR3uJsDC>I7d#-{x73^tpT-zA{x2xO`UGbAe9R0ICzi#6rbi z>G%`T2Zv`n)YS6w-%RrfS%`i^()oKF?e6vX{nXf=%c5uh?z!-3EIg*pz)_eY~#@LYv`A2{8|g*?}wN^z>~|E<&d?0-ta*OeR1&iH;b%Q?AncsZP*u~lZW8@IDRhCYx69}1=k~lVeIt#zCpS#V(|W6 z%<#QC>)^;2(ITzxp)YN}fOIFTCgD1p8}lyM9`x@}*9&KfQmFYtOPJaFB&&zWi z`vT?>_cwh#QJHe8RCGMbUNL0Z&N&6|5AP|XBa?C`BbJVe#(vI>G{}fLSg)y{Pvig-DvrH zj9$0H?#KIcksG&2_1rzHXtdnNRymJx)a#xeb9`TPbDt9E`|Vw$gkrhYbAIpB>~y`8 z*t*8|bEzLCz|f8|u3v;d2<|u2zbGMH{}6NWo)42}vRL)e_hHVle(>|yn*dwFzdHJl z;T+RX9~WJBXujXua60N&Ykrqa-1>Yq`UCXg|ET;S{?#<&@`$BvT>5xEhsuEv+jD&9 z&{#`a*L8{Yx8H+ze}1Dq7wqSL=ai&c)ODA|7`F`my+!|SxXbwf%1BY8Klh*Sy=LT- zN3xBu_& znWS6F(gs@SP+ADIO=q7wT_9~~Nogs8E`&9+cS74_$U;*D9Li!4k$_ti@jD<(1i>gS zQ4pdm21SXu5JbNOM2U(LL@|oO?>YB;&ScI*B7xuk_xj(yntPu2dG2$bv!8qJotehH zsphf&x%t!ETlS^gYu38kTRT$QTAEXCI_g_ncDKg%xto%y=Juv^Q>t~v|#?JJ$#)$s%EsW!K$6LBn( zC#w!bu(hMDePvs$xvBlY%C^?T%DqkTn%1^hTU)BtN+KO=+M7zQa(inc5i^ty1>9@g z%^myP#~o*#*0_B&9-@iZUQ%;M&q}p*>}_AQs-t;-Yb;xRLex!e#+uzxDvPL_%AzCn zZrjJiv+2gYEwLo!cU2;jO6+NDN>*oOVNmddl{?9uwv~H35_EBUYGq9#*1Ef8Wov48 z6BUMZys0^v>a0n$#wfQnO)V=zX>T~`^Tp#aZ_wlMrdP(>g60ZOO`yhOWi-{CXi26h zukXY4^&RPSs&z+8%l3VgU#leFotqPz+8W!Mb~mS58?)`LmRs%?s-~8-J5E)ZY+L1S z&!pVc4BSm^?*H0OHnq0wD=1j?Xf+{Jp>--9dV0g3{JFZ!T&kqJd2K z8Qq&ox4Tbp@6V72ZF`#%P0hRAQF-Z@X!3N7pWGU2-kox%tOiE~>vp#`?as7E-Ksr! zygq**7z&$_SUizTrO{3|H>X<)`KQ4C)~5DYd~b@^;3+MMj(ya~+g8SQ$1X{&Y)kH0 zi3+!BCnlSHV-wknl9S2uel|Aacf(9$0hqa4_x|wGfrjFdLY}<)Z9+lUg6%{=9X<~ zg?mftzzX-)*uHE_YdgiLfk9NRXiU4?Gb`L@?5Ae;ev?GGQ+lUG>d)DbXxY-yR(D{_ zS*JHNuixqo@7mF@!GHR$jXMrpy#7?*mIGTGc3l!$*Hm}OfxxMCyX$wfoV%@M-ciP3-OV2+0qTM_8H?(f*NN-7Zp3}5t&#u!>IsL#{Tg}!Ddt#~ZzSxHCmjuEc z*=-%`Pv4aEpT4`L&6~c|x9*a2F5PwMIiVKwtjM;uot@j-_H6Q=o<93@Z`v0M9ti9= zL#Le+-?;zc4f}S)63MN5>axD}i?#$B4s6>Q3^XLSMJ{UD(-{bzdFj>z@%YwVr|v)V z%u9BpGl`D%4eKsCYoY_FCVXP5GnMGDny2Wg?^ZZQbY<0og$m02^i+E++YzTBneOLX zTbg%oZ@M(Kx!D@&dEx(mF;pc}7^bGg86o$<`$_ zsJF&woV2d6`d4dgf8!;ogf;wSV+k7l-7S}-TGM-5_Q%}mmezf-_I5EYP_Z<%yPNi9 z_ohU5m8rF++B;gCX}G6>-5QYIZ9lzFv|sW-jjm^8g`<38a7V?|hl$~YCRv1T8r*4I zk}fA&T4{8kiHR((+}DyMt2H(2TWBWTZ)GjEw>1?@9&qooD)Imgu_8*$SgX#<(zKS? 
zVs9^IT?}BLwY9wIQeKPz0dnXtOH2Ez$hEvDfx6ka-t^;k3oV)+_K;9CwOu~|f`AER z!$t>*7>K}1h)VK(M2%7bHA;oj$P{a!E&)oY3u+~s;GYWdco>rn^6Lq30*3re?RU=A zu+EJWU=?Xr^Z+=p*y?NZFtX1`ND*`cgbd)(TWC*!IIRZk@$(M+f?{{9Q^dO8Tzw9v z{I&rd<8pNeqPoNZaBeKd3OLCUV{RQ?hxq}I*Y}&dt6M5-+9b&%eUomIG|09U$OvE` ztS;|vbz=)0y8=VTZmO$cav0tTEvt#*-rije7sJF`JAfdPon3Ff;N|5lw#Gcy`n+G$ zm0_5GJR-*oR^AkmPIY&AEtafOe1IodMNW&Bf{pN{rRhX)-+`vU=G*PoH>5{f*y*3O zj)9Is4%<~Y$eVb&6 zd^Y@cK>eo{8~~R)0Peb?JW=^>(I1jz5U|^ecFolVlIKwNMLux@11o+woRvweVVvu>2COfrX$X)#uDB5H5LJ7i~7S@Cv!yVpnxivJCV0 zMn1h7Oi#tO=<;9Ua5YmeWiK@RtW%! z0ymq~4wHhVTQ6fkT?STCB;yF+a)rruhxh`3g67?YWtm3|ShifWF>+H0(g`$kOt%Zd{R#byfg&^46H; zEYYWgwP5fnAv7Pb?bwgHqZb?zl-VLE`<(Gux}@TgjsnhcDwzx`6B!1=!yB3jHF&5K zt07g2=(wl)-@R4>$QPg=SY6&0nC$Lo{l#{D(T@ewq4CQ!_b)4sLooF5Sgk>Fl6z1f zC99wU%dLU6A5dzK=z?EIAluU&~ZVsqu1&1J7 zLt^)^H!_$@D}*rIK>WbysjC9pem9j>r>&=BZ-%F>uwHL_E>T)-kktrG0jbTIjP>Op zcJ@V+wsG0@h<2$B@KSTT zpnh_TLfe9AR+pf_3VFlD=SEd|97)Y!$ll0-?~QeI_#mijmbb5 z(oWY+N3fegZOOW%ZC#>@=ZWOsE&&~i?Q^P?iR%a90Vf~k8l zFk~%*=BWIR%ivxa(h9J;g1x;KeVOT@FN59xA^;hunDIefv$Vrr8rOxjAPRTYjs(~n zGseZXw=Gvu-DAc;*{4joQefK}^txRZNcH&ZxK1xE9G#e7>@6*11sXL6-{ zd3*XtXXg9qPgyzi3`3b{)ALji?htjS_{B!ZRJXZ1DVkaHiY)X-DLdyK=qR{QmBmq`-6I8o5%?d};^6jW)W-nvm3T z3Oh+JwyXI}ws1}XNCySqH9c2x7|#)kRr1t+MG{S!pc4RjEVl4laLJ2Jpbf=aVQxfX zoS(nrXL}DpUrQJmj;@=58kzL9j&068FG~@AK+P~cr@HaI@h}!J?wUms_Rm38(~it} z+=GOJ1}N(r!=s_Sra@2SaU3ZMv>S|RLD=ISFGQ*8F8`l=4zQxw=$5+_=$Tv(=f(pS zXI3n0mG$GIS2hhIjA>+*aqC$xfNVj~?~CRG<6*iN7eNi^F_g_^z1&tbM?{q={KTJK z_svR|0EErRbbCOOxeCbJDH$Dzkvq}C@Oy7l2eXsi{%!y{YbSY8u$n^! zYvo5>SM(ET2`+O#E#V*W|W$a01cFxpY$2E1IB`)=1q z+uPaMcW_AvyQcT-6T9~}Jpc2 Date: Thu, 7 Mar 2024 17:56:40 +0000 Subject: [PATCH 341/401] adds api to obtain the parent node in the turbine retransmit tree (#115) Following commits will use this api to check retransmitter's signature on incoming shreds. --- Cargo.lock | 1 + turbine/Cargo.toml | 1 + turbine/src/cluster_nodes.rs | 199 +++++++++++++++++++++++++++++++---- 3 files changed, 179 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afdb8b0a306578..0973d94da30c29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7501,6 +7501,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", + "test-case", "thiserror", "tokio", ] diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index bedd870952af99..2d2b0a79574d27 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -43,6 +43,7 @@ tokio = { workspace = true } assert_matches = { workspace = true } solana-logger = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } +test-case = { workspace = true } [[bench]] name = "cluster_info" diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 0c55cb41e56472..6036907cd7dc5c 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -152,8 +152,7 @@ impl ClusterNodes { } pub(crate) fn get_broadcast_peer(&self, shred: &ShredId) -> Option<&ContactInfo> { - let shred_seed = shred.seed(&self.pubkey); - let mut rng = ChaChaRng::from_seed(shred_seed); + let mut rng = get_seeded_rng(/*leader:*/ &self.pubkey, shred); let index = self.weighted_shuffle.first(&mut rng)?; self.nodes[index].contact_info() } @@ -187,7 +186,6 @@ impl ClusterNodes { shred: &ShredId, fanout: usize, ) -> Result { - let shred_seed = shred.seed(slot_leader); let mut weighted_shuffle = self.weighted_shuffle.clone(); // Exclude slot leader from list of nodes. 
         if slot_leader == &self.pubkey {
@@ -200,7 +198,7 @@ impl ClusterNodes<RetransmitStage> {
             weighted_shuffle.remove_index(*index);
         }
         let mut addrs = HashMap::<SocketAddr, Pubkey>::with_capacity(self.nodes.len());
-        let mut rng = ChaChaRng::from_seed(shred_seed);
+        let mut rng = get_seeded_rng(slot_leader, shred);
         let protocol = get_broadcast_protocol(shred);
         let nodes: Vec<_> = weighted_shuffle
             .shuffle(&mut rng)
@@ -233,6 +231,43 @@ impl ClusterNodes<RetransmitStage> {
             addrs,
         })
     }
+
+    // Returns the parent node in the turbine broadcast tree.
+    // Returns None if the node is the root of the tree or if it is not staked.
+    #[allow(unused)]
+    fn get_retransmit_parent(
+        &self,
+        leader: &Pubkey,
+        shred: &ShredId,
+        fanout: usize,
+    ) -> Result<Option<Pubkey>, Error> {
+        // Exclude slot leader from list of nodes.
+        if leader == &self.pubkey {
+            return Err(Error::Loopback {
+                leader: *leader,
+                shred: *shred,
+            });
+        }
+        // Unstaked nodes' position in the turbine tree is not deterministic
+        // and depends on gossip propagation of contact-infos. Therefore, if
+        // this node is not staked return None.
+        if self.nodes[self.index[&self.pubkey]].stake == 0 {
+            return Ok(None);
+        }
+        let mut weighted_shuffle = self.weighted_shuffle.clone();
+        if let Some(index) = self.index.get(leader).copied() {
+            weighted_shuffle.remove_index(index);
+        }
+        let mut rng = get_seeded_rng(leader, shred);
+        // Only need shuffled nodes until this node itself.
+        let nodes: Vec<_> = weighted_shuffle
+            .shuffle(&mut rng)
+            .map(|index| &self.nodes[index])
+            .take_while(|node| node.pubkey() != self.pubkey)
+            .collect();
+        let parent = get_retransmit_parent(fanout, nodes.len(), &nodes);
+        Ok(parent.map(Node::pubkey))
+    }
 }
 
 pub fn new_cluster_nodes(
@@ -296,6 +331,11 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<Node> {
+fn get_seeded_rng(leader: &Pubkey, shred: &ShredId) -> ChaChaRng {
+    let seed = shred.seed(leader);
+    ChaChaRng::from_seed(seed)
+}
+
 // root     : [0]
 // 1st layer: [1, 2, ..., fanout]
 // 2nd layer: [[fanout + 1, ..., fanout * 2],
@@ -327,6 +367,21 @@ fn get_retransmit_peers(
         .copied()
 }
 
+// Returns the parent node in the turbine broadcast tree.
+// Returns None if the node is the root of the tree.
+fn get_retransmit_parent<T: Copy>(
+    fanout: usize,
+    index: usize, // Local node's index within the nodes slice.
+    nodes: &[T],
+) -> Option<T> {
+    // Node's index within its neighborhood.
+    let offset = index.saturating_sub(1) % fanout;
+    let index = index.checked_sub(1)? / fanout;
+    let index = index - index.saturating_sub(1) % fanout;
+    let index = if index == 0 { index } else { index + offset };
+    nodes.get(index).copied()
+}
+
 impl<T: 'static> ClusterNodesCache<T> {
     pub fn new(
         // Capacity of underlying LRU-cache in terms of number of epochs.
@@ -516,7 +571,11 @@ pub fn check_feature_activation(feature: &Pubkey, shred_slot: Slot, root_bank: &
 
 #[cfg(test)]
 mod tests {
-    use super::*;
+    use {
+        super::*,
+        std::{fmt::Debug, hash::Hash},
+        test_case::test_case,
+    };
 
     #[test]
     fn test_cluster_nodes_retransmit() {
@@ -589,10 +648,42 @@ mod tests {
         }
     }
 
+    // Checks (1) computed retransmit children against expected children and
+    // (2) computed parent of each child against the expected parent.
+    fn check_retransmit_nodes<T>(fanout: usize, nodes: &[T], peers: Vec<Vec<T>>)
+    where
+        T: Copy + Eq + PartialEq + Debug + Hash,
+    {
+        // Map node identities to their index within the shuffled tree.
+        let index: HashMap<_, _> = nodes
+            .iter()
+            .copied()
+            .enumerate()
+            .map(|(k, node)| (node, k))
+            .collect();
+        let offset = peers.len();
+        // Root node's parent is None.
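+        // (get_retransmit_parent inverts get_retransmit_peers: a node at
+        // index k > 0 keeps offset (k - 1) % fanout within its neighborhood,
+        // its parent lives at that same offset one layer up, and the helper
+        // recovers it by collapsing (k - 1) / fanout back to the parent's
+        // anchor before adding the offset.)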
+ assert_eq!(get_retransmit_parent(fanout, /*index:*/ 0, nodes), None); + for (k, peers) in peers.into_iter().enumerate() { + assert_eq!( + get_retransmit_peers(fanout, k, nodes).collect::>(), + peers + ); + let parent = Some(nodes[k]); + for peer in peers { + assert_eq!(get_retransmit_parent(fanout, index[&peer], nodes), parent); + } + } + // Remaining nodes have no children. + for k in offset..=nodes.len() { + assert_eq!(get_retransmit_peers(fanout, k, nodes).next(), None); + } + } + #[test] - fn test_get_retransmit_peers() { + fn test_get_retransmit_nodes() { // fanout 2 - let index = vec![ + let nodes = [ 7, // root 6, 10, // 1st layer // 2nd layer @@ -620,16 +711,9 @@ mod tests { vec![16, 9], vec![8], ]; - for (k, peers) in peers.into_iter().enumerate() { - let retransmit_peers = get_retransmit_peers(/*fanout:*/ 2, k, &index); - assert_eq!(retransmit_peers.collect::>(), peers); - } - for k in 10..=index.len() { - let mut retransmit_peers = get_retransmit_peers(/*fanout:*/ 2, k, &index); - assert_eq!(retransmit_peers.next(), None); - } + check_retransmit_nodes(/*fanout:*/ 2, &nodes, peers); // fanout 3 - let index = vec![ + let nodes = [ 19, // root 14, 15, 28, // 1st layer // 2nd layer @@ -661,13 +745,84 @@ mod tests { vec![24, 32], vec![34], ]; - for (k, peers) in peers.into_iter().enumerate() { - let retransmit_peers = get_retransmit_peers(/*fanout:*/ 3, k, &index); - assert_eq!(retransmit_peers.collect::>(), peers); + check_retransmit_nodes(/*fanout:*/ 3, &nodes, peers); + let nodes = [ + 5, // root + 34, 52, 8, // 1st layer + // 2nd layar + 44, 18, 2, // 1st neigborhood + 42, 47, 46, // 2nd + 11, 26, 28, // 3rd + // 3rd layer + 53, 23, 37, // 1st neighborhood + 40, 13, 7, // 2nd + 50, 35, 22, // 3rd + 3, 27, 31, // 4th + 10, 48, 15, // 5th + 19, 6, 30, // 6th + 36, 45, 1, // 7th + 38, 12, 17, // 8th + 4, 32, 16, // 9th + // 4th layer + 41, 49, 24, // 1st neighborhood + 14, 9, 0, // 2nd + 29, 21, 39, // 3rd + 43, 51, 33, // 4th + 25, 20, // 5th + ]; + let peers = vec![ + vec![34, 52, 8], + vec![44, 42, 11], + vec![18, 47, 26], + vec![2, 46, 28], + vec![53, 40, 50], + vec![23, 13, 35], + vec![37, 7, 22], + vec![3, 10, 19], + vec![27, 48, 6], + vec![31, 15, 30], + vec![36, 38, 4], + vec![45, 12, 32], + vec![1, 17, 16], + vec![41, 14, 29], + vec![49, 9, 21], + vec![24, 0, 39], + vec![43, 25], + vec![51, 20], + vec![33], + ]; + check_retransmit_nodes(/*fanout:*/ 3, &nodes, peers); + } + + #[test_case(2, 1_347)] + #[test_case(3, 1_359)] + #[test_case(4, 4_296)] + #[test_case(5, 3_925)] + #[test_case(6, 8_778)] + #[test_case(7, 9_879)] + fn test_get_retransmit_nodes_round_trip(fanout: usize, size: usize) { + let mut rng = rand::thread_rng(); + let mut nodes: Vec<_> = (0..size).collect(); + nodes.shuffle(&mut rng); + // Map node identities to their index within the shuffled tree. + let index: HashMap<_, _> = nodes + .iter() + .copied() + .enumerate() + .map(|(k, node)| (node, k)) + .collect(); + // Root node's parent is None. 
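+        // (Round trip: walking down one layer via get_retransmit_peers and
+        // back up via get_retransmit_parent must recover the same node for
+        // every index, size, and fanout.)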
+ assert_eq!(get_retransmit_parent(fanout, /*index:*/ 0, &nodes), None); + for k in 1..size { + let parent = get_retransmit_parent(fanout, k, &nodes).unwrap(); + let mut peers = get_retransmit_peers(fanout, index[&parent], &nodes); + assert_eq!(peers.find(|&peer| peer == nodes[k]), Some(nodes[k])); } - for k in 13..=index.len() { - let mut retransmit_peers = get_retransmit_peers(/*fanout:*/ 3, k, &index); - assert_eq!(retransmit_peers.next(), None); + for k in 0..size { + let parent = Some(nodes[k]); + for peer in get_retransmit_peers(fanout, k, &nodes) { + assert_eq!(get_retransmit_parent(fanout, index[&peer], &nodes), parent); + } } } } From ba43f74dcf5d1413d8eaca3de094c9d3aee925fc Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Thu, 7 Mar 2024 13:16:16 -0500 Subject: [PATCH 342/401] [SVM] Move RuntimeConfig to program-runtime (#96) RuntimeConfig doesn't use anything SVM specific and logically belongs in program runtime rather than SVM. This change moves the definition of RuntimeConfig struct from the SVM crate to program-runtime and adjusts `use` statements accordingly. --- Cargo.lock | 4 +--- core/src/validator.rs | 2 +- core/tests/epoch_accounts_hash.rs | 2 +- core/tests/snapshots.rs | 2 +- ledger-tool/Cargo.toml | 1 - ledger-tool/src/args.rs | 2 +- ledger/src/blockstore_processor.rs | 12 ++++++------ program-runtime/src/lib.rs | 1 + program-runtime/src/runtime_config.rs | 17 +++++++++++++++++ program-test/src/lib.rs | 4 ++-- programs/sbf/Cargo.lock | 3 +-- runtime/src/bank.rs | 2 +- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/serde_snapshot.rs | 2 +- runtime/src/snapshot_bank_utils.rs | 2 +- svm/src/lib.rs | 1 - svm/src/runtime_config.rs | 9 --------- svm/src/transaction_processor.rs | 2 +- test-validator/Cargo.toml | 1 - test-validator/src/lib.rs | 3 +-- validator/Cargo.toml | 2 +- validator/src/main.rs | 2 +- 22 files changed, 40 insertions(+), 38 deletions(-) create mode 100644 program-runtime/src/runtime_config.rs delete mode 100644 svm/src/runtime_config.rs diff --git a/Cargo.lock b/Cargo.lock index 0973d94da30c29..85641aff1b22d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -177,7 +177,6 @@ dependencies = [ "solana-stake-program", "solana-storage-bigtable", "solana-streamer", - "solana-svm", "solana-transaction-status", "solana-unified-scheduler-pool", "solana-version", @@ -233,6 +232,7 @@ dependencies = [ "solana-net-utils", "solana-perf", "solana-poh", + "solana-program-runtime", "solana-rpc", "solana-rpc-client", "solana-rpc-client-api", @@ -241,7 +241,6 @@ dependencies = [ "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", - "solana-svm", "solana-test-validator", "solana-tpu-client", "solana-unified-scheduler-pool", @@ -7345,7 +7344,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", - "solana-svm", "solana-tpu-client", "tokio", ] diff --git a/core/src/validator.rs b/core/src/validator.rs index 196dad5f25d17a..a1c8293f86cb3a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -74,6 +74,7 @@ use { poh_recorder::PohRecorder, poh_service::{self, PohService}, }, + solana_program_runtime::runtime_config::RuntimeConfig, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ @@ -116,7 +117,6 @@ use { }, solana_send_transaction_service::send_transaction_service, solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, - solana_svm::runtime_config::RuntimeConfig, solana_turbine::{self, broadcast_stage::BroadcastStageType}, solana_unified_scheduler_pool::DefaultSchedulerPool, 
solana_vote_program::vote_state, diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 62e31f0a88b766..25e97689923bb0 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -16,6 +16,7 @@ use { snapshot_packager_service::SnapshotPackagerService, }, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_program_runtime::runtime_config::RuntimeConfig, solana_runtime::{ accounts_background_service::{ AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver, @@ -39,7 +40,6 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, - solana_svm::runtime_config::RuntimeConfig, std::{ mem::ManuallyDrop, sync::{ diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index e67c942f07ab0b..1607ebd3fa2094 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -18,6 +18,7 @@ use { snapshot_packager_service::SnapshotPackagerService, }, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_program_runtime::runtime_config::RuntimeConfig, solana_runtime::{ accounts_background_service::{ AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, @@ -50,7 +51,6 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, - solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, fs, diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index cb87a0e16f4a36..88bb3d3ff83b72 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -44,7 +44,6 @@ solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } -solana-svm = { workspace = true } solana-transaction-status = { workspace = true } solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 80ea6f9715bf35..1f0c06966deffc 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -12,8 +12,8 @@ use { blockstore_processor::ProcessOptions, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, + solana_program_runtime::runtime_config::RuntimeConfig, solana_sdk::clock::Slot, - solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, path::{Path, PathBuf}, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 2e172870d6e5f7..c999eab1a56fd4 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -27,7 +27,10 @@ use { }, solana_measure::{measure, measure::Measure}, solana_metrics::datapoint_error, - solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings, ThreadExecuteTimings}, + solana_program_runtime::{ + runtime_config::RuntimeConfig, + timings::{ExecuteTimingType, ExecuteTimings, ThreadExecuteTimings}, + }, solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, solana_runtime::{ accounts_background_service::{AbsRequestSender, SnapshotRequestKind}, @@ -54,11 +57,8 @@ use { VersionedTransaction, }, }, - solana_svm::{ - runtime_config::RuntimeConfig, - transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, - }, + solana_svm::transaction_results::{ + TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, }, solana_transaction_status::token_balances::TransactionTokenBalancesSet, 
solana_vote::{vote_account::VoteAccountsHashMap, vote_sender_types::ReplayVoteSender}, diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index 5797626a00a756..079f214fa236f0 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -17,6 +17,7 @@ pub mod loaded_programs; pub mod log_collector; pub mod message_processor; pub mod prioritization_fee; +pub mod runtime_config; pub mod stable_log; pub mod sysvar_cache; pub mod timings; diff --git a/program-runtime/src/runtime_config.rs b/program-runtime/src/runtime_config.rs new file mode 100644 index 00000000000000..da6fc1dfba4db1 --- /dev/null +++ b/program-runtime/src/runtime_config.rs @@ -0,0 +1,17 @@ +use crate::compute_budget::ComputeBudget; + +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl ::solana_frozen_abi::abi_example::AbiExample for RuntimeConfig { + fn example() -> Self { + // RuntimeConfig is not Serialize so just rely on Default. + RuntimeConfig::default() + } +} + +/// Encapsulates flags that can be used to tweak the runtime behavior. +#[derive(Debug, Default, Clone)] +pub struct RuntimeConfig { + pub compute_budget: Option, + pub log_messages_bytes_limit: Option, + pub transaction_account_lock_limit: Option, +} diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 20b9f5806e29c3..669cb15a595afb 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -17,7 +17,8 @@ use { solana_bpf_loader_program::serialization::serialize_parameters, solana_program_runtime::{ compute_budget::ComputeBudget, ic_msg, invoke_context::BuiltinFunctionWithContext, - loaded_programs::LoadedProgram, stable_log, timings::ExecuteTimings, + loaded_programs::LoadedProgram, runtime_config::RuntimeConfig, stable_log, + timings::ExecuteTimings, }, solana_runtime::{ accounts_background_service::{AbsRequestSender, SnapshotRequestKind}, @@ -45,7 +46,6 @@ use { stable_layout::stable_instruction::StableInstruction, sysvar::{Sysvar, SysvarId}, }, - solana_svm::runtime_config::RuntimeConfig, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{ cell::RefCell, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 11a4bcab04d7c0..b72b4110e336ad 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -117,6 +117,7 @@ dependencies = [ "solana-net-utils", "solana-perf", "solana-poh", + "solana-program-runtime", "solana-rpc", "solana-rpc-client", "solana-rpc-client-api", @@ -125,7 +126,6 @@ dependencies = [ "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", - "solana-svm", "solana-test-validator", "solana-tpu-client", "solana-unified-scheduler-pool", @@ -6393,7 +6393,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", - "solana-svm", "solana-tpu-client", "tokio", ] diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 39df91c382feff..ee04f20787cb9a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -99,6 +99,7 @@ use { compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{LoadedProgram, LoadedProgramType, LoadedPrograms}, + runtime_config::RuntimeConfig, timings::{ExecuteTimingType, ExecuteTimings}, }, solana_sdk::{ @@ -163,7 +164,6 @@ use { solana_svm::{ account_loader::{TransactionCheckResult, TransactionLoadResult}, account_overrides::AccountOverrides, - runtime_config::RuntimeConfig, transaction_error_metrics::TransactionErrorMetrics, transaction_processor::{ TransactionBatchProcessor, TransactionLogMessages, 
TransactionProcessingCallback, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 8b78efbcf3e11a..f5b1653e8d6311 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -31,6 +31,7 @@ mod tests { epoch_accounts_hash::EpochAccountsHash, stake_rewards::StakeReward, }, + solana_program_runtime::runtime_config::RuntimeConfig, solana_sdk::{ epoch_schedule::EpochSchedule, genesis_config::create_genesis_config, @@ -38,7 +39,6 @@ mod tests { pubkey::Pubkey, signature::{Keypair, Signer}, }, - solana_svm::runtime_config::RuntimeConfig, std::{ io::{Cursor, Read, Write}, num::NonZeroUsize, diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 4b066976d49048..8e678044e23670 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -27,6 +27,7 @@ use { epoch_accounts_hash::EpochAccountsHash, }, solana_measure::measure::Measure, + solana_program_runtime::runtime_config::RuntimeConfig, solana_sdk::{ clock::{Epoch, Slot, UnixTimestamp}, deserialize_utils::default_on_eof, @@ -39,7 +40,6 @@ use { pubkey::Pubkey, rent_collector::RentCollector, }, - solana_svm::runtime_config::RuntimeConfig, std::{ collections::{HashMap, HashSet}, io::{self, BufReader, BufWriter, Read, Write}, diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 721021142f9258..ab3a76fc80945a 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -37,6 +37,7 @@ use { utils::delete_contents_of_path, }, solana_measure::{measure, measure::Measure}, + solana_program_runtime::runtime_config::RuntimeConfig, solana_sdk::{ clock::Slot, feature_set, @@ -45,7 +46,6 @@ use { pubkey::Pubkey, slot_history::{Check, SlotHistory}, }, - solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, fs, diff --git a/svm/src/lib.rs b/svm/src/lib.rs index 5505e34bea9d61..d0f679a15c448b 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -4,7 +4,6 @@ pub mod account_loader; pub mod account_overrides; pub mod account_rent_state; -pub mod runtime_config; pub mod transaction_account_state_info; pub mod transaction_error_metrics; pub mod transaction_processor; diff --git a/svm/src/runtime_config.rs b/svm/src/runtime_config.rs deleted file mode 100644 index 2439dd85c2e46f..00000000000000 --- a/svm/src/runtime_config.rs +++ /dev/null @@ -1,9 +0,0 @@ -use solana_program_runtime::compute_budget::ComputeBudget; - -/// Encapsulates flags that can be used to tweak the runtime behavior. 
-#[derive(AbiExample, Debug, Default, Clone)] -pub struct RuntimeConfig { - pub compute_budget: Option, - pub log_messages_bytes_limit: Option, - pub transaction_account_lock_limit: Option, -} diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index fa417850699372..e44b426df96b0d 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -4,7 +4,6 @@ use { load_accounts, LoadedTransaction, TransactionCheckResult, TransactionLoadResult, }, account_overrides::AccountOverrides, - runtime_config::RuntimeConfig, transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ @@ -23,6 +22,7 @@ use { }, log_collector::LogCollector, message_processor::MessageProcessor, + runtime_config::RuntimeConfig, sysvar_cache::SysvarCache, timings::{ExecuteDetailsTimings, ExecuteTimingType, ExecuteTimings}, }, diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 2bc8deb5fc200e..60f299d01e58a0 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -32,7 +32,6 @@ solana-rpc-client = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } -solana-svm = { workspace = true } solana-tpu-client = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index c658b53305bf74..f551cb97820d06 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -29,7 +29,7 @@ use { create_new_tmp_ledger, }, solana_net_utils::PortRange, - solana_program_runtime::compute_budget::ComputeBudget, + solana_program_runtime::{compute_budget::ComputeBudget, runtime_config::RuntimeConfig}, solana_rpc::{rpc::JsonRpcConfig, rpc_pubsub_service::PubSubConfig}, solana_rpc_client::{nonblocking, rpc_client::RpcClient}, solana_runtime::{ @@ -54,7 +54,6 @@ use { signature::{read_keypair_file, write_keypair_file, Keypair, Signer}, }, solana_streamer::socket::SocketAddrSpace, - solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::{ DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC, }, diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 844a2bca9aa97f..74742c90faa29d 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -49,6 +49,7 @@ solana-metrics = { workspace = true } solana-net-utils = { workspace = true } solana-perf = { workspace = true } solana-poh = { workspace = true } +solana-program-runtime = { workspace = true } solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } @@ -57,7 +58,6 @@ solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } -solana-svm = { workspace = true } solana-test-validator = { workspace = true } solana-tpu-client = { workspace = true } solana-unified-scheduler-pool = { workspace = true } diff --git a/validator/src/main.rs b/validator/src/main.rs index 9741a2aecd68a8..b00eabfef9a7b0 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -47,6 +47,7 @@ use { }, solana_perf::recycler::enable_recycler_warming, solana_poh::poh_service, + solana_program_runtime::runtime_config::RuntimeConfig, solana_rpc::{ rpc::{JsonRpcConfig, RpcBigtableConfig}, rpc_pubsub_service::PubSubConfig, @@ -67,7 +68,6 @@ use { }, 
solana_send_transaction_service::send_transaction_service, solana_streamer::socket::SocketAddrSpace, - solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, std::{ collections::{HashSet, VecDeque}, From 940bd30ac999dd93ecde92c7982369b12cbfa065 Mon Sep 17 00:00:00 2001 From: bji Date: Thu, 7 Mar 2024 11:52:22 -0800 Subject: [PATCH 343/401] Update maximum credits awarded per vote from 8 to 16 (#127) This reduces the maximum penalty for voting after the grace period by roughly 50%. This new value was derived from looking at the effects that TVC at max credits 8 would have for recent epochs (500+) and noting that the effect was a bit extreme, up to and exceeding 10% "bonus" for faster voters. This change reduces that maximum bonus by roughly half. In addition, the TVC feature key has been changed. --- programs/vote/src/vote_state/mod.rs | 103 +++++++++++++++++++--------- sdk/program/src/vote/state/mod.rs | 2 +- sdk/src/feature_set.rs | 2 +- 3 files changed, 74 insertions(+), 33 deletions(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index e2a0cd449e8fbe..c3917085f4f691 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -2001,32 +2001,32 @@ mod tests { vec![32], 35, // root: 1 - // when slot 1 was voted on in slot 9, it earned 2 credits - 2, + // when slot 1 was voted on in slot 9, it earned 10 credits + 10, ), // Now another vote, should earn one credit ( vec![33], 36, // root: 2 - // when slot 2 was voted on in slot 9, it earned 3 credits - 2 + 3, // 5 + // when slot 2 was voted on in slot 9, it earned 11 credits + 10 + 11, // 21 ), // Two votes in sequence ( vec![34, 35], 37, // root: 4 - // when slots 3 and 4 were voted on in slot 9, they earned 4 and 5 credits - 5 + 4 + 5, // 14 + // when slots 3 and 4 were voted on in slot 9, they earned 12 and 13 credits + 21 + 12 + 13, // 46 ), // 3 votes in sequence ( vec![36, 37, 38], 39, // root: 7 - // slots 5, 6, and 7 earned 6, 7, and 8 credits when voted in slot 9 - 14 + 6 + 7 + 8, // 35 + // slots 5, 6, and 7 earned 14, 15, and 16 credits when voted in slot 9 + 46 + 14 + 15 + 16, // 91 ), ( // 30 votes in sequence @@ -2036,14 +2036,36 @@ mod tests { ], 69, // root: 37 - // slot 8 was voted in slot 9, earning 8 credits - // slots 9 - 25 earned 1 credit when voted in slot 34 - // slot 26, 27, 28, 29, 30, 31 earned 2, 3, 4, 5, 6, 7 credits when voted in slot 34 - // slot 32 earned 7 credits when voted in slot 35 - // slot 33 earned 7 credits when voted in slot 36 - // slot 34 and 35 earned 7 and 8 credits when voted in slot 37 - // slot 36 and 37 earned 7 and 8 credits when voted in slot 39 - 35 + 8 + ((25 - 9) + 1) + 2 + 3 + 4 + 5 + 6 + 7 + 7 + 7 + 7 + 8 + 7 + 8, // 131 + // slot 8 was voted in slot 9, earning 16 credits + // slots 9 - 25 earned 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, and 9 credits when voted in + // slot 34 + // slot 26, 27, 28, 29, 30, 31 earned 10, 11, 12, 13, 14, 15 credits when voted in slot 34 + // slot 32 earned 15 credits when voted in slot 35 + // slot 33 earned 15 credits when voted in slot 36 + // slot 34 and 35 earned 15 and 16 credits when voted in slot 37 + // slot 36 and 37 earned 15 and 16 credits when voted in slot 39 + 91 + 16 + + 9 // * 1 + + 2 + + 3 + + 4 + + 5 + + 6 + + 7 + + 8 + + 9 + + 10 + + 11 + + 12 + + 13 + + 14 + + 15 + + 15 + + 15 + + 15 + + 16 + + 15 + + 16, // 327 ), // 31 votes in sequence ( @@ -2053,11 +2075,29 @@ mod tests { ], 100, // root: 68 - // slot 38 
earned 8 credits when voted in slot 39 - // slot 39 - 60 earned 1 credit each when voted in slot 69 - // slot 61, 62, 63, 64, 65, 66, 67, 68 earned 2, 3, 4, 5, 6, 7, 8, and 8 credits when + // slot 38 earned 16 credits when voted in slot 39 + // slot 39 - 60 earned 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, and 9 credits + // when voted in slot 69 + // slot 61, 62, 63, 64, 65, 66, 67, 68 earned 10, 11, 12, 13, 14, 15, 16, and 16 credits when // voted in slot 69 - 131 + 8 + ((60 - 39) + 1) + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 8, // 204 + 327 + 16 + + 14 // * 1 + + 2 + + 3 + + 4 + + 5 + + 6 + + 7 + + 8 + + 9 + + 10 + + 11 + + 12 + + 13 + + 14 + + 15 + + 16 + + 16, // 508 ), // Votes with expiry ( @@ -2066,7 +2106,7 @@ mod tests { // root: 74 // slots 96 - 114 expire // slots 69 - 74 earned 1 credit when voted in slot 100 - 204 + ((74 - 69) + 1), // 210 + 508 + ((74 - 69) + 1), // 514 ), // More votes with expiry of a large number of votes ( @@ -2074,7 +2114,7 @@ mod tests { 202, // root: 74 // slots 119 - 124 expire - 210, + 514, ), ( vec![ @@ -2083,18 +2123,19 @@ mod tests { ], 227, // root: 95 - // slot 75 - 91 earned 1 credit each when voted in slot 100 - // slot 92, 93, 94, 95 earned 2, 3, 4, 5, credits when voted in slot 100 - 210 + ((91 - 75) + 1) + 2 + 3 + 4 + 5, // 241 + // slot 75 - 91 earned 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, and 9 credits when voted in + // slot 100 + // slot 92, 93, 94, 95 earned 10, 11, 12, 13, credits when voted in slot 100 + 514 + 9 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13, // 613 ), ( vec![227, 228, 229, 230, 231, 232, 233, 234, 235, 236], 237, // root: 205 - // slot 115 - 118 earned 1 credit when voted in slot 130 - // slot 200 and 201 earned 8 credits when voted in slot 202 + // slot 115 - 118 earned 3, 4, 5, and 6 credits when voted in slot 130 + // slot 200 and 201 earned 16 credits when voted in slot 202 // slots 202 - 205 earned 1 credit when voted in slot 227 - 241 + 1 + 1 + 1 + 1 + 8 + 8 + 1 + 1 + 1 + 1, // 265 + 613 + 3 + 4 + 5 + 6 + 16 + 16 + 1 + 1 + 1 + 1, // 667 ), ]; @@ -2224,9 +2265,9 @@ mod tests { 42, // root: 10 Some(10), - // when slots 1 - 6 were voted on in slot 12, they earned 1, 1, 1, 2, 3, and 4 credits - // when slots 7 - 10 were voted on in slot 11, they earned 6, 7, 8, and 8 credits - 1 + 1 + 1 + 2 + 3 + 4 + 6 + 7 + 8 + 8, + // when slots 1 - 6 were voted on in slot 12, they earned 7, 8, 9, 10, 11, and 12 credits + // when slots 7 - 10 were voted on in slot 11, they earned 14, 15, 16, and 16 credits + 7 + 8 + 9 + 10 + 11 + 12 + 14 + 15 + 16 + 16, ), ]; diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index a6e765472750c6..8853d5de6da143 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -45,7 +45,7 @@ const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 114; pub const VOTE_CREDITS_GRACE_SLOTS: u8 = 2; // Maximum number of credits to award for a vote; this number of credits is awarded to votes on slots that land within the grace period. After that grace period, vote credits are reduced. 
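 // (For reference: a vote landing with latency L earns the full maximum when
 // L <= VOTE_CREDITS_GRACE_SLOTS, and max(1, maximum - (L - grace)) credits
 // otherwise, so doubling the maximum roughly halves the relative per-slot
 // penalty; e.g. latency 8 now earns 16 - (8 - 2) = 10 credits.)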
-pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 8; +pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 16; #[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")] #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 98dc5a4037bd05..7d956bd13f405c 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -677,7 +677,7 @@ pub mod enable_poseidon_syscall { } pub mod timely_vote_credits { - solana_sdk::declare_id!("2oXpeh141pPZCTCFHBsvCwG2BtaHZZAtrVhwaxSy6brS"); + solana_sdk::declare_id!("tvcF6b1TRz353zKuhBjinZkKzjmihXmBAHJdjNYw1sQ"); } pub mod remaining_compute_units_syscall_enabled { From c6bd3883466e72771d14dc5f6d4aaa6edd386698 Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Thu, 7 Mar 2024 12:51:44 -0800 Subject: [PATCH 344/401] Add get_blocks and get_slot methods to bench-tps-client (#94) * add get_block(s)/slot methods to BenchTpsClient * Update Cargo.lock * add commitment level for get_slot/blocks --- Cargo.lock | 1 + bench-tps/Cargo.toml | 1 + bench-tps/src/bench_tps_client.rs | 22 +++++++++++-- bench-tps/src/bench_tps_client/bank_client.rs | 24 ++++++++++++++ bench-tps/src/bench_tps_client/rpc_client.rs | 27 +++++++++++++++- bench-tps/src/bench_tps_client/thin_client.rs | 30 +++++++++++++++++ bench-tps/src/bench_tps_client/tpu_client.rs | 32 ++++++++++++++++++- 7 files changed, 132 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85641aff1b22d6..b0b181a043c7c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5590,6 +5590,7 @@ dependencies = [ "solana-test-validator", "solana-thin-client", "solana-tpu-client", + "solana-transaction-status", "solana-version", "spl-instruction-padding", "tempfile", diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index cd40eb1c833c1c..2fc48c9e296d50 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -37,6 +37,7 @@ solana-sdk = { workspace = true } solana-streamer = { workspace = true } solana-thin-client = { workspace = true } solana-tpu-client = { workspace = true } +solana-transaction-status = { workspace = true } solana-version = { workspace = true } spl-instruction-padding = { workspace = true } thiserror = { workspace = true } diff --git a/bench-tps/src/bench_tps_client.rs b/bench-tps/src/bench_tps_client.rs index 3ab15bec11f7ee..0715d739879165 100644 --- a/bench-tps/src/bench_tps_client.rs +++ b/bench-tps/src/bench_tps_client.rs @@ -1,11 +1,12 @@ use { - solana_rpc_client_api::client_error::Error as ClientError, + solana_rpc_client_api::{client_error::Error as ClientError, config::RpcBlockConfig}, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, - message::Message, pubkey::Pubkey, signature::Signature, transaction::Transaction, - transport::TransportError, + message::Message, pubkey::Pubkey, signature::Signature, slot_history::Slot, + transaction::Transaction, transport::TransportError, }, solana_tpu_client::tpu_client::TpuSenderError, + solana_transaction_status::UiConfirmedBlock, thiserror::Error, }; @@ -93,6 +94,21 @@ pub trait BenchTpsClient { ) -> Result; fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> Result>>; + + fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) -> Result; + + fn get_blocks_with_commitment( + &self, + start_slot: Slot, + end_slot: Option, + commitment_config: CommitmentConfig, + ) -> Result>; + + fn get_block_with_config( + &self, + slot: Slot, + rpc_block_config: RpcBlockConfig, + ) 
-> Result; } mod bank_client; diff --git a/bench-tps/src/bench_tps_client/bank_client.rs b/bench-tps/src/bench_tps_client/bank_client.rs index 1aef7284c01ed6..3ea9080e51398a 100644 --- a/bench-tps/src/bench_tps_client/bank_client.rs +++ b/bench-tps/src/bench_tps_client/bank_client.rs @@ -1,5 +1,6 @@ use { crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, + solana_rpc_client_api::config::RpcBlockConfig, solana_runtime::bank_client::BankClient, solana_sdk::{ account::Account, @@ -10,8 +11,10 @@ use { message::Message, pubkey::Pubkey, signature::Signature, + slot_history::Slot, transaction::Transaction, }, + solana_transaction_status::UiConfirmedBlock, }; impl BenchTpsClient for BankClient { @@ -111,4 +114,25 @@ impl BenchTpsClient for BankClient { fn get_multiple_accounts(&self, _pubkeys: &[Pubkey]) -> Result>> { unimplemented!("BankClient doesn't support get_multiple_accounts"); } + + fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) -> Result { + SyncClient::get_slot_with_commitment(self, commitment_config).map_err(|err| err.into()) + } + + fn get_blocks_with_commitment( + &self, + _start_slot: Slot, + _end_slot: Option, + _commitment_config: CommitmentConfig, + ) -> Result> { + unimplemented!("BankClient doesn't support get_blocks"); + } + + fn get_block_with_config( + &self, + _slot: Slot, + _rpc_block_config: RpcBlockConfig, + ) -> Result { + unimplemented!("BankClient doesn't support get_block_with_config"); + } } diff --git a/bench-tps/src/bench_tps_client/rpc_client.rs b/bench-tps/src/bench_tps_client/rpc_client.rs index 2535099b464351..87ec1b8690c417 100644 --- a/bench-tps/src/bench_tps_client/rpc_client.rs +++ b/bench-tps/src/bench_tps_client/rpc_client.rs @@ -1,10 +1,13 @@ use { crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client_api::config::RpcBlockConfig, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, - message::Message, pubkey::Pubkey, signature::Signature, transaction::Transaction, + message::Message, pubkey::Pubkey, signature::Signature, slot_history::Slot, + transaction::Transaction, }, + solana_transaction_status::UiConfirmedBlock, }; impl BenchTpsClient for RpcClient { @@ -104,4 +107,26 @@ impl BenchTpsClient for RpcClient { fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> Result>> { RpcClient::get_multiple_accounts(self, pubkeys).map_err(|err| err.into()) } + + fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) -> Result { + RpcClient::get_slot_with_commitment(self, commitment_config).map_err(|err| err.into()) + } + + fn get_blocks_with_commitment( + &self, + start_slot: Slot, + end_slot: Option, + commitment_config: CommitmentConfig, + ) -> Result> { + RpcClient::get_blocks_with_commitment(self, start_slot, end_slot, commitment_config) + .map_err(|err| err.into()) + } + + fn get_block_with_config( + &self, + slot: Slot, + rpc_block_config: RpcBlockConfig, + ) -> Result { + RpcClient::get_block_with_config(self, slot, rpc_block_config).map_err(|err| err.into()) + } } diff --git a/bench-tps/src/bench_tps_client/thin_client.rs b/bench-tps/src/bench_tps_client/thin_client.rs index 6696774d679a8a..22945c4494f453 100644 --- a/bench-tps/src/bench_tps_client/thin_client.rs +++ b/bench-tps/src/bench_tps_client/thin_client.rs @@ -1,6 +1,7 @@ use { crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::thin_client::ThinClient, + 
solana_rpc_client_api::config::RpcBlockConfig, solana_sdk::{ account::Account, client::{AsyncClient, Client, SyncClient}, @@ -10,8 +11,10 @@ use { message::Message, pubkey::Pubkey, signature::Signature, + slot_history::Slot, transaction::Transaction, }, + solana_transaction_status::UiConfirmedBlock, }; impl BenchTpsClient for ThinClient { @@ -110,4 +113,31 @@ impl BenchTpsClient for ThinClient { .get_multiple_accounts(pubkeys) .map_err(|err| err.into()) } + + fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) -> Result { + self.rpc_client() + .get_slot_with_commitment(commitment_config) + .map_err(|err| err.into()) + } + + fn get_blocks_with_commitment( + &self, + start_slot: Slot, + end_slot: Option, + commitment_config: CommitmentConfig, + ) -> Result> { + self.rpc_client() + .get_blocks_with_commitment(start_slot, end_slot, commitment_config) + .map_err(|err| err.into()) + } + + fn get_block_with_config( + &self, + slot: Slot, + rpc_block_config: RpcBlockConfig, + ) -> Result { + self.rpc_client() + .get_block_with_config(slot, rpc_block_config) + .map_err(|err| err.into()) + } } diff --git a/bench-tps/src/bench_tps_client/tpu_client.rs b/bench-tps/src/bench_tps_client/tpu_client.rs index c56da2ae6e880b..6c053271ad3eec 100644 --- a/bench-tps/src/bench_tps_client/tpu_client.rs +++ b/bench-tps/src/bench_tps_client/tpu_client.rs @@ -4,10 +4,13 @@ use { solana_connection_cache::connection_cache::{ ConnectionManager, ConnectionPool, NewConnectionConfig, }, + solana_rpc_client_api::config::RpcBlockConfig, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, - message::Message, pubkey::Pubkey, signature::Signature, transaction::Transaction, + message::Message, pubkey::Pubkey, signature::Signature, slot_history::Slot, + transaction::Transaction, }, + solana_transaction_status::UiConfirmedBlock, }; impl BenchTpsClient for TpuClient @@ -130,4 +133,31 @@ where .get_multiple_accounts(pubkeys) .map_err(|err| err.into()) } + + fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) -> Result { + self.rpc_client() + .get_slot_with_commitment(commitment_config) + .map_err(|err| err.into()) + } + + fn get_blocks_with_commitment( + &self, + start_slot: Slot, + end_slot: Option, + commitment_config: CommitmentConfig, + ) -> Result> { + self.rpc_client() + .get_blocks_with_commitment(start_slot, end_slot, commitment_config) + .map_err(|err| err.into()) + } + + fn get_block_with_config( + &self, + slot: Slot, + rpc_block_config: RpcBlockConfig, + ) -> Result { + self.rpc_client() + .get_block_with_config(slot, rpc_block_config) + .map_err(|err| err.into()) + } } From 26692e666454d340a6691e2483194934e6a8ddfc Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 7 Mar 2024 16:06:31 -0600 Subject: [PATCH 345/401] blockstore: Remove unnecessary function and threadpool (#122) In a previous change, we removed the threadpool used to fetch entries in parallel in favor of combining all fetches into a single rocksdb multi_get() call. This change does the same thing, except for a threadpool that was used to fetch entries when we needed them to purge the transaction status and address signatures columns. 
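
The same pattern in miniature (a hedged sketch against the rust-rocksdb API,
with a hypothetical `fetch_all` helper; the blockstore itself goes through its
column abstractions rather than raw keys):

    use rocksdb::{Error, DB};

    // One multi_get() over every key replaces N point lookups fanned out
    // across a threadpool; RocksDB batches the reads internally.
    fn fetch_all(db: &DB, keys: &[Vec<u8>]) -> Result<Vec<Option<Vec<u8>>>, Error> {
        db.multi_get(keys).into_iter().collect()
    }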
--- ledger/src/blockstore.rs | 36 +---------------------- ledger/src/blockstore/blockstore_purge.rs | 3 +- 2 files changed, 3 insertions(+), 36 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index f8c8330843dfce..f15976abdb241b 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -31,10 +31,7 @@ use { itertools::Itertools, log::*, rand::Rng, - rayon::{ - iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, - ThreadPool, - }, + rayon::iter::{IntoParallelIterator, ParallelIterator}, rocksdb::{DBRawIterator, LiveFile}, solana_accounts_db::hardened_unpack::{ unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, @@ -94,16 +91,6 @@ pub use { rocksdb::properties as RocksProperties, }; -// get_max_thread_count to match number of threads in the old code. -// see: https://github.com/solana-labs/solana/pull/24853 -lazy_static! { - static ref PAR_THREAD_POOL_ALL_CPUS: ThreadPool = rayon::ThreadPoolBuilder::new() - .num_threads(num_cpus::get()) - .thread_name(|i| format!("solBstoreAll{i:02}")) - .build() - .unwrap(); -} - pub const MAX_REPLAY_WAKE_UP_SIGNALS: usize = 1; pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000; @@ -3283,27 +3270,6 @@ impl Blockstore { self.get_slot_entries_in_block(slot, vec![(start_index, end_index)], slot_meta) } - fn get_any_valid_slot_entries(&self, slot: Slot, start_index: u64) -> Vec { - let (completed_ranges, slot_meta) = self - .get_completed_ranges(slot, start_index) - .unwrap_or_default(); - if completed_ranges.is_empty() { - return vec![]; - } - let slot_meta = slot_meta.unwrap(); - - let entries: Vec> = PAR_THREAD_POOL_ALL_CPUS.install(|| { - completed_ranges - .par_iter() - .map(|(start_index, end_index)| { - self.get_entries_in_data_block(slot, *start_index, *end_index, Some(&slot_meta)) - .unwrap_or_default() - }) - .collect() - }); - entries.into_iter().flatten().collect() - } - /// Returns a mapping from each elements of `slots` to a list of the /// element's children slots. 
pub fn get_slots_since(&self, slots: &[Slot]) -> Result>> { diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index d8b4c7424cd8c1..d442732303fa2a 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -455,7 +455,8 @@ impl Blockstore { for slot in from_slot..=to_slot { let primary_indexes = slot_indexes(slot); - let slot_entries = self.get_any_valid_slot_entries(slot, 0); + let (slot_entries, _, _) = + self.get_slot_entries_with_shred_info(slot, 0, true /* allow_dead_slots */)?; let transactions = slot_entries .into_iter() .flat_map(|entry| entry.transactions); From 9770cd9083126b4dfe40fb207b0a3b8b21f33d21 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Thu, 7 Mar 2024 18:48:35 -0600 Subject: [PATCH 346/401] add precompile signature metrics to cost tracker (#133) --- cost-model/src/block_cost_limits.rs | 8 +- cost-model/src/cost_model.rs | 24 +++++- cost-model/src/cost_tracker.rs | 42 ++++++++++ cost-model/src/transaction_cost.rs | 31 +++++++ sdk/program/src/message/sanitized.rs | 116 ++++++++++++++++++++++++--- 5 files changed, 205 insertions(+), 16 deletions(-) diff --git a/cost-model/src/block_cost_limits.rs b/cost-model/src/block_cost_limits.rs index 328d89cd04198b..b04f289e0553af 100644 --- a/cost-model/src/block_cost_limits.rs +++ b/cost-model/src/block_cost_limits.rs @@ -24,6 +24,10 @@ pub const MAX_CONCURRENCY: u64 = 4; pub const COMPUTE_UNIT_TO_US_RATIO: u64 = 30; /// Number of compute units for one signature verification. pub const SIGNATURE_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 24; +/// Number of compute units for one secp256k1 signature verification. +pub const SECP256K1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 223; +/// Number of compute units for one ed25519 signature verification. +pub const ED25519_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 76; /// Number of compute units for one write lock pub const WRITE_LOCK_UNITS: u64 = COMPUTE_UNIT_TO_US_RATIO * 10; /// Number of data bytes per compute units @@ -43,8 +47,8 @@ lazy_static! 
{ (bpf_loader::id(), solana_bpf_loader_program::DEFAULT_LOADER_COMPUTE_UNITS), (loader_v4::id(), solana_loader_v4_program::DEFAULT_COMPUTE_UNITS), // Note: These are precompile, run directly in bank during sanitizing; - (secp256k1_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), - (ed25519_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), + (secp256k1_program::id(), 0), + (ed25519_program::id(), 0), ] .iter() .cloned() diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index b81ea24402d4df..fa12a7343bc7e0 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -43,7 +43,7 @@ impl CostModel { } else { let mut tx_cost = UsageCostDetails::new_with_default_capacity(); - tx_cost.signature_cost = Self::get_signature_cost(transaction); + Self::get_signature_cost(&mut tx_cost, transaction); Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); tx_cost.account_data_size = Self::calculate_account_data_size(transaction); @@ -53,8 +53,26 @@ impl CostModel { } } - fn get_signature_cost(transaction: &SanitizedTransaction) -> u64 { - transaction.signatures().len() as u64 * SIGNATURE_COST + fn get_signature_cost(tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction) { + let signatures_count_detail = transaction.message().get_signature_details(); + tx_cost.num_transaction_signatures = signatures_count_detail.num_transaction_signatures(); + tx_cost.num_secp256k1_instruction_signatures = + signatures_count_detail.num_secp256k1_instruction_signatures(); + tx_cost.num_ed25519_instruction_signatures = + signatures_count_detail.num_ed25519_instruction_signatures(); + tx_cost.signature_cost = signatures_count_detail + .num_transaction_signatures() + .saturating_mul(SIGNATURE_COST) + .saturating_add( + signatures_count_detail + .num_secp256k1_instruction_signatures() + .saturating_mul(SECP256K1_VERIFY_COST), + ) + .saturating_add( + signatures_count_detail + .num_ed25519_instruction_signatures() + .saturating_mul(ED25519_VERIFY_COST), + ); } fn get_writable_accounts(transaction: &SanitizedTransaction) -> Vec { diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 8fb092c36680a0..b5e3f9f4932a59 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -58,6 +58,9 @@ pub struct CostTracker { vote_cost: u64, transaction_count: u64, account_data_size: u64, + transaction_signature_count: u64, + secp256k1_instruction_signature_count: u64, + ed25519_instruction_signature_count: u64, } impl Default for CostTracker { @@ -77,6 +80,9 @@ impl Default for CostTracker { vote_cost: 0, transaction_count: 0, account_data_size: 0, + transaction_signature_count: 0, + secp256k1_instruction_signature_count: 0, + ed25519_instruction_signature_count: 0, } } } @@ -153,6 +159,21 @@ impl CostTracker { ("costliest_account", costliest_account.to_string(), String), ("costliest_account_cost", costliest_account_cost as i64, i64), ("account_data_size", self.account_data_size, i64), + ( + "transaction_signature_count", + self.transaction_signature_count, + i64 + ), + ( + "secp256k1_instruction_signature_count", + self.secp256k1_instruction_signature_count, + i64 + ), + ( + "ed25519_instruction_signature_count", + self.ed25519_instruction_signature_count, + i64 + ), ); } @@ -213,6 +234,18 @@ impl CostTracker { self.add_transaction_execution_cost(tx_cost, tx_cost.sum()); saturating_add_assign!(self.account_data_size, tx_cost.account_data_size()); 
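Worked through with the constants introduced above (COMPUTE_UNIT_TO_US_RATIO = 30, so SIGNATURE_COST = 720, SECP256K1_VERIFY_COST = 6_690, ED25519_VERIFY_COST = 2_280), the new pricing is easy to check by hand. The sketch below re-derives it outside the crate, mirroring the saturating arithmetic of get_signature_cost:

    const COMPUTE_UNIT_TO_US_RATIO: u64 = 30;
    const SIGNATURE_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 24; // 720
    const SECP256K1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 223; // 6_690
    const ED25519_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 76; // 2_280

    // Mirrors the shape of get_signature_cost: each signature class is
    // priced separately and the parts are combined with saturating math.
    fn signature_cost(num_tx_sigs: u64, num_secp: u64, num_ed25519: u64) -> u64 {
        num_tx_sigs
            .saturating_mul(SIGNATURE_COST)
            .saturating_add(num_secp.saturating_mul(SECP256K1_VERIFY_COST))
            .saturating_add(num_ed25519.saturating_mul(ED25519_VERIFY_COST))
    }

    fn main() {
        // One fee-payer signature, two secp256k1 verifies, one ed25519 verify:
        // 720 + 2 * 6_690 + 1 * 2_280 = 16_380 compute units.
        assert_eq!(signature_cost(1, 2, 1), 16_380);
    }
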
saturating_add_assign!(self.transaction_count, 1); + saturating_add_assign!( + self.transaction_signature_count, + tx_cost.num_transaction_signatures() + ); + saturating_add_assign!( + self.secp256k1_instruction_signature_count, + tx_cost.num_secp256k1_instruction_signatures() + ); + saturating_add_assign!( + self.ed25519_instruction_signature_count, + tx_cost.num_ed25519_instruction_signatures() + ); } fn remove_transaction_cost(&mut self, tx_cost: &TransactionCost) { @@ -222,6 +255,15 @@ impl CostTracker { .account_data_size .saturating_sub(tx_cost.account_data_size()); self.transaction_count = self.transaction_count.saturating_sub(1); + self.transaction_signature_count = self + .transaction_signature_count + .saturating_sub(tx_cost.num_transaction_signatures()); + self.secp256k1_instruction_signature_count = self + .secp256k1_instruction_signature_count + .saturating_sub(tx_cost.num_secp256k1_instruction_signatures()); + self.ed25519_instruction_signature_count = self + .ed25519_instruction_signature_count + .saturating_sub(tx_cost.num_ed25519_instruction_signatures()); } /// Apply additional actual execution units to cost_tracker diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index c6e68bfe17b6f4..c92639676958ae 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -91,6 +91,27 @@ impl TransactionCost { Self::Transaction(usage_cost) => &usage_cost.writable_accounts, } } + + pub fn num_transaction_signatures(&self) -> u64 { + match self { + Self::SimpleVote { .. } => 1, + Self::Transaction(usage_cost) => usage_cost.num_transaction_signatures, + } + } + + pub fn num_secp256k1_instruction_signatures(&self) -> u64 { + match self { + Self::SimpleVote { .. } => 0, + Self::Transaction(usage_cost) => usage_cost.num_secp256k1_instruction_signatures, + } + } + + pub fn num_ed25519_instruction_signatures(&self) -> u64 { + match self { + Self::SimpleVote { .. 
} => 0, + Self::Transaction(usage_cost) => usage_cost.num_ed25519_instruction_signatures, + } + } } const MAX_WRITABLE_ACCOUNTS: usize = 256; @@ -105,6 +126,9 @@ pub struct UsageCostDetails { pub programs_execution_cost: u64, pub loaded_accounts_data_size_cost: u64, pub account_data_size: u64, + pub num_transaction_signatures: u64, + pub num_secp256k1_instruction_signatures: u64, + pub num_ed25519_instruction_signatures: u64, } impl Default for UsageCostDetails { @@ -117,6 +141,9 @@ impl Default for UsageCostDetails { programs_execution_cost: 0u64, loaded_accounts_data_size_cost: 0u64, account_data_size: 0u64, + num_transaction_signatures: 0u64, + num_secp256k1_instruction_signatures: 0u64, + num_ed25519_instruction_signatures: 0u64, } } } @@ -134,6 +161,10 @@ impl PartialEq for UsageCostDetails { && self.programs_execution_cost == other.programs_execution_cost && self.loaded_accounts_data_size_cost == other.loaded_accounts_data_size_cost && self.account_data_size == other.account_data_size + && self.num_transaction_signatures == other.num_transaction_signatures + && self.num_secp256k1_instruction_signatures + == other.num_secp256k1_instruction_signatures + && self.num_ed25519_instruction_signatures == other.num_ed25519_instruction_signatures && to_hash_set(&self.writable_accounts) == to_hash_set(&other.writable_accounts) } } diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index d4c7638e136a72..ce276a60ef69e7 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -345,17 +345,7 @@ impl SanitizedMessage { } pub fn num_signatures(&self) -> u64 { - let mut num_signatures = u64::from(self.header().num_required_signatures); - // This next part is really calculating the number of pre-processor - // operations being done and treating them like a signature - for (program_id, instruction) in self.program_instructions_iter() { - if secp256k1_program::check_id(program_id) || ed25519_program::check_id(program_id) { - if let Some(num_verifies) = instruction.data.first() { - num_signatures = num_signatures.saturating_add(u64::from(*num_verifies)); - } - } - } - num_signatures + self.get_signature_details().total_signatures() } /// Returns the number of requested write-locks in this message. 
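The counting that num_signatures now delegates to (shown in the next hunk) has a simple core: the first data byte of a secp256k1 or ed25519 precompile instruction encodes how many verifies that instruction performs. A self-contained sketch of the scheme, with simplified stand-in types rather than the sdk's:

    enum ProgramKind {
        Secp256k1,
        Ed25519,
        Other,
    }

    struct Instruction {
        program: ProgramKind,
        data: Vec<u8>,
    }

    #[derive(Default)]
    struct SignatureDetails {
        num_transaction_signatures: u64,
        num_secp256k1_instruction_signatures: u64,
        num_ed25519_instruction_signatures: u64,
    }

    // The first data byte of a precompile instruction is its verify count.
    fn count_signatures(required_sigs: u8, instructions: &[Instruction]) -> SignatureDetails {
        let mut details = SignatureDetails {
            num_transaction_signatures: u64::from(required_sigs),
            ..SignatureDetails::default()
        };
        for ix in instructions {
            let verifies = ix.data.first().copied().map(u64::from).unwrap_or(0);
            match ix.program {
                ProgramKind::Secp256k1 => {
                    details.num_secp256k1_instruction_signatures = details
                        .num_secp256k1_instruction_signatures
                        .saturating_add(verifies);
                }
                ProgramKind::Ed25519 => {
                    details.num_ed25519_instruction_signatures = details
                        .num_ed25519_instruction_signatures
                        .saturating_add(verifies);
                }
                ProgramKind::Other => {}
            }
        }
        details
    }

    fn main() {
        let ixs = vec![
            Instruction { program: ProgramKind::Secp256k1, data: vec![1] },
            Instruction { program: ProgramKind::Ed25519, data: vec![5] },
            Instruction { program: ProgramKind::Other, data: vec![9] },
        ];
        let d = count_signatures(2, &ixs);
        assert_eq!(d.num_transaction_signatures, 2);
        assert_eq!(d.num_secp256k1_instruction_signatures, 1);
        assert_eq!(d.num_ed25519_instruction_signatures, 5);
    }
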
@@ -365,6 +355,68 @@ impl SanitizedMessage { .len() .saturating_sub(self.num_readonly_accounts()) as u64 } + + /// return detailed signature counts + pub fn get_signature_details(&self) -> TransactionSignatureDetails { + let mut transaction_signature_details = TransactionSignatureDetails { + num_transaction_signatures: u64::from(self.header().num_required_signatures), + ..TransactionSignatureDetails::default() + }; + + // counting the number of pre-processor operations separately + for (program_id, instruction) in self.program_instructions_iter() { + if secp256k1_program::check_id(program_id) { + if let Some(num_verifies) = instruction.data.first() { + transaction_signature_details.num_secp256k1_instruction_signatures = + transaction_signature_details + .num_secp256k1_instruction_signatures + .saturating_add(u64::from(*num_verifies)); + } + } else if ed25519_program::check_id(program_id) { + if let Some(num_verifies) = instruction.data.first() { + transaction_signature_details.num_ed25519_instruction_signatures = + transaction_signature_details + .num_ed25519_instruction_signatures + .saturating_add(u64::from(*num_verifies)); + } + } + } + + transaction_signature_details + } +} + +#[derive(Default)] +/// Transaction signature details including the number of transaction signatures +/// and precompile signatures. +pub struct TransactionSignatureDetails { + num_transaction_signatures: u64, + num_secp256k1_instruction_signatures: u64, + num_ed25519_instruction_signatures: u64, +} + +impl TransactionSignatureDetails { + /// return total number of signature, treating pre-processor operations as signature + pub(crate) fn total_signatures(&self) -> u64 { + self.num_transaction_signatures + .saturating_add(self.num_secp256k1_instruction_signatures) + .saturating_add(self.num_ed25519_instruction_signatures) + } + + /// return the number of transaction signatures + pub fn num_transaction_signatures(&self) -> u64 { + self.num_transaction_signatures + } + + /// return the number of secp256k1 instruction signatures + pub fn num_secp256k1_instruction_signatures(&self) -> u64 { + self.num_secp256k1_instruction_signatures + } + + /// return the number of ed25519 instruction signatures + pub fn num_ed25519_instruction_signatures(&self) -> u64 { + self.num_ed25519_instruction_signatures + } } #[cfg(test)] @@ -563,4 +615,46 @@ mod tests { } } } + + #[test] + fn test_get_signature_details() { + let key0 = Pubkey::new_unique(); + let key1 = Pubkey::new_unique(); + let loader_key = Pubkey::new_unique(); + + let loader_instr = CompiledInstruction::new(2, &(), vec![0, 1]); + let mock_secp256k1_instr = CompiledInstruction::new(3, &[1u8; 10], vec![]); + let mock_ed25519_instr = CompiledInstruction::new(4, &[5u8; 10], vec![]); + + let message = SanitizedMessage::try_from_legacy_message( + legacy::Message::new_with_compiled_instructions( + 2, + 1, + 2, + vec![ + key0, + key1, + loader_key, + secp256k1_program::id(), + ed25519_program::id(), + ], + Hash::default(), + vec![ + loader_instr, + mock_secp256k1_instr.clone(), + mock_ed25519_instr, + mock_secp256k1_instr, + ], + ), + ) + .unwrap(); + + let signature_details = message.get_signature_details(); + // expect 2 required transaction signatures + assert_eq!(2, signature_details.num_transaction_signatures); + // expect 2 secp256k1 instruction signatures - 1 for each mock_secp2561k1_instr + assert_eq!(2, signature_details.num_secp256k1_instruction_signatures); + // expect 5 ed25519 instruction signatures from mock_ed25519_instr + assert_eq!(5, 
signature_details.num_ed25519_instruction_signatures); + } } From c0239c8eff109b8a278c3358a9dd92eccc821c5e Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 8 Mar 2024 10:48:39 +0800 Subject: [PATCH 347/401] ci: rename script (#125) --- ...nifest-keypair.sh => agave-install-update-manifest-keypair.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename scripts/{solana-install-update-manifest-keypair.sh => agave-install-update-manifest-keypair.sh} (100%) diff --git a/scripts/solana-install-update-manifest-keypair.sh b/scripts/agave-install-update-manifest-keypair.sh similarity index 100% rename from scripts/solana-install-update-manifest-keypair.sh rename to scripts/agave-install-update-manifest-keypair.sh From 377e1f911294395b666dfa799e30a25f8aefeb9b Mon Sep 17 00:00:00 2001 From: Jon C Date: Fri, 8 Mar 2024 11:02:33 +0100 Subject: [PATCH 348/401] runtime: Move `From` from sdk (#141) sdk: Move `From` into runtime --- runtime/src/bank/address_lookup_table.rs | 28 ++++++++++++++----- sdk/program/src/address_lookup_table/error.rs | 14 ---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 483ec7cea00ea1..51eee794803e14 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -10,6 +10,17 @@ use { }, }; +fn into_address_loader_error(err: AddressLookupError) -> AddressLoaderError { + match err { + AddressLookupError::LookupTableAccountNotFound => { + AddressLoaderError::LookupTableAccountNotFound + } + AddressLookupError::InvalidAccountOwner => AddressLoaderError::InvalidAccountOwner, + AddressLookupError::InvalidAccountData => AddressLoaderError::InvalidAccountData, + AddressLookupError::InvalidLookupIndex => AddressLoaderError::InvalidLookupIndex, + } +} + impl AddressLoader for &Bank { fn load_addresses( self, @@ -23,15 +34,18 @@ impl AddressLoader for &Bank { .get_slot_hashes() .map_err(|_| AddressLoaderError::SlotHashesSysvarNotFound)?; - Ok(address_table_lookups + address_table_lookups .iter() .map(|address_table_lookup| { - self.rc.accounts.load_lookup_table_addresses( - &self.ancestors, - address_table_lookup, - &slot_hashes, - ) + self.rc + .accounts + .load_lookup_table_addresses( + &self.ancestors, + address_table_lookup, + &slot_hashes, + ) + .map_err(into_address_loader_error) }) - .collect::>()?) 
+ .collect::>() } } diff --git a/sdk/program/src/address_lookup_table/error.rs b/sdk/program/src/address_lookup_table/error.rs index b427067afc386c..9925dee4dbbf4c 100644 --- a/sdk/program/src/address_lookup_table/error.rs +++ b/sdk/program/src/address_lookup_table/error.rs @@ -1,5 +1,3 @@ -#[cfg(not(target_os = "solana"))] -use solana_program::message::AddressLoaderError; use thiserror::Error; #[derive(Debug, Error, PartialEq, Eq, Clone)] @@ -20,15 +18,3 @@ pub enum AddressLookupError { #[error("Address lookup contains an invalid index")] InvalidLookupIndex, } - -#[cfg(not(target_os = "solana"))] -impl From for AddressLoaderError { - fn from(err: AddressLookupError) -> Self { - match err { - AddressLookupError::LookupTableAccountNotFound => Self::LookupTableAccountNotFound, - AddressLookupError::InvalidAccountOwner => Self::InvalidAccountOwner, - AddressLookupError::InvalidAccountData => Self::InvalidAccountData, - AddressLookupError::InvalidLookupIndex => Self::InvalidLookupIndex, - } - } -} From e027a8bd633f5ca280bbbc64a52bf250d0b6419f Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:28:04 -0300 Subject: [PATCH 349/401] Gather recording booleans in a data structure (#134) --- core/src/banking_stage/consumer.rs | 5 ++-- ledger/src/blockstore_processor.rs | 16 +++++------ programs/sbf/tests/programs.rs | 13 +++++---- runtime/src/bank.rs | 37 +++++++++++------------- runtime/src/bank/tests.rs | 24 ++++++++-------- svm/src/transaction_processor.rs | 45 +++++++++++++++++++----------- 6 files changed, 74 insertions(+), 66 deletions(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 957e190c873f64..c5ed22a34278ce 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -34,6 +34,7 @@ use { solana_svm::{ account_loader::{validate_fee_payer, TransactionCheckResult}, transaction_error_metrics::TransactionErrorMetrics, + transaction_processor::ExecutionRecordingConfig, }, std::{ sync::{atomic::Ordering, Arc}, @@ -593,9 +594,7 @@ impl Consumer { .load_and_execute_transactions( batch, MAX_PROCESSING_AGE, - transaction_status_sender_enabled, - transaction_status_sender_enabled, - transaction_status_sender_enabled, + ExecutionRecordingConfig::new_single_setting(transaction_status_sender_enabled), &mut execute_and_commit_timings.execute_timings, None, // account_overrides self.log_messages_bytes_limit, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index c999eab1a56fd4..e4ae5f368b2afd 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -57,8 +57,11 @@ use { VersionedTransaction, }, }, - solana_svm::transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, + solana_svm::{ + transaction_processor::ExecutionRecordingConfig, + transaction_results::{ + TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, + }, }, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::{vote_account::VoteAccountsHashMap, vote_sender_types::ReplayVoteSender}, @@ -163,9 +166,7 @@ pub fn execute_batch( batch, MAX_PROCESSING_AGE, transaction_status_sender.is_some(), - transaction_status_sender.is_some(), - transaction_status_sender.is_some(), - transaction_status_sender.is_some(), + ExecutionRecordingConfig::new_single_setting(transaction_status_sender.is_some()), timings, log_messages_bytes_limit, ); @@ -1972,6 
+1973,7 @@ pub mod tests { system_transaction, transaction::{Transaction, TransactionError}, }, + solana_svm::transaction_processor::ExecutionRecordingConfig, solana_vote::vote_account::VoteAccount, solana_vote_program::{ self, @@ -3962,9 +3964,7 @@ pub mod tests { &batch, MAX_PROCESSING_AGE, false, - false, - false, - false, + ExecutionRecordingConfig::new_single_setting(false), &mut ExecuteTimings::default(), None, ); diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index dc4867ce7e40fd..22969bc482a28e 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -48,6 +48,7 @@ use { sysvar::{self, clock}, transaction::VersionedTransaction, }, + solana_svm::transaction_processor::ExecutionRecordingConfig, solana_svm::transaction_results::{ DurableNonceFee, InnerInstruction, TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, @@ -104,9 +105,11 @@ fn process_transaction_and_record_inner( &tx_batch, MAX_PROCESSING_AGE, false, - true, - true, - false, + ExecutionRecordingConfig { + enable_cpi_recording: true, + enable_log_recording: true, + enable_return_data_recording: false, + }, &mut ExecuteTimings::default(), None, ) @@ -152,9 +155,7 @@ fn execute_transactions( &batch, std::usize::MAX, true, - true, - true, - true, + ExecutionRecordingConfig::new_single_setting(true), &mut timings, None, ); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ee04f20787cb9a..3e504d470de744 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -269,6 +269,7 @@ pub struct BankRc { #[cfg(RUSTC_WITH_SPECIALIZATION)] use solana_frozen_abi::abi_example::AbiExample; +use solana_svm::transaction_processor::ExecutionRecordingConfig; #[cfg(RUSTC_WITH_SPECIALIZATION)] impl AbiExample for BankRc { @@ -4297,9 +4298,11 @@ impl Bank { // for processing. During forwarding, the transaction could expire if the // delay is not accounted for. 
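The pattern these hunks converge on is worth isolating: three positional booleans become one named struct, so a call site can no longer silently transpose them. A standalone sketch, with the field names taken from the diff and everything else simplified:

    // Gathers the three recording booleans so call sites cannot transpose them.
    #[derive(Copy, Clone)]
    struct ExecutionRecordingConfig {
        enable_cpi_recording: bool,
        enable_log_recording: bool,
        enable_return_data_recording: bool,
    }

    impl ExecutionRecordingConfig {
        // Convenience constructor for the common "all on or all off" case.
        fn new_single_setting(option: bool) -> Self {
            ExecutionRecordingConfig {
                enable_cpi_recording: option,
                enable_log_recording: option,
                enable_return_data_recording: option,
            }
        }
    }

    fn execute(recording_config: ExecutionRecordingConfig) {
        // Field names document intent where a bare (false, true, true) would not.
        if recording_config.enable_log_recording {
            println!("collecting logs");
        }
    }

    fn main() {
        execute(ExecutionRecordingConfig::new_single_setting(true));
        execute(ExecutionRecordingConfig {
            enable_cpi_recording: false,
            enable_log_recording: true,
            enable_return_data_recording: false,
        });
    }
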
MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, - enable_cpi_recording, - true, - true, + ExecutionRecordingConfig { + enable_cpi_recording, + enable_log_recording: true, + enable_return_data_recording: true, + }, &mut timings, Some(&account_overrides), None, @@ -4548,9 +4551,7 @@ impl Bank { &self, batch: &TransactionBatch, max_age: usize, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, + recording_config: ExecutionRecordingConfig, timings: &mut ExecuteTimings, account_overrides: Option<&AccountOverrides>, log_messages_bytes_limit: Option, @@ -4614,9 +4615,7 @@ impl Bank { sanitized_txs, &mut check_results, &mut error_counters, - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, + recording_config, timings, account_overrides, self.builtin_programs.iter(), @@ -5642,9 +5641,7 @@ impl Bank { batch: &TransactionBatch, max_age: usize, collect_balances: bool, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, + recording_config: ExecutionRecordingConfig, timings: &mut ExecuteTimings, log_messages_bytes_limit: Option, ) -> (TransactionResults, TransactionBalancesSet) { @@ -5665,9 +5662,7 @@ impl Bank { } = self.load_and_execute_transactions( batch, max_age, - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, + recording_config, timings, None, log_messages_bytes_limit, @@ -5735,9 +5730,11 @@ impl Bank { &batch, MAX_PROCESSING_AGE, false, // collect_balances - false, // enable_cpi_recording - true, // enable_log_recording - true, // enable_return_data_recording + ExecutionRecordingConfig { + enable_cpi_recording: false, + enable_log_recording: true, + enable_return_data_recording: true, + }, &mut ExecuteTimings::default(), Some(1000 * 1000), ); @@ -5773,9 +5770,7 @@ impl Bank { batch, MAX_PROCESSING_AGE, false, - false, - false, - false, + ExecutionRecordingConfig::new_single_setting(false), &mut ExecuteTimings::default(), None, ) diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 753116ff878e18..f9b846d85b1512 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -3122,9 +3122,7 @@ fn test_interleaving_locks() { &lock_result, MAX_PROCESSING_AGE, false, - false, - false, - false, + ExecutionRecordingConfig::new_single_setting(false), &mut ExecuteTimings::default(), None, ) @@ -5948,9 +5946,7 @@ fn test_pre_post_transaction_balances() { &lock_result, MAX_PROCESSING_AGE, true, - false, - false, - false, + ExecutionRecordingConfig::new_single_setting(false), &mut ExecuteTimings::default(), None, ); @@ -9230,9 +9226,11 @@ fn test_tx_log_order() { &batch, MAX_PROCESSING_AGE, false, - false, - true, - false, + ExecutionRecordingConfig { + enable_cpi_recording: false, + enable_log_recording: true, + enable_return_data_recording: false, + }, &mut ExecuteTimings::default(), None, ) @@ -9338,9 +9336,11 @@ fn test_tx_return_data() { &batch, MAX_PROCESSING_AGE, false, - false, - false, - true, + ExecutionRecordingConfig { + enable_cpi_recording: false, + enable_log_recording: false, + enable_return_data_recording: true, + }, &mut ExecuteTimings::default(), None, ) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index e44b426df96b0d..d90afb0a428ea3 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -65,6 +65,24 @@ pub struct LoadAndExecuteSanitizedTransactionsOutput { pub execution_results: Vec, } +/// Configuration of the recording 
capabilities for transaction execution +#[derive(Copy, Clone)] +pub struct ExecutionRecordingConfig { + pub enable_cpi_recording: bool, + pub enable_log_recording: bool, + pub enable_return_data_recording: bool, +} + +impl ExecutionRecordingConfig { + pub fn new_single_setting(option: bool) -> Self { + ExecutionRecordingConfig { + enable_return_data_recording: option, + enable_log_recording: option, + enable_cpi_recording: option, + } + } +} + pub trait TransactionProcessingCallback { fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option; @@ -184,9 +202,7 @@ impl TransactionBatchProcessor { sanitized_txs: &[SanitizedTransaction], check_results: &mut [TransactionCheckResult], error_counters: &mut TransactionErrorMetrics, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, + recording_config: ExecutionRecordingConfig, timings: &mut ExecuteTimings, account_overrides: Option<&AccountOverrides>, builtin_programs: impl Iterator, @@ -266,9 +282,7 @@ impl TransactionBatchProcessor { loaded_transaction, compute_budget, nonce.as_ref().map(DurableNonceFee::from), - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, + recording_config, timings, error_counters, log_messages_bytes_limit, @@ -466,9 +480,7 @@ impl TransactionBatchProcessor { loaded_transaction: &mut LoadedTransaction, compute_budget: ComputeBudget, durable_nonce_fee: Option, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, + recording_config: ExecutionRecordingConfig, timings: &mut ExecuteTimings, error_counters: &mut TransactionErrorMetrics, log_messages_bytes_limit: Option, @@ -506,7 +518,7 @@ impl TransactionBatchProcessor { tx.message(), ); - let log_collector = if enable_log_recording { + let log_collector = if recording_config.enable_log_recording { match log_messages_bytes_limit { None => Some(LogCollector::new_ref()), Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some( @@ -585,7 +597,7 @@ impl TransactionBatchProcessor { .ok() }); - let inner_instructions = if enable_cpi_recording { + let inner_instructions = if recording_config.enable_cpi_recording { Some(Self::inner_instructions_list_from_instruction_trace( &transaction_context, )) @@ -616,11 +628,12 @@ impl TransactionBatchProcessor { ); saturating_add_assign!(timings.details.changed_account_count, touched_account_count); - let return_data = if enable_return_data_recording && !return_data.data.is_empty() { - Some(return_data) - } else { - None - }; + let return_data = + if recording_config.enable_return_data_recording && !return_data.data.is_empty() { + Some(return_data) + } else { + None + }; TransactionExecutionResult::Executed { details: TransactionExecutionDetails { From 7a8e29d4d5edb1d0d467458e36f87871d8dfc0fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 Mar 2024 00:59:43 +0800 Subject: [PATCH 350/401] build(deps): bump cc from 1.0.83 to 1.0.89 (#40) * build(deps): bump cc from 1.0.83 to 1.0.89 Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.0.83 to 1.0.89. - [Release notes](https://github.com/rust-lang/cc-rs/releases) - [Commits](https://github.com/rust-lang/cc-rs/compare/1.0.83...1.0.89) --- updated-dependencies: - dependency-name: cc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0b181a043c7c0..19b265863eba47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1231,9 +1231,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" dependencies = [ "jobserver", "libc", diff --git a/Cargo.toml b/Cargo.toml index 4b8ae12dab0078..16786e925c34b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -170,7 +170,7 @@ bytes = "1.5" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" -cc = "1.0.83" +cc = "1.0.89" chrono = { version = "0.4.34", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b72b4110e336ad..a3d350456afa9c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -971,9 +971,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" dependencies = [ "jobserver", "libc", From 68be105870d669b81999faafb4a1e6d217c26cbf Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sat, 9 Mar 2024 01:02:21 +0800 Subject: [PATCH 351/401] Use agave prefix in scripts for pre-installed binaries (#155) --- multinode-demo/common.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/multinode-demo/common.sh b/multinode-demo/common.sh index 1643208947b643..db43dd15cffaaf 100644 --- a/multinode-demo/common.sh +++ b/multinode-demo/common.sh @@ -30,7 +30,11 @@ if [[ -n $USE_INSTALL || ! -f "$SOLANA_ROOT"/Cargo.toml ]]; then if [[ -z $program ]]; then printf "solana" else - printf "solana-%s" "$program" + if [[ $program == "validator" || $program == "ledger-tool" || $program == "watchtower" || $program == "install" ]]; then + printf "agave-%s" "$program" + else + printf "solana-%s" "$program" + fi fi } else From 1ac523c121744376332693319c3113b946d39048 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 8 Mar 2024 12:14:40 -0600 Subject: [PATCH 352/401] Move delta hash test function to dev-context-utils (#151) move delta hash test function to dev-context-utils Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index cf4d17745b1b73..41ec05dce0e4a5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7882,6 +7882,7 @@ impl AccountsDb { /// /// As part of calculating the accounts delta hash, get a list of accounts modified this slot /// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning. 
+    #[cfg(feature = "dev-context-only-utils")]
     pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash {
         self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default())
     }

From bf0a3684eb77512ea6ea3b90a2c624c103bc9a9b Mon Sep 17 00:00:00 2001
From: steviez
Date: Fri, 8 Mar 2024 12:52:35 -0600
Subject: [PATCH 353/401] Make ReplayStage create the parallel fork replay
 threadpool (#137)

ReplayStage owning the pool allows for subsequent work to configure the
size of the pool; configuring the size of the pool inside of the
lazy_static would have been a little messy

---
 core/src/replay_stage.rs | 136 +++++++++++++++++++++------------------
 1 file changed, 73 insertions(+), 63 deletions(-)

diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index 46014e3f7912de..3683e257ed10a8 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -33,7 +33,6 @@ use {
         window_service::DuplicateSlotReceiver,
     },
     crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
-    lazy_static::lazy_static,
     rayon::{prelude::*, ThreadPool},
     solana_entry::entry::VerifyRecyclers,
     solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc,
@@ -102,14 +101,6 @@ const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000;
 const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4;
 const MAX_REPAIR_RETRY_LOOP_ATTEMPTS: usize = 10;

-lazy_static! {
-    static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
-        .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY)
-        .thread_name(|i| format!("solReplay{i:02}"))
-        .build()
-        .unwrap();
-}
-
 #[derive(PartialEq, Eq, Debug)]
 pub enum HeaviestForkFailures {
     LockedOut(u64),
@@ -131,6 +122,11 @@ pub enum HeaviestForkFailures {
     ),
 }

+enum ForkReplayMode {
+    Serial,
+    Parallel(ThreadPool),
+}
+
 #[derive(PartialEq, Eq, Debug)]
 enum ConfirmationType {
     SupermajorityVoted,
@@ -656,6 +652,16 @@ impl ReplayStage {
                 r_bank_forks.get_vote_only_mode_signal(),
             )
         };
+        let replay_mode = if replay_slots_concurrently {
+            let pool = rayon::ThreadPoolBuilder::new()
+                .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY)
+                .thread_name(|i| format!("solReplay{i:02}"))
+                .build()
+                .expect("new rayon threadpool");
+            ForkReplayMode::Parallel(pool)
+        } else {
+            ForkReplayMode::Serial
+        };

         Self::reset_poh_recorder(
             &my_pubkey,
@@ -717,7 +723,7 @@ impl ReplayStage {
                         block_metadata_notifier.clone(),
                         &mut replay_timing,
                         log_messages_bytes_limit,
-                        replay_slots_concurrently,
+                        &replay_mode,
                         &prioritization_fee_cache,
                         &mut purge_repair_slot_counter,
                     );
@@ -2706,6 +2712,7 @@
     fn replay_active_banks_concurrently(
         blockstore: &Blockstore,
         bank_forks: &RwLock<BankForks>,
+        thread_pool: &ThreadPool,
         my_pubkey: &Pubkey,
         vote_account: &Pubkey,
         progress: &mut ProgressMap,
@@ -2723,7 +2730,7 @@
         let longest_replay_time_us = AtomicU64::new(0);

         // Allow for concurrent replaying of slots from different forks.
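The mode enum's dispatch is easy to see in isolation. The sketch below assumes only the rayon crate, with trivial placeholder work standing in for bank replay; note how the serial arm also absorbs the single-slot case so the pool's overhead is only paid when there is something to parallelize:

    use rayon::prelude::*;
    use rayon::ThreadPool;

    // Owning the pool in a value (instead of a lazy_static) lets the caller
    // decide whether, and how large, a pool is built.
    enum ForkReplayMode {
        Serial,
        Parallel(ThreadPool),
    }

    fn replay(slots: &[u64], mode: &ForkReplayMode) -> Vec<u64> {
        match mode {
            // Only pay the pool's dispatch overhead for more than one slot.
            ForkReplayMode::Parallel(pool) if slots.len() > 1 => {
                pool.install(|| slots.par_iter().map(|s| s * 2).collect())
            }
            ForkReplayMode::Serial | ForkReplayMode::Parallel(_) => {
                slots.iter().map(|s| s * 2).collect()
            }
        }
    }

    fn main() {
        let pool = rayon::ThreadPoolBuilder::new()
            .num_threads(4)
            .thread_name(|i| format!("solReplay{i:02}"))
            .build()
            .expect("new rayon threadpool");
        let mode = ForkReplayMode::Parallel(pool);
        assert_eq!(replay(&[1, 2, 3], &mode), vec![2, 4, 6]);
        assert_eq!(replay(&[7], &ForkReplayMode::Serial), vec![14]);
    }
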
- let replay_result_vec: Vec = PAR_THREAD_POOL.install(|| { + let replay_result_vec: Vec = thread_pool.install(|| { active_bank_slots .into_par_iter() .map(|bank_slot| { @@ -2737,7 +2744,7 @@ impl ReplayStage { trace!( "Replay active bank: slot {}, thread_idx {}", bank_slot, - PAR_THREAD_POOL.current_thread_index().unwrap_or_default() + thread_pool.current_thread_index().unwrap_or_default() ); let mut progress_lock = progress.write().unwrap(); if progress_lock @@ -3175,7 +3182,7 @@ impl ReplayStage { block_metadata_notifier: Option, replay_timing: &mut ReplayLoopTiming, log_messages_bytes_limit: Option, - replay_slots_concurrently: bool, + replay_mode: &ForkReplayMode, prioritization_fee_cache: &PrioritizationFeeCache, purge_repair_slot_counter: &mut PurgeRepairSlotCounter, ) -> bool /* completed a bank */ { @@ -3186,11 +3193,17 @@ impl ReplayStage { num_active_banks, active_bank_slots ); - if num_active_banks > 0 { - let replay_result_vec = if num_active_banks > 1 && replay_slots_concurrently { + if active_bank_slots.is_empty() { + return false; + } + + let replay_result_vec = match replay_mode { + // Skip the overhead of the threadpool if there is only one bank to play + ForkReplayMode::Parallel(thread_pool) if num_active_banks > 1 => { Self::replay_active_banks_concurrently( blockstore, bank_forks, + thread_pool, my_pubkey, vote_account, progress, @@ -3203,55 +3216,52 @@ impl ReplayStage { &active_bank_slots, prioritization_fee_cache, ) - } else { - active_bank_slots - .iter() - .map(|bank_slot| { - Self::replay_active_bank( - blockstore, - bank_forks, - my_pubkey, - vote_account, - progress, - transaction_status_sender, - entry_notification_sender, - verify_recyclers, - replay_vote_sender, - replay_timing, - log_messages_bytes_limit, - *bank_slot, - prioritization_fee_cache, - ) - }) - .collect() - }; + } + ForkReplayMode::Serial | ForkReplayMode::Parallel(_) => active_bank_slots + .iter() + .map(|bank_slot| { + Self::replay_active_bank( + blockstore, + bank_forks, + my_pubkey, + vote_account, + progress, + transaction_status_sender, + entry_notification_sender, + verify_recyclers, + replay_vote_sender, + replay_timing, + log_messages_bytes_limit, + *bank_slot, + prioritization_fee_cache, + ) + }) + .collect(), + }; - Self::process_replay_results( - blockstore, - bank_forks, - progress, - transaction_status_sender, - cache_block_meta_sender, - heaviest_subtree_fork_choice, - bank_notification_sender, - rewards_recorder_sender, - rpc_subscriptions, - duplicate_slots_tracker, - duplicate_confirmed_slots, - epoch_slots_frozen_slots, - unfrozen_gossip_verified_vote_hashes, - latest_validator_votes_for_frozen_banks, - cluster_slots_update_sender, - cost_update_sender, - duplicate_slots_to_repair, - ancestor_hashes_replay_update_sender, - block_metadata_notifier, - &replay_result_vec, - purge_repair_slot_counter, - ) - } else { - false - } + Self::process_replay_results( + blockstore, + bank_forks, + progress, + transaction_status_sender, + cache_block_meta_sender, + heaviest_subtree_fork_choice, + bank_notification_sender, + rewards_recorder_sender, + rpc_subscriptions, + duplicate_slots_tracker, + duplicate_confirmed_slots, + epoch_slots_frozen_slots, + unfrozen_gossip_verified_vote_hashes, + latest_validator_votes_for_frozen_banks, + cluster_slots_update_sender, + cost_update_sender, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + block_metadata_notifier, + &replay_result_vec, + purge_repair_slot_counter, + ) } #[allow(clippy::too_many_arguments)] From 
d88050cda335f87e872eddbdf8506bc063f039d3 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Fri, 8 Mar 2024 14:04:07 -0500 Subject: [PATCH 354/401] SVM: Add doc comments, restrict visibility of some xfaces to crate (#136) --- runtime/src/bank.rs | 2 +- svm/src/account_loader.rs | 385 ++++++++++++++++++---- svm/src/account_overrides.rs | 1 + svm/src/account_rent_state.rs | 33 +- svm/src/transaction_account_state_info.rs | 4 +- svm/src/transaction_processor.rs | 231 ++++++------- svm/tests/account_loader.rs | 214 ------------ svm/tests/rent_state.rs | 90 ----- 8 files changed, 463 insertions(+), 497 deletions(-) delete mode 100644 svm/tests/account_loader.rs delete mode 100644 svm/tests/rent_state.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 3e504d470de744..f0ba75defa0517 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7499,7 +7499,7 @@ impl Bank { effective_epoch: Epoch, ) -> Arc { self.transaction_processor - .load_program(self, pubkey, reload, effective_epoch) + .load_program_with_pubkey(self, pubkey, reload, effective_epoch) } } diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 1c02ded24665ff..bf9b5b9c40bfee 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -38,8 +38,11 @@ use { }; // for the load instructions -pub type TransactionRent = u64; -pub type TransactionProgramIndices = Vec>; +pub(crate) type TransactionRent = u64; +pub(crate) type TransactionProgramIndices = Vec>; +pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); +pub type TransactionLoadResult = (Result, Option); + #[derive(PartialEq, Eq, Debug, Clone)] pub struct LoadedTransaction { pub accounts: Vec, @@ -48,10 +51,66 @@ pub struct LoadedTransaction { pub rent_debits: RentDebits, } -pub type TransactionLoadResult = (Result, Option); -pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); +/// Check whether the payer_account is capable of paying the fee. The +/// side effect is to subtract the fee amount from the payer_account +/// balance of lamports. If the payer_acount is not able to pay the +/// fee, the error_counters is incremented, and a specific error is +/// returned. +pub fn validate_fee_payer( + payer_address: &Pubkey, + payer_account: &mut AccountSharedData, + payer_index: IndexOfAccount, + error_counters: &mut TransactionErrorMetrics, + rent_collector: &RentCollector, + fee: u64, +) -> Result<()> { + if payer_account.lamports() == 0 { + error_counters.account_not_found += 1; + return Err(TransactionError::AccountNotFound); + } + let system_account_kind = get_system_account_kind(payer_account).ok_or_else(|| { + error_counters.invalid_account_for_fee += 1; + TransactionError::InvalidAccountForFee + })?; + let min_balance = match system_account_kind { + SystemAccountKind::System => 0, + SystemAccountKind::Nonce => { + // Should we ever allow a fees charge to zero a nonce account's + // balance. 
The state MUST be set to uninitialized in that case + rent_collector.rent.minimum_balance(NonceState::size()) + } + }; + + payer_account + .lamports() + .checked_sub(min_balance) + .and_then(|v| v.checked_sub(fee)) + .ok_or_else(|| { + error_counters.insufficient_funds += 1; + TransactionError::InsufficientFundsForFee + })?; -pub fn load_accounts( + let payer_pre_rent_state = RentState::from_account(payer_account, &rent_collector.rent); + payer_account + .checked_sub_lamports(fee) + .map_err(|_| TransactionError::InsufficientFundsForFee)?; + + let payer_post_rent_state = RentState::from_account(payer_account, &rent_collector.rent); + RentState::check_rent_state_with_account( + &payer_pre_rent_state, + &payer_post_rent_state, + payer_address, + payer_account, + payer_index, + ) +} + +/// Collect information about accounts used in txs transactions and +/// return vector of tuples, one for each transaction in the +/// batch. Each tuple contains struct of information about accounts as +/// its first element and an optional transaction nonce info as its +/// second element. +pub(crate) fn load_accounts( callbacks: &CB, txs: &[SanitizedTransaction], lock_results: &[TransactionCheckResult], @@ -399,55 +458,6 @@ fn accumulate_and_check_loaded_account_data_size( } } -pub fn validate_fee_payer( - payer_address: &Pubkey, - payer_account: &mut AccountSharedData, - payer_index: IndexOfAccount, - error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - fee: u64, -) -> Result<()> { - if payer_account.lamports() == 0 { - error_counters.account_not_found += 1; - return Err(TransactionError::AccountNotFound); - } - let system_account_kind = get_system_account_kind(payer_account).ok_or_else(|| { - error_counters.invalid_account_for_fee += 1; - TransactionError::InvalidAccountForFee - })?; - let min_balance = match system_account_kind { - SystemAccountKind::System => 0, - SystemAccountKind::Nonce => { - // Should we ever allow a fees charge to zero a nonce account's - // balance. 
The state MUST be set to uninitialized in that case - rent_collector.rent.minimum_balance(NonceState::size()) - } - }; - - payer_account - .lamports() - .checked_sub(min_balance) - .and_then(|v| v.checked_sub(fee)) - .ok_or_else(|| { - error_counters.insufficient_funds += 1; - TransactionError::InsufficientFundsForFee - })?; - - let payer_pre_rent_state = RentState::from_account(payer_account, &rent_collector.rent); - payer_account - .checked_sub_lamports(fee) - .map_err(|_| TransactionError::InsufficientFundsForFee)?; - - let payer_post_rent_state = RentState::from_account(payer_account, &rent_collector.rent); - RentState::check_rent_state_with_account( - &payer_pre_rent_state, - &payer_post_rent_state, - payer_address, - payer_account, - payer_index, - ) -} - fn construct_instructions_account(message: &SanitizedMessage) -> AccountSharedData { AccountSharedData::from(Account { data: construct_instructions_data(&message.decompile_instructions()), @@ -460,11 +470,15 @@ fn construct_instructions_account(message: &SanitizedMessage) -> AccountSharedDa mod tests { use { super::*, - crate::transaction_processor::TransactionProcessingCallback, + crate::{ + transaction_account_state_info::TransactionAccountStateInfo, + transaction_processor::TransactionProcessingCallback, + }, nonce::state::Versions as NonceVersions, solana_program_runtime::{ + compute_budget::ComputeBudget, compute_budget_processor, - loaded_programs::LoadedProgram, + loaded_programs::{LoadedProgram, LoadedProgramsForTxBatch}, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ @@ -473,22 +487,27 @@ mod tests { compute_budget::ComputeBudgetInstruction, epoch_schedule::EpochSchedule, feature_set::FeatureSet, + fee::FeeStructure, hash::Hash, instruction::CompiledInstruction, message::{ v0::{LoadedAddresses, LoadedMessage}, LegacyMessage, Message, MessageHeader, SanitizedMessage, }, + native_loader, + native_token::sol_to_lamports, nonce, + nonce_info::{NonceFull, NoncePartial}, pubkey::Pubkey, rent::Rent, - rent_collector::RentCollector, + rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, + rent_debits::RentDebits, signature::{Keypair, Signature, Signer}, - system_program, sysvar, - transaction::{Result, Transaction, TransactionError}, - transaction_context::TransactionAccount, + system_program, system_transaction, sysvar, + transaction::{Result, SanitizedTransaction, Transaction, TransactionError}, + transaction_context::{TransactionAccount, TransactionContext}, }, - std::{borrow::Cow, convert::TryFrom, sync::Arc}, + std::{borrow::Cow, collections::HashMap, convert::TryFrom, sync::Arc}, }; #[derive(Default)] @@ -2017,4 +2036,248 @@ mod tests { } ); } + + #[test] + fn test_rent_state_list_len() { + let mint_keypair = Keypair::new(); + let mut bank = TestCallbacks::default(); + let recipient = Pubkey::new_unique(); + let last_block_hash = Hash::new_unique(); + + let mut system_data = AccountSharedData::default(); + system_data.set_executable(true); + system_data.set_owner(native_loader::id()); + bank.accounts_map + .insert(Pubkey::new_from_array([0u8; 32]), system_data); + + let mut mint_data = AccountSharedData::default(); + mint_data.set_lamports(2); + bank.accounts_map.insert(mint_keypair.pubkey(), mint_data); + + bank.accounts_map + .insert(recipient, AccountSharedData::default()); + + let tx = system_transaction::transfer( + &mint_keypair, + &recipient, + sol_to_lamports(1.), + last_block_hash, + ); + let num_accounts = tx.message().account_keys.len(); + let sanitized_tx = 
SanitizedTransaction::from_transaction_for_tests(tx); + let mut error_counters = TransactionErrorMetrics::default(); + let loaded_txs = load_accounts( + &bank, + &[sanitized_tx.clone()], + &[(Ok(()), None, Some(0))], + &mut error_counters, + &FeeStructure::default(), + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + let compute_budget = ComputeBudget::new(u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + )); + let transaction_context = TransactionContext::new( + loaded_txs[0].0.as_ref().unwrap().accounts.clone(), + Rent::default(), + compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_trace_length, + ); + + assert_eq!( + TransactionAccountStateInfo::new( + &Rent::default(), + &transaction_context, + sanitized_tx.message() + ) + .len(), + num_accounts, + ); + } + + #[test] + fn test_load_accounts_success() { + let key1 = Keypair::new(); + let key2 = Keypair::new(); + let key3 = Keypair::new(); + let key4 = Keypair::new(); + + let message = Message { + account_keys: vec![key2.pubkey(), key1.pubkey(), key4.pubkey()], + header: MessageHeader::default(), + instructions: vec![ + CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }, + CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![], + }, + ], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let mut mock_bank = TestCallbacks::default(); + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(key3.pubkey()); + mock_bank.accounts_map.insert(key1.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank.accounts_map.insert(key2.pubkey(), account_data); + + let mut account_data = AccountSharedData::default(); + account_data.set_executable(true); + account_data.set_owner(native_loader::id()); + mock_bank.accounts_map.insert(key3.pubkey(), account_data); + + let mut error_counter = TransactionErrorMetrics::default(); + let loaded_programs = LoadedProgramsForTxBatch::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let lock_results = + (Ok(()), Some(NoncePartial::default()), Some(20u64)) as TransactionCheckResult; + + let results = load_accounts( + &mock_bank, + &[sanitized_transaction], + &[lock_results], + &mut error_counter, + &FeeStructure::default(), + None, + &HashMap::new(), + &loaded_programs, + ); + + let mut account_data = AccountSharedData::default(); + account_data.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + + assert_eq!(results.len(), 1); + let (loaded_result, nonce) = results[0].clone(); + assert_eq!( + loaded_result.unwrap(), + LoadedTransaction { + accounts: vec![ + ( + key2.pubkey(), + mock_bank.accounts_map[&key2.pubkey()].clone() + ), + ( + key1.pubkey(), + mock_bank.accounts_map[&key1.pubkey()].clone() + ), + (key4.pubkey(), account_data), + ( + key3.pubkey(), + mock_bank.accounts_map[&key3.pubkey()].clone() + ), + ], + program_indices: vec![vec![3, 1], vec![3, 1]], + rent: 0, + rent_debits: RentDebits::default() + } + ); + + assert_eq!( + nonce.unwrap(), + NonceFull::new( + Pubkey::from([0; 32]), + AccountSharedData::default(), + Some(mock_bank.accounts_map[&key2.pubkey()].clone()) + ) + ); + } + + #[test] + fn test_load_accounts_error() { + let mock_bank = 
TestCallbacks::default(); + let message = Message { + account_keys: vec![Pubkey::new_from_array([0; 32])], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + + let lock_results = (Ok(()), Some(NoncePartial::default()), None) as TransactionCheckResult; + let fee_structure = FeeStructure::default(); + + let result = load_accounts( + &mock_bank, + &[sanitized_transaction.clone()], + &[lock_results], + &mut TransactionErrorMetrics::default(), + &fee_structure, + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + assert_eq!( + result, + vec![(Err(TransactionError::BlockhashNotFound), None)] + ); + + let lock_results = + (Ok(()), Some(NoncePartial::default()), Some(20u64)) as TransactionCheckResult; + + let result = load_accounts( + &mock_bank, + &[sanitized_transaction.clone()], + &[lock_results.clone()], + &mut TransactionErrorMetrics::default(), + &fee_structure, + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + assert_eq!(result, vec![(Err(TransactionError::AccountNotFound), None)]); + + let lock_results = ( + Err(TransactionError::InvalidWritableAccount), + Some(NoncePartial::default()), + Some(20u64), + ) as TransactionCheckResult; + + let result = load_accounts( + &mock_bank, + &[sanitized_transaction.clone()], + &[lock_results], + &mut TransactionErrorMetrics::default(), + &fee_structure, + None, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + ); + + assert_eq!( + result, + vec![(Err(TransactionError::InvalidWritableAccount), None)] + ); + } } diff --git a/svm/src/account_overrides.rs b/svm/src/account_overrides.rs index c88d77d54f30a9..8a205a798f66b1 100644 --- a/svm/src/account_overrides.rs +++ b/svm/src/account_overrides.rs @@ -10,6 +10,7 @@ pub struct AccountOverrides { } impl AccountOverrides { + /// Insert or remove an account with a given pubkey to/from the list of overrides. pub fn set_account(&mut self, pubkey: &Pubkey, account: Option) { match account { Some(account) => self.accounts.insert(*pubkey, account), diff --git a/svm/src/account_rent_state.rs b/svm/src/account_rent_state.rs index 6fae6e9033bd39..7e3501d0d6c649 100644 --- a/svm/src/account_rent_state.rs +++ b/svm/src/account_rent_state.rs @@ -23,6 +23,7 @@ pub enum RentState { } impl RentState { + /// Return a new RentState instance for a given account and rent. pub fn from_account(account: &AccountSharedData, rent: &Rent) -> Self { if account.lamports() == 0 { Self::Uninitialized @@ -36,6 +37,8 @@ impl RentState { } } + /// Check whether a transition from the pre_rent_state to this + /// state is valid. pub fn transition_allowed_from(&self, pre_rent_state: &RentState) -> bool { match self { Self::Uninitialized | Self::RentExempt => true, @@ -57,21 +60,6 @@ impl RentState { } } - fn submit_rent_state_metrics(pre_rent_state: &Self, post_rent_state: &Self) { - match (pre_rent_state, post_rent_state) { - (&RentState::Uninitialized, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_err-new_account", 1); - } - (&RentState::RentPaying { .. }, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_ok-legacy", 1); - } - (_, &RentState::RentPaying { .. 
}) => { - inc_new_counter_info!("rent_paying_err-other", 1); - } - _ => {} - } - } - pub(crate) fn check_rent_state( pre_rent_state: Option<&Self>, post_rent_state: Option<&Self>, @@ -118,6 +106,21 @@ impl RentState { Ok(()) } } + + fn submit_rent_state_metrics(pre_rent_state: &Self, post_rent_state: &Self) { + match (pre_rent_state, post_rent_state) { + (&RentState::Uninitialized, &RentState::RentPaying { .. }) => { + inc_new_counter_info!("rent_paying_err-new_account", 1); + } + (&RentState::RentPaying { .. }, &RentState::RentPaying { .. }) => { + inc_new_counter_info!("rent_paying_ok-legacy", 1); + } + (_, &RentState::RentPaying { .. }) => { + inc_new_counter_info!("rent_paying_err-other", 1); + } + _ => {} + } + } } #[cfg(test)] diff --git a/svm/src/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs index ff5b93f6a6c459..0631050fe0e765 100644 --- a/svm/src/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -11,12 +11,12 @@ use { }; #[derive(PartialEq, Debug)] -pub struct TransactionAccountStateInfo { +pub(crate) struct TransactionAccountStateInfo { rent_state: Option, // None: readonly account } impl TransactionAccountStateInfo { - pub fn new( + pub(crate) fn new( rent: &Rent, transaction_context: &TransactionContext, message: &SanitizedMessage, diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index d90afb0a428ea3..fec908619f14f8 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -195,6 +195,7 @@ impl TransactionBatchProcessor { } } + /// Main entrypoint to the SVM. #[allow(clippy::too_many_arguments)] pub fn load_and_execute_sanitized_transactions<'a, CB: TransactionProcessingCallback>( &self, @@ -377,6 +378,112 @@ impl TransactionBatchProcessor { result } + /// Load program with a specific pubkey from loaded programs + /// cache, and update the program's access slot as a side-effect. + pub fn load_program_with_pubkey( + &self, + callbacks: &CB, + pubkey: &Pubkey, + reload: bool, + effective_epoch: Epoch, + ) -> Arc { + let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); + let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); + let mut load_program_metrics = LoadProgramMetrics { + program_id: pubkey.to_string(), + ..LoadProgramMetrics::default() + }; + + let mut loaded_program = + match self.load_program_accounts(callbacks, pubkey, environments) { + ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( + self.slot, + LoadedProgramType::Closed, + )), + + ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), + + ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { + Self::load_program_from_bytes( + &mut load_program_metrics, + program_account.data(), + program_account.owner(), + program_account.data().len(), + 0, + environments.program_runtime_v1.clone(), + reload, + ) + .map_err(|_| (0, environments.program_runtime_v1.clone())) + } + + ProgramAccountLoadResult::ProgramOfLoaderV3( + program_account, + programdata_account, + slot, + ) => programdata_account + .data() + .get(UpgradeableLoaderState::size_of_programdata_metadata()..) 
+ .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|programdata| { + Self::load_program_from_bytes( + &mut load_program_metrics, + programdata, + program_account.owner(), + program_account + .data() + .len() + .saturating_add(programdata_account.data().len()), + slot, + environments.program_runtime_v1.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v1.clone())), + + ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { + program_account + .data() + .get(LoaderV4State::program_data_offset()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|elf_bytes| { + Self::load_program_from_bytes( + &mut load_program_metrics, + elf_bytes, + &loader_v4::id(), + program_account.data().len(), + slot, + environments.program_runtime_v2.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v2.clone())) + } + } + .unwrap_or_else(|(slot, env)| { + LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) + }); + + let mut timings = ExecuteDetailsTimings::default(); + load_program_metrics.submit_datapoint(&mut timings); + if !Arc::ptr_eq( + &environments.program_runtime_v1, + &loaded_programs_cache.environments.program_runtime_v1, + ) || !Arc::ptr_eq( + &environments.program_runtime_v2, + &loaded_programs_cache.environments.program_runtime_v2, + ) { + // There can be two entries per program when the environment changes. + // One for the old environment before the epoch boundary and one for the new environment after the epoch boundary. + // These two entries have the same deployment slot, so they must differ in their effective slot instead. + // This is done by setting the effective slot of the entry for the new environment to the epoch boundary. + loaded_program.effective_slot = loaded_program + .effective_slot + .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); + } + loaded_program.update_access_slot(self.slot); + Arc::new(loaded_program) + } + fn replenish_program_cache( &self, callback: &CB, @@ -454,7 +561,7 @@ impl TransactionBatchProcessor { if let Some((key, count)) = program_to_load { // Load, verify and compile one program. 
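// Below, `reload == false` threads through to `load_program_from_bytes`
// and requests a full load with bytecode verification rather than an
// unverified reload; the `count` carried in `program_to_load` then seeds
// the fresh entry's `tx_usage_counter`.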
- let program = self.load_program(callback, &key, false, self.epoch); + let program = self.load_program_with_pubkey(callback, &key, false, self.epoch); program.tx_usage_counter.store(count, Ordering::Relaxed); program_to_store = Some((key, program)); } else if missing_programs.is_empty() { @@ -683,110 +790,6 @@ impl TransactionBatchProcessor { } } - pub fn load_program( - &self, - callbacks: &CB, - pubkey: &Pubkey, - reload: bool, - effective_epoch: Epoch, - ) -> Arc { - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); - let mut load_program_metrics = LoadProgramMetrics { - program_id: pubkey.to_string(), - ..LoadProgramMetrics::default() - }; - - let mut loaded_program = - match self.load_program_accounts(callbacks, pubkey, environments) { - ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::Closed, - )), - - ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), - - ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { - Self::load_program_from_bytes( - &mut load_program_metrics, - program_account.data(), - program_account.owner(), - program_account.data().len(), - 0, - environments.program_runtime_v1.clone(), - reload, - ) - .map_err(|_| (0, environments.program_runtime_v1.clone())) - } - - ProgramAccountLoadResult::ProgramOfLoaderV3( - program_account, - programdata_account, - slot, - ) => programdata_account - .data() - .get(UpgradeableLoaderState::size_of_programdata_metadata()..) - .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|programdata| { - Self::load_program_from_bytes( - &mut load_program_metrics, - programdata, - program_account.owner(), - program_account - .data() - .len() - .saturating_add(programdata_account.data().len()), - slot, - environments.program_runtime_v1.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v1.clone())), - - ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { - program_account - .data() - .get(LoaderV4State::program_data_offset()..) - .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|elf_bytes| { - Self::load_program_from_bytes( - &mut load_program_metrics, - elf_bytes, - &loader_v4::id(), - program_account.data().len(), - slot, - environments.program_runtime_v2.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v2.clone())) - } - } - .unwrap_or_else(|(slot, env)| { - LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) - }); - - let mut timings = ExecuteDetailsTimings::default(); - load_program_metrics.submit_datapoint(&mut timings); - if !Arc::ptr_eq( - &environments.program_runtime_v1, - &loaded_programs_cache.environments.program_runtime_v1, - ) || !Arc::ptr_eq( - &environments.program_runtime_v2, - &loaded_programs_cache.environments.program_runtime_v2, - ) { - // There can be two entries per program when the environment changes. - // One for the old environment before the epoch boundary and one for the new environment after the epoch boundary. - // These two entries have the same deployment slot, so they must differ in their effective slot instead. - // This is done by setting the effective slot of the entry for the new environment to the epoch boundary. 
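// Concretely, with `boundary = epoch_schedule.get_first_slot_in_epoch(effective_epoch)`,
// the clamp below leaves the entry with
//     effective_slot = max(effective_slot, boundary),
// so a program recompiled for the new environment ahead of the epoch
// boundary only takes effect once the new epoch actually begins.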
- loaded_program.effective_slot = loaded_program - .effective_slot - .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); - } - loaded_program.update_access_slot(self.slot); - Arc::new(loaded_program) - } - fn load_program_from_bytes( load_program_metrics: &mut LoadProgramMetrics, programdata: &[u8], @@ -1242,7 +1245,7 @@ mod tests { let key = Pubkey::new_unique(); let batch_processor = TransactionBatchProcessor::::default(); - let result = batch_processor.load_program(&mock_bank, &key, false, 50); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 50); let loaded_program = LoadedProgram::new_tombstone(0, LoadedProgramType::Closed); assert_eq!(result, Arc::new(loaded_program)); @@ -1259,7 +1262,7 @@ mod tests { .account_shared_data .insert(key, account_data.clone()); - let result = batch_processor.load_program(&mock_bank, &key, false, 20); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 20); let loaded_program = LoadedProgram::new_tombstone( 0, @@ -1288,7 +1291,7 @@ mod tests { .insert(key, account_data.clone()); // This should return an error - let result = batch_processor.load_program(&mock_bank, &key, false, 20); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 20); let loaded_program = LoadedProgram::new_tombstone( 0, LoadedProgramType::FailedVerification( @@ -1316,7 +1319,7 @@ mod tests { .account_shared_data .insert(key, account_data.clone()); - let result = batch_processor.load_program(&mock_bank, &key, false, 20); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 20); let environments = ProgramRuntimeEnvironments::default(); let expected = TransactionBatchProcessor::::load_program_from_bytes( @@ -1361,7 +1364,7 @@ mod tests { .insert(key2, account_data2.clone()); // This should return an error - let result = batch_processor.load_program(&mock_bank, &key1, false, 0); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key1, false, 0); let loaded_program = LoadedProgram::new_tombstone( 0, LoadedProgramType::FailedVerification( @@ -1399,7 +1402,7 @@ mod tests { .account_shared_data .insert(key2, account_data.clone()); - let result = batch_processor.load_program(&mock_bank, &key1, false, 20); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key1, false, 20); let data = account_data.data(); account_data @@ -1441,7 +1444,7 @@ mod tests { .account_shared_data .insert(key, account_data.clone()); - let result = batch_processor.load_program(&mock_bank, &key, false, 0); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 0); let loaded_program = LoadedProgram::new_tombstone( 0, LoadedProgramType::FailedVerification( @@ -1475,7 +1478,7 @@ mod tests { .account_shared_data .insert(key, account_data.clone()); - let result = batch_processor.load_program(&mock_bank, &key, false, 20); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 20); let data = account_data.data()[LoaderV4State::program_data_offset()..].to_vec(); account_data.set_data(data); @@ -1513,7 +1516,7 @@ mod tests { .account_shared_data .insert(key, account_data.clone()); - let result = batch_processor.load_program(&mock_bank, &key, false, 20); + let result = batch_processor.load_program_with_pubkey(&mock_bank, &key, false, 20); let slot = batch_processor.epoch_schedule.get_first_slot_in_epoch(20); assert_eq!(result.effective_slot, slot); diff --git a/svm/tests/account_loader.rs 
b/svm/tests/account_loader.rs deleted file mode 100644 index dd4cd046046399..00000000000000 --- a/svm/tests/account_loader.rs +++ /dev/null @@ -1,214 +0,0 @@ -use { - crate::mock_bank::MockBankCallback, - solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, - solana_sdk::{ - account::{AccountSharedData, WritableAccount}, - fee::FeeStructure, - hash::Hash, - instruction::CompiledInstruction, - message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, - native_loader, - nonce_info::{NonceFull, NoncePartial}, - pubkey::Pubkey, - rent_collector::RENT_EXEMPT_RENT_EPOCH, - rent_debits::RentDebits, - signature::{Keypair, Signature, Signer}, - transaction::{SanitizedTransaction, TransactionError}, - }, - solana_svm::{ - account_loader::{load_accounts, LoadedTransaction, TransactionCheckResult}, - transaction_error_metrics::TransactionErrorMetrics, - }, - std::collections::HashMap, -}; - -mod mock_bank; - -#[test] -fn test_load_accounts_success() { - let key1 = Keypair::new(); - let key2 = Keypair::new(); - let key3 = Keypair::new(); - let key4 = Keypair::new(); - - let message = Message { - account_keys: vec![key2.pubkey(), key1.pubkey(), key4.pubkey()], - header: MessageHeader::default(), - instructions: vec![ - CompiledInstruction { - program_id_index: 1, - accounts: vec![0], - data: vec![], - }, - CompiledInstruction { - program_id_index: 1, - accounts: vec![2], - data: vec![], - }, - ], - recent_blockhash: Hash::default(), - }; - - let legacy = LegacyMessage::new(message); - let sanitized_message = SanitizedMessage::Legacy(legacy); - let mut mock_bank = MockBankCallback::default(); - let mut account_data = AccountSharedData::default(); - account_data.set_executable(true); - account_data.set_owner(key3.pubkey()); - mock_bank - .account_shared_data - .insert(key1.pubkey(), account_data); - - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(200); - mock_bank - .account_shared_data - .insert(key2.pubkey(), account_data); - - let mut account_data = AccountSharedData::default(); - account_data.set_executable(true); - account_data.set_owner(native_loader::id()); - mock_bank - .account_shared_data - .insert(key3.pubkey(), account_data); - - let mut error_counter = TransactionErrorMetrics::default(); - let loaded_programs = LoadedProgramsForTxBatch::default(); - - let sanitized_transaction = SanitizedTransaction::new_for_tests( - sanitized_message, - vec![Signature::new_unique()], - false, - ); - let lock_results = - (Ok(()), Some(NoncePartial::default()), Some(20u64)) as TransactionCheckResult; - - let results = load_accounts( - &mock_bank, - &[sanitized_transaction], - &[lock_results], - &mut error_counter, - &FeeStructure::default(), - None, - &HashMap::new(), - &loaded_programs, - ); - - let mut account_data = AccountSharedData::default(); - account_data.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - - assert_eq!(results.len(), 1); - let (loaded_result, nonce) = results[0].clone(); - assert_eq!( - loaded_result.unwrap(), - LoadedTransaction { - accounts: vec![ - ( - key2.pubkey(), - mock_bank.account_shared_data[&key2.pubkey()].clone() - ), - ( - key1.pubkey(), - mock_bank.account_shared_data[&key1.pubkey()].clone() - ), - (key4.pubkey(), account_data), - ( - key3.pubkey(), - mock_bank.account_shared_data[&key3.pubkey()].clone() - ), - ], - program_indices: vec![vec![3, 1], vec![3, 1]], - rent: 0, - rent_debits: RentDebits::default() - } - ); - - assert_eq!( - nonce.unwrap(), - NonceFull::new( - Pubkey::from([0; 32]), - 
AccountSharedData::default(), - Some(mock_bank.account_shared_data[&key2.pubkey()].clone()) - ) - ); -} - -#[test] -fn test_load_accounts_error() { - let mock_bank = MockBankCallback::default(); - let message = Message { - account_keys: vec![Pubkey::new_from_array([0; 32])], - header: MessageHeader::default(), - instructions: vec![CompiledInstruction { - program_id_index: 0, - accounts: vec![], - data: vec![], - }], - recent_blockhash: Hash::default(), - }; - - let legacy = LegacyMessage::new(message); - let sanitized_message = SanitizedMessage::Legacy(legacy); - let sanitized_transaction = SanitizedTransaction::new_for_tests( - sanitized_message, - vec![Signature::new_unique()], - false, - ); - - let lock_results = (Ok(()), Some(NoncePartial::default()), None) as TransactionCheckResult; - let fee_structure = FeeStructure::default(); - - let result = load_accounts( - &mock_bank, - &[sanitized_transaction.clone()], - &[lock_results], - &mut TransactionErrorMetrics::default(), - &fee_structure, - None, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ); - - assert_eq!( - result, - vec![(Err(TransactionError::BlockhashNotFound), None)] - ); - - let lock_results = - (Ok(()), Some(NoncePartial::default()), Some(20u64)) as TransactionCheckResult; - - let result = load_accounts( - &mock_bank, - &[sanitized_transaction.clone()], - &[lock_results.clone()], - &mut TransactionErrorMetrics::default(), - &fee_structure, - None, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ); - - assert_eq!(result, vec![(Err(TransactionError::AccountNotFound), None)]); - - let lock_results = ( - Err(TransactionError::InvalidWritableAccount), - Some(NoncePartial::default()), - Some(20u64), - ) as TransactionCheckResult; - - let result = load_accounts( - &mock_bank, - &[sanitized_transaction.clone()], - &[lock_results], - &mut TransactionErrorMetrics::default(), - &fee_structure, - None, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ); - - assert_eq!( - result, - vec![(Err(TransactionError::InvalidWritableAccount), None)] - ); -} diff --git a/svm/tests/rent_state.rs b/svm/tests/rent_state.rs deleted file mode 100644 index f3ea728f6b874f..00000000000000 --- a/svm/tests/rent_state.rs +++ /dev/null @@ -1,90 +0,0 @@ -#![cfg(test)] - -use { - solana_program_runtime::{ - compute_budget::ComputeBudget, compute_budget_processor, - loaded_programs::LoadedProgramsForTxBatch, - }, - solana_sdk::{ - account::{AccountSharedData, WritableAccount}, - fee::FeeStructure, - hash::Hash, - native_loader, - native_token::sol_to_lamports, - pubkey::Pubkey, - rent::Rent, - signature::{Keypair, Signer}, - system_transaction, - transaction::SanitizedTransaction, - transaction_context::TransactionContext, - }, - solana_svm::{ - account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, - transaction_error_metrics::TransactionErrorMetrics, - }, - std::collections::HashMap, -}; - -mod mock_bank; - -#[test] -fn test_rent_state_list_len() { - let mint_keypair = Keypair::new(); - let mut bank = mock_bank::MockBankCallback::default(); - let recipient = Pubkey::new_unique(); - let last_block_hash = Hash::new_unique(); - - let mut system_data = AccountSharedData::default(); - system_data.set_executable(true); - system_data.set_owner(native_loader::id()); - bank.account_shared_data - .insert(Pubkey::new_from_array([0u8; 32]), system_data); - - let mut mint_data = AccountSharedData::default(); - mint_data.set_lamports(2); - bank.account_shared_data - .insert(mint_keypair.pubkey(), 
mint_data); - - bank.account_shared_data - .insert(recipient, AccountSharedData::default()); - - let tx = system_transaction::transfer( - &mint_keypair, - &recipient, - sol_to_lamports(1.), - last_block_hash, - ); - let num_accounts = tx.message().account_keys.len(); - let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); - let mut error_counters = TransactionErrorMetrics::default(); - let loaded_txs = load_accounts( - &bank, - &[sanitized_tx.clone()], - &[(Ok(()), None, Some(0))], - &mut error_counters, - &FeeStructure::default(), - None, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ); - - let compute_budget = ComputeBudget::new(u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - )); - let transaction_context = TransactionContext::new( - loaded_txs[0].0.as_ref().unwrap().accounts.clone(), - Rent::default(), - compute_budget.max_invoke_stack_height, - compute_budget.max_instruction_trace_length, - ); - - assert_eq!( - TransactionAccountStateInfo::new( - &Rent::default(), - &transaction_context, - sanitized_tx.message() - ) - .len(), - num_accounts, - ); -} From 3863bb1bdf0f7c9a0b35c2c19dc50943ca39657e Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 11 Mar 2024 12:29:24 +0800 Subject: [PATCH 355/401] ci: fix Windows gh release pipeline (#165) --- .github/workflows/release-artifacts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 7aec77f0dac45f..c840862a5e28f3 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -99,7 +99,7 @@ jobs: uses: actions/download-artifact@v3 with: name: windows-artifact - path: .windows-release/ + path: ./windows-release/ - name: Release uses: softprops/action-gh-release@v1 From 0e12172ddd44f0b44130581f82e595084a553fd0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Mar 2024 11:34:08 -0400 Subject: [PATCH 356/401] Moves accounts benches into accounts-db crate (#164) --- accounts-db/benches/accounts.rs | 343 ++++++++++++++++++++++++++++++++ runtime/benches/accounts.rs | 340 +------------------------------ 2 files changed, 349 insertions(+), 334 deletions(-) create mode 100644 accounts-db/benches/accounts.rs diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs new file mode 100644 index 00000000000000..9b3b70600a60a2 --- /dev/null +++ b/accounts-db/benches/accounts.rs @@ -0,0 +1,343 @@ +#![feature(test)] +#![allow(clippy::arithmetic_side_effects)] + +extern crate test; + +use { + dashmap::DashMap, + rand::Rng, + rayon::iter::{IntoParallelRefIterator, ParallelIterator}, + solana_accounts_db::{ + accounts::{AccountAddressFilter, Accounts}, + accounts_db::{ + test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb, + VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, + }, + accounts_index::{AccountSecondaryIndexes, ScanConfig}, + ancestors::Ancestors, + }, + solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount}, + genesis_config::ClusterType, + hash::Hash, + pubkey::Pubkey, + rent_collector::RentCollector, + sysvar::epoch_schedule::EpochSchedule, + }, + std::{ + collections::{HashMap, HashSet}, + path::PathBuf, + sync::{Arc, RwLock}, + thread::Builder, + }, + test::Bencher, +}; + +fn new_accounts_db(account_paths: Vec) -> AccountsDb { + AccountsDb::new_with_config( + account_paths, + &ClusterType::Development, + AccountSecondaryIndexes::default(), + 
AccountShrinkThreshold::default(), + Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + None, + Arc::default(), + ) +} + +#[bench] +fn bench_accounts_hash_bank_hash(bencher: &mut Bencher) { + let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]); + let accounts = Accounts::new(Arc::new(accounts_db)); + let mut pubkeys: Vec = vec![]; + let num_accounts = 60_000; + let slot = 0; + create_test_accounts(&accounts, &mut pubkeys, num_accounts, slot); + let ancestors = Ancestors::from(vec![0]); + let (_, total_lamports) = accounts + .accounts_db + .update_accounts_hash_for_tests(0, &ancestors, false, false); + accounts.add_root(slot); + accounts.accounts_db.flush_accounts_cache(true, Some(slot)); + bencher.iter(|| { + assert!(accounts.verify_accounts_hash_and_lamports( + 0, + total_lamports, + None, + VerifyAccountsHashAndLamportsConfig { + ancestors: &ancestors, + test_hash_calculation: false, + epoch_schedule: &EpochSchedule::default(), + rent_collector: &RentCollector::default(), + ignore_mismatch: false, + store_detailed_debug_info: false, + use_bg_thread_pool: false, + } + )) + }); +} + +#[bench] +fn bench_update_accounts_hash(bencher: &mut Bencher) { + solana_logger::setup(); + let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]); + let accounts = Accounts::new(Arc::new(accounts_db)); + let mut pubkeys: Vec = vec![]; + create_test_accounts(&accounts, &mut pubkeys, 50_000, 0); + let ancestors = Ancestors::from(vec![0]); + bencher.iter(|| { + accounts + .accounts_db + .update_accounts_hash_for_tests(0, &ancestors, false, false); + }); +} + +#[bench] +fn bench_accounts_delta_hash(bencher: &mut Bencher) { + solana_logger::setup(); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]); + let accounts = Accounts::new(Arc::new(accounts_db)); + let mut pubkeys: Vec = vec![]; + create_test_accounts(&accounts, &mut pubkeys, 100_000, 0); + bencher.iter(|| { + accounts.accounts_db.calculate_accounts_delta_hash(0); + }); +} + +#[bench] +fn bench_delete_dependencies(bencher: &mut Bencher) { + solana_logger::setup(); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delete_deps")]); + let accounts = Accounts::new(Arc::new(accounts_db)); + let mut old_pubkey = Pubkey::default(); + let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + for i in 0..1000 { + let pubkey = solana_sdk::pubkey::new_rand(); + let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner()); + accounts.store_slow_uncached(i, &pubkey, &account); + accounts.store_slow_uncached(i, &old_pubkey, &zero_account); + old_pubkey = pubkey; + accounts.add_root(i); + } + bencher.iter(|| { + accounts.accounts_db.clean_accounts_for_tests(); + }); +} + +fn store_accounts_with_possible_contention( + bench_name: &str, + bencher: &mut Bencher, + reader_f: F, +) where + F: Fn(&Accounts, &[Pubkey]) + Send + Copy, +{ + let num_readers = 5; + let accounts_db = new_accounts_db(vec![PathBuf::from( + std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), + ) + .join(bench_name)]); + let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); + let num_keys = 1000; + let slot = 0; + + let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) + .take(num_keys) + .collect(); + let accounts_data: Vec<_> = std::iter::repeat(Account { + lamports: 1, + ..Default::default() + }) + .take(num_keys) + .collect(); + let storable_accounts: Vec<_> = 
pubkeys.iter().zip(accounts_data.iter()).collect(); + accounts.store_accounts_cached((slot, storable_accounts.as_slice())); + accounts.add_root(slot); + accounts + .accounts_db + .flush_accounts_cache_slot_for_tests(slot); + + let pubkeys = Arc::new(pubkeys); + for i in 0..num_readers { + let accounts = accounts.clone(); + let pubkeys = pubkeys.clone(); + Builder::new() + .name(format!("reader{i:02}")) + .spawn(move || { + reader_f(&accounts, &pubkeys); + }) + .unwrap(); + } + + let num_new_keys = 1000; + bencher.iter(|| { + let new_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) + .take(num_new_keys) + .collect(); + let new_storable_accounts: Vec<_> = new_pubkeys.iter().zip(accounts_data.iter()).collect(); + // Write to a different slot than the one being read from. Because + // there's a new account pubkey being written to every time, will + // compete for the accounts index lock on every store + accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice())); + }); +} + +#[bench] +fn bench_concurrent_read_write(bencher: &mut Bencher) { + store_accounts_with_possible_contention( + "concurrent_read_write", + bencher, + |accounts, pubkeys| { + let mut rng = rand::thread_rng(); + loop { + let i = rng.gen_range(0..pubkeys.len()); + test::black_box( + accounts + .load_without_fixed_root(&Ancestors::default(), &pubkeys[i]) + .unwrap(), + ); + } + }, + ) +} + +#[bench] +fn bench_concurrent_scan_write(bencher: &mut Bencher) { + store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop { + test::black_box( + accounts + .load_by_program( + &Ancestors::default(), + 0, + AccountSharedData::default().owner(), + &ScanConfig::default(), + ) + .unwrap(), + ); + }) +} + +#[bench] +#[ignore] +fn bench_dashmap_single_reader_with_n_writers(bencher: &mut Bencher) { + let num_readers = 5; + let num_keys = 10000; + let map = Arc::new(DashMap::new()); + for i in 0..num_keys { + map.insert(i, i); + } + for _ in 0..num_readers { + let map = map.clone(); + Builder::new() + .name("readers".to_string()) + .spawn(move || loop { + test::black_box(map.entry(5).or_insert(2)); + }) + .unwrap(); + } + bencher.iter(|| { + for _ in 0..num_keys { + test::black_box(map.get(&5).unwrap().value()); + } + }) +} + +#[bench] +#[ignore] +fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) { + let num_readers = 5; + let num_keys = 10000; + let map = Arc::new(RwLock::new(HashMap::new())); + for i in 0..num_keys { + map.write().unwrap().insert(i, i); + } + for _ in 0..num_readers { + let map = map.clone(); + Builder::new() + .name("readers".to_string()) + .spawn(move || loop { + test::black_box(map.write().unwrap().get(&5)); + }) + .unwrap(); + } + bencher.iter(|| { + for _ in 0..num_keys { + test::black_box(map.read().unwrap().get(&5)); + } + }) +} + +fn setup_bench_dashmap_iter() -> (Arc, DashMap) { + let accounts_db = new_accounts_db(vec![PathBuf::from( + std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), + ) + .join("bench_dashmap_par_iter")]); + let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); + + let dashmap = DashMap::new(); + let num_keys = std::env::var("NUM_BENCH_KEYS") + .map(|num_keys| num_keys.parse::().unwrap()) + .unwrap_or_else(|_| 10000); + for _ in 0..num_keys { + dashmap.insert( + Pubkey::new_unique(), + ( + AccountSharedData::new(1, 0, AccountSharedData::default().owner()), + Hash::new_unique(), + ), + ); + } + + (accounts, dashmap) +} + +#[bench] +fn bench_dashmap_par_iter(bencher: &mut 
Bencher) { + let (accounts, dashmap) = setup_bench_dashmap_iter(); + + bencher.iter(|| { + test::black_box(accounts.accounts_db.thread_pool.install(|| { + dashmap + .par_iter() + .map(|cached_account| (*cached_account.key(), cached_account.value().1)) + .collect::>() + })); + }); +} + +#[bench] +fn bench_dashmap_iter(bencher: &mut Bencher) { + let (_accounts, dashmap) = setup_bench_dashmap_iter(); + + bencher.iter(|| { + test::black_box( + dashmap + .iter() + .map(|cached_account| (*cached_account.key(), cached_account.value().1)) + .collect::>(), + ); + }); +} + +#[bench] +fn bench_load_largest_accounts(b: &mut Bencher) { + let accounts_db = new_accounts_db(Vec::new()); + let accounts = Accounts::new(Arc::new(accounts_db)); + let mut rng = rand::thread_rng(); + for _ in 0..10_000 { + let lamports = rng.gen(); + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::new(lamports, 0, &Pubkey::default()); + accounts.store_slow_uncached(0, &pubkey, &account); + } + let ancestors = Ancestors::from(vec![0]); + let bank_id = 0; + b.iter(|| { + accounts.load_largest_accounts( + &ancestors, + bank_id, + 20, + &HashSet::new(), + AccountAddressFilter::Exclude, + ) + }); +} diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index b99425b1507cab..9d09b5c3650c97 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -4,50 +4,19 @@ extern crate test; use { - dashmap::DashMap, - rand::Rng, - rayon::iter::{IntoParallelRefIterator, ParallelIterator}, - solana_accounts_db::{ - accounts::{AccountAddressFilter, Accounts}, - accounts_db::{ - test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb, - VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, - }, - accounts_index::{AccountSecondaryIndexes, ScanConfig}, - ancestors::Ancestors, - epoch_accounts_hash::EpochAccountsHash, - }, + solana_accounts_db::epoch_accounts_hash::EpochAccountsHash, solana_runtime::bank::*, solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount}, - genesis_config::{create_genesis_config, ClusterType}, + account::{AccountSharedData, ReadableAccount}, + genesis_config::create_genesis_config, hash::Hash, lamports::LamportsError, pubkey::Pubkey, - rent_collector::RentCollector, - sysvar::epoch_schedule::EpochSchedule, - }, - std::{ - collections::{HashMap, HashSet}, - path::PathBuf, - sync::{Arc, RwLock}, - thread::Builder, }, + std::{path::PathBuf, sync::Arc}, test::Bencher, }; -fn new_accounts_db(account_paths: Vec) -> AccountsDb { - AccountsDb::new_with_config( - account_paths, - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), - None, - Arc::default(), - ) -} - fn deposit_many(bank: &Bank, pubkeys: &mut Vec, num: usize) -> Result<(), LamportsError> { for t in 0..num { let pubkey = solana_sdk::pubkey::new_rand(); @@ -62,7 +31,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec, num: usize) -> Result<() } #[bench] -fn test_accounts_create(bencher: &mut Bencher) { +fn bench_accounts_create(bencher: &mut Bencher) { let (genesis_config, _) = create_genesis_config(10_000); let bank0 = Bank::new_with_paths_for_benches(&genesis_config, vec![PathBuf::from("bench_a0")]); bencher.iter(|| { @@ -72,7 +41,7 @@ fn test_accounts_create(bencher: &mut Bencher) { } #[bench] -fn test_accounts_squash(bencher: &mut Bencher) { +fn bench_accounts_squash(bencher: &mut Bencher) { let (mut genesis_config, _) = create_genesis_config(100_000); 
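// `create_genesis_config(100_000)` funds the genesis mint with 100_000
// lamports; the squash benchmark below walks a chain of child banks
// derived from that genesis.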
genesis_config.rent.burn_percent = 100; // Avoid triggering an assert in Bank::distribute_rent_to_validators() let mut prev_bank = Arc::new(Bank::new_with_paths_for_benches( @@ -108,300 +77,3 @@ fn test_accounts_squash(bencher: &mut Bencher) { prev_bank = next_bank; }); } - -#[bench] -fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { - let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - let num_accounts = 60_000; - let slot = 0; - create_test_accounts(&accounts, &mut pubkeys, num_accounts, slot); - let ancestors = Ancestors::from(vec![0]); - let (_, total_lamports) = accounts - .accounts_db - .update_accounts_hash_for_tests(0, &ancestors, false, false); - accounts.add_root(slot); - accounts.accounts_db.flush_accounts_cache(true, Some(slot)); - bencher.iter(|| { - assert!(accounts.verify_accounts_hash_and_lamports( - 0, - total_lamports, - None, - VerifyAccountsHashAndLamportsConfig { - ancestors: &ancestors, - test_hash_calculation: false, - epoch_schedule: &EpochSchedule::default(), - rent_collector: &RentCollector::default(), - ignore_mismatch: false, - store_detailed_debug_info: false, - use_bg_thread_pool: false, - } - )) - }); -} - -#[bench] -fn test_update_accounts_hash(bencher: &mut Bencher) { - solana_logger::setup(); - let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - create_test_accounts(&accounts, &mut pubkeys, 50_000, 0); - let ancestors = Ancestors::from(vec![0]); - bencher.iter(|| { - accounts - .accounts_db - .update_accounts_hash_for_tests(0, &ancestors, false, false); - }); -} - -#[bench] -fn test_accounts_delta_hash(bencher: &mut Bencher) { - solana_logger::setup(); - let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - create_test_accounts(&accounts, &mut pubkeys, 100_000, 0); - bencher.iter(|| { - accounts.accounts_db.calculate_accounts_delta_hash(0); - }); -} - -#[bench] -fn bench_delete_dependencies(bencher: &mut Bencher) { - solana_logger::setup(); - let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delete_deps")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut old_pubkey = Pubkey::default(); - let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); - for i in 0..1000 { - let pubkey = solana_sdk::pubkey::new_rand(); - let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner()); - accounts.store_slow_uncached(i, &pubkey, &account); - accounts.store_slow_uncached(i, &old_pubkey, &zero_account); - old_pubkey = pubkey; - accounts.add_root(i); - } - bencher.iter(|| { - accounts.accounts_db.clean_accounts_for_tests(); - }); -} - -fn store_accounts_with_possible_contention( - bench_name: &str, - bencher: &mut Bencher, - reader_f: F, -) where - F: Fn(&Accounts, &[Pubkey]) + Send + Copy, -{ - let num_readers = 5; - let accounts_db = new_accounts_db(vec![PathBuf::from( - std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), - ) - .join(bench_name)]); - let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); - let num_keys = 1000; - let slot = 0; - - let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) - .take(num_keys) - .collect(); - let accounts_data: Vec<_> = std::iter::repeat(Account 
{ - lamports: 1, - ..Default::default() - }) - .take(num_keys) - .collect(); - let storable_accounts: Vec<_> = pubkeys.iter().zip(accounts_data.iter()).collect(); - accounts.store_accounts_cached((slot, storable_accounts.as_slice())); - accounts.add_root(slot); - accounts - .accounts_db - .flush_accounts_cache_slot_for_tests(slot); - - let pubkeys = Arc::new(pubkeys); - for i in 0..num_readers { - let accounts = accounts.clone(); - let pubkeys = pubkeys.clone(); - Builder::new() - .name(format!("reader{i:02}")) - .spawn(move || { - reader_f(&accounts, &pubkeys); - }) - .unwrap(); - } - - let num_new_keys = 1000; - bencher.iter(|| { - let new_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) - .take(num_new_keys) - .collect(); - let new_storable_accounts: Vec<_> = new_pubkeys.iter().zip(accounts_data.iter()).collect(); - // Write to a different slot than the one being read from. Because - // there's a new account pubkey being written to every time, will - // compete for the accounts index lock on every store - accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice())); - }); -} - -#[bench] -fn bench_concurrent_read_write(bencher: &mut Bencher) { - store_accounts_with_possible_contention( - "concurrent_read_write", - bencher, - |accounts, pubkeys| { - let mut rng = rand::thread_rng(); - loop { - let i = rng.gen_range(0..pubkeys.len()); - test::black_box( - accounts - .load_without_fixed_root(&Ancestors::default(), &pubkeys[i]) - .unwrap(), - ); - } - }, - ) -} - -#[bench] -fn bench_concurrent_scan_write(bencher: &mut Bencher) { - store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop { - test::black_box( - accounts - .load_by_program( - &Ancestors::default(), - 0, - AccountSharedData::default().owner(), - &ScanConfig::default(), - ) - .unwrap(), - ); - }) -} - -#[bench] -#[ignore] -fn bench_dashmap_single_reader_with_n_writers(bencher: &mut Bencher) { - let num_readers = 5; - let num_keys = 10000; - let map = Arc::new(DashMap::new()); - for i in 0..num_keys { - map.insert(i, i); - } - for _ in 0..num_readers { - let map = map.clone(); - Builder::new() - .name("readers".to_string()) - .spawn(move || loop { - test::black_box(map.entry(5).or_insert(2)); - }) - .unwrap(); - } - bencher.iter(|| { - for _ in 0..num_keys { - test::black_box(map.get(&5).unwrap().value()); - } - }) -} - -#[bench] -#[ignore] -fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) { - let num_readers = 5; - let num_keys = 10000; - let map = Arc::new(RwLock::new(HashMap::new())); - for i in 0..num_keys { - map.write().unwrap().insert(i, i); - } - for _ in 0..num_readers { - let map = map.clone(); - Builder::new() - .name("readers".to_string()) - .spawn(move || loop { - test::black_box(map.write().unwrap().get(&5)); - }) - .unwrap(); - } - bencher.iter(|| { - for _ in 0..num_keys { - test::black_box(map.read().unwrap().get(&5)); - } - }) -} - -fn setup_bench_dashmap_iter() -> (Arc, DashMap) { - let accounts_db = new_accounts_db(vec![PathBuf::from( - std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), - ) - .join("bench_dashmap_par_iter")]); - let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); - - let dashmap = DashMap::new(); - let num_keys = std::env::var("NUM_BENCH_KEYS") - .map(|num_keys| num_keys.parse::().unwrap()) - .unwrap_or_else(|_| 10000); - for _ in 0..num_keys { - dashmap.insert( - Pubkey::new_unique(), - ( - AccountSharedData::new(1, 0, AccountSharedData::default().owner()), - 
Hash::new_unique(), - ), - ); - } - - (accounts, dashmap) -} - -#[bench] -fn bench_dashmap_par_iter(bencher: &mut Bencher) { - let (accounts, dashmap) = setup_bench_dashmap_iter(); - - bencher.iter(|| { - test::black_box(accounts.accounts_db.thread_pool.install(|| { - dashmap - .par_iter() - .map(|cached_account| (*cached_account.key(), cached_account.value().1)) - .collect::>() - })); - }); -} - -#[bench] -fn bench_dashmap_iter(bencher: &mut Bencher) { - let (_accounts, dashmap) = setup_bench_dashmap_iter(); - - bencher.iter(|| { - test::black_box( - dashmap - .iter() - .map(|cached_account| (*cached_account.key(), cached_account.value().1)) - .collect::>(), - ); - }); -} - -#[bench] -fn bench_load_largest_accounts(b: &mut Bencher) { - let accounts_db = new_accounts_db(Vec::new()); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut rng = rand::thread_rng(); - for _ in 0..10_000 { - let lamports = rng.gen(); - let pubkey = Pubkey::new_unique(); - let account = AccountSharedData::new(lamports, 0, &Pubkey::default()); - accounts.store_slow_uncached(0, &pubkey, &account); - } - let ancestors = Ancestors::from(vec![0]); - let bank_id = 0; - b.iter(|| { - accounts.load_largest_accounts( - &ancestors, - bank_id, - 20, - &HashSet::new(), - AccountAddressFilter::Exclude, - ) - }); -} From 00c984fe4decfd8da1f08d6d756f3c98a3827acc Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 11 Mar 2024 13:13:56 -0400 Subject: [PATCH 357/401] deprecate `get_client` and `get_multi_client` (#177) deprecate get_client and get_multi_client --- dos/src/main.rs | 1 + gossip/src/gossip_service.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/dos/src/main.rs b/dos/src/main.rs index 8e6c3c5b2b11b5..b9e0dceba40bf0 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -39,6 +39,7 @@ //! ``` //! #![allow(clippy::arithmetic_side_effects)] +#![allow(deprecated)] use { crossbeam_channel::{select, tick, unbounded, Receiver, Sender}, itertools::Itertools, diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 806ee23a4fb0be..404a685aa75567 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -194,6 +194,7 @@ pub fn discover( } /// Creates a ThinClient by selecting a valid node at random +#[deprecated(since = "1.18.0", note = "Interface will change")] pub fn get_client( nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace, @@ -209,6 +210,7 @@ pub fn get_client( ThinClient::new(rpc, tpu, connection_cache) } +#[deprecated(since = "1.18.0", note = "Will be removed in favor of get_client")] pub fn get_multi_client( nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace, From 158c4e05d5e60c5f5b3dc8e47f5f60a90beca6e1 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 11 Mar 2024 12:21:51 -0500 Subject: [PATCH 358/401] remove dead code (#176) --- accounts-db/src/accounts_partition.rs | 30 --------------------------- runtime/src/bank/tests.rs | 28 ++++--------------------- 2 files changed, 4 insertions(+), 54 deletions(-) diff --git a/accounts-db/src/accounts_partition.rs b/accounts-db/src/accounts_partition.rs index 05d3993adcfb70..01d6929c3e07e5 100644 --- a/accounts-db/src/accounts_partition.rs +++ b/accounts-db/src/accounts_partition.rs @@ -98,36 +98,6 @@ pub fn get_partition_from_slot_indexes( (start_partition_index, end_partition_index, partition_count) } -/// used only by filler accounts in debug path -/// previous means slot - 1, not parent -// These functions/fields are only usable from a dev context (i.e. 
tests and benches) -#[cfg(feature = "dev-context-only-utils")] -pub fn variable_cycle_partition_from_previous_slot( - epoch_schedule: &EpochSchedule, - slot: Slot, -) -> Partition { - // similar code to Bank::variable_cycle_partitions - let (current_epoch, current_slot_index) = epoch_schedule.get_epoch_and_slot_index(slot); - let (parent_epoch, mut parent_slot_index) = - epoch_schedule.get_epoch_and_slot_index(slot.saturating_sub(1)); - let cycle_params = rent_single_epoch_collection_cycle_params( - current_epoch, - epoch_schedule.get_slots_in_epoch(current_epoch), - ); - - if parent_epoch < current_epoch { - parent_slot_index = 0; - } - - let generated_for_gapped_epochs = false; - get_partition_from_slot_indexes( - cycle_params, - parent_slot_index, - current_slot_index, - generated_for_gapped_epochs, - ) -} - /// return all end partition indexes for the given partition /// partition could be (0, 1, N). In this case we only return [1] /// the single 'end_index' that covers this partition. diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index f9b846d85b1512..29dbdc2e5aeacd 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1279,26 +1279,6 @@ fn test_rent_complex() { assert_eq!(bank.collected_rent.load(Relaxed), rent_collected); } -fn test_rent_collection_partitions(bank: &Bank) -> Vec { - let partitions = bank.rent_collection_partitions(); - let slot = bank.slot(); - if slot.saturating_sub(1) == bank.parent_slot() { - let partition = accounts_partition::variable_cycle_partition_from_previous_slot( - bank.epoch_schedule(), - bank.slot(), - ); - assert_eq!( - partitions.last().unwrap(), - &partition, - "slot: {}, slots per epoch: {}, partitions: {:?}", - bank.slot(), - bank.epoch_schedule().slots_per_epoch, - partitions - ); - } - partitions -} - #[test] fn test_rent_eager_across_epoch_without_gap() { let mut bank = create_simple_test_arc_bank(1).0; @@ -1321,16 +1301,16 @@ fn test_rent_eager_across_epoch_without_gap_mnb() { genesis_config.cluster_type = ClusterType::MainnetBeta; let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); - assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 0, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); bank = Arc::new(new_from_parent(bank)); - assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 1, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 32)]); for _ in 2..32 { bank = Arc::new(new_from_parent(bank)); } - assert_eq!(test_rent_collection_partitions(&bank), vec![(30, 31, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(30, 31, 32)]); bank = Arc::new(new_from_parent(bank)); - assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 0, 64)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 64)]); } #[test] From 53e7b9ac474f046ef5ea4b1725f663209c2e5139 Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 11 Mar 2024 12:11:22 -0600 Subject: [PATCH 359/401] Version bump v2.0.0 (#121) * Put solana-svm in alphabetical order * Update version to 2.0.0. 
--------- Co-authored-by: Will Hickey --- Cargo.lock | 228 ++++++++-------- Cargo.toml | 162 ++++++------ programs/sbf/Cargo.lock | 250 +++++++++--------- programs/sbf/Cargo.toml | 50 ++-- .../tests/crates/fail/Cargo.toml | 4 +- .../tests/crates/noop/Cargo.toml | 4 +- 6 files changed, 349 insertions(+), 349 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19b265863eba47..88f0fa0925dcac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,7 +64,7 @@ dependencies = [ [[package]] name = "agave-cargo-registry" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "flate2", @@ -93,7 +93,7 @@ dependencies = [ [[package]] name = "agave-geyser-plugin-interface" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-sdk", @@ -103,7 +103,7 @@ dependencies = [ [[package]] name = "agave-install" -version = "1.19.0" +version = "2.0.0" dependencies = [ "atty", "bincode", @@ -138,7 +138,7 @@ dependencies = [ [[package]] name = "agave-ledger-tool" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_cmd", "bs58", @@ -189,7 +189,7 @@ dependencies = [ [[package]] name = "agave-validator" -version = "1.19.0" +version = "2.0.0" dependencies = [ "agave-geyser-plugin-interface", "chrono", @@ -255,7 +255,7 @@ dependencies = [ [[package]] name = "agave-watchtower" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "humantime", @@ -2366,7 +2366,7 @@ dependencies = [ [[package]] name = "gen-headers" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "regex", @@ -2374,7 +2374,7 @@ dependencies = [ [[package]] name = "gen-syscall-list" -version = "1.19.0" +version = "2.0.0" dependencies = [ "regex", ] @@ -4263,7 +4263,7 @@ dependencies = [ [[package]] name = "proto" -version = "1.19.0" +version = "2.0.0" dependencies = [ "protobuf-src", "tonic-build", @@ -4506,7 +4506,7 @@ dependencies = [ [[package]] name = "rbpf-cli" -version = "1.19.0" +version = "2.0.0" [[package]] name = "rdrand" @@ -5310,7 +5310,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.19.0" +version = "2.0.0" dependencies = [ "Inflector", "assert_matches", @@ -5335,7 +5335,7 @@ dependencies = [ [[package]] name = "solana-accounts-bench" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "log", @@ -5349,7 +5349,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "log", @@ -5379,7 +5379,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "1.19.0" +version = "2.0.0" dependencies = [ "arrayref", "assert_matches", @@ -5448,7 +5448,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "bytemuck", @@ -5467,7 +5467,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program-tests" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -5478,7 +5478,7 @@ dependencies = [ [[package]] name = "solana-banking-bench" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -5502,7 +5502,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "borsh 1.2.1", "futures 0.3.30", @@ -5519,7 +5519,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.19.0" +version = "2.0.0" dependencies = [ "serde", "solana-sdk", @@ -5528,7 +5528,7 @@ dependencies = [ [[package]] name = 
"solana-banks-server" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "crossbeam-channel", @@ -5546,7 +5546,7 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -5557,7 +5557,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "crossbeam-channel", @@ -5599,7 +5599,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bv", "fnv", @@ -5616,7 +5616,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -5637,7 +5637,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program-tests" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -5648,7 +5648,7 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bv", "bytemuck", @@ -5667,7 +5667,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-bpf" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-logger", @@ -5675,7 +5675,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-sbf" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_cmd", "bzip2", @@ -5696,11 +5696,11 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" -version = "1.19.0" +version = "2.0.0" [[package]] name = "solana-cargo-test-sbf" -version = "1.19.0" +version = "2.0.0" dependencies = [ "cargo_metadata", "clap 3.2.23", @@ -5711,7 +5711,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "chrono", @@ -5728,7 +5728,7 @@ dependencies = [ [[package]] name = "solana-clap-v3-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "chrono", @@ -5746,7 +5746,7 @@ dependencies = [ [[package]] name = "solana-cli" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -5800,7 +5800,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "dirs-next", @@ -5815,7 +5815,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.19.0" +version = "2.0.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -5841,7 +5841,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "bincode", @@ -5873,7 +5873,7 @@ dependencies = [ [[package]] name = "solana-client-test" -version = "1.19.0" +version = "2.0.0" dependencies = [ "futures-util", "rand 0.8.5", @@ -5903,7 +5903,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -5911,7 +5911,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "chrono", @@ -5924,7 +5924,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "bincode", @@ -5947,7 +5947,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "base64 0.21.7", @@ -6033,7 +6033,7 @@ dependencies = [ [[package]] name = "solana-cost-model" 
-version = "1.19.0" +version = "2.0.0" dependencies = [ "lazy_static", "log", @@ -6058,7 +6058,7 @@ dependencies = [ [[package]] name = "solana-dos" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "clap 3.2.23", @@ -6088,7 +6088,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "console", "indicatif", @@ -6100,7 +6100,7 @@ dependencies = [ [[package]] name = "solana-ed25519-program-tests" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "ed25519-dalek", @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -6133,7 +6133,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "byteorder", @@ -6155,7 +6155,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bitflags 2.4.2", "block-buffer 0.10.4", @@ -6179,7 +6179,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.19.0" +version = "2.0.0" dependencies = [ "proc-macro2", "quote", @@ -6189,7 +6189,7 @@ dependencies = [ [[package]] name = "solana-genesis" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -6214,7 +6214,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-accounts-db", @@ -6225,7 +6225,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "1.19.0" +version = "2.0.0" dependencies = [ "agave-geyser-plugin-interface", "bs58", @@ -6250,7 +6250,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -6301,7 +6301,7 @@ dependencies = [ [[package]] name = "solana-keygen" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bs58", "clap 3.2.23", @@ -6318,7 +6318,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -6388,7 +6388,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -6400,7 +6400,7 @@ dependencies = [ [[package]] name = "solana-local-cluster" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "crossbeam-channel", @@ -6439,7 +6439,7 @@ dependencies = [ [[package]] name = "solana-log-analyzer" -version = "1.19.0" +version = "2.0.0" dependencies = [ "byte-unit", "clap 3.2.23", @@ -6451,7 +6451,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.19.0" +version = "2.0.0" dependencies = [ "env_logger", "lazy_static", @@ -6460,7 +6460,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-sdk", @@ -6468,11 +6468,11 @@ dependencies = [ [[package]] name = "solana-memory-management" -version = "1.19.0" +version = "2.0.0" [[package]] name = "solana-merkle-root-bench" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "log", @@ -6485,7 +6485,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.19.0" +version = "2.0.0" dependencies = [ "fast-math", "hex", @@ -6494,7 +6494,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.19.0" +version = "2.0.0" dependencies = [ 
"crossbeam-channel", "env_logger", @@ -6510,7 +6510,7 @@ dependencies = [ [[package]] name = "solana-net-shaper" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 3.2.23", "rand 0.8.5", @@ -6521,7 +6521,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "clap 3.2.23", @@ -6547,7 +6547,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-notifier" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "reqwest", @@ -6557,7 +6557,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.19.0" +version = "2.0.0" dependencies = [ "ahash 0.8.10", "assert_matches", @@ -6588,7 +6588,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -6610,7 +6610,7 @@ dependencies = [ [[package]] name = "solana-poh-bench" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 3.2.23", "log", @@ -6625,7 +6625,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "arbitrary", @@ -6684,7 +6684,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "base64 0.21.7", @@ -6714,7 +6714,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "async-trait", @@ -6744,7 +6744,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "crossbeam-channel", @@ -6768,7 +6768,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-mutex", "async-trait", @@ -6795,7 +6795,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.19.0" +version = "2.0.0" dependencies = [ "lazy_static", "num_cpus", @@ -6803,7 +6803,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "console", @@ -6822,7 +6822,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -6882,7 +6882,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "async-trait", @@ -6911,7 +6911,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bs58", @@ -6931,7 +6931,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "clap 2.33.3", @@ -6948,7 +6948,7 @@ dependencies = [ [[package]] name = "solana-rpc-test" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "bs58", @@ -6975,7 +6975,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.19.0" +version = "2.0.0" dependencies = [ "aquamarine", "arrayref", @@ -7058,7 +7058,7 @@ dependencies = [ [[package]] name = "solana-runtime-transaction" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -7072,7 +7072,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "assert_matches", @@ -7131,7 +7131,7 @@ dependencies = [ [[package]] name = 
"solana-sdk-macro" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bs58", "proc-macro2", @@ -7148,7 +7148,7 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-send-transaction-service" -version = "1.19.0" +version = "2.0.0" dependencies = [ "crossbeam-channel", "log", @@ -7163,7 +7163,7 @@ dependencies = [ [[package]] name = "solana-stake-accounts" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -7179,7 +7179,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -7196,7 +7196,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.19.0" +version = "2.0.0" dependencies = [ "backoff", "bincode", @@ -7228,7 +7228,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "bs58", @@ -7244,7 +7244,7 @@ dependencies = [ [[package]] name = "solana-store-tool" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "log", @@ -7256,7 +7256,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "async-channel", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "solana-svm" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "itertools", @@ -7307,7 +7307,7 @@ dependencies = [ [[package]] name = "solana-system-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -7321,7 +7321,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -7351,7 +7351,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -7365,7 +7365,7 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -7398,7 +7398,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "bincode", @@ -7420,7 +7420,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "clap 2.33.3", @@ -7447,7 +7447,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.19.0" +version = "2.0.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -7470,7 +7470,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -7507,7 +7507,7 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -7520,14 +7520,14 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-sdk", ] [[package]] name = "solana-unified-scheduler-pool" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "crossbeam-channel", @@ -7544,7 +7544,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "1.19.0" +version = "2.0.0" dependencies = [ "serde_json", "solana-metrics", @@ -7552,7 +7552,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.19.0" +version = "2.0.0" dependencies = [ 
"log", "rustc_version 0.4.0", @@ -7566,7 +7566,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "crossbeam-channel", @@ -7585,7 +7585,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -7608,7 +7608,7 @@ dependencies = [ [[package]] name = "solana-wen-restart" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "assert_matches", @@ -7635,7 +7635,7 @@ dependencies = [ [[package]] name = "solana-zk-keygen" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bs58", "clap 3.2.23", @@ -7654,7 +7654,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bytemuck", "criterion", @@ -7668,7 +7668,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program-tests" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bytemuck", "curve25519-dalek", @@ -7680,7 +7680,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.19.0" +version = "2.0.0" dependencies = [ "aes-gcm-siv", "base64 0.21.7", diff --git a/Cargo.toml b/Cargo.toml index 16786e925c34b4..453408a53b956e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,7 +127,7 @@ exclude = ["programs/sbf"] resolver = "2" [workspace.package] -version = "1.19.0" +version = "2.0.0" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" homepage = "https://solanalabs.com/" @@ -306,87 +306,87 @@ smallvec = "1.13.1" smpl_jwt = "0.7.1" socket2 = "0.5.6" soketto = "0.7" -solana-account-decoder = { path = "account-decoder", version = "=1.19.0" } -solana-accounts-db = { path = "accounts-db", version = "=1.19.0" } -solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.19.0" } -solana-banks-client = { path = "banks-client", version = "=1.19.0" } -solana-banks-interface = { path = "banks-interface", version = "=1.19.0" } -solana-banks-server = { path = "banks-server", version = "=1.19.0" } -solana-bench-tps = { path = "bench-tps", version = "=1.19.0" } -solana-bloom = { path = "bloom", version = "=1.19.0" } -solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.19.0" } -solana-bucket-map = { path = "bucket_map", version = "=1.19.0" } -agave-cargo-registry = { path = "cargo-registry", version = "=1.19.0" } -solana-clap-utils = { path = "clap-utils", version = "=1.19.0" } -solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.19.0" } -solana-cli = { path = "cli", version = "=1.19.0" } -solana-cli-config = { path = "cli-config", version = "=1.19.0" } -solana-cli-output = { path = "cli-output", version = "=1.19.0" } -solana-client = { path = "client", version = "=1.19.0" } -solana-compute-budget-program = { path = "programs/compute-budget", version = "=1.19.0" } -solana-config-program = { path = "programs/config", version = "=1.19.0" } -solana-connection-cache = { path = "connection-cache", version = "=1.19.0", default-features = false } -solana-core = { path = "core", version = "=1.19.0" } -solana-cost-model = { path = "cost-model", version = "=1.19.0" } -solana-download-utils = { path = "download-utils", version = "=1.19.0" } -solana-entry = { path = "entry", version = "=1.19.0" } -solana-faucet = { path = "faucet", version = "=1.19.0" } -solana-frozen-abi = { path = "frozen-abi", version = "=1.19.0" } -solana-frozen-abi-macro = { path = "frozen-abi/macro", version = 
"=1.19.0" } -solana-genesis = { path = "genesis", version = "=1.19.0" } -solana-genesis-utils = { path = "genesis-utils", version = "=1.19.0" } -agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.19.0" } -solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.19.0" } -solana-gossip = { path = "gossip", version = "=1.19.0" } -solana-ledger = { path = "ledger", version = "=1.19.0" } -solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.19.0" } -solana-local-cluster = { path = "local-cluster", version = "=1.19.0" } -solana-logger = { path = "logger", version = "=1.19.0" } -solana-measure = { path = "measure", version = "=1.19.0" } -solana-merkle-tree = { path = "merkle-tree", version = "=1.19.0" } -solana-metrics = { path = "metrics", version = "=1.19.0" } -solana-net-utils = { path = "net-utils", version = "=1.19.0" } +solana-account-decoder = { path = "account-decoder", version = "=2.0.0" } +solana-accounts-db = { path = "accounts-db", version = "=2.0.0" } +solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=2.0.0" } +solana-banks-client = { path = "banks-client", version = "=2.0.0" } +solana-banks-interface = { path = "banks-interface", version = "=2.0.0" } +solana-banks-server = { path = "banks-server", version = "=2.0.0" } +solana-bench-tps = { path = "bench-tps", version = "=2.0.0" } +solana-bloom = { path = "bloom", version = "=2.0.0" } +solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=2.0.0" } +solana-bucket-map = { path = "bucket_map", version = "=2.0.0" } +agave-cargo-registry = { path = "cargo-registry", version = "=2.0.0" } +solana-clap-utils = { path = "clap-utils", version = "=2.0.0" } +solana-clap-v3-utils = { path = "clap-v3-utils", version = "=2.0.0" } +solana-cli = { path = "cli", version = "=2.0.0" } +solana-cli-config = { path = "cli-config", version = "=2.0.0" } +solana-cli-output = { path = "cli-output", version = "=2.0.0" } +solana-client = { path = "client", version = "=2.0.0" } +solana-compute-budget-program = { path = "programs/compute-budget", version = "=2.0.0" } +solana-config-program = { path = "programs/config", version = "=2.0.0" } +solana-connection-cache = { path = "connection-cache", version = "=2.0.0", default-features = false } +solana-core = { path = "core", version = "=2.0.0" } +solana-cost-model = { path = "cost-model", version = "=2.0.0" } +solana-download-utils = { path = "download-utils", version = "=2.0.0" } +solana-entry = { path = "entry", version = "=2.0.0" } +solana-faucet = { path = "faucet", version = "=2.0.0" } +solana-frozen-abi = { path = "frozen-abi", version = "=2.0.0" } +solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=2.0.0" } +solana-genesis = { path = "genesis", version = "=2.0.0" } +solana-genesis-utils = { path = "genesis-utils", version = "=2.0.0" } +agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=2.0.0" } +solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.0.0" } +solana-gossip = { path = "gossip", version = "=2.0.0" } +solana-ledger = { path = "ledger", version = "=2.0.0" } +solana-loader-v4-program = { path = "programs/loader-v4", version = "=2.0.0" } +solana-local-cluster = { path = "local-cluster", version = "=2.0.0" } +solana-logger = { path = "logger", version = "=2.0.0" } +solana-measure = { path = "measure", version = "=2.0.0" } +solana-merkle-tree = { path = "merkle-tree", version = "=2.0.0" } +solana-metrics = 
{ path = "metrics", version = "=2.0.0" } +solana-net-utils = { path = "net-utils", version = "=2.0.0" } solana-nohash-hasher = "0.2.1" -solana-notifier = { path = "notifier", version = "=1.19.0" } -solana-perf = { path = "perf", version = "=1.19.0" } -solana-poh = { path = "poh", version = "=1.19.0" } -solana-program = { path = "sdk/program", version = "=1.19.0" } -solana-program-runtime = { path = "program-runtime", version = "=1.19.0" } -solana-program-test = { path = "program-test", version = "=1.19.0" } -solana-pubsub-client = { path = "pubsub-client", version = "=1.19.0" } -solana-quic-client = { path = "quic-client", version = "=1.19.0" } -solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.19.0" } -solana-remote-wallet = { path = "remote-wallet", version = "=1.19.0", default-features = false } -solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=1.19.0" } -solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=1.19.0" } -solana-rpc = { path = "rpc", version = "=1.19.0" } -solana-rpc-client = { path = "rpc-client", version = "=1.19.0", default-features = false } -solana-rpc-client-api = { path = "rpc-client-api", version = "=1.19.0" } -solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.19.0" } -solana-runtime = { path = "runtime", version = "=1.19.0" } -solana-runtime-transaction = { path = "runtime-transaction", version = "=1.19.0" } -solana-sdk = { path = "sdk", version = "=1.19.0" } -solana-sdk-macro = { path = "sdk/macro", version = "=1.19.0" } -solana-send-transaction-service = { path = "send-transaction-service", version = "=1.19.0" } -solana-stake-program = { path = "programs/stake", version = "=1.19.0" } -solana-storage-bigtable = { path = "storage-bigtable", version = "=1.19.0" } -solana-storage-proto = { path = "storage-proto", version = "=1.19.0" } -solana-streamer = { path = "streamer", version = "=1.19.0" } -solana-svm = { path = "svm", version = "=1.19.0" } -solana-system-program = { path = "programs/system", version = "=1.19.0" } -solana-test-validator = { path = "test-validator", version = "=1.19.0" } -solana-thin-client = { path = "thin-client", version = "=1.19.0" } -solana-tpu-client = { path = "tpu-client", version = "=1.19.0", default-features = false } -solana-transaction-status = { path = "transaction-status", version = "=1.19.0" } -solana-turbine = { path = "turbine", version = "=1.19.0" } -solana-udp-client = { path = "udp-client", version = "=1.19.0" } -solana-version = { path = "version", version = "=1.19.0" } -solana-vote = { path = "vote", version = "=1.19.0" } -solana-vote-program = { path = "programs/vote", version = "=1.19.0" } -solana-wen-restart = { path = "wen-restart", version = "=1.19.0" } -solana-zk-keygen = { path = "zk-keygen", version = "=1.19.0" } -solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.19.0" } -solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.19.0" } +solana-notifier = { path = "notifier", version = "=2.0.0" } +solana-perf = { path = "perf", version = "=2.0.0" } +solana-poh = { path = "poh", version = "=2.0.0" } +solana-program = { path = "sdk/program", version = "=2.0.0" } +solana-program-runtime = { path = "program-runtime", version = "=2.0.0" } +solana-program-test = { path = "program-test", version = "=2.0.0" } +solana-pubsub-client = { path = "pubsub-client", version = "=2.0.0" } +solana-quic-client = { path = "quic-client", version = "=2.0.0" } +solana-rayon-threadlimit = { path = 
"rayon-threadlimit", version = "=2.0.0" } +solana-remote-wallet = { path = "remote-wallet", version = "=2.0.0", default-features = false } +solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.0.0" } +solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.0.0" } +solana-rpc = { path = "rpc", version = "=2.0.0" } +solana-rpc-client = { path = "rpc-client", version = "=2.0.0", default-features = false } +solana-rpc-client-api = { path = "rpc-client-api", version = "=2.0.0" } +solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=2.0.0" } +solana-runtime = { path = "runtime", version = "=2.0.0" } +solana-runtime-transaction = { path = "runtime-transaction", version = "=2.0.0" } +solana-sdk = { path = "sdk", version = "=2.0.0" } +solana-sdk-macro = { path = "sdk/macro", version = "=2.0.0" } +solana-send-transaction-service = { path = "send-transaction-service", version = "=2.0.0" } +solana-stake-program = { path = "programs/stake", version = "=2.0.0" } +solana-storage-bigtable = { path = "storage-bigtable", version = "=2.0.0" } +solana-storage-proto = { path = "storage-proto", version = "=2.0.0" } +solana-streamer = { path = "streamer", version = "=2.0.0" } +solana-svm = { path = "svm", version = "=2.0.0" } +solana-system-program = { path = "programs/system", version = "=2.0.0" } +solana-test-validator = { path = "test-validator", version = "=2.0.0" } +solana-thin-client = { path = "thin-client", version = "=2.0.0" } +solana-tpu-client = { path = "tpu-client", version = "=2.0.0", default-features = false } +solana-transaction-status = { path = "transaction-status", version = "=2.0.0" } +solana-turbine = { path = "turbine", version = "=2.0.0" } +solana-udp-client = { path = "udp-client", version = "=2.0.0" } +solana-version = { path = "version", version = "=2.0.0" } +solana-vote = { path = "vote", version = "=2.0.0" } +solana-vote-program = { path = "programs/vote", version = "=2.0.0" } +solana-wen-restart = { path = "wen-restart", version = "=2.0.0" } +solana-zk-keygen = { path = "zk-keygen", version = "=2.0.0" } +solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=2.0.0" } +solana-zk-token-sdk = { path = "zk-token-sdk", version = "=2.0.0" } solana_rbpf = "=0.8.0" spl-associated-token-account = "=2.3.1" spl-instruction-padding = "0.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a3d350456afa9c..c31befdf34c092 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -65,7 +65,7 @@ dependencies = [ [[package]] name = "agave-geyser-plugin-interface" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-sdk", @@ -75,7 +75,7 @@ dependencies = [ [[package]] name = "agave-validator" -version = "1.19.0" +version = "2.0.0" dependencies = [ "agave-geyser-plugin-interface", "chrono", @@ -4598,7 +4598,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.19.0" +version = "2.0.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -4621,7 +4621,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "1.19.0" +version = "2.0.0" dependencies = [ "arrayref", "bincode", @@ -4681,7 +4681,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "bytemuck", @@ -4700,7 +4700,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "borsh 1.2.1", "futures 0.3.30", @@ 
-4715,7 +4715,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.19.0" +version = "2.0.0" dependencies = [ "serde", "solana-sdk", @@ -4724,7 +4724,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "crossbeam-channel", @@ -4742,7 +4742,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bv", "fnv", @@ -4759,7 +4759,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "byteorder 1.5.0", @@ -4776,7 +4776,7 @@ dependencies = [ [[package]] name = "solana-bpf-rust-big-mod-exp" -version = "1.19.0" +version = "2.0.0" dependencies = [ "array-bytes", "serde", @@ -4786,7 +4786,7 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bv", "bytemuck", @@ -4802,7 +4802,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "chrono", "clap 2.33.3", @@ -4817,7 +4817,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.19.0" +version = "2.0.0" dependencies = [ "dirs-next", "lazy_static", @@ -4831,7 +4831,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.19.0" +version = "2.0.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -4856,7 +4856,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "bincode", @@ -4887,7 +4887,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -4895,7 +4895,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "chrono", @@ -4907,7 +4907,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "bincode", @@ -4926,7 +4926,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -5001,7 +5001,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "1.19.0" +version = "2.0.0" dependencies = [ "lazy_static", "log", @@ -5023,7 +5023,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "console", "indicatif", @@ -5035,7 +5035,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "crossbeam-channel", @@ -5055,7 +5055,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "byteorder 1.5.0", @@ -5077,7 +5077,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.19.0" +version = "2.0.0" dependencies = [ "block-buffer 0.10.4", "bs58", @@ -5099,7 +5099,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.19.0" +version = "2.0.0" dependencies = [ "proc-macro2", "quote", @@ -5109,7 +5109,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-accounts-db", @@ -5120,7 +5120,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "1.19.0" +version = "2.0.0" dependencies = [ 
"agave-geyser-plugin-interface", "bs58", @@ -5145,7 +5145,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -5193,7 +5193,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "bincode", @@ -5259,7 +5259,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-measure", @@ -5270,7 +5270,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.19.0" +version = "2.0.0" dependencies = [ "env_logger", "lazy_static", @@ -5279,7 +5279,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "solana-sdk", @@ -5287,7 +5287,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.19.0" +version = "2.0.0" dependencies = [ "fast-math", "solana-program", @@ -5295,7 +5295,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.19.0" +version = "2.0.0" dependencies = [ "crossbeam-channel", "gethostname", @@ -5308,7 +5308,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "clap 3.1.6", @@ -5334,7 +5334,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-perf" -version = "1.19.0" +version = "2.0.0" dependencies = [ "ahash 0.8.10", "bincode", @@ -5361,7 +5361,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.19.0" +version = "2.0.0" dependencies = [ "core_affinity", "crossbeam-channel", @@ -5377,7 +5377,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "ark-bn254", "ark-ec", @@ -5430,7 +5430,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -5456,7 +5456,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "async-trait", @@ -5485,7 +5485,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "crossbeam-channel", "futures-util", @@ -5508,7 +5508,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-mutex", "async-trait", @@ -5532,7 +5532,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.19.0" +version = "2.0.0" dependencies = [ "lazy_static", "num_cpus", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.19.0" +version = "2.0.0" dependencies = [ "console", "dialoguer", @@ -5557,7 +5557,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -5613,7 +5613,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "base64 0.21.7", @@ -5637,7 +5637,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bs58", @@ -5657,7 +5657,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.19.0" +version = "2.0.0" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -5668,7 +5668,7 @@ 
dependencies = [ [[package]] name = "solana-runtime" -version = "1.19.0" +version = "2.0.0" dependencies = [ "aquamarine", "arrayref", @@ -5743,7 +5743,7 @@ dependencies = [ [[package]] name = "solana-sbf-programs" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "byteorder 1.5.0", @@ -5773,7 +5773,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-sbf-rust-128bit-dep", @@ -5781,21 +5781,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit-dep" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alloc" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alt-bn128" -version = "1.19.0" +version = "2.0.0" dependencies = [ "array-bytes", "solana-program", @@ -5803,7 +5803,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alt-bn128-compression" -version = "1.19.0" +version = "2.0.0" dependencies = [ "array-bytes", "solana-program", @@ -5811,21 +5811,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-call-depth" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-caller-access" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-curve25519" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-zk-token-sdk", @@ -5833,14 +5833,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-custom-heap" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dep-crate" -version = "1.19.0" +version = "2.0.0" dependencies = [ "byteorder 1.5.0", "solana-program", @@ -5848,21 +5848,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-deprecated-loader" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dup-accounts" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-error-handling" -version = "1.19.0" +version = "2.0.0" dependencies = [ "num-derive 0.3.0", "num-traits", @@ -5872,42 +5872,42 @@ dependencies = [ [[package]] name = "solana-sbf-rust-external-spend" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-finalize" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-get-minimum-delegation" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-inner_instruction_alignment_check" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-instruction-introspection" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke" -version = "1.19.0" +version = "2.0.0" dependencies = [ "rustversion", "solana-program", @@ -5917,49 +5917,49 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-error" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-ok" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-return" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] 
[[package]] name = "solana-sbf-rust-invoked" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-iter" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-log-data" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-many-args" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-sbf-rust-many-args-dep", @@ -5967,14 +5967,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-many-args-dep" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-mem" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -5984,7 +5984,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-membuiltins" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-sbf-rust-mem", @@ -5992,21 +5992,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-noop" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-panic" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-param-passing" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-sbf-rust-param-passing-dep", @@ -6014,14 +6014,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-param-passing-dep" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-poseidon" -version = "1.19.0" +version = "2.0.0" dependencies = [ "array-bytes", "solana-program", @@ -6029,7 +6029,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-rand" -version = "1.19.0" +version = "2.0.0" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", @@ -6038,14 +6038,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-realloc-invoke" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-sbf-rust-realloc", @@ -6053,7 +6053,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-remaining-compute-units" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -6063,21 +6063,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-ro-account_modify" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-ro-modify" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sanity" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -6087,7 +6087,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-secp256k1-recover" -version = "1.19.0" +version = "2.0.0" dependencies = [ "libsecp256k1 0.7.0", "solana-program", @@ -6095,7 +6095,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sha" -version = "1.19.0" +version = "2.0.0" dependencies = [ "blake3", "solana-program", @@ -6103,21 +6103,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sibling-instructions" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sibling_inner-instructions" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] 
[[package]] name = "solana-sbf-rust-simulation" -version = "1.19.0" +version = "2.0.0" dependencies = [ "agave-validator", "solana-logger", @@ -6128,21 +6128,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-spoof1" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-spoof1-system" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sysvar" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -6152,21 +6152,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-upgradeable" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-upgraded" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sdk" -version = "1.19.0" +version = "2.0.0" dependencies = [ "assert_matches", "base64 0.21.7", @@ -6219,7 +6219,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bs58", "proc-macro2", @@ -6236,7 +6236,7 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-send-transaction-service" -version = "1.19.0" +version = "2.0.0" dependencies = [ "crossbeam-channel", "log", @@ -6250,7 +6250,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -6263,7 +6263,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.19.0" +version = "2.0.0" dependencies = [ "backoff", "bincode", @@ -6295,7 +6295,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "bs58", @@ -6310,7 +6310,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-channel", "bytes", @@ -6338,7 +6338,7 @@ dependencies = [ [[package]] name = "solana-svm" -version = "1.19.0" +version = "2.0.0" dependencies = [ "itertools", "log", @@ -6357,7 +6357,7 @@ dependencies = [ [[package]] name = "solana-system-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -6369,7 +6369,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.19.0" +version = "2.0.0" dependencies = [ "base64 0.21.7", "bincode", @@ -6399,7 +6399,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -6412,7 +6412,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "bincode", @@ -6434,7 +6434,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.19.0" +version = "2.0.0" dependencies = [ "Inflector", "base64 0.21.7", @@ -6457,7 +6457,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "bytes", @@ -6491,7 +6491,7 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.19.0" +version = "2.0.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -6504,14 +6504,14 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-logic" -version = "1.19.0" +version = "2.0.0" dependencies = [ "solana-sdk", ] [[package]] name = "solana-unified-scheduler-pool" -version = "1.19.0" +version = "2.0.0" 
dependencies = [ "assert_matches", "crossbeam-channel", @@ -6527,7 +6527,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.19.0" +version = "2.0.0" dependencies = [ "log", "rustc_version", @@ -6541,7 +6541,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "1.19.0" +version = "2.0.0" dependencies = [ "crossbeam-channel", "itertools", @@ -6558,7 +6558,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bincode", "log", @@ -6578,7 +6578,7 @@ dependencies = [ [[package]] name = "solana-wen-restart" -version = "1.19.0" +version = "2.0.0" dependencies = [ "anyhow", "log", @@ -6598,7 +6598,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "1.19.0" +version = "2.0.0" dependencies = [ "bytemuck", "num-derive 0.4.2", @@ -6610,7 +6610,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.19.0" +version = "2.0.0" dependencies = [ "aes-gcm-siv", "base64 0.21.7", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index dee6a947b1965d..c8f2b431f28e7a 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.19.0" +version = "2.0.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -25,30 +25,30 @@ rand = "0.8" rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" -solana-account-decoder = { path = "../../account-decoder", version = "=1.19.0" } -solana-accounts-db = { path = "../../accounts-db", version = "=1.19.0" } -solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.19.0" } -solana-cli-output = { path = "../../cli-output", version = "=1.19.0" } -solana-ledger = { path = "../../ledger", version = "=1.19.0" } -solana-logger = { path = "../../logger", version = "=1.19.0" } -solana-measure = { path = "../../measure", version = "=1.19.0" } -solana-program = { path = "../../sdk/program", version = "=1.19.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.19.0" } -solana-program-test = { path = "../../program-test", version = "=1.19.0" } -solana-runtime = { path = "../../runtime", version = "=1.19.0" } -solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=1.19.0" } -solana-sbf-rust-invoke = { path = "rust/invoke", version = "=1.19.0" } -solana-sbf-rust-invoked = { path = "rust/invoked", version = "=1.19.0", default-features = false } -solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=1.19.0" } -solana-sbf-rust-mem = { path = "rust/mem", version = "=1.19.0" } -solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=1.19.0" } -solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.19.0", default-features = false } -solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.19.0" } -solana-sdk = { path = "../../sdk", version = "=1.19.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.19.0" } -agave-validator = { path = "../../validator", version = "=1.19.0" } -solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.19.0" } -solana-svm = { path = "../../svm", version = "=1.19.0" } +solana-account-decoder = { path = "../../account-decoder", version = "=2.0.0" } +solana-accounts-db = { path = "../../accounts-db", version = "=2.0.0" } +solana-bpf-loader-program = { path = "../bpf_loader", 
version = "=2.0.0" } +solana-cli-output = { path = "../../cli-output", version = "=2.0.0" } +solana-ledger = { path = "../../ledger", version = "=2.0.0" } +solana-logger = { path = "../../logger", version = "=2.0.0" } +solana-measure = { path = "../../measure", version = "=2.0.0" } +solana-program = { path = "../../sdk/program", version = "=2.0.0" } +solana-program-runtime = { path = "../../program-runtime", version = "=2.0.0" } +solana-program-test = { path = "../../program-test", version = "=2.0.0" } +solana-runtime = { path = "../../runtime", version = "=2.0.0" } +solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=2.0.0" } +solana-sbf-rust-invoke = { path = "rust/invoke", version = "=2.0.0" } +solana-sbf-rust-invoked = { path = "rust/invoked", version = "=2.0.0", default-features = false } +solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=2.0.0" } +solana-sbf-rust-mem = { path = "rust/mem", version = "=2.0.0" } +solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=2.0.0" } +solana-sbf-rust-realloc = { path = "rust/realloc", version = "=2.0.0", default-features = false } +solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=2.0.0" } +solana-sdk = { path = "../../sdk", version = "=2.0.0" } +solana-svm = { path = "../../svm", version = "=2.0.0" } +solana-transaction-status = { path = "../../transaction-status", version = "=2.0.0" } +agave-validator = { path = "../../validator", version = "=2.0.0" } +solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=2.0.0" } solana_rbpf = "=0.8.0" static_assertions = "1.1.0" thiserror = "1.0" diff --git a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index 8e1b7f77206707..7dc085d721af50 100644 --- a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fail" -version = "1.19.0" +version = "2.0.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.19.0" } +solana-program = { path = "../../../../program", version = "=2.0.0" } [lib] crate-type = ["cdylib"] diff --git a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index 2d48c1295424da..3d3946decdb6ab 100644 --- a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noop" -version = "1.19.0" +version = "2.0.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.19.0" } +solana-program = { path = "../../../../program", version = "=2.0.0" } [lib] crate-type = ["cdylib"] From 8fa0e5c603e05ddfc98eb0e73c5684143ae355a1 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 11 Mar 2024 13:11:43 -0500 Subject: [PATCH 360/401] Move AccountsDb replication arguments to deprecated list (#157) These arguments are not read by anything, and they appear to correspond to a proposed feature that is no longer in the codebase. 
--- validator/src/cli.rs | 66 +++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 41 deletions(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 8424d7973f0705..e3f46309724af7 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1172,44 +1172,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .default_value(&default_args.rpc_max_request_body_size) .help("The maximum request body size accepted by rpc service"), ) - .arg( - Arg::with_name("enable_accountsdb_repl") - .long("enable-accountsdb-repl") - .takes_value(false) - .hidden(hidden_unless_forced()) - .help("Enable AccountsDb Replication"), - ) - .arg( - Arg::with_name("accountsdb_repl_bind_address") - .long("accountsdb-repl-bind-address") - .value_name("HOST") - .takes_value(true) - .validator(solana_net_utils::is_host) - .hidden(hidden_unless_forced()) - .help( - "IP address to bind the AccountsDb Replication port [default: use \ - --bind-address]", - ), - ) - .arg( - Arg::with_name("accountsdb_repl_port") - .long("accountsdb-repl-port") - .value_name("PORT") - .takes_value(true) - .validator(port_validator) - .hidden(hidden_unless_forced()) - .help("Enable AccountsDb Replication Service on this port"), - ) - .arg( - Arg::with_name("accountsdb_repl_threads") - .long("accountsdb-repl-threads") - .value_name("NUMBER") - .validator(is_parsable::<usize>) - .takes_value(true) - .default_value(&default_args.accountsdb_repl_threads) - .hidden(hidden_unless_forced()) - .help("Number of threads to use for servicing AccountsDb Replication requests"), - ) .arg( Arg::with_name("geyser_plugin_config") .long("geyser-plugin-config") @@ -1989,6 +1951,27 @@ fn deprecated_arguments() -> Vec<DeprecatedArg> { Ok(()) } })); + add_arg!(Arg::with_name("accountsdb_repl_bind_address") + .long("accountsdb-repl-bind-address") + .value_name("HOST") + .takes_value(true) + .validator(solana_net_utils::is_host) + .help( + "IP address to bind the AccountsDb Replication port [default: use \ + --bind-address]", + )); + add_arg!(Arg::with_name("accountsdb_repl_port") + .long("accountsdb-repl-port") + .value_name("PORT") + .takes_value(true) + .validator(port_validator) + .help("Enable AccountsDb Replication Service on this port")); + add_arg!(Arg::with_name("accountsdb_repl_threads") + .long("accountsdb-repl-threads") + .value_name("NUMBER") + .validator(is_parsable::<usize>) + .takes_value(true) + .help("Number of threads to use for servicing AccountsDb Replication requests")); add_arg!(Arg::with_name("disable_accounts_disk_index") .long("disable-accounts-disk-index") .help("Disable the disk-based accounts index if it is enabled by default.") @@ -1999,6 +1982,10 @@ fn deprecated_arguments() -> Vec<DeprecatedArg> { .takes_value(false), usage_warning: "The quic server cannot be disabled.", ); + add_arg!(Arg::with_name("enable_accountsdb_repl") + .long("enable-accountsdb-repl") + .takes_value(false) + .help("Enable AccountsDb Replication")); add_arg!( Arg::with_name("enable_cpi_and_log_storage") .long("enable-cpi-and-log-storage") @@ -2162,8 +2149,6 @@ pub struct DefaultArgs { pub contact_debug_interval: String, - pub accountsdb_repl_threads: String, - pub snapshot_version: SnapshotVersion, pub snapshot_archive_format: String, @@ -2239,7 +2224,6 @@ impl DefaultArgs { rpc_bigtable_max_message_size: solana_storage_bigtable::DEFAULT_MAX_MESSAGE_SIZE .to_string(), rpc_pubsub_worker_threads: "4".to_string(), - accountsdb_repl_threads: num_cpus::get().to_string(), maximum_full_snapshot_archives_to_retain: 
DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), maximum_incremental_snapshot_archives_to_retain: DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), From f205d0e729e05f48ebba4e4bb47859714ff390f2 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 11 Mar 2024 18:49:35 +0000 Subject: [PATCH 361/401] expands weighted-shuffle benchmarks (#179) Adding separate benchmarks for WeightedShuffle::new and WeightedShuffle::shuffle. --- gossip/benches/weighted_shuffle.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/gossip/benches/weighted_shuffle.rs b/gossip/benches/weighted_shuffle.rs index 58a7e37ce6a328..09615c57bbca15 100644 --- a/gossip/benches/weighted_shuffle.rs +++ b/gossip/benches/weighted_shuffle.rs @@ -11,18 +11,28 @@ use { }; fn make_weights<R: Rng>(rng: &mut R) -> Vec<u64> { - repeat_with(|| rng.gen_range(1..100)).take(1000).collect() + repeat_with(|| rng.gen_range(1..100)).take(4000).collect() } #[bench] -fn bench_weighted_shuffle(bencher: &mut Bencher) { +fn bench_weighted_shuffle_new(bencher: &mut Bencher) { + let mut rng = rand::thread_rng(); + bencher.iter(|| { + let weights = make_weights(&mut rng); + std::hint::black_box(WeightedShuffle::new("", &weights)); + }); +} + +#[bench] +fn bench_weighted_shuffle_shuffle(bencher: &mut Bencher) { let mut seed = [0u8; 32]; let mut rng = rand::thread_rng(); let weights = make_weights(&mut rng); + let weighted_shuffle = WeightedShuffle::new("", &weights); bencher.iter(|| { rng.fill(&mut seed[..]); - WeightedShuffle::new("", &weights) - .shuffle(&mut ChaChaRng::from_seed(seed)) - .collect::<Vec<_>>() + let mut rng = ChaChaRng::from_seed(seed); + let shuffle = weighted_shuffle.clone().shuffle(&mut rng); + std::hint::black_box(shuffle.collect::<Vec<_>>()); }); } From 209924d220bb4bcaa9ca41ea2fc6738cf2917b01 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 11 Mar 2024 15:33:19 -0400 Subject: [PATCH 362/401] bump deprecated version numbers for `get_client` and `get_multi_client` (#184) bump deprecated version numbers --- gossip/src/gossip_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 404a685aa75567..9e1c56520993c5 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -194,7 +194,7 @@ pub fn discover( } /// Creates a ThinClient by selecting a valid node at random -#[deprecated(since = "1.18.0", note = "Interface will change")] +#[deprecated(since = "1.18.6", note = "Interface will change")] pub fn get_client( nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace, @@ -210,7 +210,7 @@ pub fn get_client( ThinClient::new(rpc, tpu, connection_cache) } -#[deprecated(since = "1.18.0", note = "Will be removed in favor of get_client")] +#[deprecated(since = "1.18.6", note = "Will be removed in favor of get_client")] pub fn get_multi_client( nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace, From 5c1df15e922e36051c3df23b1fee4658dab18854 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Mar 2024 15:38:34 -0400 Subject: [PATCH 363/401] Removes the storage recycler (#118) --- accounts-db/src/accounts_db.rs | 474 ++------------------- accounts-db/src/ancient_append_vecs.rs | 5 +- runtime/src/accounts_background_service.rs | 21 - runtime/src/bank.rs | 4 - runtime/src/snapshot_minimizer.rs | 8 +- 5 files changed, 31 insertions(+), 481 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 41ec05dce0e4a5..18ffa2d02e37ca 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ 
-106,7 +106,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering}, - Arc, Condvar, Mutex, RwLock, + Arc, Condvar, Mutex, }, thread::{sleep, Builder}, time::{Duration, Instant}, @@ -115,7 +115,6 @@ use { }; const PAGE_SIZE: u64 = 4 * 1024; -pub(crate) const MAX_RECYCLE_STORES: usize = 1000; // when the accounts write cache exceeds this many bytes, we will flush it // this can be specified on the command line, too (--accounts-db-cache-limit-mb) const WRITE_CACHE_LIMIT_BYTES_DEFAULT: u64 = 15_000_000_000; @@ -1084,16 +1083,6 @@ impl AccountStorageEntry { *count_and_status = (count, status); } - pub fn recycle(&self, slot: Slot, id: AppendVecId) { - let mut count_and_status = self.count_and_status.lock_write(); - self.accounts.reset(); - *count_and_status = (0, AccountStorageStatus::Available); - self.slot.store(slot, Ordering::Release); - self.id.store(id, Ordering::Release); - self.approx_store_count.store(0, Ordering::Relaxed); - self.alive_bytes.store(0, Ordering::Release); - } - pub fn status(&self) -> AccountStorageStatus { self.count_and_status.read().1 } @@ -1267,76 +1256,6 @@ impl StoreAccountsTiming { } } -#[derive(Debug, Default)] -struct RecycleStores { - entries: Vec<(Instant, Arc<AccountStorageEntry>)>, - total_bytes: u64, -} - -// 30 min should be enough to be certain there won't be any prospective recycle uses for given -// store entry -// That's because it already processed ~2500 slots and ~25 passes of AccountsBackgroundService -pub const EXPIRATION_TTL_SECONDS: u64 = 1800; - -impl RecycleStores { - fn add_entry(&mut self, new_entry: Arc<AccountStorageEntry>) { - self.total_bytes += new_entry.capacity(); - self.entries.push((Instant::now(), new_entry)) - } - - fn iter(&self) -> std::slice::Iter<(Instant, Arc<AccountStorageEntry>)> { - self.entries.iter() - } - - fn add_entries(&mut self, new_entries: Vec<Arc<AccountStorageEntry>>) { - let now = Instant::now(); - for new_entry in new_entries { - self.total_bytes += new_entry.capacity(); - self.entries.push((now, new_entry)); - } - } - - fn expire_old_entries(&mut self) -> Vec<Arc<AccountStorageEntry>> { - let mut expired = vec![]; - let now = Instant::now(); - let mut expired_bytes = 0; - self.entries.retain(|(recycled_time, entry)| { - if now.duration_since(*recycled_time).as_secs() > EXPIRATION_TTL_SECONDS { - if Arc::strong_count(entry) >= 2 { - warn!( - "Expiring still in-use recycled StorageEntry anyway...: id: {} slot: {}", - entry.append_vec_id(), - entry.slot(), - ); - } - expired_bytes += entry.capacity(); - expired.push(entry.clone()); - false - } else { - true - } - }); - - self.total_bytes -= expired_bytes; - - expired - } - - fn remove_entry(&mut self, index: usize) -> Arc<AccountStorageEntry> { - let (_added_time, removed_entry) = self.entries.swap_remove(index); - self.total_bytes -= removed_entry.capacity(); - removed_entry - } - - fn entry_count(&self) -> usize { - self.entries.len() - } - - fn total_bytes(&self) -> u64 { - self.total_bytes - } -} - /// Removing unrooted slots in Accounts Background Service needs to be synchronized with flushing /// slots from the Accounts Cache. This keeps track of those slots and the Mutex + Condvar for /// synchronization. 
@@ -1377,8 +1296,6 @@ pub struct AccountsDb { sender_bg_hasher: Option<Sender<CachedAccount>>, read_only_accounts_cache: ReadOnlyAccountsCache, - recycle_stores: RwLock<RecycleStores>, - /// distribute the accounts across storage lists pub next_id: AtomicAppendVecId, @@ -1506,7 +1423,6 @@ pub struct AccountsStats { pub stakes_cache_check_and_store_us: AtomicU64, store_num_accounts: AtomicU64, store_total_data: AtomicU64, - recycle_store_count: AtomicU64, create_store_count: AtomicU64, store_get_slot_store: AtomicU64, store_find_existing: AtomicU64, @@ -1529,7 +1445,6 @@ pub struct PurgeStats { total_removed_storage_entries: AtomicUsize, total_removed_cached_bytes: AtomicU64, total_removed_stored_bytes: AtomicU64, - recycle_stores_write_elapsed: AtomicU64, scan_storages_elapsed: AtomicU64, purge_accounts_index_elapsed: AtomicU64, handle_reclaims_elapsed: AtomicU64, @@ -1591,11 +1506,6 @@ impl PurgeStats { self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64, i64 ), - ( - "recycle_stores_write_elapsed", - self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), ( "scan_storages_elapsed", self.scan_storages_elapsed.swap(0, Ordering::Relaxed) as i64, @@ -1972,7 +1882,6 @@ pub struct ShrinkStats { unpackable_slots_count: AtomicU64, newest_alive_packed_count: AtomicU64, drop_storage_entries_elapsed: AtomicU64, - recycle_stores_write_elapsed: AtomicU64, accounts_removed: AtomicUsize, bytes_removed: AtomicU64, bytes_written: AtomicU64, @@ -2038,11 +1947,6 @@ impl ShrinkStats { self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, i64 ), - ( - "recycle_stores_write_time", - self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), ( "accounts_removed", self.accounts_removed.swap(0, Ordering::Relaxed) as i64, @@ -2169,13 +2073,6 @@ impl ShrinkAncientStats { .swap(0, Ordering::Relaxed) as i64, i64 ), - ( - "recycle_stores_write_time", - self.shrink_stats - .recycle_stores_write_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), ( "accounts_removed", self.shrink_stats @@ -2425,7 +2322,6 @@ impl AccountsDb { MAX_READ_ONLY_CACHE_DATA_SIZE, READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, ), - recycle_stores: RwLock::new(RecycleStores::default()), uncleaned_pubkeys: DashMap::new(), next_id: AtomicAppendVecId::new(0), shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()), @@ -3949,6 +3845,7 @@ impl AccountsDb { shrink_in_progress, shrink_can_be_active, ); + let dead_storages_len = dead_storages.len(); if !shrink_collect.all_are_zero_lamports { self.add_uncleaned_pubkeys_after_shrink( @@ -3957,9 +3854,15 @@ impl AccountsDb { ); } - self.drop_or_recycle_stores(dead_storages, stats); + let (_, drop_storage_entries_elapsed) = measure_us!(drop(dead_storages)); time.stop(); + self.stats + .dropped_stores + .fetch_add(dead_storages_len as u64, Ordering::Relaxed); + stats + .drop_storage_entries_elapsed + .fetch_add(drop_storage_entries_elapsed, Ordering::Relaxed); stats .remove_old_stores_shrink_us .fetch_add(time.as_us(), Ordering::Relaxed); @@ -4148,42 +4051,10 @@ impl AccountsDb { dead_storages } - pub fn drop_or_recycle_stores( - &self, - dead_storages: Vec<Arc<AccountStorageEntry>>, - stats: &ShrinkStats, - ) { - let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time"); - let mut recycle_stores = self.recycle_stores.write().unwrap(); - recycle_stores_write_elapsed.stop(); - - let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed"); - if recycle_stores.entry_count() < MAX_RECYCLE_STORES { - recycle_stores.add_entries(dead_storages); 
drop(recycle_stores); - } else { - self.stats - .dropped_stores - .fetch_add(dead_storages.len() as u64, Ordering::Relaxed); - drop(recycle_stores); - drop(dead_storages); - } - drop_storage_entries_elapsed.stop(); - stats - .drop_storage_entries_elapsed - .fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed); - stats - .recycle_stores_write_elapsed - .fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed); - } - /// return a store that can contain 'aligned_total' bytes pub fn get_store_for_shrink(&self, slot: Slot, aligned_total: u64) -> ShrinkInProgress<'_> { - let shrunken_store = self - .try_recycle_store(slot, aligned_total, aligned_total + 1024) - .unwrap_or_else(|| { - self.create_store(slot, aligned_total, "shrink", self.shrink_paths.as_slice()) - }); + let shrunken_store = + self.create_store(slot, aligned_total, "shrink", self.shrink_paths.as_slice()); self.storage.shrinking_in_progress(slot, shrunken_store) } @@ -5524,71 +5395,7 @@ impl AccountsDb { } } - fn try_recycle_and_insert_store( - &self, - slot: Slot, - min_size: u64, - max_size: u64, - ) -> Option<Arc<AccountStorageEntry>> { - let store = self.try_recycle_store(slot, min_size, max_size)?; - self.insert_store(slot, store.clone()); - Some(store) - } - - fn try_recycle_store( - &self, - slot: Slot, - min_size: u64, - max_size: u64, - ) -> Option<Arc<AccountStorageEntry>> { - let mut max = 0; - let mut min = std::u64::MAX; - let mut avail = 0; - let mut recycle_stores = self.recycle_stores.write().unwrap(); - for (i, (_recycled_time, store)) in recycle_stores.iter().enumerate() { - if Arc::strong_count(store) == 1 { - max = std::cmp::max(store.accounts.capacity(), max); - min = std::cmp::min(store.accounts.capacity(), min); - avail += 1; - - if store.accounts.is_recyclable() - && store.accounts.capacity() >= min_size - && store.accounts.capacity() < max_size - { - let ret = recycle_stores.remove_entry(i); - drop(recycle_stores); - let old_id = ret.append_vec_id(); - ret.recycle(slot, self.next_id()); - // This info shows the appendvec change history. It helps debugging - // the appendvec data corrupution issues related to recycling. - debug!( - "recycling store: old slot {}, old_id: {}, new slot {}, new id{}, path {:?} ", - slot, - old_id, - ret.slot(), - ret.append_vec_id(), - ret.get_path(), - ); - self.stats - .recycle_store_count - .fetch_add(1, Ordering::Relaxed); - return Some(ret); - } - } - } - debug!( - "no recycle stores max: {} min: {} len: {} looking: {}, {} avail: {}", - max, - min, - recycle_stores.entry_count(), - min_size, - max_size, - avail, - ); - None - } - - fn find_storage_candidate(&self, slot: Slot, size: usize) -> Arc<AccountStorageEntry> { + fn find_storage_candidate(&self, slot: Slot) -> Arc<AccountStorageEntry> { let mut get_slot_stores = Measure::start("get_slot_stores"); let store = self.storage.get_slot_storage_entry(slot); get_slot_stores.stop(); @@ -5612,11 +5419,7 @@ impl AccountsDb { .store_find_existing .fetch_add(find_existing.as_us(), Ordering::Relaxed); - let store = if let Some(store) = self.try_recycle_store(slot, size as u64, std::u64::MAX) { - store - } else { - self.create_store(slot, self.file_size, "store", &self.paths) - }; + let store = self.create_store(slot, self.file_size, "store", &self.paths); // try_available is like taking a lock on the store, // preventing other threads from using it. 
@@ -5730,28 +5533,6 @@ impl AccountsDb { self.purge_slots(std::iter::once(&slot)); } - fn recycle_slot_stores( - &self, - total_removed_storage_entries: usize, - slot_stores: &[Arc], - ) -> u64 { - let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_elapsed"); - let mut recycle_stores = self.recycle_stores.write().unwrap(); - recycle_stores_write_elapsed.stop(); - - for (recycled_count, store) in slot_stores.iter().enumerate() { - if recycle_stores.entry_count() > MAX_RECYCLE_STORES { - let dropped_count = total_removed_storage_entries - recycled_count; - self.stats - .dropped_stores - .fetch_add(dropped_count as u64, Ordering::Relaxed); - return recycle_stores_write_elapsed.as_us(); - } - recycle_stores.add_entry(Arc::clone(store)); - } - recycle_stores_write_elapsed.as_us() - } - /// Purges every slot in `removed_slots` from both the cache and storage. This includes /// entries in the accounts index, cache entries, and any backing storage entries. pub fn purge_slots_from_cache_and_store<'a>( @@ -5831,7 +5612,6 @@ impl AccountsDb { .safety_checks_elapsed .fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed); - let mut total_removed_storage_entries = 0; let mut total_removed_stored_bytes = 0; let mut all_removed_slot_storages = vec![]; @@ -5839,24 +5619,19 @@ impl AccountsDb { for remove_slot in removed_slots { // Remove the storage entries and collect some metrics if let Some(store) = self.storage.remove(remove_slot, false) { - { - total_removed_storage_entries += 1; - total_removed_stored_bytes += store.accounts.capacity(); - } + total_removed_stored_bytes += store.accounts.capacity(); all_removed_slot_storages.push(store); } } remove_storage_entries_elapsed.stop(); let num_stored_slots_removed = all_removed_slot_storages.len(); - let recycle_stores_write_elapsed = - self.recycle_slot_stores(total_removed_storage_entries, &all_removed_slot_storages); - - let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed"); // Backing mmaps for removed storages entries explicitly dropped here outside // of any locks + let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed"); drop(all_removed_slot_storages); drop_storage_entries_elapsed.stop(); + purge_stats .remove_storage_entries_elapsed .fetch_add(remove_storage_entries_elapsed.as_us(), Ordering::Relaxed); @@ -5868,13 +5643,13 @@ impl AccountsDb { .fetch_add(num_stored_slots_removed, Ordering::Relaxed); purge_stats .total_removed_storage_entries - .fetch_add(total_removed_storage_entries, Ordering::Relaxed); + .fetch_add(num_stored_slots_removed, Ordering::Relaxed); purge_stats .total_removed_stored_bytes .fetch_add(total_removed_stored_bytes, Ordering::Relaxed); - purge_stats - .recycle_stores_write_elapsed - .fetch_add(recycle_stores_write_elapsed, Ordering::Relaxed); + self.stats + .dropped_stores + .fetch_add(num_stored_slots_removed as u64, Ordering::Relaxed); } fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: SlotCache) { @@ -6196,12 +5971,7 @@ impl AccountsDb { accounts_and_meta_to_store.len() ); let special_store_size = std::cmp::max(data_len * 2, self.file_size); - if self - .try_recycle_and_insert_store(slot, special_store_size, std::u64::MAX) - .is_none() - { - self.create_and_insert_store(slot, special_store_size, "large create"); - } + self.create_and_insert_store(slot, special_store_size, "large create"); } continue; } @@ -6237,25 +6007,6 @@ impl AccountsDb { self.accounts_cache.report_size(); } - pub fn expire_old_recycle_stores(&self) { 
- let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time"); - let recycle_stores = self.recycle_stores.write().unwrap().expire_old_entries(); - recycle_stores_write_elapsed.stop(); - - let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed"); - drop(recycle_stores); - drop_storage_entries_elapsed.stop(); - - self.clean_accounts_stats - .purge_stats - .drop_storage_entries_elapsed - .fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed); - self.clean_accounts_stats - .purge_stats - .recycle_stores_write_elapsed - .fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed); - } - // These functions/fields are only usable from a dev context (i.e. tests and benches) #[cfg(feature = "dev-context-only-utils")] pub fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) { @@ -6779,11 +6530,6 @@ impl AccountsDb { datapoint_info!( "accounts_db-stores", ("total_count", total_count, i64), - ( - "recycle_count", - self.recycle_stores.read().unwrap().entry_count() as u64, - i64 - ), ("total_bytes", total_bytes, i64), ("total_alive_bytes", total_alive_bytes, i64), ("total_alive_ratio", total_alive_ratio, f64), @@ -8410,7 +8156,7 @@ impl AccountsDb { /// Store the account update. /// only called by tests pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) { - let storage = self.find_storage_candidate(slot, 1); + let storage = self.find_storage_candidate(slot); self.store( (slot, accounts), &StoreTo::Storage(&storage), @@ -8568,24 +8314,8 @@ impl AccountsDb { ), ); - let recycle_stores = self.recycle_stores.read().unwrap(); datapoint_info!( "accounts_db_store_timings2", - ( - "recycle_store_count", - self.stats.recycle_store_count.swap(0, Ordering::Relaxed), - i64 - ), - ( - "current_recycle_store_count", - recycle_stores.entry_count(), - i64 - ), - ( - "current_recycle_store_bytes", - recycle_stores.total_bytes(), - i64 - ), ( "create_store_count", self.stats.create_store_count.swap(0, Ordering::Relaxed), @@ -9397,20 +9127,6 @@ impl AccountsDb { pub fn print_accounts_stats(&self, label: &str) { self.print_index(label); self.print_count_and_status(label); - info!("recycle_stores:"); - let recycle_stores = self.recycle_stores.read().unwrap(); - for (recycled_time, entry) in recycle_stores.iter() { - info!( - " slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {} (recycled: {:?})", - entry.slot(), - entry.append_vec_id(), - entry.count_and_status.read(), - entry.approx_store_count.load(Ordering::Relaxed), - entry.accounts.len(), - entry.accounts.capacity(), - recycled_time, - ); - } } fn print_index(&self, label: &str) { @@ -9766,7 +9482,7 @@ pub mod tests { std::{ iter::FromIterator, str::FromStr, - sync::atomic::AtomicBool, + sync::{atomic::AtomicBool, RwLock}, thread::{self, Builder, JoinHandle}, }, test_case::test_case, @@ -12522,7 +12238,7 @@ pub mod tests { db.store_accounts_unfrozen( (some_slot, &[(&key, &account)][..]), Some(vec![&AccountHash(Hash::default())]), - &StoreTo::Storage(&db.find_storage_candidate(some_slot, 1)), + &StoreTo::Storage(&db.find_storage_candidate(some_slot)), None, StoreReclaims::Default, UpdateIndexThreadSelection::PoolWithThreshold, @@ -12755,7 +12471,7 @@ pub mod tests { db.store_accounts_unfrozen( (some_slot, accounts), Some(vec![&some_hash]), - &StoreTo::Storage(&db.find_storage_candidate(some_slot, 1)), + &StoreTo::Storage(&db.find_storage_candidate(some_slot)), None, StoreReclaims::Default, 
UpdateIndexThreadSelection::PoolWithThreshold, @@ -13548,75 +13264,6 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); } - #[test] - fn test_store_reuse() { - solana_logger::setup(); - let accounts = AccountsDb { - file_size: 4096, - ..AccountsDb::new_single_for_tests() - }; - - let size = 100; - let num_accounts: usize = 100; - let mut keys = Vec::new(); - for i in 0..num_accounts { - let account = AccountSharedData::new((i + 1) as u64, size, &Pubkey::default()); - let pubkey = solana_sdk::pubkey::new_rand(); - accounts.store_cached((0 as Slot, &[(&pubkey, &account)][..]), None); - keys.push(pubkey); - } - // get delta hash to feed these accounts to clean - accounts.calculate_accounts_delta_hash(0); - accounts.add_root(0); - // we have to flush just slot 0 - // if we slot 0 and 1 together, then they are cleaned and slot 0 doesn't contain the accounts - // this test wants to clean and then allow us to shrink - accounts.flush_accounts_cache(true, None); - - for (i, key) in keys[1..].iter().enumerate() { - let account = - AccountSharedData::new((1 + i + num_accounts) as u64, size, &Pubkey::default()); - accounts.store_cached((1 as Slot, &[(key, &account)][..]), None); - } - accounts.calculate_accounts_delta_hash(1); - accounts.add_root(1); - accounts.flush_accounts_cache(true, None); - accounts.clean_accounts_for_tests(); - accounts.shrink_all_slots(false, None, &EpochSchedule::default()); - - // Clean again to flush the dirty stores - // and allow them to be recycled in the next step - accounts.clean_accounts_for_tests(); - accounts.print_accounts_stats("post-shrink"); - let num_stores = accounts.recycle_stores.read().unwrap().entry_count(); - assert!(num_stores > 0); - - let mut account_refs = Vec::new(); - let num_to_store = 20; - for (i, key) in keys[..num_to_store].iter().enumerate() { - let account = AccountSharedData::new( - (1 + i + 2 * num_accounts) as u64, - i + 20, - &Pubkey::default(), - ); - accounts.store_uncached(2, &[(key, &account)]); - account_refs.push(account); - } - assert!(accounts.recycle_stores.read().unwrap().entry_count() < num_stores); - - accounts.print_accounts_stats("post-store"); - - let mut ancestors = Ancestors::default(); - ancestors.insert(1, 0); - ancestors.insert(2, 1); - for (key, account_ref) in keys[..num_to_store].iter().zip(account_refs) { - assert_eq!( - accounts.load_without_fixed_root(&ancestors, key).unwrap().0, - account_ref - ); - } - } - #[test] #[should_panic(expected = "We've run out of storage ids!")] fn test_wrapping_append_vec_id() { @@ -14870,77 +14517,6 @@ pub mod tests { assert!(!db.storage.is_empty_entry(1)); } - #[test] - fn test_recycle_stores_expiration() { - solana_logger::setup(); - - let common_store_path = Path::new(""); - let common_slot_id = 12; - let store_file_size = 1000; - - let store1_id = 22; - let entry1 = Arc::new(AccountStorageEntry::new( - common_store_path, - common_slot_id, - store1_id, - store_file_size, - )); - - let store2_id = 44; - let entry2 = Arc::new(AccountStorageEntry::new( - common_store_path, - common_slot_id, - store2_id, - store_file_size, - )); - - let mut recycle_stores = RecycleStores::default(); - recycle_stores.add_entry(entry1); - recycle_stores.add_entry(entry2); - assert_eq!(recycle_stores.entry_count(), 2); - - // no expiration for newly added entries - let expired = recycle_stores.expire_old_entries(); - assert_eq!( - expired - .iter() - .map(|e| e.append_vec_id()) - .collect::>(), - Vec::::new() - ); - assert_eq!( - recycle_stores - .iter() - 
.map(|(_, e)| e.append_vec_id()) - .collect::>(), - vec![store1_id, store2_id] - ); - assert_eq!(recycle_stores.entry_count(), 2); - assert_eq!(recycle_stores.total_bytes(), store_file_size * 2); - - // expiration for only too old entries - recycle_stores.entries[0].0 = Instant::now() - .checked_sub(Duration::from_secs(EXPIRATION_TTL_SECONDS + 1)) - .unwrap(); - let expired = recycle_stores.expire_old_entries(); - assert_eq!( - expired - .iter() - .map(|e| e.append_vec_id()) - .collect::>(), - vec![store1_id] - ); - assert_eq!( - recycle_stores - .iter() - .map(|(_, e)| e.append_vec_id()) - .collect::>(), - vec![store2_id] - ); - assert_eq!(recycle_stores.entry_count(), 1); - assert_eq!(recycle_stores.total_bytes(), store_file_size); - } - const RACY_SLEEP_MS: u64 = 10; const RACE_TIME: u64 = 5; diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 1ebcc77763ae27..3925b21e69f586 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -985,7 +985,7 @@ pub mod tests { create_db_with_storages_and_index, create_storages_and_update_index, get_all_accounts, remove_account_for_tests, CAN_RANDOMLY_SHRINK_FALSE, }, - ShrinkCollectRefs, MAX_RECYCLE_STORES, + ShrinkCollectRefs, }, accounts_index::UpsertReclaim, append_vec::{aligned_stored_size, AppendVec, AppendVecStoredAccountMeta}, @@ -3023,6 +3023,9 @@ pub mod tests { #[test] fn test_shrink_packed_ancient() { + // NOTE: The recycler has been removed. Creating this many extra storages is no longer + // necessary, but also does no harm either. + const MAX_RECYCLE_STORES: usize = 1000; solana_logger::setup(); // When we pack ancient append vecs, the packed append vecs are recycled first if possible. This means they aren't dropped directly. diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index efc17176d7337b..8910b2f300c4a6 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -39,14 +39,6 @@ use { const INTERVAL_MS: u64 = 100; const CLEAN_INTERVAL_BLOCKS: u64 = 100; -// This value is chosen to spread the dropping cost over 3 expiration checks -// RecycleStores are fully populated almost all of its lifetime. So, otherwise -// this would drop MAX_RECYCLE_STORES mmaps at once in the worst case... 
-// (Anyway, the dropping part is outside the AccountsDb::recycle_stores lock -// and dropped in this AccountsBackgroundServe, so this shouldn't matter much) -const RECYCLE_STORE_EXPIRATION_INTERVAL_SECS: u64 = - solana_accounts_db::accounts_db::EXPIRATION_TTL_SECONDS / 3; - pub type SnapshotRequestSender = Sender; pub type SnapshotRequestReceiver = Receiver; pub type DroppedSlotsSender = Sender<(Slot, BankId)>; @@ -605,7 +597,6 @@ impl AccountsBackgroundService { let mut last_cleaned_block_height = 0; let mut removed_slots_count = 0; let mut total_remove_slots_time = 0; - let mut last_expiration_check_time = Instant::now(); let t_background = Builder::new() .name("solBgAccounts".to_string()) .spawn(move || { @@ -631,8 +622,6 @@ impl AccountsBackgroundService { &mut total_remove_slots_time, ); - Self::expire_old_recycle_stores(&bank, &mut last_expiration_check_time); - let non_snapshot_time = last_snapshot_end_time .map(|last_snapshot_end_time: Instant| { last_snapshot_end_time.elapsed().as_micros() @@ -759,16 +748,6 @@ impl AccountsBackgroundService { pub fn join(self) -> thread::Result<()> { self.t_background.join() } - - fn expire_old_recycle_stores(bank: &Bank, last_expiration_check_time: &mut Instant) { - let now = Instant::now(); - if now.duration_since(*last_expiration_check_time).as_secs() - > RECYCLE_STORE_EXPIRATION_INTERVAL_SECS - { - bank.expire_old_recycle_stores(); - *last_expiration_check_time = now; - } - } } /// Get the AccountsPackageKind from a given SnapshotRequest diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f0ba75defa0517..d1a1805d0d3a20 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5864,10 +5864,6 @@ impl Bank { .flush_accounts_cache(false, Some(self.slot())) } - pub fn expire_old_recycle_stores(&self) { - self.rc.accounts.accounts_db.expire_old_recycle_stores() - } - /// Technically this issues (or even burns!) 
new lamports, /// so be extra careful for its usage fn store_account_and_update_capitalization( diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 556a854da0c41b..15fe706dc0e504 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -239,12 +239,8 @@ impl<'a> SnapshotMinimizer<'a> { measure!(self.purge_dead_slots(dead_slots), "purge dead slots"); info!("{purge_dead_slots_measure}"); - let (_, drop_or_recycle_stores_measure) = measure!( - self.accounts_db() - .drop_or_recycle_stores(dead_storages, &self.accounts_db().shrink_stats), - "drop or recycle stores" - ); - info!("{drop_or_recycle_stores_measure}"); + let (_, drop_storages_measure) = measure!(drop(dead_storages), "drop storages"); + info!("{drop_storages_measure}"); // Turn logging back on after minimization self.accounts_db() From ada06ca6ce22ea99ed04985d42a40d87800a982e Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Mon, 11 Mar 2024 17:30:04 -0300 Subject: [PATCH 364/401] Add tests for `svm/transaction_processor.rs` (#186) --- svm/src/transaction_processor.rs | 434 ++++++++++++++++++++++++++++++- 1 file changed, 431 insertions(+), 3 deletions(-) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index fec908619f14f8..5801b3b8316fdc 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -538,6 +538,9 @@ impl TransactionBatchProcessor { .finish_cooperative_loading_task(self.slot, key, program) && limit_to_load_programs { + // This branch is taken when there is an error in assigning a program to a + // cache slot. It is not possible to mock this error for SVM unit + // tests purposes. let mut ret = LoadedProgramsForTxBatch::new( self.slot, loaded_programs_cache @@ -571,6 +574,10 @@ impl TransactionBatchProcessor { // Once a task completes we'll wake up and try to load the // missing programs inside the tx batch again. let _new_cookie = task_waiter.wait(task_cookie); + + // This branch is not tested in the SVM because it requires concurrent threads. + // In addition, one of them must be holding the mutex while the other must be + // trying to lock it. 
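// ---------------------------------------------------------------------------
// A toy model of the cooperative-loading handshake the two comments above
// describe, using only std primitives; the real code goes through the program
// cache's task_waiter rather than a bare Mutex/Condvar pair as assumed here.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let state = Arc::new((Mutex::new(false), Condvar::new()));

    let loader = {
        let state = Arc::clone(&state);
        thread::spawn(move || {
            let (done, waiter) = &*state;
            // Plays the role of finish_cooperative_loading_task(...).
            *done.lock().unwrap() = true;
            waiter.notify_all(); // wake every thread parked on this batch
        })
    };

    // The waiting side. Hitting this branch deterministically in a unit test
    // needs a second live thread contending the lock at just the right time,
    // which is why the comment above marks it as untested in the SVM.
    let (done, waiter) = &*state;
    let mut ready = done.lock().unwrap();
    while !*ready {
        ready = waiter.wait(ready).unwrap();
    }
    loader.join().unwrap();
}
// ---------------------------------------------------------------------------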
} } @@ -942,7 +949,12 @@ mod tests { loaded_programs::BlockRelation, solana_rbpf::program::BuiltinProgram, }, solana_sdk::{ - account::WritableAccount, bpf_loader, sysvar::rent::Rent, + account::WritableAccount, + bpf_loader, + message::{LegacyMessage, Message, MessageHeader}, + rent_debits::RentDebits, + signature::Signature, + sysvar::rent::Rent, transaction_context::TransactionContext, }, std::{ @@ -960,7 +972,7 @@ mod tests { } } - #[derive(Default)] + #[derive(Default, Clone)] pub struct MockBankCallback { rent_collector: RentCollector, feature_set: Arc, @@ -985,7 +997,7 @@ mod tests { } fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { - todo!() + (Hash::new_unique(), 2) } fn get_rent_collector(&self) -> &RentCollector { @@ -1521,4 +1533,420 @@ mod tests { let slot = batch_processor.epoch_schedule.get_first_slot_in_epoch(20); assert_eq!(result.effective_slot, slot); } + + #[test] + fn test_program_modification_slot_account_not_found() { + let batch_processor = TransactionBatchProcessor::::default(); + let mut mock_bank = MockBankCallback::default(); + let key = Pubkey::new_unique(); + + let result = batch_processor.program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + + let mut account_data = AccountSharedData::default(); + account_data.set_owner(bpf_loader_upgradeable::id()); + mock_bank + .account_shared_data + .insert(key, account_data.clone()); + + let result = batch_processor.program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + + let state = UpgradeableLoaderState::Program { + programdata_address: Pubkey::new_unique(), + }; + account_data.set_data(bincode::serialize(&state).unwrap()); + mock_bank + .account_shared_data + .insert(key, account_data.clone()); + + let result = batch_processor.program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + + account_data.set_owner(loader_v4::id()); + mock_bank + .account_shared_data + .insert(key, account_data.clone()); + + let result = batch_processor.program_modification_slot(&mock_bank, &key); + assert_eq!(result.err(), Some(TransactionError::ProgramAccountNotFound)); + } + + #[test] + fn test_program_modification_slot_success() { + let batch_processor = TransactionBatchProcessor::::default(); + let mut mock_bank = MockBankCallback::default(); + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let mut account_data = AccountSharedData::default(); + account_data.set_owner(bpf_loader_upgradeable::id()); + + let state = UpgradeableLoaderState::Program { + programdata_address: key2, + }; + account_data.set_data(bincode::serialize(&state).unwrap()); + mock_bank.account_shared_data.insert(key1, account_data); + + let state = UpgradeableLoaderState::ProgramData { + slot: 77, + upgrade_authority_address: None, + }; + let mut account_data = AccountSharedData::default(); + account_data.set_data(bincode::serialize(&state).unwrap()); + mock_bank.account_shared_data.insert(key2, account_data); + + let result = batch_processor.program_modification_slot(&mock_bank, &key1); + assert_eq!(result.unwrap(), 77); + + let mut account_data = AccountSharedData::default(); + account_data.set_owner(loader_v4::id()); + let state = LoaderV4State { + slot: 58, + authority_address: Pubkey::new_unique(), + status: LoaderV4Status::Deployed, + }; + + let encoded = unsafe { + std::mem::transmute::<&LoaderV4State, &[u8; 
LoaderV4State::program_data_offset()]>( + &state, + ) + }; + account_data.set_data(encoded.to_vec()); + mock_bank + .account_shared_data + .insert(key1, account_data.clone()); + + let result = batch_processor.program_modification_slot(&mock_bank, &key1); + assert_eq!(result.unwrap(), 58); + + account_data.set_owner(Pubkey::new_unique()); + mock_bank.account_shared_data.insert(key2, account_data); + + let result = batch_processor.program_modification_slot(&mock_bank, &key2); + assert_eq!(result.unwrap(), 0); + } + + #[test] + fn test_execute_loaded_transaction_recordings() { + // Setting all the arguments correctly is too burdensome for testing + // execute_loaded_transaction separately.This function will be tested in an integration + // test with load_and_execute_sanitized_transactions + let message = Message { + account_keys: vec![Pubkey::new_from_array([0; 32])], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let loaded_programs = LoadedProgramsForTxBatch::default(); + let mock_bank = MockBankCallback::default(); + let batch_processor = TransactionBatchProcessor::::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + + let mut loaded_transaction = LoadedTransaction { + accounts: vec![(Pubkey::new_unique(), AccountSharedData::default())], + program_indices: vec![vec![0]], + rent: 0, + rent_debits: RentDebits::default(), + }; + + let mut record_config = ExecutionRecordingConfig { + enable_cpi_recording: false, + enable_log_recording: true, + enable_return_data_recording: false, + }; + + let result = batch_processor.execute_loaded_transaction( + &mock_bank, + &sanitized_transaction, + &mut loaded_transaction, + ComputeBudget::default(), + None, + record_config, + &mut ExecuteTimings::default(), + &mut TransactionErrorMetrics::default(), + None, + &loaded_programs, + ); + + let TransactionExecutionResult::Executed { + details: TransactionExecutionDetails { log_messages, .. }, + .. + } = result + else { + panic!("Unexpected result") + }; + assert!(log_messages.is_some()); + + let result = batch_processor.execute_loaded_transaction( + &mock_bank, + &sanitized_transaction, + &mut loaded_transaction, + ComputeBudget::default(), + None, + record_config, + &mut ExecuteTimings::default(), + &mut TransactionErrorMetrics::default(), + Some(2), + &loaded_programs, + ); + + let TransactionExecutionResult::Executed { + details: + TransactionExecutionDetails { + log_messages, + inner_instructions, + .. + }, + .. + } = result + else { + panic!("Unexpected result") + }; + assert!(log_messages.is_some()); + assert!(inner_instructions.is_none()); + + record_config.enable_log_recording = false; + record_config.enable_cpi_recording = true; + + let result = batch_processor.execute_loaded_transaction( + &mock_bank, + &sanitized_transaction, + &mut loaded_transaction, + ComputeBudget::default(), + None, + record_config, + &mut ExecuteTimings::default(), + &mut TransactionErrorMetrics::default(), + None, + &loaded_programs, + ); + + let TransactionExecutionResult::Executed { + details: + TransactionExecutionDetails { + log_messages, + inner_instructions, + .. + }, + .. 
+ } = result + else { + panic!("Unexpected result") + }; + assert!(log_messages.is_none()); + assert!(inner_instructions.is_some()); + } + + #[test] + fn test_execute_loaded_transaction_error_metrics() { + // Setting all the arguments correctly is too burdensome for testing + // execute_loaded_transaction separately.This function will be tested in an integration + // test with load_and_execute_sanitized_transactions + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let message = Message { + account_keys: vec![key1, key2], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![2], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + let loaded_programs = LoadedProgramsForTxBatch::default(); + let mock_bank = MockBankCallback::default(); + let batch_processor = TransactionBatchProcessor::::default(); + + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + + let mut account_data = AccountSharedData::default(); + account_data.set_owner(bpf_loader::id()); + let mut loaded_transaction = LoadedTransaction { + accounts: vec![ + (key1, AccountSharedData::default()), + (key2, AccountSharedData::default()), + ], + program_indices: vec![vec![0]], + rent: 0, + rent_debits: RentDebits::default(), + }; + + let record_config = ExecutionRecordingConfig::new_single_setting(false); + let mut error_metrics = TransactionErrorMetrics::new(); + + let _ = batch_processor.execute_loaded_transaction( + &mock_bank, + &sanitized_transaction, + &mut loaded_transaction, + ComputeBudget::default(), + None, + record_config, + &mut ExecuteTimings::default(), + &mut error_metrics, + None, + &loaded_programs, + ); + + assert_eq!(error_metrics.instruction_error, 1); + } + + #[test] + fn test_replenish_program_cache() { + // Case 1 + let mut mock_bank = MockBankCallback::default(); + let mut batch_processor = TransactionBatchProcessor:: { + check_program_modification_slot: true, + ..TransactionBatchProcessor::default() + }; + batch_processor + .loaded_programs_cache + .write() + .unwrap() + .fork_graph = Some(Arc::new(RwLock::new(TestForkGraph {}))); + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let owner = Pubkey::new_unique(); + + let mut account_data = AccountSharedData::default(); + account_data.set_owner(bpf_loader::id()); + mock_bank.account_shared_data.insert(key2, account_data); + + let mut account_maps: HashMap = HashMap::new(); + account_maps.insert(key1, (&owner, 2)); + + account_maps.insert(key2, (&owner, 4)); + let result = batch_processor.replenish_program_cache(&mock_bank, &account_maps, false); + + let program1 = result.find(&key1).unwrap(); + assert!(matches!(program1.program, LoadedProgramType::Closed)); + assert!(!result.hit_max_limit); + let program2 = result.find(&key2).unwrap(); + assert!(matches!( + program2.program, + LoadedProgramType::FailedVerification(_) + )); + + // Case 2 + batch_processor.check_program_modification_slot = false; + + let result = batch_processor.replenish_program_cache(&mock_bank, &account_maps, true); + + let program1 = result.find(&key1).unwrap(); + assert!(matches!(program1.program, LoadedProgramType::Closed)); + assert!(!result.hit_max_limit); + let program2 = result.find(&key2).unwrap(); + assert!(matches!( + program2.program, + 
LoadedProgramType::FailedVerification(_) + )); + } + + #[test] + fn test_filter_executable_program_accounts() { + let mut mock_bank = MockBankCallback::default(); + let key1 = Pubkey::new_unique(); + let owner1 = Pubkey::new_unique(); + + let mut data = AccountSharedData::default(); + data.set_owner(owner1); + data.set_lamports(93); + mock_bank.account_shared_data.insert(key1, data); + + let message = Message { + account_keys: vec![key1], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + + let sanitized_transaction_1 = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + + let key2 = Pubkey::new_unique(); + let owner2 = Pubkey::new_unique(); + + let mut account_data = AccountSharedData::default(); + account_data.set_owner(owner2); + account_data.set_lamports(90); + mock_bank.account_shared_data.insert(key2, account_data); + + let message = Message { + account_keys: vec![key1, key2], + header: MessageHeader::default(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let legacy = LegacyMessage::new(message); + let sanitized_message = SanitizedMessage::Legacy(legacy); + + let sanitized_transaction_2 = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + + let transactions = vec![ + sanitized_transaction_1.clone(), + sanitized_transaction_2.clone(), + sanitized_transaction_2, + sanitized_transaction_1, + ]; + let mut lock_results = vec![ + (Ok(()), None, Some(25)), + (Ok(()), None, Some(25)), + (Ok(()), None, None), + (Err(TransactionError::ProgramAccountNotFound), None, None), + ]; + let owners = vec![owner1, owner2]; + + let result = TransactionBatchProcessor::::filter_executable_program_accounts( + &mock_bank, + &transactions, + lock_results.as_mut_slice(), + &owners, + ); + + assert_eq!( + lock_results[2], + (Err(TransactionError::BlockhashNotFound), None, None) + ); + assert_eq!(result.len(), 2); + assert_eq!(result[&key1], (&owner1, 2)); + assert_eq!(result[&key2], (&owner2, 1)); + } } From 1a085c8d46ebb95d203a69c00773d30233e4a611 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Mar 2024 17:09:08 -0400 Subject: [PATCH 365/401] Removes atomic-ness from AccountStorageEntry `id` and `slot` fields (#119) --- accounts-db/src/accounts_db.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 18ffa2d02e37ca..eab5ca33af417c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1005,9 +1005,9 @@ struct CleanKeyTimings { /// Persistent storage structure holding the accounts #[derive(Debug)] pub struct AccountStorageEntry { - pub(crate) id: AtomicAppendVecId, + pub(crate) id: AppendVecId, - pub(crate) slot: AtomicU64, + pub(crate) slot: Slot, /// storage holding the accounts pub accounts: AccountsFile, @@ -1037,8 +1037,8 @@ impl AccountStorageEntry { let accounts = AccountsFile::AppendVec(AppendVec::new(&path, true, file_size as usize)); Self { - id: AtomicAppendVecId::new(id), - slot: AtomicU64::new(slot), + id, + slot, accounts, count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), 
approx_store_count: AtomicUsize::new(0), @@ -1053,8 +1053,8 @@ impl AccountStorageEntry { num_accounts: usize, ) -> Self { Self { - id: AtomicAppendVecId::new(id), - slot: AtomicU64::new(slot), + id, + slot, accounts, count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(num_accounts), @@ -1112,11 +1112,11 @@ impl AccountStorageEntry { } pub fn slot(&self) -> Slot { - self.slot.load(Ordering::Acquire) + self.slot } pub fn append_vec_id(&self) -> AppendVecId { - self.id.load(Ordering::Acquire) + self.id } pub fn flush(&self) -> Result<(), AccountsFileError> { From 88f6a7a45997fd5f56e5f9a0b4432f52f9f2a8b2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Mar 2024 17:09:26 -0400 Subject: [PATCH 366/401] Removes holding storages in AccountsHashVerifier for fastboot (#120) --- accounts-db/src/lib.rs | 1 - accounts-db/src/starting_snapshot_storages.rs | 19 ----- core/src/accounts_hash_verifier.rs | 44 ---------- core/src/validator.rs | 43 ++++------ core/tests/epoch_accounts_hash.rs | 2 - core/tests/snapshots.rs | 2 - ledger-tool/src/ledger_utils.rs | 32 +++----- ledger/src/bank_forks_utils.rs | 81 +++++-------------- 8 files changed, 52 insertions(+), 172 deletions(-) delete mode 100644 accounts-db/src/starting_snapshot_storages.rs diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index b7994fe4354118..7883f852d1e3f2 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -37,7 +37,6 @@ pub mod secondary_index; pub mod shared_buffer_reader; pub mod sorted_storages; pub mod stake_rewards; -pub mod starting_snapshot_storages; pub mod storable_accounts; pub mod tiered_storage; pub mod utils; diff --git a/accounts-db/src/starting_snapshot_storages.rs b/accounts-db/src/starting_snapshot_storages.rs deleted file mode 100644 index cc5e26c61872b7..00000000000000 --- a/accounts-db/src/starting_snapshot_storages.rs +++ /dev/null @@ -1,19 +0,0 @@ -use {crate::accounts_db::AccountStorageEntry, std::sync::Arc}; - -/// Snapshot storages that the node loaded from -/// -/// This is used to support fastboot. Since fastboot reuses existing storages, we must carefully -/// handle the storages used to load at startup. If we do not handle these storages properly, -/// restarting from the same local state (i.e. bank snapshot) may fail. -#[derive(Debug)] -pub enum StartingSnapshotStorages { - /// Starting from genesis has no storages yet - Genesis, - /// Starting from a snapshot archive always extracts the storages from the archive, so no - /// special handling is necessary to preserve them. - Archive, - /// Starting from local state must preserve the loaded storages. These storages must *not* be - /// recycled or removed prior to taking the next snapshot, otherwise restarting from the same - /// bank snapshot may fail. 
- Fastboot(Vec>), -} diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index f5572d94a3c7d1..20adba99835eeb 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -9,7 +9,6 @@ use { IncrementalAccountsHash, }, sorted_storages::SortedStorages, - starting_snapshot_storages::StartingSnapshotStorages, }, solana_measure::measure_us, solana_runtime::{ @@ -43,7 +42,6 @@ impl AccountsHashVerifier { accounts_package_sender: Sender, accounts_package_receiver: Receiver, snapshot_package_sender: Option>, - starting_snapshot_storages: StartingSnapshotStorages, exit: Arc, snapshot_config: SnapshotConfig, ) -> Self { @@ -53,14 +51,6 @@ impl AccountsHashVerifier { .name("solAcctHashVer".to_string()) .spawn(move || { info!("AccountsHashVerifier has started"); - // To support fastboot, we must ensure the storages used in the latest POST snapshot are - // not recycled nor removed early. Hold an Arc of their AppendVecs to prevent them from - // expiring. - let mut fastboot_storages = match starting_snapshot_storages { - StartingSnapshotStorages::Genesis => None, - StartingSnapshotStorages::Archive => None, - StartingSnapshotStorages::Fastboot(storages) => Some(storages), - }; loop { if exit.load(Ordering::Relaxed) { break; @@ -81,14 +71,6 @@ impl AccountsHashVerifier { info!("handling accounts package: {accounts_package:?}"); let enqueued_time = accounts_package.enqueued.elapsed(); - // If this accounts package is for a snapshot, then clone the storages to - // save for fastboot. - let snapshot_storages_for_fastboot = accounts_package - .snapshot_info - .is_some() - .then(|| accounts_package.snapshot_storages.clone()); - - let slot = accounts_package.slot; let (_, handling_time_us) = measure_us!(Self::process_accounts_package( accounts_package, snapshot_package_sender.as_ref(), @@ -96,25 +78,6 @@ impl AccountsHashVerifier { &exit, )); - if let Some(snapshot_storages_for_fastboot) = snapshot_storages_for_fastboot { - // Get the number of storages that are being kept alive for fastboot. - // Looking at the storage Arc's strong reference count, we know that one - // ref is for fastboot, and one ref is for snapshot packaging. If there - // are no others, then the storage will be kept alive because of fastboot. 
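// ---------------------------------------------------------------------------
// The accounting performed by the block being deleted below, reduced to a
// sketch in which Vec<Arc<u64>> stands in for the snapshot storages. Exactly
// two strong refs meant "held only by the fastboot list and the in-flight
// snapshot package", i.e. the storage stays alive purely because fastboot
// pinned it.
use std::sync::Arc;

fn main() {
    // Fastboot's pinned copies.
    let fastboot: Vec<Arc<u64>> = (0..4u64).map(Arc::new).collect();
    // Snapshot packaging still holds a second ref to the first two.
    let _packaging: Vec<Arc<u64>> = fastboot[..2].iter().map(Arc::clone).collect();

    let kept_alive = fastboot
        .iter()
        .filter(|storage| Arc::strong_count(storage) == 2)
        .count();
    assert_eq!(kept_alive, 2);
}
// ---------------------------------------------------------------------------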
- let num_storages_kept_alive = snapshot_storages_for_fastboot - .iter() - .filter(|storage| Arc::strong_count(storage) == 2) - .count(); - let num_storages_total = snapshot_storages_for_fastboot.len(); - fastboot_storages = Some(snapshot_storages_for_fastboot); - datapoint_info!( - "fastboot", - ("slot", slot, i64), - ("num_storages_total", num_storages_total, i64), - ("num_storages_kept_alive", num_storages_kept_alive, i64), - ); - } - datapoint_info!( "accounts_hash_verifier", ( @@ -132,13 +95,6 @@ impl AccountsHashVerifier { ); } info!("AccountsHashVerifier has stopped"); - debug!( - "Number of storages kept alive for fastboot: {}", - fastboot_storages - .as_ref() - .map(|storages| storages.len()) - .unwrap_or(0) - ); }) .unwrap(); Self { diff --git a/core/src/validator.rs b/core/src/validator.rs index a1c8293f86cb3a..3d2a93daecba2f 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -35,7 +35,6 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, - starting_snapshot_storages::StartingSnapshotStorages, utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, @@ -691,7 +690,6 @@ impl Validator { completed_slots_receiver, leader_schedule_cache, starting_snapshot_hashes, - starting_snapshot_storages, TransactionHistoryServices { transaction_status_sender, transaction_status_service, @@ -781,7 +779,6 @@ impl Validator { accounts_package_sender.clone(), accounts_package_receiver, snapshot_package_sender, - starting_snapshot_storages, exit.clone(), config.snapshot_config.clone(), ); @@ -1770,7 +1767,6 @@ fn load_blockstore( CompletedSlotsReceiver, LeaderScheduleCache, Option, - StartingSnapshotStorages, TransactionHistoryServices, blockstore_processor::ProcessOptions, BlockstoreRootScan, @@ -1860,27 +1856,23 @@ fn load_blockstore( let entry_notifier_service = entry_notifier .map(|entry_notifier| EntryNotifierService::new(entry_notifier, exit.clone())); - let ( - bank_forks, - mut leader_schedule_cache, - starting_snapshot_hashes, - starting_snapshot_storages, - ) = bank_forks_utils::load_bank_forks( - &genesis_config, - &blockstore, - config.account_paths.clone(), - Some(&config.snapshot_config), - &process_options, - transaction_history_services - .cache_block_meta_sender - .as_ref(), - entry_notifier_service - .as_ref() - .map(|service| service.sender()), - accounts_update_notifier, - exit, - ) - .map_err(|err| err.to_string())?; + let (bank_forks, mut leader_schedule_cache, starting_snapshot_hashes) = + bank_forks_utils::load_bank_forks( + &genesis_config, + &blockstore, + config.account_paths.clone(), + Some(&config.snapshot_config), + &process_options, + transaction_history_services + .cache_block_meta_sender + .as_ref(), + entry_notifier_service + .as_ref() + .map(|service| service.sender()), + accounts_update_notifier, + exit, + ) + .map_err(|err| err.to_string())?; // Before replay starts, set the callbacks in each of the banks in BankForks so that // all dropped banks come through the `pruned_banks_receiver` channel. 
This way all bank @@ -1906,7 +1898,6 @@ fn load_blockstore( completed_slots_receiver, leader_schedule_cache, starting_snapshot_hashes, - starting_snapshot_storages, transaction_history_services, process_options, blockstore_root_scan, diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 25e97689923bb0..76b5e4c30dd018 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -9,7 +9,6 @@ use { accounts_hash::CalcAccountsHashConfig, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, - starting_snapshot_storages::StartingSnapshotStorages, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, @@ -197,7 +196,6 @@ impl BackgroundServices { accounts_package_sender.clone(), accounts_package_receiver, Some(snapshot_package_sender), - StartingSnapshotStorages::Genesis, exit.clone(), snapshot_config.clone(), ); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 1607ebd3fa2094..730277e2c12a65 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -11,7 +11,6 @@ use { accounts_hash::AccountsHash, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, - starting_snapshot_storages::StartingSnapshotStorages, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, @@ -1044,7 +1043,6 @@ fn test_snapshots_with_background_services( accounts_package_sender, accounts_package_receiver, Some(snapshot_package_sender), - StartingSnapshotStorages::Genesis, exit.clone(), snapshot_test_config.snapshot_config.clone(), ); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 8a8302d7e4e94b..c05cc6c2d64cd0 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -268,24 +268,19 @@ pub fn load_and_process_ledger( }; let exit = Arc::new(AtomicBool::new(false)); - let ( - bank_forks, - leader_schedule_cache, - starting_snapshot_hashes, - starting_snapshot_storages, - .., - ) = bank_forks_utils::load_bank_forks( - genesis_config, - blockstore.as_ref(), - account_paths, - snapshot_config.as_ref(), - &process_options, - None, - None, // Maybe support this later, though - accounts_update_notifier, - exit.clone(), - ) - .map_err(LoadAndProcessLedgerError::LoadBankForks)?; + let (bank_forks, leader_schedule_cache, starting_snapshot_hashes, ..) 
= + bank_forks_utils::load_bank_forks( + genesis_config, + blockstore.as_ref(), + account_paths, + snapshot_config.as_ref(), + &process_options, + None, + None, // Maybe support this later, though + accounts_update_notifier, + exit.clone(), + ) + .map_err(LoadAndProcessLedgerError::LoadBankForks)?; let block_verification_method = value_t!( arg_matches, "block_verification_method", @@ -330,7 +325,6 @@ pub fn load_and_process_ledger( accounts_package_sender.clone(), accounts_package_receiver, None, - starting_snapshot_storages, exit.clone(), SnapshotConfig::new_load_only(), ); diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index b30f90986bb9c2..17412c1801ac68 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -10,10 +10,7 @@ use { use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, log::*, - solana_accounts_db::{ - accounts_update_notifier_interface::AccountsUpdateNotifier, - starting_snapshot_storages::StartingSnapshotStorages, - }, + solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier, solana_runtime::{ accounts_background_service::AbsRequestSender, bank_forks::BankForks, @@ -70,7 +67,6 @@ pub type LoadResult = result::Result< Arc>, LeaderScheduleCache, Option, - StartingSnapshotStorages, ), BankForksUtilsError, >; @@ -92,13 +88,7 @@ pub fn load( accounts_update_notifier: Option, exit: Arc, ) -> LoadResult { - let ( - bank_forks, - leader_schedule_cache, - starting_snapshot_hashes, - starting_snapshot_storages, - .., - ) = load_bank_forks( + let (bank_forks, leader_schedule_cache, starting_snapshot_hashes, ..) = load_bank_forks( genesis_config, blockstore, account_paths, @@ -121,12 +111,7 @@ pub fn load( ) .map_err(BankForksUtilsError::ProcessBlockstoreFromRoot)?; - Ok(( - bank_forks, - leader_schedule_cache, - starting_snapshot_hashes, - starting_snapshot_storages, - )) + Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) } #[allow(clippy::too_many_arguments)] @@ -176,7 +161,7 @@ pub fn load_bank_forks( )) } - let (bank_forks, starting_snapshot_hashes, starting_snapshot_storages) = + let (bank_forks, starting_snapshot_hashes) = if let Some((full_snapshot_archive_info, incremental_snapshot_archive_info)) = get_snapshots_to_load(snapshot_config) { @@ -188,22 +173,17 @@ pub fn load_bank_forks( ); std::fs::create_dir_all(&snapshot_config.bank_snapshots_dir) .expect("create bank snapshots dir"); - let (bank_forks, starting_snapshot_hashes, starting_snapshot_storages) = - bank_forks_from_snapshot( - full_snapshot_archive_info, - incremental_snapshot_archive_info, - genesis_config, - account_paths, - snapshot_config, - process_options, - accounts_update_notifier, - exit, - )?; - ( - bank_forks, - Some(starting_snapshot_hashes), - starting_snapshot_storages, - ) + let (bank_forks, starting_snapshot_hashes) = bank_forks_from_snapshot( + full_snapshot_archive_info, + incremental_snapshot_archive_info, + genesis_config, + account_paths, + snapshot_config, + process_options, + accounts_update_notifier, + exit, + )?; + (bank_forks, Some(starting_snapshot_hashes)) } else { info!("Processing ledger from genesis"); let bank_forks = blockstore_processor::process_blockstore_for_bank_0( @@ -222,7 +202,7 @@ pub fn load_bank_forks( .root_bank() .set_startup_verification_complete(); - (bank_forks, None, StartingSnapshotStorages::Genesis) + (bank_forks, None) }; let mut leader_schedule_cache = @@ -238,12 +218,7 @@ pub fn load_bank_forks( .for_each(|hard_fork_slot| 
root_bank.register_hard_fork(*hard_fork_slot)); } - Ok(( - bank_forks, - leader_schedule_cache, - starting_snapshot_hashes, - starting_snapshot_storages, - )) + Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) } #[allow(clippy::too_many_arguments)] @@ -256,14 +231,7 @@ fn bank_forks_from_snapshot( process_options: &ProcessOptions, accounts_update_notifier: Option, exit: Arc, -) -> Result< - ( - Arc>, - StartingSnapshotHashes, - StartingSnapshotStorages, - ), - BankForksUtilsError, -> { +) -> Result<(Arc>, StartingSnapshotHashes), BankForksUtilsError> { // Fail hard here if snapshot fails to load, don't silently continue if account_paths.is_empty() { return Err(BankForksUtilsError::AccountPathsNotPresent); @@ -289,7 +257,7 @@ fn bank_forks_from_snapshot( .unwrap_or(true), }; - let (bank, starting_snapshot_storages) = if will_startup_from_snapshot_archives { + let bank = if will_startup_from_snapshot_archives { // Given that we are going to boot from an archive, the append vecs held in the snapshot dirs for fast-boot should // be released. They will be released by the account_background_service anyway. But in the case of the account_paths // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from @@ -324,7 +292,7 @@ fn bank_forks_from_snapshot( .map(|archive| archive.path().display().to_string()) .unwrap_or("none".to_string()), })?; - (bank, StartingSnapshotStorages::Archive) + bank } else { let bank_snapshot = latest_bank_snapshot.ok_or_else(|| BankForksUtilsError::NoBankSnapshotDirectory { @@ -378,8 +346,7 @@ fn bank_forks_from_snapshot( // snapshot archive next time, which is safe. snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); - let storages = bank.get_snapshot_storages(None); - (bank, StartingSnapshotStorages::Fastboot(storages)) + bank }; let full_snapshot_hash = FullSnapshotHash(( @@ -398,9 +365,5 @@ fn bank_forks_from_snapshot( incremental: incremental_snapshot_hash, }; - Ok(( - BankForks::new_rw_arc(bank), - starting_snapshot_hashes, - starting_snapshot_storages, - )) + Ok((BankForks::new_rw_arc(bank), starting_snapshot_hashes)) } From 218de23ce22a9112e53a38b37e184d3e0659ab3e Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 11 Mar 2024 18:19:48 -0400 Subject: [PATCH 367/401] Remove `ThinClient` from `dos/` (#117) * remove `ThinClient` from `dos/` and replace `ThinClient` with `TpuClient` * remove test for valid_client_facing_addr since it is no longer used --- Cargo.lock | 4 +- client/src/tpu_client.rs | 6 +++ dos/Cargo.toml | 2 +- dos/src/main.rs | 83 ++++++++++++++++++------------- gossip/Cargo.toml | 2 +- gossip/src/gossip_service.rs | 60 ++++++++++++---------- gossip/src/legacy_contact_info.rs | 33 ------------ programs/sbf/Cargo.lock | 2 +- 8 files changed, 92 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 88f0fa0925dcac..46091cfbca5e82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6077,11 +6077,11 @@ dependencies = [ "solana-measure", "solana-net-utils", "solana-perf", + "solana-quic-client", "solana-rpc", "solana-rpc-client", "solana-sdk", "solana-streamer", - "solana-thin-client", "solana-tpu-client", "solana-version", ] @@ -6276,6 +6276,7 @@ dependencies = [ "solana-bloom", "solana-clap-utils", "solana-client", + "solana-connection-cache", "solana-entry", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -6289,7 +6290,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", - "solana-thin-client", "solana-tpu-client", 
"solana-version", "solana-vote", diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index 45394151340070..038dd86774ea98 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -13,6 +13,7 @@ use { transport::Result as TransportResult, }, solana_tpu_client::tpu_client::{Result, TpuClient as BackendTpuClient}, + solana_udp_client::{UdpConfig, UdpConnectionManager, UdpPool}, std::sync::Arc, }; pub use { @@ -20,6 +21,11 @@ pub use { solana_tpu_client::tpu_client::{TpuClientConfig, DEFAULT_FANOUT_SLOTS, MAX_FANOUT_SLOTS}, }; +pub enum TpuClientWrapper { + Quic(TpuClient), + Udp(TpuClient), +} + /// Client which sends transactions directly to the current leader's TPU port over UDP. /// The client uses RPC to determine the current leader and fetch node contact info /// This is just a thin wrapper over the "BackendTpuClient", use that directly for more efficiency. diff --git a/dos/Cargo.toml b/dos/Cargo.toml index 179fc40bf84820..0d7c76b007c4ea 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -26,6 +26,7 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-net-utils = { workspace = true } solana-perf = { workspace = true } +solana-quic-client = { workspace = true } solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } solana-sdk = { workspace = true } @@ -38,4 +39,3 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] solana-local-cluster = { workspace = true } -solana-thin-client = { workspace = true } diff --git a/dos/src/main.rs b/dos/src/main.rs index b9e0dceba40bf0..055b1f4bb65d4c 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -46,12 +46,15 @@ use { log::*, rand::{thread_rng, Rng}, solana_bench_tps::{bench::generate_and_fund_keypairs, bench_tps_client::BenchTpsClient}, - solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection}, + solana_client::{ + connection_cache::ConnectionCache, tpu_client::TpuClientWrapper, + tpu_connection::TpuConnection, + }, solana_core::repair::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair}, solana_dos::cli::*, solana_gossip::{ contact_info::Protocol, - gossip_service::{discover, get_multi_client}, + gossip_service::{discover, get_client}, legacy_contact_info::LegacyContactInfo as ContactInfo, }, solana_measure::measure::Measure, @@ -791,33 +794,30 @@ fn main() { DEFAULT_TPU_CONNECTION_POOL_SIZE, ), }; - let (client, num_clients) = get_multi_client( - &validators, - &SocketAddrSpace::Unspecified, - Arc::new(connection_cache), - ); - if validators.len() < num_clients { - eprintln!( - "Error: Insufficient nodes discovered. 
Expecting {} or more", - validators.len() - ); - exit(1); - } - (gossip_nodes, Some(Arc::new(client))) + let client = get_client(&validators, Arc::new(connection_cache)); + (gossip_nodes, Some(client)) } else { (vec![], None) }; info!("done found {} nodes", nodes.len()); - - run_dos(&nodes, 0, client, cmd_params); + if let Some(tpu_client) = client { + match tpu_client { + TpuClientWrapper::Quic(quic_client) => { + run_dos(&nodes, 0, Some(Arc::new(quic_client)), cmd_params); + } + TpuClientWrapper::Udp(udp_client) => { + run_dos(&nodes, 0, Some(Arc::new(udp_client)), cmd_params); + } + }; + } } #[cfg(test)] pub mod test { use { super::*, - solana_client::thin_client::ThinClient, + solana_client::tpu_client::TpuClient, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_gossip::contact_info::LegacyContactInfo, @@ -826,8 +826,10 @@ pub mod test { local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, + solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc::rpc::JsonRpcConfig, solana_sdk::timing::timestamp, + solana_tpu_client::tpu_client::TpuClientConfig, }; const TEST_SEND_BATCH_SIZE: usize = 1; @@ -835,7 +837,32 @@ pub mod test { // thin wrapper for the run_dos function // to avoid specifying everywhere generic parameters fn run_dos_no_client(nodes: &[ContactInfo], iterations: usize, params: DosClientParameters) { - run_dos::(nodes, iterations, None, params); + run_dos::>( + nodes, iterations, None, params, + ); + } + + fn build_tpu_quic_client( + cluster: &LocalCluster, + ) -> Arc> { + let rpc_pubsub_url = format!("ws://{}/", cluster.entry_point_info.rpc_pubsub().unwrap()); + let rpc_url = format!("http://{}", cluster.entry_point_info.rpc().unwrap()); + + let ConnectionCache::Quic(cache) = &*cluster.connection_cache else { + panic!("Expected a Quic ConnectionCache."); + }; + + Arc::new( + TpuClient::new_with_connection_cache( + Arc::new(RpcClient::new(rpc_url)), + rpc_pubsub_url.as_str(), + TpuClientConfig::default(), + cache.clone(), + ) + .unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + }), + ) } #[test] @@ -975,14 +1002,7 @@ pub mod test { .unwrap(); let nodes_slice = [node]; - let client = Arc::new(ThinClient::new( - cluster.entry_point_info.rpc().unwrap(), - cluster - .entry_point_info - .tpu(cluster.connection_cache.protocol()) - .unwrap(), - cluster.connection_cache.clone(), - )); + let client = build_tpu_quic_client(&cluster); // creates one transaction with 8 valid signatures and sends it 10 times run_dos( @@ -1114,14 +1134,7 @@ pub mod test { .unwrap(); let nodes_slice = [node]; - let client = Arc::new(ThinClient::new( - cluster.entry_point_info.rpc().unwrap(), - cluster - .entry_point_info - .tpu(cluster.connection_cache.protocol()) - .unwrap(), - cluster.connection_cache.clone(), - )); + let client = build_tpu_quic_client(&cluster); // creates one transaction and sends it 10 times // this is done in single thread diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index f9870ac1ee380c..2e62bc66f6866c 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -31,6 +31,7 @@ serde_derive = { workspace = true } solana-bloom = { workspace = true } solana-clap-utils = { workspace = true } solana-client = { workspace = true } +solana-connection-cache = { workspace = true } solana-entry = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } @@ -44,7 +45,6 @@ solana-rayon-threadlimit = 
{ workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } -solana-thin-client = { workspace = true } solana-tpu-client = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 9e1c56520993c5..0bd4750e269a48 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -4,7 +4,11 @@ use { crate::{cluster_info::ClusterInfo, legacy_contact_info::LegacyContactInfo as ContactInfo}, crossbeam_channel::{unbounded, Sender}, rand::{thread_rng, Rng}, - solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient}, + solana_client::{ + connection_cache::ConnectionCache, + rpc_client::RpcClient, + tpu_client::{TpuClient, TpuClientConfig, TpuClientWrapper}, + }, solana_perf::recycler::Recycler, solana_runtime::bank_forks::BankForks, solana_sdk::{ @@ -197,35 +201,37 @@ pub fn discover( #[deprecated(since = "1.18.6", note = "Interface will change")] pub fn get_client( nodes: &[ContactInfo], - socket_addr_space: &SocketAddrSpace, connection_cache: Arc, -) -> ThinClient { - let protocol = connection_cache.protocol(); - let nodes: Vec<_> = nodes - .iter() - .filter_map(|node| node.valid_client_facing_addr(protocol, socket_addr_space)) - .collect(); +) -> TpuClientWrapper { let select = thread_rng().gen_range(0..nodes.len()); - let (rpc, tpu) = nodes[select]; - ThinClient::new(rpc, tpu, connection_cache) -} -#[deprecated(since = "1.18.6", note = "Will be removed in favor of get_client")] -pub fn get_multi_client( - nodes: &[ContactInfo], - socket_addr_space: &SocketAddrSpace, - connection_cache: Arc, -) -> (ThinClient, usize) { - let protocol = connection_cache.protocol(); - let (rpc_addrs, tpu_addrs): (Vec<_>, Vec<_>) = nodes - .iter() - .filter_map(|node| node.valid_client_facing_addr(protocol, socket_addr_space)) - .unzip(); - let num_nodes = tpu_addrs.len(); - ( - ThinClient::new_from_addrs(rpc_addrs, tpu_addrs, connection_cache), - num_nodes, - ) + let rpc_pubsub_url = format!("ws://{}/", nodes[select].rpc_pubsub().unwrap()); + let rpc_url = format!("http://{}", nodes[select].rpc().unwrap()); + + match &*connection_cache { + ConnectionCache::Quic(cache) => TpuClientWrapper::Quic( + TpuClient::new_with_connection_cache( + Arc::new(RpcClient::new(rpc_url)), + rpc_pubsub_url.as_str(), + TpuClientConfig::default(), + cache.clone(), + ) + .unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + }), + ), + ConnectionCache::Udp(cache) => TpuClientWrapper::Udp( + TpuClient::new_with_connection_cache( + Arc::new(RpcClient::new(rpc_url)), + rpc_pubsub_url.as_str(), + TpuClientConfig::default(), + cache.clone(), + ) + .unwrap_or_else(|err| { + panic!("Could not create TpuClient with Udp Cache {err:?}"); + }), + ), + } } fn spy( diff --git a/gossip/src/legacy_contact_info.rs b/gossip/src/legacy_contact_info.rs index d3dead1910d6ab..870f1c9aa49283 100644 --- a/gossip/src/legacy_contact_info.rs +++ b/gossip/src/legacy_contact_info.rs @@ -229,21 +229,6 @@ impl LegacyContactInfo { pub fn is_valid_address(addr: &SocketAddr, socket_addr_space: &SocketAddrSpace) -> bool { addr.port() != 0u16 && Self::is_valid_ip(addr.ip()) && socket_addr_space.check(addr) } - - pub(crate) fn valid_client_facing_addr( - &self, - protocol: Protocol, - socket_addr_space: &SocketAddrSpace, - ) -> Option<(SocketAddr, SocketAddr)> { - Some(( - self.rpc() - .ok() - .filter(|addr| 
socket_addr_space.check(addr))?, - self.tpu(protocol) - .ok() - .filter(|addr| socket_addr_space.check(addr))?, - )) - } } impl TryFrom<&ContactInfo> for LegacyContactInfo { @@ -342,24 +327,6 @@ mod tests { assert!(ci.serve_repair.ip().is_unspecified()); } - #[test] - fn test_valid_client_facing() { - let mut ci = LegacyContactInfo::default(); - assert_eq!( - ci.valid_client_facing_addr(Protocol::QUIC, &SocketAddrSpace::Unspecified), - None - ); - ci.tpu = socketaddr!(Ipv4Addr::LOCALHOST, 123); - assert_eq!( - ci.valid_client_facing_addr(Protocol::QUIC, &SocketAddrSpace::Unspecified), - None - ); - ci.rpc = socketaddr!(Ipv4Addr::LOCALHOST, 234); - assert!(ci - .valid_client_facing_addr(Protocol::QUIC, &SocketAddrSpace::Unspecified) - .is_some()); - } - #[test] fn test_sanitize() { let mut ci = LegacyContactInfo::default(); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c31befdf34c092..d148f0bea7b5d0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5169,6 +5169,7 @@ dependencies = [ "solana-bloom", "solana-clap-utils", "solana-client", + "solana-connection-cache", "solana-entry", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -5182,7 +5183,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", - "solana-thin-client", "solana-tpu-client", "solana-version", "solana-vote", From 14454a4a000b1125d65f5cadbe2b1215752b2c79 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 12 Mar 2024 11:42:21 +0800 Subject: [PATCH 368/401] ci: remove unused Github Actions (#124) --- .github/workflows/autolock_bot_PR.txt | 42 ---- .../workflows/autolock_bot_closed_issue.txt | 43 ---- .github/workflows/solana-action.yml.txt | 184 ------------------ 3 files changed, 269 deletions(-) delete mode 100644 .github/workflows/autolock_bot_PR.txt delete mode 100644 .github/workflows/autolock_bot_closed_issue.txt delete mode 100644 .github/workflows/solana-action.yml.txt diff --git a/.github/workflows/autolock_bot_PR.txt b/.github/workflows/autolock_bot_PR.txt deleted file mode 100644 index 4c6011fd4c8cfb..00000000000000 --- a/.github/workflows/autolock_bot_PR.txt +++ /dev/null @@ -1,42 +0,0 @@ -name: 'Autolock RitBot for for PR' - -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -permissions: - issues: write - pull-requests: write - -concurrency: - group: lock - -jobs: - action: - # Forks do not need to run this, especially on cron schedule. - if: > - github.event_name != 'schedule' - || github.repository == 'solana-labs/solana' - - runs-on: ubuntu-latest - steps: - - uses: dessant/lock-threads@v3 - with: - - github-token: ${{ github.token }} - pr-inactive-days: '14' - exclude-pr-created-before: '' - exclude-pr-created-after: '' - exclude-pr-created-between: '' - exclude-pr-closed-before: '' - exclude-pr-closed-after: '' - exclude-pr-closed-between: '' - include-any-pr-labels: 'automerge' - include-all-pr-labels: '' - exclude-any-pr-labels: '' - add-pr-labels: 'locked PR' - remove-pr-labels: '' - pr-comment: 'This PR has been automatically locked since there has not been any activity in past 14 days after it was merged.' 
- pr-lock-reason: 'resolved' - log-output: true diff --git a/.github/workflows/autolock_bot_closed_issue.txt b/.github/workflows/autolock_bot_closed_issue.txt deleted file mode 100644 index dd8aa9ef835ba3..00000000000000 --- a/.github/workflows/autolock_bot_closed_issue.txt +++ /dev/null @@ -1,43 +0,0 @@ -name: 'Autolock NaviBot for closed issue' - -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -permissions: - issues: write - pull-requests: write - -concurrency: - group: lock - -jobs: - action: - # Forks do not need to run this, especially on cron schedule. - if: > - github.event_name != 'schedule' - || github.repository == 'solana-labs/solana' - - runs-on: ubuntu-latest - steps: - - uses: dessant/lock-threads@v3 - with: - - github-token: ${{ github.token }} - issue-inactive-days: '7' - exclude-issue-created-before: '' - exclude-issue-created-after: '' - exclude-issue-created-between: '' - exclude-issue-closed-before: '' - exclude-issue-closed-after: '' - exclude-issue-closed-between: '' - include-any-issue-labels: '' - include-all-issue-labels: '' - exclude-any-issue-labels: '' - add-issue-labels: 'locked issue' - remove-issue-labels: '' - issue-comment: 'This issue has been automatically locked since there has not been any activity in past 7 days after it was closed. Please open a new issue for related bugs.' - issue-lock-reason: 'resolved' - process-only: 'issues' - log-output: true diff --git a/.github/workflows/solana-action.yml.txt b/.github/workflows/solana-action.yml.txt deleted file mode 100644 index 62c4c4864ffffd..00000000000000 --- a/.github/workflows/solana-action.yml.txt +++ /dev/null @@ -1,184 +0,0 @@ -name : minimal - -on: - push: - branches: [master] - pull_request: - branches: [master] - -jobs: - macos-artifacts: - needs: [Export_Github_Repositories] - strategy: - fail-fast: false - runs-on: macos-latest - if : ${{ github.event_name == 'api' && 'cron' || 'push' || startsWith(github.ref, 'refs/tags/v')}} - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - name: Setup | Rust - uses: ATiltedTree/setup-rust@v1 - with: - rust-version: stable - - name: release artifact - run: | - source ci/rust-version.sh - brew install coreutils - export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" - greadlink -f . 
- source ci/env.sh - rustup set profile default - ci/publish-tarball.sh - shell: bash - - - name: Cache modules - uses: actions/cache@master - id: yarn-cache - with: - path: node_modules - key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }} - restore-keys: ${{ runner.os }}-yarn- - - -# - To stop from uploading on the production -# - uses: ochanje210/simple-s3-upload-action@master -# with: -# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }} -# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}} -# AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} -# SOURCE_DIR: 'travis-s3-upload1' -# DEST_DIR: 'giitsol' - -# - uses: ochanje210/simple-s3-upload-action@master -# with: -# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }} -# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}} -# AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} -# SOURCE_DIR: './docs/' -# DEST_DIR: 'giitsol' - - - windows-artifact: - needs: [Export_Github_Repositories] - strategy: - fail-fast: false - runs-on: windows-latest - if : ${{ github.event_name == 'api' && 'cron' || 'push' || startsWith(github.ref, 'refs/tags/v')}} - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - name: Setup | Rust - uses: ATiltedTree/setup-rust@v1 - with: - rust-version: stable - release-artifact: - needs: windows-artifact - runs-on: windows-latest - if : ${{ github.event_name == 'api' && 'cron' || github.ref == 'refs/heads/master'}} - steps: - - name: release artifact - run: | - git clone git://git.openssl.org/openssl.git - cd openssl - make - make test - make install - openssl version -# choco install openssl -# vcpkg integrate install -# refreshenv - - - name: Checkout repository - uses: actions/checkout@v2 - - uses: actions/checkout@v2 - - run: choco install msys2 - - uses: actions/checkout@v2 - - run: | - openssl version - bash ci/rust-version.sh - readlink -f . - bash ci/env.sh - rustup set profile default - bash ci/publish-tarball.sh - shell: bash - - - name: Cache modules - uses: actions/cache@v1 - id: yarn-cache - with: - path: node_modules - key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }} - restore-keys: ${{ runner.os }}-yarn- - -# - To stop from uploading on the production -# - name: Config. 
aws cred -# uses: aws-actions/configure-aws-credentials@v1 -# with: -# aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} -# aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} -# aws-region: us-east-2 -# - name: Deploy -# uses: shallwefootball/s3-upload-action@master -# with: -# folder: build -# aws_bucket: ${{ secrets.AWS_S3_BUCKET }} -# aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} -# aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} -# destination_dir: / -# bucket-region: us-east-2 -# delete-removed: true -# no-cache: true -# private: true - -# Docs: -# needs: [windows-artifact,release-artifact] -# runs-on: ubuntu-latest -# env: -# GITHUB_TOKEN: ${{secrets.PAT_NEW}} -# GITHUB_EVENT_BEFORE: ${{ github.event.before }} -# GITHUB_EVENT_AFTER: ${{ github.event.after }} -# COMMIT_RANGE: ${{ github.event.before}}...${{ github.event.after}} -# steps: -# - name: Checkout repo -# uses: actions/checkout@v2 -# with: -# fetch-depth: 2 -# - name: docs -# if: ${{github.event_name == 'pull_request' || startsWith(github.ref, 'refs/tags/v')}} -# run: | -# touch .env -# echo "COMMIT_RANGE=($COMMIT_RANGE)" > .env -# source ci/env.sh -# .travis/channel_restriction.sh edge beta || exit 0 -# .travis/affects.sh docs/ .travis || exit 0 -# cd docs/ -# source .travis/before_install.sh -# source .travis/script.sh -# - name: setup-node -# uses: actions/checkout@v2 -# - name: setup-node -# uses: actions/setup-node@v2 -# with: -# node-version: 'lts/*' -# - name: Cache -# uses: actions/cache@v1 -# with: -# path: ~/.npm -# key: ${{ runner.OS }}-npm-cache-${{ hashFiles('**/package-lock.json') }} -# restore-keys: | -# ${{ runner.OS }}-npm-cache-2 - -# auto_bump: -# needs: [windows-artifact,release-artifact,Docs] -# runs-on: ubuntu-latest -# steps: -# - name : checkout repo -# uses: actions/checkout@v2 -# with: -# fetch-depth: '0' -# - name: Bump version and push tag -# uses: anothrNick/github-tag-action@1.26.0 -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# WITH_V: true -# DEFAULT_BUMP: patch From 6b3d35e995f9910643d5df26c54521694d2ac683 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 12 Mar 2024 11:42:42 +0800 Subject: [PATCH 369/401] Revert "build(deps): bump cc from 1.0.83 to 1.0.89 (#40)" (#174) This reverts commit 7a8e29d4d5edb1d0d467458e36f87871d8dfc0fe. 
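
For anyone retracing this locally, the revert and the resulting pin are easy to
reproduce with stock git and cargo (a minimal sketch, assuming a checkout of
this repository; the hash is the reverted commit named above):

    git revert 7a8e29d4d5edb1d0d467458e36f87871d8dfc0fe
    cargo tree -i cc   # the inverted dependency tree should now be rooted at cc v1.0.83
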
--- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 46091cfbca5e82..ce428b22a3283b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1231,9 +1231,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.89" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", "libc", diff --git a/Cargo.toml b/Cargo.toml index 453408a53b956e..6cbd56762fbfe8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -170,7 +170,7 @@ bytes = "1.5" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" -cc = "1.0.89" +cc = "1.0.83" chrono = { version = "0.4.34", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d148f0bea7b5d0..1043b74c67c619 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -971,9 +971,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.89" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", "libc", From 9e9aa05b332acf427b2e71c05fd933431e465593 Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 11 Mar 2024 23:59:18 -0600 Subject: [PATCH 370/401] Rpc: add support for minimum context slot to `getBlocks(WithLimit)` endpoints (#191) * Support min_context_slot field in getBlocksWithLimit input * Use min_context_slot in get_blocks_with_limit * Support min_context_slot field in getBlocks input * Use min_context_slot in get_blocks --- rpc-client-api/src/config.rs | 6 +- rpc/src/rpc.rs | 132 ++++++++++++++++++++++------------- 2 files changed, 87 insertions(+), 51 deletions(-) diff --git a/rpc-client-api/src/config.rs b/rpc-client-api/src/config.rs index cecc0b64bdf7b2..be8bb1742457c4 100644 --- a/rpc-client-api/src/config.rs +++ b/rpc-client-api/src/config.rs @@ -321,14 +321,14 @@ impl EncodingConfig for RpcTransactionConfig { #[serde(untagged)] pub enum RpcBlocksConfigWrapper { EndSlotOnly(Option), - CommitmentOnly(Option), + ConfigOnly(Option), } impl RpcBlocksConfigWrapper { - pub fn unzip(&self) -> (Option, Option) { + pub fn unzip(&self) -> (Option, Option) { match &self { RpcBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None), - RpcBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment), + RpcBlocksConfigWrapper::ConfigOnly(config) => (None, *config), } } } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 41b26e5fa1e2c2..9c979ab1f5a6b2 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -527,13 +527,14 @@ impl JsonRpcRequestProcessor { let config = config.unwrap_or_default(); let epoch_schedule = self.get_epoch_schedule(); let first_available_block = self.get_first_available_block().await; + let context_config = RpcContextConfig { + commitment: config.commitment, + min_context_slot: config.min_context_slot, + }; let epoch = match config.epoch { Some(epoch) => epoch, None => epoch_schedule - .get_epoch(self.get_slot(RpcContextConfig { - commitment: config.commitment, - min_context_slot: config.min_context_slot, - })?) + .get_epoch(self.get_slot(context_config)?) 
.saturating_sub(1), }; @@ -555,7 +556,7 @@ impl JsonRpcRequestProcessor { } let first_confirmed_block_in_epoch = *self - .get_blocks_with_limit(first_slot_in_epoch, 1, config.commitment) + .get_blocks_with_limit(first_slot_in_epoch, 1, Some(context_config)) .await? .first() .ok_or(RpcCustomError::BlockNotAvailable { @@ -1170,9 +1171,10 @@ impl JsonRpcRequestProcessor { &self, start_slot: Slot, end_slot: Option, - commitment: Option, + config: Option, ) -> Result> { - let commitment = commitment.unwrap_or_default(); + let config = config.unwrap_or_default(); + let commitment = config.commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; let highest_super_majority_root = self @@ -1181,12 +1183,20 @@ impl JsonRpcRequestProcessor { .unwrap() .highest_super_majority_root(); + let min_context_slot = config.min_context_slot.unwrap_or_default(); + if commitment.is_finalized() && highest_super_majority_root < min_context_slot { + return Err(RpcCustomError::MinContextSlotNotReached { + context_slot: highest_super_majority_root, + } + .into()); + } + let end_slot = min( end_slot.unwrap_or_else(|| start_slot.saturating_add(MAX_GET_CONFIRMED_BLOCKS_RANGE)), if commitment.is_finalized() { highest_super_majority_root } else { - self.bank(Some(CommitmentConfig::confirmed())).slot() + self.get_bank_with_config(config)?.slot() }, ); if end_slot < start_slot { @@ -1236,14 +1246,16 @@ impl JsonRpcRequestProcessor { .unwrap_or_else(|| start_slot.saturating_sub(1)); // Maybe add confirmed blocks - if commitment.is_confirmed() && last_element < end_slot { - let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); - let mut confirmed_blocks = confirmed_bank - .status_cache_ancestors() - .into_iter() - .filter(|&slot| slot <= end_slot && slot > last_element) - .collect(); - blocks.append(&mut confirmed_blocks); + if commitment.is_confirmed() { + let confirmed_bank = self.get_bank_with_config(config)?; + if last_element < end_slot { + let mut confirmed_blocks = confirmed_bank + .status_cache_ancestors() + .into_iter() + .filter(|&slot| slot <= end_slot && slot > last_element) + .collect(); + blocks.append(&mut confirmed_blocks); + } } Ok(blocks) @@ -1253,9 +1265,10 @@ impl JsonRpcRequestProcessor { &self, start_slot: Slot, limit: usize, - commitment: Option, + config: Option, ) -> Result> { - let commitment = commitment.unwrap_or_default(); + let config = config.unwrap_or_default(); + let commitment = config.commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if limit > MAX_GET_CONFIRMED_BLOCKS_RANGE as usize { @@ -1287,6 +1300,16 @@ impl JsonRpcRequestProcessor { .unwrap() .highest_super_majority_root(); + if commitment.is_finalized() { + let min_context_slot = config.min_context_slot.unwrap_or_default(); + if highest_super_majority_root < min_context_slot { + return Err(RpcCustomError::MinContextSlotNotReached { + context_slot: highest_super_majority_root, + } + .into()); + } + } + // Finalized blocks let mut blocks: Vec<_> = self .blockstore @@ -1297,19 +1320,21 @@ impl JsonRpcRequestProcessor { .collect(); // Maybe add confirmed blocks - if commitment.is_confirmed() && blocks.len() < limit { - let last_element = blocks - .last() - .cloned() - .unwrap_or_else(|| start_slot.saturating_sub(1)); - let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); - let mut confirmed_blocks = confirmed_bank - .status_cache_ancestors() - .into_iter() - .filter(|&slot| slot > last_element) - .collect(); - blocks.append(&mut confirmed_blocks); - 
blocks.truncate(limit); + if commitment.is_confirmed() { + let confirmed_bank = self.get_bank_with_config(config)?; + if blocks.len() < limit { + let last_element = blocks + .last() + .cloned() + .unwrap_or_else(|| start_slot.saturating_sub(1)); + let mut confirmed_blocks = confirmed_bank + .status_cache_ancestors() + .into_iter() + .filter(|&slot| slot > last_element) + .collect(); + blocks.append(&mut confirmed_blocks); + blocks.truncate(limit); + } } Ok(blocks) @@ -3339,8 +3364,8 @@ pub mod rpc_full { &self, meta: Self::Metadata, start_slot: Slot, - config: Option, - commitment: Option, + wrapper: Option, + config: Option, ) -> BoxFuture>>; #[rpc(meta, name = "getBlocksWithLimit")] @@ -3349,7 +3374,7 @@ pub mod rpc_full { meta: Self::Metadata, start_slot: Slot, limit: usize, - commitment: Option, + config: Option, ) -> BoxFuture>>; #[rpc(meta, name = "getTransaction")] @@ -3841,17 +3866,17 @@ pub mod rpc_full { &self, meta: Self::Metadata, start_slot: Slot, - config: Option, - commitment: Option, + wrapper: Option, + config: Option, ) -> BoxFuture>> { - let (end_slot, maybe_commitment) = - config.map(|config| config.unzip()).unwrap_or_default(); + let (end_slot, maybe_config) = + wrapper.map(|wrapper| wrapper.unzip()).unwrap_or_default(); debug!( "get_blocks rpc request received: {}-{:?}", start_slot, end_slot ); Box::pin(async move { - meta.get_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) + meta.get_blocks(start_slot, end_slot, config.or(maybe_config)) .await }) } @@ -3861,16 +3886,13 @@ pub mod rpc_full { meta: Self::Metadata, start_slot: Slot, limit: usize, - commitment: Option, + config: Option, ) -> BoxFuture>> { debug!( "get_blocks_with_limit rpc request received: {}-{}", start_slot, limit, ); - Box::pin(async move { - meta.get_blocks_with_limit(start_slot, limit, commitment) - .await - }) + Box::pin(async move { meta.get_blocks_with_limit(start_slot, limit, config).await }) } fn get_block_time( @@ -4282,8 +4304,15 @@ pub mod rpc_deprecated_v1_7 { start_slot, end_slot ); Box::pin(async move { - meta.get_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) - .await + meta.get_blocks( + start_slot, + end_slot, + Some(RpcContextConfig { + commitment: commitment.or(maybe_commitment), + min_context_slot: None, + }), + ) + .await }) } @@ -4299,8 +4328,15 @@ pub mod rpc_deprecated_v1_7 { start_slot, limit, ); Box::pin(async move { - meta.get_blocks_with_limit(start_slot, limit, commitment) - .await + meta.get_blocks_with_limit( + start_slot, + limit, + Some(RpcContextConfig { + commitment, + min_context_slot: None, + }), + ) + .await }) } From aaf3a91a95e6c50a7eaa194313f574cb958a5714 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 12 Mar 2024 02:25:59 -0400 Subject: [PATCH 371/401] Removes redundant imports (#193) --- frozen-abi/src/abi_digester.rs | 5 +---- programs/bpf_loader/gen-syscall-list/build.rs | 2 +- sdk/macro/src/lib.rs | 1 - 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index b014efd2ba1570..fb1fc78fa6735e 100644 --- a/frozen-abi/src/abi_digester.rs +++ b/frozen-abi/src/abi_digester.rs @@ -4,10 +4,7 @@ use { hash::{Hash, Hasher}, }, log::*, - serde::{ - ser::{Error as SerdeError, *}, - Serialize, Serializer, - }, + serde::ser::{Error as SerdeError, *}, std::{any::type_name, io::Write}, thiserror::Error, }; diff --git a/programs/bpf_loader/gen-syscall-list/build.rs b/programs/bpf_loader/gen-syscall-list/build.rs index 96af426ec4763b..f06039ae84d696 100644 --- 
a/programs/bpf_loader/gen-syscall-list/build.rs
+++ b/programs/bpf_loader/gen-syscall-list/build.rs
@@ -2,7 +2,7 @@ use {
     regex::Regex,
     std::{
         fs::File,
-        io::{prelude::*, BufWriter, Read},
+        io::{prelude::*, BufWriter},
         path::PathBuf,
         str,
     },

diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs
index 157592dc37bcaa..3c2a3bdb86a9ee 100644
--- a/sdk/macro/src/lib.rs
+++ b/sdk/macro/src/lib.rs
@@ -8,7 +8,6 @@ use {
     proc_macro::TokenStream,
     proc_macro2::{Delimiter, Span, TokenTree},
     quote::{quote, ToTokens},
-    std::convert::TryFrom,
     syn::{
         bracketed,
         parse::{Parse, ParseStream, Result},

From 2ddb50d2f38a33dd110755c3bd2113ef4bad9438 Mon Sep 17 00:00:00 2001
From: steviez
Date: Tue, 12 Mar 2024 01:27:31 -0500
Subject: [PATCH 372/401] Make --wait-for-supermajority require --expected-shred-version (#192)

In cluster restart scenarios, an important step is scanning the Blockstore
for blocks that occur after the chosen restart slot with an incorrect shred
version. This check ensures that any blocks that occurred pre-cluster restart
and after the chosen restart slot get deleted. If a node skips this step, the
node can encounter problems when that block is created again, after the
cluster has restarted.

This check only occurs if --wait-for-supermajority AND
--expected-shred-version are set; however, --expected-... is currently
optional when using --wait-... Our restart instructions typically mention
that one should specify --expected-... as well, but we should just enforce it
at the CLI level to prevent mistakes / wasted time debugging.
---
 validator/src/cli.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index e3f46309724af7..d1ad63b760f031 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -745,6 +745,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
         Arg::with_name("wait_for_supermajority")
             .long("wait-for-supermajority")
             .requires("expected_bank_hash")
+            .requires("expected_shred_version")
             .value_name("SLOT")
             .validator(is_slot)
             .help(

From 076329381ae860d5832c723ab44fb193c27e1991 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 12 Mar 2024 07:46:23 -0400
Subject: [PATCH 373/401] Moves a clippy attribute (#194)

---
 memory-management/src/aligned_memory.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/memory-management/src/aligned_memory.rs b/memory-management/src/aligned_memory.rs
index 689daaaed5e6aa..e5c203064414dc 100644
--- a/memory-management/src/aligned_memory.rs
+++ b/memory-management/src/aligned_memory.rs
@@ -207,8 +207,8 @@ impl<T: AsRef<[u8]>> From<T> for AlignedMemory<HOST_ALIGN> {
 }

 #[cfg(test)]
+#[allow(clippy::arithmetic_side_effects)]
 mod tests {
-    #![allow(clippy::arithmetic_side_effects)]
     use {super::*, std::io::Write};

     fn do_test() {

From 0f1ca20d38a111bb47621aa3108a2f9fc107f998 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Tue, 12 Mar 2024 07:46:41 -0400
Subject: [PATCH 374/401] [anza migration] Sets client id to Agave (#163)

---
 version/src/lib.rs | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/version/src/lib.rs b/version/src/lib.rs
index edeca08c960243..7a59406cf0647d 100644
--- a/version/src/lib.rs
+++ b/version/src/lib.rs
@@ -17,6 +17,7 @@ enum ClientId {
     SolanaLabs,
     JitoLabs,
     Firedancer,
+    Agave,
     // If new variants are added, update From<u16> and TryFrom<ClientId>.
     Unknown(u16),
 }
@@ -63,7 +64,7 @@ impl Default for Version {
             commit: compute_commit(option_env!("CI_COMMIT")).unwrap_or_default(),
             feature_set,
             // Other client implementations need to modify this line.
-            client: u16::try_from(ClientId::SolanaLabs).unwrap(),
+            client: u16::try_from(ClientId::Agave).unwrap(),
         }
     }
@@ -97,6 +98,7 @@ impl From<u16> for ClientId {
             0u16 => Self::SolanaLabs,
             1u16 => Self::JitoLabs,
             2u16 => Self::Firedancer,
+            3u16 => Self::Agave,
             _ => Self::Unknown(client),
         }
     }
@@ -110,7 +112,8 @@ impl TryFrom<ClientId> for u16 {
             ClientId::SolanaLabs => Ok(0u16),
             ClientId::JitoLabs => Ok(1u16),
             ClientId::Firedancer => Ok(2u16),
-            ClientId::Unknown(client @ 0u16..=2u16) => Err(format!("Invalid client: {client}")),
+            ClientId::Agave => Ok(3u16),
+            ClientId::Unknown(client @ 0u16..=3u16) => Err(format!("Invalid client: {client}")),
             ClientId::Unknown(client) => Ok(client),
         }
     }
@@ -147,19 +150,21 @@ mod test {
         assert_eq!(ClientId::from(0u16), ClientId::SolanaLabs);
         assert_eq!(ClientId::from(1u16), ClientId::JitoLabs);
         assert_eq!(ClientId::from(2u16), ClientId::Firedancer);
-        for client in 3u16..=u16::MAX {
+        assert_eq!(ClientId::from(3u16), ClientId::Agave);
+        for client in 4u16..=u16::MAX {
             assert_eq!(ClientId::from(client), ClientId::Unknown(client));
         }
         assert_eq!(u16::try_from(ClientId::SolanaLabs), Ok(0u16));
         assert_eq!(u16::try_from(ClientId::JitoLabs), Ok(1u16));
         assert_eq!(u16::try_from(ClientId::Firedancer), Ok(2u16));
-        for client in 0..=2u16 {
+        assert_eq!(u16::try_from(ClientId::Agave), Ok(3u16));
+        for client in 0..=3u16 {
             assert_eq!(
                 u16::try_from(ClientId::Unknown(client)),
                 Err(format!("Invalid client: {client}"))
             );
         }
-        for client in 3u16..=u16::MAX {
+        for client in 4u16..=u16::MAX {
             assert_eq!(u16::try_from(ClientId::Unknown(client)), Ok(client));
         }
     }

From 5f6616518641240dfd38fe6d24e356eea95345cf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 12 Mar 2024 23:08:28 +0800
Subject: [PATCH 375/401] build(deps): bump proc-macro2 from 1.0.78 to 1.0.79 (#203)

* build(deps): bump proc-macro2 from 1.0.78 to 1.0.79

Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.78 to 1.0.79.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.78...1.0.79)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce428b22a3283b..503b2280d86ec9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4180,9 +4180,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 6cbd56762fbfe8..f1ac84f6875b8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -265,7 +265,7 @@ pickledb = { version = "0.5.1", default-features = false } predicates = "2.1" pretty-hex = "0.3.0" prio-graph = "0.2.1" -proc-macro2 = "1.0.78" +proc-macro2 = "1.0.79" proptest = "1.4" prost = "0.11.9" prost-build = "0.11.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1043b74c67c619..4d606fc4e9ed51 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3646,9 +3646,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] From 0705a07b52af10f789e271618d9dbb51a5dc0aec Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 12 Mar 2024 11:48:05 -0500 Subject: [PATCH 376/401] Remove unused account_deps (#188) --- svm/src/account_loader.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index bf9b5b9c40bfee..ee06dd5fbf2198 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -196,7 +196,6 @@ fn load_transaction_accounts( let mut tx_rent: TransactionRent = 0; let account_keys = message.account_keys(); let mut accounts_found = Vec::with_capacity(account_keys.len()); - let mut account_deps = Vec::with_capacity(account_keys.len()); let mut rent_debits = RentDebits::default(); let rent_collector = callbacks.get_rent_collector(); @@ -316,13 +315,6 @@ fn load_transaction_accounts( return Err(TransactionError::AccountNotFound); } - // Appends the account_deps at the end of the accounts, - // this way they can be accessed in a uniform way. 
-    // At places where only the accounts are needed,
-    // the account_deps are truncated using e.g:
-    // accounts.iter().take(message.account_keys.len())
-    accounts.append(&mut account_deps);

     let builtins_start_index = accounts.len();
     let program_indices = message
         .instructions()

From 2078153aa1c857ad0e66913c6acce27572ff1d27 Mon Sep 17 00:00:00 2001
From: Yihau Chen
Date: Wed, 13 Mar 2024 02:15:59 +0800
Subject: [PATCH 377/401] [anza migration]: fix download path for cluster test (#204)

---
 net/net.sh                      | 2 +-
 scripts/agave-install-deploy.sh | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/net/net.sh b/net/net.sh
index 36bc48efdb7861..c1e93d095be7eb 100755
--- a/net/net.sh
+++ b/net/net.sh
@@ -563,7 +563,7 @@ prepareDeploy() {
   if [[ -n $releaseChannel ]]; then
     echo "Downloading release from channel: $releaseChannel"
     rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
-    declare updateDownloadUrl=https://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
+    declare updateDownloadUrl=https://release.agave.xyz/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
     (
       set -x
       curl -L -I "$updateDownloadUrl"

diff --git a/scripts/agave-install-deploy.sh b/scripts/agave-install-deploy.sh
index a8f8eeb65b3857..01366a1cfbc5af 100755
--- a/scripts/agave-install-deploy.sh
+++ b/scripts/agave-install-deploy.sh
@@ -57,10 +57,10 @@ esac

 case $TAG in
   edge|beta)
-    DOWNLOAD_URL=https://release.solana.com/"$TAG"/solana-release-$TARGET.tar.bz2
+    DOWNLOAD_URL=https://release.agave.xyz/"$TAG"/solana-release-$TARGET.tar.bz2
     ;;
   *)
-    DOWNLOAD_URL=https://github.com/solana-labs/solana/releases/download/"$TAG"/solana-release-$TARGET.tar.bz2
+    DOWNLOAD_URL=https://github.com/anza-xyz/agave/releases/download/"$TAG"/solana-release-$TARGET.tar.bz2
     ;;
 esac

From 7a144e2b9faabf18077cc6a3df118b2bbe2d26dd Mon Sep 17 00:00:00 2001
From: steviez
Date: Tue, 12 Mar 2024 13:21:11 -0500
Subject: [PATCH 378/401] Make ReplayStage own the threadpool for tx replay (#190)

The threadpool used to replay multiple transactions in parallel is
currently global state via a lazy_static definition. Making this pool
owned by ReplayStage will enable subsequent work to make the pool size
configurable on the CLI.

This makes `ReplayStage` create and hold the threadpool which is passed
down to blockstore_processor::confirm_slot().
blockstore_processor::process_blockstore_from_root() now creates its own
threadpool as well; however, this pool is only alive for the scope of
that function and does not persist for the lifetime of the process.
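
In miniature, the ownership pattern this patch adopts looks like the sketch
below. This is a hedged illustration, not the actual ReplayStage code: the
`Stage` struct and its methods are invented stand-ins, and only the `rayon`
builder API plus the `solReplayTx` thread naming from this patch are taken as
given.

    use rayon::prelude::*;

    struct Stage {
        // Owned by the stage instead of living in a process-wide
        // lazy_static, so the thread count can later come from CLI config.
        replay_tx_thread_pool: rayon::ThreadPool,
    }

    impl Stage {
        fn new(num_threads: usize) -> Self {
            let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new()
                .num_threads(num_threads)
                .thread_name(|i| format!("solReplayTx{i:02}"))
                .build()
                .expect("new rayon threadpool");
            Self { replay_tx_thread_pool }
        }

        // The pool is borrowed by callees; nothing consults a global.
        fn confirm_slot(&self, entries: &[u64]) -> u64 {
            self.replay_tx_thread_pool
                .install(|| entries.par_iter().sum())
        }
    }

A short-lived caller (the process_blockstore_from_root case) would build the
same kind of pool inside the function body, letting it drop on return instead
of persisting for the life of the process.
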
--- core/src/replay_stage.rs | 36 +++++++++++++--- ledger/src/blockstore_processor.rs | 69 ++++++++++++++++++++++++------ ledger/src/lib.rs | 3 -- 3 files changed, 86 insertions(+), 22 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 3683e257ed10a8..015ec5360448f9 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -51,6 +51,7 @@ use { solana_measure::measure::Measure, solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, solana_program_runtime::timings::ExecuteTimings, + solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, rpc_subscriptions::RpcSubscriptions, @@ -652,16 +653,23 @@ impl ReplayStage { r_bank_forks.get_vote_only_mode_signal(), ) }; + // Thread pool to (maybe) replay multiple threads in parallel let replay_mode = if replay_slots_concurrently { ForkReplayMode::Serial } else { let pool = rayon::ThreadPoolBuilder::new() .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) - .thread_name(|i| format!("solReplay{i:02}")) + .thread_name(|i| format!("solReplayFork{i:02}")) .build() .expect("new rayon threadpool"); ForkReplayMode::Parallel(pool) }; + // Thread pool to replay multiple transactions within one block in parallel + let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(get_max_thread_count()) + .thread_name(|i| format!("solReplayTx{i:02}")) + .build() + .expect("new rayon threadpool"); Self::reset_poh_recorder( &my_pubkey, @@ -724,6 +732,7 @@ impl ReplayStage { &mut replay_timing, log_messages_bytes_limit, &replay_mode, + &replay_tx_thread_pool, &prioritization_fee_cache, &mut purge_repair_slot_counter, ); @@ -2136,6 +2145,7 @@ impl ReplayStage { fn replay_blockstore_into_bank( bank: &BankWithScheduler, blockstore: &Blockstore, + replay_tx_thread_pool: &ThreadPool, replay_stats: &RwLock, replay_progress: &RwLock, transaction_status_sender: Option<&TransactionStatusSender>, @@ -2154,6 +2164,7 @@ impl ReplayStage { blockstore_processor::confirm_slot( blockstore, bank, + replay_tx_thread_pool, &mut w_replay_stats, &mut w_replay_progress, false, @@ -2712,7 +2723,8 @@ impl ReplayStage { fn replay_active_banks_concurrently( blockstore: &Blockstore, bank_forks: &RwLock, - thread_pool: &ThreadPool, + fork_thread_pool: &ThreadPool, + replay_tx_thread_pool: &ThreadPool, my_pubkey: &Pubkey, vote_account: &Pubkey, progress: &mut ProgressMap, @@ -2730,7 +2742,7 @@ impl ReplayStage { let longest_replay_time_us = AtomicU64::new(0); // Allow for concurrent replaying of slots from different forks. 
- let replay_result_vec: Vec = thread_pool.install(|| { + let replay_result_vec: Vec = fork_thread_pool.install(|| { active_bank_slots .into_par_iter() .map(|bank_slot| { @@ -2744,7 +2756,7 @@ impl ReplayStage { trace!( "Replay active bank: slot {}, thread_idx {}", bank_slot, - thread_pool.current_thread_index().unwrap_or_default() + fork_thread_pool.current_thread_index().unwrap_or_default() ); let mut progress_lock = progress.write().unwrap(); if progress_lock @@ -2797,6 +2809,7 @@ impl ReplayStage { let blockstore_result = Self::replay_blockstore_into_bank( &bank, blockstore, + replay_tx_thread_pool, &replay_stats, &replay_progress, transaction_status_sender, @@ -2826,6 +2839,7 @@ impl ReplayStage { fn replay_active_bank( blockstore: &Blockstore, bank_forks: &RwLock, + replay_tx_thread_pool: &ThreadPool, my_pubkey: &Pubkey, vote_account: &Pubkey, progress: &mut ProgressMap, @@ -2884,6 +2898,7 @@ impl ReplayStage { let blockstore_result = Self::replay_blockstore_into_bank( &bank, blockstore, + replay_tx_thread_pool, &bank_progress.replay_stats, &bank_progress.replay_progress, transaction_status_sender, @@ -3183,6 +3198,7 @@ impl ReplayStage { replay_timing: &mut ReplayLoopTiming, log_messages_bytes_limit: Option, replay_mode: &ForkReplayMode, + replay_tx_thread_pool: &ThreadPool, prioritization_fee_cache: &PrioritizationFeeCache, purge_repair_slot_counter: &mut PurgeRepairSlotCounter, ) -> bool /* completed a bank */ { @@ -3199,11 +3215,12 @@ impl ReplayStage { let replay_result_vec = match replay_mode { // Skip the overhead of the threadpool if there is only one bank to play - ForkReplayMode::Parallel(thread_pool) if num_active_banks > 1 => { + ForkReplayMode::Parallel(fork_thread_pool) if num_active_banks > 1 => { Self::replay_active_banks_concurrently( blockstore, bank_forks, - thread_pool, + fork_thread_pool, + replay_tx_thread_pool, my_pubkey, vote_account, progress, @@ -3223,6 +3240,7 @@ impl ReplayStage { Self::replay_active_bank( blockstore, bank_forks, + replay_tx_thread_pool, my_pubkey, vote_account, progress, @@ -5034,9 +5052,15 @@ pub(crate) mod tests { blockstore.insert_shreds(shreds, None, false).unwrap(); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let exit = Arc::new(AtomicBool::new(false)); + let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(1) + .thread_name(|i| format!("solReplayTest{i:02}")) + .build() + .expect("new rayon threadpool"); let res = ReplayStage::replay_blockstore_into_bank( &bank1, &blockstore, + &replay_tx_thread_pool, &bank1_progress.replay_stats, &bank1_progress.replay_progress, None, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index e4ae5f368b2afd..a76387f7cb2054 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -89,16 +89,6 @@ struct ReplayEntry { starting_index: usize, } -// get_max_thread_count to match number of threads in the old code. -// see: https://github.com/solana-labs/solana/pull/24853 -lazy_static! 
{ - static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() - .num_threads(get_max_thread_count()) - .thread_name(|i| format!("solBstoreProc{i:02}")) - .build() - .unwrap(); -} - fn first_err(results: &[Result<()>]) -> Result<()> { for r in results { if r.is_err() { @@ -139,6 +129,14 @@ fn get_first_error( first_err } +fn create_thread_pool(num_threads: usize) -> ThreadPool { + rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .thread_name(|i| format!("solReplayTx{i:02}")) + .build() + .expect("new rayon threadpool") +} + pub fn execute_batch( batch: &TransactionBatchWithIndexes, bank: &Arc, @@ -242,6 +240,7 @@ impl ExecuteBatchesInternalMetrics { fn execute_batches_internal( bank: &Arc, + replay_tx_thread_pool: &ThreadPool, batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, @@ -253,7 +252,7 @@ fn execute_batches_internal( Mutex::new(HashMap::new()); let mut execute_batches_elapsed = Measure::start("execute_batches_elapsed"); - let results: Vec> = PAR_THREAD_POOL.install(|| { + let results: Vec> = replay_tx_thread_pool.install(|| { batches .into_par_iter() .map(|transaction_batch| { @@ -275,7 +274,7 @@ fn execute_batches_internal( "execute_batch", ); - let thread_index = PAR_THREAD_POOL.current_thread_index().unwrap(); + let thread_index = replay_tx_thread_pool.current_thread_index().unwrap(); execution_timings_per_thread .lock() .unwrap() @@ -324,6 +323,7 @@ fn execute_batches_internal( // invocation). fn process_batches( bank: &BankWithScheduler, + replay_tx_thread_pool: &ThreadPool, batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, @@ -348,6 +348,7 @@ fn process_batches( ); rebatch_and_execute_batches( bank, + replay_tx_thread_pool, batches, transaction_status_sender, replay_vote_sender, @@ -398,6 +399,7 @@ fn rebatch_transactions<'a>( fn rebatch_and_execute_batches( bank: &Arc, + replay_tx_thread_pool: &ThreadPool, batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, @@ -481,6 +483,7 @@ fn rebatch_and_execute_batches( let execute_batches_internal_metrics = execute_batches_internal( bank, + replay_tx_thread_pool, rebatched_txs, transaction_status_sender, replay_vote_sender, @@ -506,6 +509,7 @@ pub fn process_entries_for_tests( transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, ) -> Result<()> { + let replay_tx_thread_pool = create_thread_pool(1); let verify_transaction = { let bank = bank.clone_with_scheduler(); move |versioned_tx: VersionedTransaction| -> Result { @@ -533,6 +537,7 @@ pub fn process_entries_for_tests( let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); let result = process_entries( bank, + &replay_tx_thread_pool, &mut replay_entries, transaction_status_sender, replay_vote_sender, @@ -547,6 +552,7 @@ pub fn process_entries_for_tests( fn process_entries( bank: &BankWithScheduler, + replay_tx_thread_pool: &ThreadPool, entries: &mut [ReplayEntry], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, @@ -572,6 +578,7 @@ fn process_entries( // execute the group and register the tick process_batches( bank, + replay_tx_thread_pool, &batches, transaction_status_sender, replay_vote_sender, @@ -625,6 +632,7 @@ fn process_entries( // 
execute the current queue and try to process this entry again process_batches( bank, + replay_tx_thread_pool, &batches, transaction_status_sender, replay_vote_sender, @@ -640,6 +648,7 @@ fn process_entries( } process_batches( bank, + replay_tx_thread_pool, &batches, transaction_status_sender, replay_vote_sender, @@ -805,6 +814,7 @@ pub(crate) fn process_blockstore_for_bank_0( let bank_forks = BankForks::new_rw_arc(bank0); info!("Processing ledger for slot 0..."); + let replay_tx_thread_pool = create_thread_pool(get_max_thread_count()); process_bank_0( &bank_forks .read() @@ -812,6 +822,7 @@ pub(crate) fn process_blockstore_for_bank_0( .get_with_scheduler(bank0_slot) .unwrap(), blockstore, + &replay_tx_thread_pool, opts, &VerifyRecyclers::default(), cache_block_meta_sender, @@ -871,10 +882,12 @@ pub fn process_blockstore_from_root( .meta(start_slot) .unwrap_or_else(|_| panic!("Failed to get meta for slot {start_slot}")) { + let replay_tx_thread_pool = create_thread_pool(get_max_thread_count()); load_frozen_forks( bank_forks, &start_slot_meta, blockstore, + &replay_tx_thread_pool, leader_schedule_cache, opts, transaction_status_sender, @@ -978,9 +991,11 @@ fn verify_ticks( Ok(()) } +#[allow(clippy::too_many_arguments)] fn confirm_full_slot( blockstore: &Blockstore, bank: &BankWithScheduler, + replay_tx_thread_pool: &ThreadPool, opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut ConfirmationProgress, @@ -996,6 +1011,7 @@ fn confirm_full_slot( confirm_slot( blockstore, bank, + replay_tx_thread_pool, &mut confirmation_timing, progress, skip_verification, @@ -1142,6 +1158,7 @@ impl ConfirmationProgress { pub fn confirm_slot( blockstore: &Blockstore, bank: &BankWithScheduler, + replay_tx_thread_pool: &ThreadPool, timing: &mut ConfirmationTiming, progress: &mut ConfirmationProgress, skip_verification: bool, @@ -1171,6 +1188,7 @@ pub fn confirm_slot( confirm_slot_entries( bank, + replay_tx_thread_pool, slot_entries_load_result, timing, progress, @@ -1187,6 +1205,7 @@ pub fn confirm_slot( #[allow(clippy::too_many_arguments)] fn confirm_slot_entries( bank: &BankWithScheduler, + replay_tx_thread_pool: &ThreadPool, slot_entries_load_result: (Vec, u64, bool), timing: &mut ConfirmationTiming, progress: &mut ConfirmationProgress, @@ -1328,6 +1347,7 @@ fn confirm_slot_entries( .collect(); let process_result = process_entries( bank, + replay_tx_thread_pool, &mut replay_entries, transaction_status_sender, replay_vote_sender, @@ -1385,6 +1405,7 @@ fn confirm_slot_entries( fn process_bank_0( bank0: &BankWithScheduler, blockstore: &Blockstore, + replay_tx_thread_pool: &ThreadPool, opts: &ProcessOptions, recyclers: &VerifyRecyclers, cache_block_meta_sender: Option<&CacheBlockMetaSender>, @@ -1395,6 +1416,7 @@ fn process_bank_0( confirm_full_slot( blockstore, bank0, + replay_tx_thread_pool, opts, recyclers, &mut progress, @@ -1479,6 +1501,7 @@ fn load_frozen_forks( bank_forks: &RwLock, start_slot_meta: &SlotMeta, blockstore: &Blockstore, + replay_tx_thread_pool: &ThreadPool, leader_schedule_cache: &LeaderScheduleCache, opts: &ProcessOptions, transaction_status_sender: Option<&TransactionStatusSender>, @@ -1566,6 +1589,7 @@ fn load_frozen_forks( if process_single_slot( blockstore, &bank, + replay_tx_thread_pool, opts, &recyclers, &mut progress, @@ -1771,6 +1795,7 @@ fn supermajority_root_from_vote_accounts( fn process_single_slot( blockstore: &Blockstore, bank: &BankWithScheduler, + replay_tx_thread_pool: &ThreadPool, opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut 
ConfirmationProgress, @@ -1785,6 +1810,7 @@ fn process_single_slot( confirm_full_slot( blockstore, bank, + replay_tx_thread_pool, opts, recyclers, progress, @@ -3692,7 +3718,16 @@ pub mod tests { ..ProcessOptions::default() }; let recyclers = VerifyRecyclers::default(); - process_bank_0(&bank0, &blockstore, &opts, &recyclers, None, None); + let replay_tx_thread_pool = create_thread_pool(1); + process_bank_0( + &bank0, + &blockstore, + &replay_tx_thread_pool, + &opts, + &recyclers, + None, + None, + ); let bank0_last_blockhash = bank0.last_blockhash(); let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent( bank0.clone_without_scheduler(), @@ -3702,6 +3737,7 @@ pub mod tests { confirm_full_slot( &blockstore, &bank1, + &replay_tx_thread_pool, &opts, &recyclers, &mut ConfirmationProgress::new(bank0_last_blockhash), @@ -4342,8 +4378,10 @@ pub mod tests { slot_full: bool, prev_entry_hash: Hash, ) -> result::Result<(), BlockstoreProcessorError> { + let replay_tx_thread_pool = create_thread_pool(1); confirm_slot_entries( &BankWithScheduler::new_without_scheduler(bank.clone()), + &replay_tx_thread_pool, (slot_entries, 0, slot_full), &mut ConfirmationTiming::default(), &mut ConfirmationProgress::new(prev_entry_hash), @@ -4400,6 +4438,7 @@ pub mod tests { let bank = BankWithScheduler::new_without_scheduler( Bank::new_with_bank_forks_for_tests(&genesis_config).0, ); + let replay_tx_thread_pool = create_thread_pool(1); let mut timing = ConfirmationTiming::default(); let mut progress = ConfirmationProgress::new(genesis_hash); let amount = genesis_config.rent.minimum_balance(0); @@ -4436,6 +4475,7 @@ pub mod tests { confirm_slot_entries( &bank, + &replay_tx_thread_pool, (vec![entry], 0, false), &mut timing, &mut progress, @@ -4480,6 +4520,7 @@ pub mod tests { confirm_slot_entries( &bank, + &replay_tx_thread_pool, (vec![entry], 0, false), &mut timing, &mut progress, @@ -4592,10 +4633,12 @@ pub mod tests { transaction_indexes: (0..txs.len()).collect(), }; + let replay_tx_thread_pool = create_thread_pool(1); let mut batch_execution_timing = BatchExecutionTiming::default(); let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); assert!(process_batches( &bank, + &replay_tx_thread_pool, &[batch_with_indexes], None, None, diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 10dd5182717841..5f577e3c938aaf 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -39,8 +39,5 @@ extern crate solana_metrics; #[macro_use] extern crate log; -#[macro_use] -extern crate lazy_static; - #[macro_use] extern crate solana_frozen_abi_macro; From 7020864d6c23c94ac744faddb95528cc2375cc35 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 12 Mar 2024 14:25:47 -0400 Subject: [PATCH 379/401] Adds a new bench for accounts delta hash (#210) --- accounts-db/benches/bench_hashing.rs | 50 +++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs index 3158f78c7a938f..78df86a97f5168 100644 --- a/accounts-db/benches/bench_hashing.rs +++ b/accounts-db/benches/bench_hashing.rs @@ -1,7 +1,11 @@ use { - criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}, - solana_accounts_db::accounts_db::AccountsDb, - solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, + criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput}, + rand::seq::SliceRandom, + solana_accounts_db::{ + accounts_db::AccountsDb, + accounts_hash::{AccountHash, 
AccountsHasher}, + }, + solana_sdk::{account::AccountSharedData, hash::Hash, pubkey::Pubkey}, }; const KB: usize = 1024; @@ -39,5 +43,43 @@ fn bench_hash_account(c: &mut Criterion) { } } -criterion_group!(benches, bench_hash_account,); +fn bench_accounts_delta_hash(c: &mut Criterion) { + const ACCOUNTS_COUNTS: [usize; 4] = [ + 1, // the smallest count; will bench overhead + 100, // number of accounts written per slot on mnb (with *no* rent rewrites) + 1_000, // number of accounts written slot on mnb (with rent rewrites) + 10_000, // reasonable largest number of accounts written per slot + ]; + + fn create_account_hashes(accounts_count: usize) -> Vec<(Pubkey, AccountHash)> { + let mut account_hashes: Vec<_> = std::iter::repeat_with(|| { + let address = Pubkey::new_unique(); + let hash = AccountHash(Hash::new_unique()); + (address, hash) + }) + .take(accounts_count) + .collect(); + + // since the accounts delta hash needs to sort the accounts first, ensure we're not + // creating a pre-sorted vec. + let mut rng = rand::thread_rng(); + account_hashes.shuffle(&mut rng); + account_hashes + } + + let mut group = c.benchmark_group("accounts_delta_hash"); + for accounts_count in ACCOUNTS_COUNTS { + group.throughput(Throughput::Elements(accounts_count as u64)); + let account_hashes = create_account_hashes(accounts_count); + group.bench_function(BenchmarkId::new("accounts_count", accounts_count), |b| { + b.iter_batched( + || account_hashes.clone(), + AccountsHasher::accumulate_account_hashes, + BatchSize::SmallInput, + ); + }); + } +} + +criterion_group!(benches, bench_hash_account, bench_accounts_delta_hash); criterion_main!(benches); From 8c446f26cdadf26c442e1a2ce9a46b4ae41f484d Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Tue, 12 Mar 2024 13:41:54 -0500 Subject: [PATCH 380/401] Fully remove `ThinClient` from `bench-tps` (#132) remove ThinClient from bench-tps --- Cargo.lock | 1 - bench-tps/Cargo.toml | 1 - bench-tps/src/bench_tps_client.rs | 1 - bench-tps/src/bench_tps_client/thin_client.rs | 143 ------------------ 4 files changed, 146 deletions(-) delete mode 100644 bench-tps/src/bench_tps_client/thin_client.rs diff --git a/Cargo.lock b/Cargo.lock index 503b2280d86ec9..df8f1e1586134c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5588,7 +5588,6 @@ dependencies = [ "solana-sdk", "solana-streamer", "solana-test-validator", - "solana-thin-client", "solana-tpu-client", "solana-transaction-status", "solana-version", diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 2fc48c9e296d50..2c7060175f0a8c 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -35,7 +35,6 @@ solana-rpc-client-nonce-utils = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } -solana-thin-client = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } diff --git a/bench-tps/src/bench_tps_client.rs b/bench-tps/src/bench_tps_client.rs index 0715d739879165..173cdd7cc3e2a5 100644 --- a/bench-tps/src/bench_tps_client.rs +++ b/bench-tps/src/bench_tps_client.rs @@ -113,5 +113,4 @@ pub trait BenchTpsClient { mod bank_client; mod rpc_client; -mod thin_client; mod tpu_client; diff --git a/bench-tps/src/bench_tps_client/thin_client.rs b/bench-tps/src/bench_tps_client/thin_client.rs deleted file mode 100644 index 22945c4494f453..00000000000000 --- a/bench-tps/src/bench_tps_client/thin_client.rs +++ /dev/null @@ -1,143 +0,0 @@ -use { - 
crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, - solana_client::thin_client::ThinClient, - solana_rpc_client_api::config::RpcBlockConfig, - solana_sdk::{ - account::Account, - client::{AsyncClient, Client, SyncClient}, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::Signature, - slot_history::Slot, - transaction::Transaction, - }, - solana_transaction_status::UiConfirmedBlock, -}; - -impl BenchTpsClient for ThinClient { - fn send_transaction(&self, transaction: Transaction) -> Result { - AsyncClient::async_send_transaction(self, transaction).map_err(|err| err.into()) - } - fn send_batch(&self, transactions: Vec) -> Result<()> { - AsyncClient::async_send_batch(self, transactions).map_err(|err| err.into()) - } - fn get_latest_blockhash(&self) -> Result { - SyncClient::get_latest_blockhash(self).map_err(|err| err.into()) - } - - fn get_latest_blockhash_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> Result<(Hash, u64)> { - SyncClient::get_latest_blockhash_with_commitment(self, commitment_config) - .map_err(|err| err.into()) - } - - fn get_transaction_count(&self) -> Result { - SyncClient::get_transaction_count(self).map_err(|err| err.into()) - } - - fn get_transaction_count_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> Result { - SyncClient::get_transaction_count_with_commitment(self, commitment_config) - .map_err(|err| err.into()) - } - - fn get_epoch_info(&self) -> Result { - SyncClient::get_epoch_info(self).map_err(|err| err.into()) - } - - fn get_balance(&self, pubkey: &Pubkey) -> Result { - SyncClient::get_balance(self, pubkey).map_err(|err| err.into()) - } - - fn get_balance_with_commitment( - &self, - pubkey: &Pubkey, - commitment_config: CommitmentConfig, - ) -> Result { - SyncClient::get_balance_with_commitment(self, pubkey, commitment_config) - .map_err(|err| err.into()) - } - - fn get_fee_for_message(&self, message: &Message) -> Result { - SyncClient::get_fee_for_message(self, message).map_err(|err| err.into()) - } - - fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result { - SyncClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into()) - } - - fn addr(&self) -> String { - Client::tpu_addr(self) - } - - fn request_airdrop_with_blockhash( - &self, - pubkey: &Pubkey, - lamports: u64, - recent_blockhash: &Hash, - ) -> Result { - self.rpc_client() - .request_airdrop_with_blockhash(pubkey, lamports, recent_blockhash) - .map_err(|err| err.into()) - } - - fn get_account(&self, pubkey: &Pubkey) -> Result { - self.rpc_client() - .get_account(pubkey) - .map_err(|err| err.into()) - } - - fn get_account_with_commitment( - &self, - pubkey: &Pubkey, - commitment_config: CommitmentConfig, - ) -> Result { - SyncClient::get_account_with_commitment(self, pubkey, commitment_config) - .map_err(|err| err.into()) - .and_then(|account| { - account.ok_or_else(|| { - BenchTpsError::Custom(format!("AccountNotFound: pubkey={pubkey}")) - }) - }) - } - - fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> Result>> { - self.rpc_client() - .get_multiple_accounts(pubkeys) - .map_err(|err| err.into()) - } - - fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) -> Result { - self.rpc_client() - .get_slot_with_commitment(commitment_config) - .map_err(|err| err.into()) - } - - fn get_blocks_with_commitment( - &self, - start_slot: Slot, - end_slot: Option, - commitment_config: CommitmentConfig, - 
) -> Result> { - self.rpc_client() - .get_blocks_with_commitment(start_slot, end_slot, commitment_config) - .map_err(|err| err.into()) - } - - fn get_block_with_config( - &self, - slot: Slot, - rpc_block_config: RpcBlockConfig, - ) -> Result { - self.rpc_client() - .get_block_with_config(slot, rpc_block_config) - .map_err(|err| err.into()) - } -} From f8bb98b5f4e7710a5ea75a44c0c0f6eddfb09dd7 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 12 Mar 2024 16:11:44 -0500 Subject: [PATCH 381/401] Move default value for --rpc-pubsub-notification-threads to CLI (#158) The default value was previously being determined down where the thread pool is being created. Providing a default value at the CLI level is consistent with other args, and gives an operator better visibility into what the default will actually be --- Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + rpc/src/rpc_pubsub_service.rs | 8 ++-- rpc/src/rpc_subscriptions.rs | 71 +++++++++++++++-------------------- validator/Cargo.toml | 1 + validator/src/cli.rs | 8 ++++ validator/src/main.rs | 8 ++-- 7 files changed, 50 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df8f1e1586134c..54d1cbc4e0b4b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -233,6 +233,7 @@ dependencies = [ "solana-perf", "solana-poh", "solana-program-runtime", + "solana-rayon-threadlimit", "solana-rpc", "solana-rpc-client", "solana-rpc-client-api", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 4d606fc4e9ed51..9c78461b1b0a81 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -118,6 +118,7 @@ dependencies = [ "solana-perf", "solana-poh", "solana-program-runtime", + "solana-rayon-threadlimit", "solana-rpc", "solana-rpc-client", "solana-rpc-client-api", diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 3e32503691d78e..99155e678675f5 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -12,10 +12,12 @@ use { jsonrpc_core::IoHandler, soketto::handshake::{server, Server}, solana_metrics::TokenCounter, + solana_rayon_threadlimit::get_thread_count, solana_sdk::timing::AtomicInterval, std::{ io, net::SocketAddr, + num::NonZeroUsize, str, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, @@ -43,7 +45,7 @@ pub struct PubSubConfig { pub queue_capacity_items: usize, pub queue_capacity_bytes: usize, pub worker_threads: usize, - pub notification_threads: Option, + pub notification_threads: Option, } impl Default for PubSubConfig { @@ -55,7 +57,7 @@ impl Default for PubSubConfig { queue_capacity_items: DEFAULT_QUEUE_CAPACITY_ITEMS, queue_capacity_bytes: DEFAULT_QUEUE_CAPACITY_BYTES, worker_threads: DEFAULT_WORKER_THREADS, - notification_threads: None, + notification_threads: NonZeroUsize::new(get_thread_count()), } } } @@ -69,7 +71,7 @@ impl PubSubConfig { queue_capacity_items: DEFAULT_TEST_QUEUE_CAPACITY_ITEMS, queue_capacity_bytes: DEFAULT_QUEUE_CAPACITY_BYTES, worker_threads: DEFAULT_WORKER_THREADS, - notification_threads: Some(2), + notification_threads: NonZeroUsize::new(2), } } } diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 7ecfd6a31a42cc..39d746c48049de 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -19,7 +19,6 @@ use { solana_account_decoder::{parse_token::is_known_spl_token_id, UiAccount, UiAccountEncoding}, solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}, solana_measure::measure::Measure, - solana_rayon_threadlimit::get_thread_count, solana_rpc_client_api::response::{ 
ProcessedSignatureResult, ReceivedSignatureResult, Response as RpcResponse, RpcBlockUpdate, RpcBlockUpdateError, RpcKeyedAccount, RpcLogsResponse, RpcResponseContext, @@ -631,41 +630,37 @@ impl RpcSubscriptions { config.queue_capacity_bytes, )), }; - let notification_threads = config.notification_threads.unwrap_or_else(get_thread_count); - let t_cleanup = if notification_threads == 0 { - None - } else { + + let t_cleanup = config.notification_threads.map(|notification_threads| { let exit = exit.clone(); - Some( - Builder::new() - .name("solRpcNotifier".to_string()) - .spawn(move || { - let pool = rayon::ThreadPoolBuilder::new() - .num_threads(notification_threads) - .thread_name(|i| format!("solRpcNotify{i:02}")) - .build() - .unwrap(); - pool.install(|| { - if let Some(rpc_notifier_ready) = rpc_notifier_ready { - rpc_notifier_ready.fetch_or(true, Ordering::Relaxed); - } - Self::process_notifications( - exit, - max_complete_transaction_status_slot, - max_complete_rewards_slot, - blockstore, - notifier, - notification_receiver, - subscriptions, - bank_forks, - block_commitment_cache, - optimistically_confirmed_bank, - ) - }); - }) - .unwrap(), - ) - }; + Builder::new() + .name("solRpcNotifier".to_string()) + .spawn(move || { + let pool = rayon::ThreadPoolBuilder::new() + .num_threads(notification_threads.get()) + .thread_name(|i| format!("solRpcNotify{i:02}")) + .build() + .unwrap(); + pool.install(|| { + if let Some(rpc_notifier_ready) = rpc_notifier_ready { + rpc_notifier_ready.fetch_or(true, Ordering::Relaxed); + } + Self::process_notifications( + exit, + max_complete_transaction_status_slot, + max_complete_rewards_slot, + blockstore, + notifier, + notification_receiver, + subscriptions, + bank_forks, + block_commitment_cache, + optimistically_confirmed_bank, + ) + }); + }) + .unwrap() + }); let control = SubscriptionControl::new( config.max_active_subscriptions, @@ -674,11 +669,7 @@ impl RpcSubscriptions { ); Self { - notification_sender: if notification_threads == 0 { - None - } else { - Some(notification_sender) - }, + notification_sender: config.notification_threads.map(|_| notification_sender), t_cleanup, exit, control, diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 74742c90faa29d..0a6324f454e2b2 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -50,6 +50,7 @@ solana-net-utils = { workspace = true } solana-perf = { workspace = true } solana-poh = { workspace = true } solana-program-runtime = { workspace = true } +solana-rayon-threadlimit = { workspace = true } solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index d1ad63b760f031..e9298d9c02928e 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -26,6 +26,7 @@ use { solana_faucet::faucet::{self, FAUCET_PORT}, solana_ledger::use_snapshot_archives_at_startup, solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}, + solana_rayon_threadlimit::get_thread_count, solana_rpc::{rpc::MAX_REQUEST_BODY_SIZE, rpc_pubsub_service::PubSubConfig}, solana_rpc_client_api::request::MAX_MULTIPLE_ACCOUNTS, solana_runtime::{ @@ -1079,6 +1080,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .value_name("NUM_THREADS") .validator(is_parsable::) + .default_value_if( + "full_rpc_api", + None, + &default_args.rpc_pubsub_notification_threads, + ) .help( "The maximum number of threads that RPC PubSub will use for 
generating \ notifications. 0 will disable RPC PubSub notifications", @@ -2138,6 +2144,7 @@ pub struct DefaultArgs { pub rpc_bigtable_max_message_size: String, pub rpc_max_request_body_size: String, pub rpc_pubsub_worker_threads: String, + pub rpc_pubsub_notification_threads: String, pub maximum_local_snapshot_age: String, pub maximum_full_snapshot_archives_to_retain: String, @@ -2225,6 +2232,7 @@ impl DefaultArgs { rpc_bigtable_max_message_size: solana_storage_bigtable::DEFAULT_MAX_MESSAGE_SIZE .to_string(), rpc_pubsub_worker_threads: "4".to_string(), + rpc_pubsub_notification_threads: get_thread_count().to_string(), maximum_full_snapshot_archives_to_retain: DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), maximum_incremental_snapshot_archives_to_retain: diff --git a/validator/src/main.rs b/validator/src/main.rs index b00eabfef9a7b0..7f3de66b457c74 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1382,11 +1382,9 @@ pub fn main() { usize ), worker_threads: value_t_or_exit!(matches, "rpc_pubsub_worker_threads", usize), - notification_threads: if full_api { - value_of(&matches, "rpc_pubsub_notification_threads") - } else { - Some(0) - }, + notification_threads: value_t!(matches, "rpc_pubsub_notification_threads", usize) + .ok() + .and_then(NonZeroUsize::new), }, voting_disabled: matches.is_present("no_voting") || restricted_repair_only_mode, wait_for_supermajority: value_t!(matches, "wait_for_supermajority", Slot).ok(), From e682fec28e6c40741c7e6c5a566553d0c3f916f4 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 13 Mar 2024 12:17:08 +0800 Subject: [PATCH 382/401] [anza migration]: fix download link for net scripts (#219) --- net/net.sh | 2 +- scripts/agave-install-deploy.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/net.sh b/net/net.sh index c1e93d095be7eb..b82872f5046d8e 100755 --- a/net/net.sh +++ b/net/net.sh @@ -563,7 +563,7 @@ prepareDeploy() { if [[ -n $releaseChannel ]]; then echo "Downloading release from channel: $releaseChannel" rm -f "$SOLANA_ROOT"/solana-release.tar.bz2 - declare updateDownloadUrl=https://release.agave.xyz/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2 + declare updateDownloadUrl=https://release.anza.xyz/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2 ( set -x curl -L -I "$updateDownloadUrl" diff --git a/scripts/agave-install-deploy.sh b/scripts/agave-install-deploy.sh index 01366a1cfbc5af..dcdec14ffb635d 100755 --- a/scripts/agave-install-deploy.sh +++ b/scripts/agave-install-deploy.sh @@ -57,7 +57,7 @@ esac case $TAG in edge|beta) - DOWNLOAD_URL=https://release.agave.xyz/"$TAG"/solana-release-$TARGET.tar.bz2 + DOWNLOAD_URL=https://release.anza.xyz/"$TAG"/solana-release-$TARGET.tar.bz2 ;; *) DOWNLOAD_URL=https://github.com/anza-xyz/agave/releases/download/"$TAG"/solana-release-$TARGET.tar.bz2 From 91bff85ffcbe85ba7d7154ac87372f5a73b06d43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 17:37:40 +0800 Subject: [PATCH 383/401] build(deps): bump http from 0.2.11 to 0.2.12 (#85) * build(deps): bump http from 0.2.11 to 0.2.12 Bumps [http](https://github.com/hyperium/http) from 0.2.11 to 0.2.12. 
- [Release notes](https://github.com/hyperium/http/releases) - [Changelog](https://github.com/hyperium/http/blob/v0.2.12/CHANGELOG.md) - [Commits](https://github.com/hyperium/http/compare/v0.2.11...v0.2.12) --- updated-dependencies: - dependency-name: http dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54d1cbc4e0b4b8..ac5ce30231bd0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2661,9 +2661,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", diff --git a/Cargo.toml b/Cargo.toml index f1ac84f6875b8f..430ce25c4bf102 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -216,7 +216,7 @@ hex = "0.4.3" hidapi = { version = "2.6.0", default-features = false } histogram = "0.6.9" hmac = "0.12.1" -http = "0.2.11" +http = "0.2.12" humantime = "2.0.1" hyper = "0.14.28" hyper-proxy = "0.9.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9c78461b1b0a81..fadf7b3e28a5cc 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2138,9 +2138,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", From 9b16df25d33a0f00caef85d328a4d4d4e39572c9 Mon Sep 17 00:00:00 2001 From: ripatel-fd Date: Wed, 13 Mar 2024 13:24:39 +0100 Subject: [PATCH 384/401] sdk: support Pubkey::to_bytes as constexpr (#220) --- sdk/program/src/pubkey.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 728a5cd252d89f..2f1ccbdcfbfd7e 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -631,7 +631,7 @@ impl Pubkey { } } - pub fn to_bytes(self) -> [u8; 32] { + pub const fn to_bytes(self) -> [u8; 32] { self.0 } From 7f27644e6556a9f3b765df0ee52dd05098364cda Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 21:59:25 +0800 Subject: [PATCH 385/401] build(deps): bump raptorq from 1.8.0 to 1.8.1 (#222) Bumps [raptorq](https://github.com/cberner/raptorq) from 1.8.0 to 1.8.1. - [Release notes](https://github.com/cberner/raptorq/releases) - [Commits](https://github.com/cberner/raptorq/compare/v1.8.0...v1.8.1) --- updated-dependencies: - dependency-name: raptorq dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac5ce30231bd0c..8555c174ad1cbf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4481,9 +4481,9 @@ dependencies = [ [[package]] name = "raptorq" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c9cf9270cc5903afdef387f06ef1cd89fb77f45c357c2a425bae78b839fd866" +checksum = "7cc8cd0bcb2d520fff368264b5a6295e064c60955349517d09b14473afae4856" [[package]] name = "rayon" diff --git a/Cargo.toml b/Cargo.toml index 430ce25c4bf102..89a373d295135d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,7 +278,7 @@ quinn-proto = "0.10.6" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" -raptorq = "1.8.0" +raptorq = "1.8.1" rayon = "1.9.0" reed-solomon-erasure = "6.0.0" regex = "1.10.3" From 81075e60b36a68dbf718d46798769617571aaa6e Mon Sep 17 00:00:00 2001 From: Brennan Date: Wed, 13 Mar 2024 08:21:54 -0700 Subject: [PATCH 386/401] Loaded program test robustness (#59) * tests robust to prog entry count --- program-runtime/src/loaded_programs.rs | 320 +++++++++++-------------- 1 file changed, 135 insertions(+), 185 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index e8a691c537934f..f6163d63cd738c 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -1301,100 +1301,93 @@ mod tests { assert_eq!(program.decayed_usage_counter(100), 0); } - #[test] - fn test_random_eviction() { - let mut programs = vec![]; - - let mut cache = new_mock_cache::(); - - // This test adds different kind of entries to the cache. - // Tombstones and unloaded entries are expected to not be evicted. - // It also adds multiple entries for three programs as it tries to create a typical cache instance. 
- let program1 = Pubkey::new_unique(); - let program1_deployment_slots = [0, 10, 20]; - let program1_usage_counters = [4, 5, 25]; - program1_deployment_slots + fn program_deploy_test_helper( + cache: &mut LoadedPrograms, + program: Pubkey, + deployment_slots: Vec, + usage_counters: Vec, + programs: &mut Vec<(Pubkey, Slot, u64)>, + ) { + // Add multiple entries for program + deployment_slots .iter() .enumerate() .for_each(|(i, deployment_slot)| { - let usage_counter = *program1_usage_counters.get(i).unwrap_or(&0); + let usage_counter = *usage_counters.get(i).unwrap_or(&0); cache.assign_program( - program1, + program, new_test_loaded_program_with_usage( *deployment_slot, - (*deployment_slot) + 2, + (*deployment_slot).saturating_add(2), AtomicU64::new(usage_counter), ), ); - programs.push((program1, *deployment_slot, usage_counter)); + programs.push((program, *deployment_slot, usage_counter)); }); + // Add tombstones entries for program let env = Arc::new(BuiltinProgram::new_mock()); for slot in 21..31 { set_tombstone( - &mut cache, - program1, + cache, + program, slot, LoadedProgramType::FailedVerification(env.clone()), ); } + // Add unloaded entries for program for slot in 31..41 { - insert_unloaded_program(&mut cache, program1, slot); + insert_unloaded_program(cache, program, slot); } + } - let program2 = Pubkey::new_unique(); - let program2_deployment_slots = [5, 11]; - let program2_usage_counters = [0, 2]; - program2_deployment_slots - .iter() - .enumerate() - .for_each(|(i, deployment_slot)| { - let usage_counter = *program2_usage_counters.get(i).unwrap_or(&0); - cache.assign_program( - program2, - new_test_loaded_program_with_usage( - *deployment_slot, - (*deployment_slot) + 2, - AtomicU64::new(usage_counter), - ), - ); - programs.push((program2, *deployment_slot, usage_counter)); - }); + #[test] + fn test_random_eviction() { + let mut programs = vec![]; - for slot in 31..41 { - insert_unloaded_program(&mut cache, program2, slot); - } + let mut cache = new_mock_cache::(); - let program3 = Pubkey::new_unique(); - let program3_deployment_slots = [0, 5, 15]; - let program3_usage_counters = [100, 3, 20]; - program3_deployment_slots - .iter() - .enumerate() - .for_each(|(i, deployment_slot)| { - let usage_counter = *program3_usage_counters.get(i).unwrap_or(&0); - cache.assign_program( - program3, - new_test_loaded_program_with_usage( - *deployment_slot, - (*deployment_slot) + 2, - AtomicU64::new(usage_counter), - ), - ); - programs.push((program3, *deployment_slot, usage_counter)); - }); + // This test adds different kind of entries to the cache. + // Tombstones and unloaded entries are expected to not be evicted. + // It also adds multiple entries for three programs as it tries to create a typical cache instance. 
- for slot in 21..31 { - set_tombstone(&mut cache, program3, slot, LoadedProgramType::Closed); - } + // Program 1 + program_deploy_test_helper( + &mut cache, + Pubkey::new_unique(), + vec![0, 10, 20], + vec![4, 5, 25], + &mut programs, + ); - for slot in 31..41 { - insert_unloaded_program(&mut cache, program3, slot); - } + // Program 2 + program_deploy_test_helper( + &mut cache, + Pubkey::new_unique(), + vec![5, 11], + vec![0, 2], + &mut programs, + ); - programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count); + // Program 3 + program_deploy_test_helper( + &mut cache, + Pubkey::new_unique(), + vec![0, 5, 15], + vec![100, 3, 20], + &mut programs, + ); + // 1 for each deployment slot + let num_loaded_expected = 8; + // 10 for each program + let num_unloaded_expected = 30; + // 10 for each program + let num_tombstones_expected = 30; + + // Count the number of loaded, unloaded and tombstone entries. + programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count); let num_loaded = num_matching_entries(&cache, |program_type| { matches!(program_type, LoadedProgramType::TestLoaded(_)) }); @@ -1411,16 +1404,19 @@ mod tests { }); // Test that the cache is constructed with the expected number of entries. - assert_eq!(num_loaded, 8); - assert_eq!(num_unloaded, 30); - assert_eq!(num_tombstones, 20); + assert_eq!(num_loaded, num_loaded_expected); + assert_eq!(num_unloaded, num_unloaded_expected); + assert_eq!(num_tombstones, num_tombstones_expected); - // Evicting to 2% should update cache with - // * 5 active entries - // * 33 unloaded entries (3 active programs will get unloaded) - // * 20 tombstones (tombstones are not evicted) - cache.evict_using_2s_random_selection(Percentage::from(2), 21); + // Evict entries from the cache + let eviction_pct = 2; + let num_loaded_expected = + Percentage::from(eviction_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT); + let num_unloaded_expected = num_unloaded_expected + num_loaded - num_loaded_expected; + cache.evict_using_2s_random_selection(Percentage::from(eviction_pct), 21); + + // Count the number of loaded, unloaded and tombstone entries. let num_loaded = num_matching_entries(&cache, |program_type| { matches!(program_type, LoadedProgramType::TestLoaded(_)) }); @@ -1428,111 +1424,58 @@ mod tests { matches!(program_type, LoadedProgramType::Unloaded(_)) }); let num_tombstones = num_matching_entries(&cache, |program_type| { - matches!( - program_type, - LoadedProgramType::DelayVisibility - | LoadedProgramType::FailedVerification(_) - | LoadedProgramType::Closed - ) + matches!(program_type, LoadedProgramType::FailedVerification(_)) }); - // Test that expected number of loaded entries get evicted/unloaded. 
- assert_eq!(num_loaded, 5); - assert_eq!(num_unloaded, 33); - assert_eq!(num_tombstones, 20); + // However many entries are left after the shrink + assert_eq!(num_loaded, num_loaded_expected); + // The original unloaded entries + the evicted loaded entries + assert_eq!(num_unloaded, num_unloaded_expected); + // The original tombstones are not evicted + assert_eq!(num_tombstones, num_tombstones_expected); } #[test] fn test_eviction() { let mut programs = vec![]; - let mut cache = new_mock_cache::(); - let program1 = Pubkey::new_unique(); - let program1_deployment_slots = [0, 10, 20]; - let program1_usage_counters = [4, 5, 25]; - program1_deployment_slots - .iter() - .enumerate() - .for_each(|(i, deployment_slot)| { - let usage_counter = *program1_usage_counters.get(i).unwrap_or(&0); - cache.assign_program( - program1, - new_test_loaded_program_with_usage( - *deployment_slot, - (*deployment_slot) + 2, - AtomicU64::new(usage_counter), - ), - ); - programs.push((program1, *deployment_slot, usage_counter)); - }); - - let env = Arc::new(BuiltinProgram::new_mock()); - for slot in 21..31 { - set_tombstone( - &mut cache, - program1, - slot, - LoadedProgramType::FailedVerification(env.clone()), - ); - } - - for slot in 31..41 { - insert_unloaded_program(&mut cache, program1, slot); - } - - let program2 = Pubkey::new_unique(); - let program2_deployment_slots = [5, 11]; - let program2_usage_counters = [0, 2]; - program2_deployment_slots - .iter() - .enumerate() - .for_each(|(i, deployment_slot)| { - let usage_counter = *program2_usage_counters.get(i).unwrap_or(&0); - cache.assign_program( - program2, - new_test_loaded_program_with_usage( - *deployment_slot, - (*deployment_slot) + 2, - AtomicU64::new(usage_counter), - ), - ); - programs.push((program2, *deployment_slot, usage_counter)); - }); - - for slot in 31..41 { - insert_unloaded_program(&mut cache, program2, slot); - } + // Program 1 + program_deploy_test_helper( + &mut cache, + Pubkey::new_unique(), + vec![0, 10, 20], + vec![4, 5, 25], + &mut programs, + ); - let program3 = Pubkey::new_unique(); - let program3_deployment_slots = [0, 5, 15]; - let program3_usage_counters = [100, 3, 20]; - program3_deployment_slots - .iter() - .enumerate() - .for_each(|(i, deployment_slot)| { - let usage_counter = *program3_usage_counters.get(i).unwrap_or(&0); - cache.assign_program( - program3, - new_test_loaded_program_with_usage( - *deployment_slot, - (*deployment_slot) + 2, - AtomicU64::new(usage_counter), - ), - ); - programs.push((program3, *deployment_slot, usage_counter)); - }); + // Program 2 + program_deploy_test_helper( + &mut cache, + Pubkey::new_unique(), + vec![5, 11], + vec![0, 2], + &mut programs, + ); - for slot in 21..31 { - set_tombstone(&mut cache, program3, slot, LoadedProgramType::Closed); - } + // Program 3 + program_deploy_test_helper( + &mut cache, + Pubkey::new_unique(), + vec![0, 5, 15], + vec![100, 3, 20], + &mut programs, + ); - for slot in 31..41 { - insert_unloaded_program(&mut cache, program3, slot); - } + // 1 for each deployment slot + let num_loaded_expected = 8; + // 10 for each program + let num_unloaded_expected = 30; + // 10 for each program + let num_tombstones_expected = 30; + // Count the number of loaded, unloaded and tombstone entries. 
programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count); - let num_loaded = num_matching_entries(&cache, |program_type| { matches!(program_type, LoadedProgramType::TestLoaded(_)) }); @@ -1540,23 +1483,23 @@ mod tests { matches!(program_type, LoadedProgramType::Unloaded(_)) }); let num_tombstones = num_matching_entries(&cache, |program_type| { - matches!( - program_type, - LoadedProgramType::DelayVisibility - | LoadedProgramType::FailedVerification(_) - | LoadedProgramType::Closed - ) + matches!(program_type, LoadedProgramType::FailedVerification(_)) }); - assert_eq!(num_loaded, 8); - assert_eq!(num_unloaded, 30); - assert_eq!(num_tombstones, 20); + // Test that the cache is constructed with the expected number of entries. + assert_eq!(num_loaded, num_loaded_expected); + assert_eq!(num_unloaded, num_unloaded_expected); + assert_eq!(num_tombstones, num_tombstones_expected); + + // Evict entries from the cache + let eviction_pct = 2; + + let num_loaded_expected = + Percentage::from(eviction_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT); + let num_unloaded_expected = num_unloaded_expected + num_loaded - num_loaded_expected; + + cache.sort_and_unload(Percentage::from(eviction_pct)); - // Evicting to 2% should update cache with - // * 5 active entries - // * 33 unloaded entries (3 active programs will get unloaded) - // * 20 tombstones (tombstones are not evicted) - cache.sort_and_unload(Percentage::from(2)); // Check that every program is still in the cache. programs.iter().for_each(|entry| { assert!(cache.entries.get(&entry.0).is_some()); @@ -1578,6 +1521,7 @@ mod tests { assert!(unloaded.contains(&(expected.0, expected.2))); } + // Count the number of loaded, unloaded and tombstone entries. let num_loaded = num_matching_entries(&cache, |program_type| { matches!(program_type, LoadedProgramType::TestLoaded(_)) }); @@ -1593,9 +1537,12 @@ mod tests { ) }); - assert_eq!(num_loaded, 5); - assert_eq!(num_unloaded, 33); - assert_eq!(num_tombstones, 20); + // However many entries are left after the shrink + assert_eq!(num_loaded, num_loaded_expected); + // The original unloaded entries + the evicted loaded entries + assert_eq!(num_unloaded, num_unloaded_expected); + // The original tombstones are not evicted + assert_eq!(num_tombstones, num_tombstones_expected); } #[test] @@ -1603,7 +1550,11 @@ mod tests { let mut cache = new_mock_cache::(); let program = Pubkey::new_unique(); - let num_total_programs = 6; + let evict_to_pct = 2; + let cache_capacity_after_shrink = + Percentage::from(evict_to_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT); + // Add enough programs to the cache to trigger 1 eviction after shrinking. 
+ let num_total_programs = (cache_capacity_after_shrink + 1) as u64; (0..num_total_programs).for_each(|i| { cache.assign_program( program, @@ -1611,8 +1562,7 @@ mod tests { ); }); - // This will unload the program deployed at slot 0, with usage count = 10 - cache.sort_and_unload(Percentage::from(2)); + cache.sort_and_unload(Percentage::from(evict_to_pct)); let num_unloaded = num_matching_entries(&cache, |program_type| { matches!(program_type, LoadedProgramType::Unloaded(_)) From 33f941d473092dd2dbd576f171d6c1c945aedcb4 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 13 Mar 2024 12:15:24 -0400 Subject: [PATCH 387/401] SVM: Eliminate filter_executable_program_accounts from pub interface (#214) --- svm/src/transaction_processor.rs | 188 +++++++++++++++++++++++++- svm/tests/transaction_processor.rs | 206 ----------------------------- 2 files changed, 186 insertions(+), 208 deletions(-) delete mode 100644 svm/tests/transaction_processor.rs diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 5801b3b8316fdc..c42566fc9876f9 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -339,7 +339,7 @@ impl TransactionBatchProcessor { /// Returns a hash map of executable program accounts (program accounts that are not writable /// in the given transactions), and their owners, for the transactions with a valid /// blockhash or nonce. - pub fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( + fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( callbacks: &CB, txs: &[SanitizedTransaction], lock_results: &mut [TransactionCheckResult], @@ -953,8 +953,9 @@ mod tests { bpf_loader, message::{LegacyMessage, Message, MessageHeader}, rent_debits::RentDebits, - signature::Signature, + signature::{Keypair, Signature}, sysvar::rent::Rent, + transaction::{SanitizedTransaction, Transaction, TransactionError}, transaction_context::TransactionContext, }, std::{ @@ -1949,4 +1950,187 @@ mod tests { assert_eq!(result[&key1], (&owner1, 2)); assert_eq!(result[&key2], (&owner2, 1)); } + + #[test] + fn test_filter_executable_program_accounts_no_errors() { + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + + let non_program_pubkey1 = Pubkey::new_unique(); + let non_program_pubkey2 = Pubkey::new_unique(); + let program1_pubkey = Pubkey::new_unique(); + let program2_pubkey = Pubkey::new_unique(); + let account1_pubkey = Pubkey::new_unique(); + let account2_pubkey = Pubkey::new_unique(); + let account3_pubkey = Pubkey::new_unique(); + let account4_pubkey = Pubkey::new_unique(); + + let account5_pubkey = Pubkey::new_unique(); + + let mut bank = MockBankCallback::default(); + bank.account_shared_data.insert( + non_program_pubkey1, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + non_program_pubkey2, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + program1_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + program2_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + account1_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey1), + ); + bank.account_shared_data.insert( + account2_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey2), + ); + bank.account_shared_data.insert( + account3_pubkey, + AccountSharedData::new(40, 1, &program1_pubkey), + ); + bank.account_shared_data.insert( + 
account4_pubkey, + AccountSharedData::new(40, 1, &program2_pubkey), + ); + + let tx1 = Transaction::new_with_compiled_instructions( + &[&keypair1], + &[non_program_pubkey1], + Hash::new_unique(), + vec![account1_pubkey, account2_pubkey, account3_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); + + let tx2 = Transaction::new_with_compiled_instructions( + &[&keypair2], + &[non_program_pubkey2], + Hash::new_unique(), + vec![account4_pubkey, account3_pubkey, account2_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); + + let owners = &[program1_pubkey, program2_pubkey]; + let programs = + TransactionBatchProcessor::::filter_executable_program_accounts( + &bank, + &[sanitized_tx1, sanitized_tx2], + &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], + owners, + ); + + // The result should contain only account3_pubkey, and account4_pubkey as the program accounts + assert_eq!(programs.len(), 2); + assert_eq!( + programs + .get(&account3_pubkey) + .expect("failed to find the program account"), + &(&program1_pubkey, 2) + ); + assert_eq!( + programs + .get(&account4_pubkey) + .expect("failed to find the program account"), + &(&program2_pubkey, 1) + ); + } + + #[test] + fn test_filter_executable_program_accounts_invalid_blockhash() { + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + + let non_program_pubkey1 = Pubkey::new_unique(); + let non_program_pubkey2 = Pubkey::new_unique(); + let program1_pubkey = Pubkey::new_unique(); + let program2_pubkey = Pubkey::new_unique(); + let account1_pubkey = Pubkey::new_unique(); + let account2_pubkey = Pubkey::new_unique(); + let account3_pubkey = Pubkey::new_unique(); + let account4_pubkey = Pubkey::new_unique(); + + let account5_pubkey = Pubkey::new_unique(); + + let mut bank = MockBankCallback::default(); + bank.account_shared_data.insert( + non_program_pubkey1, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + non_program_pubkey2, + AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.account_shared_data.insert( + program1_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + program2_pubkey, + AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.account_shared_data.insert( + account1_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey1), + ); + bank.account_shared_data.insert( + account2_pubkey, + AccountSharedData::new(1, 10, &non_program_pubkey2), + ); + bank.account_shared_data.insert( + account3_pubkey, + AccountSharedData::new(40, 1, &program1_pubkey), + ); + bank.account_shared_data.insert( + account4_pubkey, + AccountSharedData::new(40, 1, &program2_pubkey), + ); + + let tx1 = Transaction::new_with_compiled_instructions( + &[&keypair1], + &[non_program_pubkey1], + Hash::new_unique(), + vec![account1_pubkey, account2_pubkey, account3_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); + + let tx2 = Transaction::new_with_compiled_instructions( + &[&keypair2], + &[non_program_pubkey2], + Hash::new_unique(), + vec![account4_pubkey, account3_pubkey, account2_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + // Let's not register blockhash from tx2. 
This should cause the tx2 to fail + let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); + + let owners = &[program1_pubkey, program2_pubkey]; + let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; + let programs = + TransactionBatchProcessor::::filter_executable_program_accounts( + &bank, + &[sanitized_tx1, sanitized_tx2], + &mut lock_results, + owners, + ); + + // The result should contain only account3_pubkey as the program accounts + assert_eq!(programs.len(), 1); + assert_eq!( + programs + .get(&account3_pubkey) + .expect("failed to find the program account"), + &(&program1_pubkey, 1) + ); + assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); + } } diff --git a/svm/tests/transaction_processor.rs b/svm/tests/transaction_processor.rs deleted file mode 100644 index 1704054246748d..00000000000000 --- a/svm/tests/transaction_processor.rs +++ /dev/null @@ -1,206 +0,0 @@ -#![cfg(test)] - -use { - solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, - solana_sdk::{ - account::AccountSharedData, - clock::Slot, - hash::Hash, - instruction::CompiledInstruction, - pubkey::Pubkey, - signature::Keypair, - transaction::{SanitizedTransaction, Transaction, TransactionError}, - }, - solana_svm::transaction_processor::TransactionBatchProcessor, -}; - -mod mock_bank; - -struct MockForkGraph {} - -impl ForkGraph for MockForkGraph { - fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation { - todo!() - } -} - -#[test] -fn test_filter_executable_program_accounts() { - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - let mut bank = mock_bank::MockBankCallback::default(); - bank.account_shared_data.insert( - non_program_pubkey1, - AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.account_shared_data.insert( - non_program_pubkey2, - AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.account_shared_data.insert( - program1_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.account_shared_data.insert( - program2_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.account_shared_data.insert( - account1_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey1), - ); - bank.account_shared_data.insert( - account2_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey2), - ); - bank.account_shared_data.insert( - account3_pubkey, - AccountSharedData::new(40, 1, &program1_pubkey), - ); - bank.account_shared_data.insert( - account4_pubkey, - AccountSharedData::new(40, 1, &program2_pubkey), - ); - - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - - let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], 
- ); - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let owners = &[program1_pubkey, program2_pubkey]; - let programs = TransactionBatchProcessor::::filter_executable_program_accounts( - &bank, - &[sanitized_tx1, sanitized_tx2], - &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], - owners, - ); - - // The result should contain only account3_pubkey, and account4_pubkey as the program accounts - assert_eq!(programs.len(), 2); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 2) - ); - assert_eq!( - programs - .get(&account4_pubkey) - .expect("failed to find the program account"), - &(&program2_pubkey, 1) - ); -} - -#[test] -fn test_filter_executable_program_accounts_invalid_blockhash() { - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - let mut bank = mock_bank::MockBankCallback::default(); - bank.account_shared_data.insert( - non_program_pubkey1, - AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.account_shared_data.insert( - non_program_pubkey2, - AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.account_shared_data.insert( - program1_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.account_shared_data.insert( - program2_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.account_shared_data.insert( - account1_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey1), - ); - bank.account_shared_data.insert( - account2_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey2), - ); - bank.account_shared_data.insert( - account3_pubkey, - AccountSharedData::new(40, 1, &program1_pubkey), - ); - bank.account_shared_data.insert( - account4_pubkey, - AccountSharedData::new(40, 1, &program2_pubkey), - ); - - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - - let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - // Let's not register blockhash from tx2. 
This should cause the tx2 to fail - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let owners = &[program1_pubkey, program2_pubkey]; - let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; - let programs = TransactionBatchProcessor::::filter_executable_program_accounts( - &bank, - &[sanitized_tx1, sanitized_tx2], - &mut lock_results, - owners, - ); - - // The result should contain only account3_pubkey as the program accounts - assert_eq!(programs.len(), 1); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 1) - ); - assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); -} From 5ed30beb2a771820879e3eb5ffec93361a1ed8b4 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 13 Mar 2024 11:29:05 -0500 Subject: [PATCH 388/401] ledger-tool: Allow compute-slot-cost to operate on dead slots (#213) Make this command accept the --allow-dead-slots arg as well --- ledger-tool/src/main.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 8445782f840931..94298623e953cd 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -445,14 +445,14 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { dot.join("\n") } -fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> { - if blockstore.is_dead(slot) { - return Err("Dead slot".to_string()); - } - +fn compute_slot_cost( + blockstore: &Blockstore, + slot: Slot, + allow_dead_slots: bool, +) -> Result<(), String> { let (entries, _num_shreds, _is_full) = blockstore - .get_slot_entries_with_shred_info(slot, 0, false) - .map_err(|err| format!(" Slot: {slot}, Failed to load entries, err {err:?}"))?; + .get_slot_entries_with_shred_info(slot, 0, allow_dead_slots) + .map_err(|err| format!("Slot: {slot}, Failed to load entries, err {err:?}"))?; let num_entries = entries.len(); let mut num_transactions = 0; @@ -1482,7 +1482,8 @@ fn main() { "Slots that their blocks are computed for cost, default to all slots \ in ledger", ), - ), + ) + .arg(&allow_dead_slots_arg), ) .program_subcommand() .get_matches(); @@ -2947,9 +2948,10 @@ fn main() { } else { slots = values_t_or_exit!(arg_matches, "slots", Slot); } + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); for slot in slots { - if let Err(err) = compute_slot_cost(&blockstore, slot) { + if let Err(err) = compute_slot_cost(&blockstore, slot, allow_dead_slots) { eprintln!("{err}"); } } From e13fbeb198fdb00c4381cff0c7ca627bd4cec365 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 13 Mar 2024 10:07:11 -0700 Subject: [PATCH 389/401] [TieredStorage] Repurpose TieredReadableAccount to HotAccount (#218) #### Problem As we further optimize the HotStorageMeta in #146, there is a need for a HotAccount struct that contains all the hot account information. Meanwhile, we currently don't have plans to develop a cold account format at this moment. As a result, this makes it desirable to repurpose TieredReadableAccount to HotAccount. #### Summary of Changes Repurpose TieredReadableAccount to HotAccount. #### Test Plan Existing tiered-storage tests. 
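Because callers interact with the repurposed type only through the `ReadableAccount` trait, generic consumers are unaffected by the rename. A minimal sketch, assuming a hypothetical `summarize` helper that is not part of this change:

    use solana_sdk::account::ReadableAccount;

    // Compiles against `HotAccount` or any other `ReadableAccount`
    // implementor; only the concrete type name changes in this patch,
    // not the trait surface that callers depend on.
    fn summarize<A: ReadableAccount>(account: &A) -> (u64, usize) {
        (account.lamports(), account.data().len())
    }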
--- accounts-db/src/account_storage/meta.rs | 4 +- accounts-db/src/tiered_storage/hot.rs | 78 +++++++++++++++++++- accounts-db/src/tiered_storage/readable.rs | 83 +--------------------- 3 files changed, 79 insertions(+), 86 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index b6c8d72042097a..cc01ba164b077f 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -3,7 +3,7 @@ use { accounts_hash::AccountHash, append_vec::AppendVecStoredAccountMeta, storable_accounts::StorableAccounts, - tiered_storage::{hot::HotAccountMeta, readable::TieredReadableAccount}, + tiered_storage::hot::{HotAccount, HotAccountMeta}, }, solana_sdk::{account::ReadableAccount, hash::Hash, pubkey::Pubkey, stake_history::Epoch}, std::{borrow::Borrow, marker::PhantomData}, @@ -114,7 +114,7 @@ impl< #[derive(PartialEq, Eq, Debug)] pub enum StoredAccountMeta<'storage> { AppendVec(AppendVecStoredAccountMeta<'storage>), - Hot(TieredReadableAccount<'storage, HotAccountMeta>), + Hot(HotAccount<'storage, HotAccountMeta>), } impl<'storage> StoredAccountMeta<'storage> { diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 34f7915186ba9b..5448c9b0f8a5ee 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -13,7 +13,6 @@ use { meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, mmap_utils::{get_pod, get_slice}, owners::{OwnerOffset, OwnersBlockFormat, OwnersTable, OWNER_NO_OWNER}, - readable::TieredReadableAccount, StorableAccounts, StorableAccountsWithHashesAndWriteVersions, TieredStorageError, TieredStorageFormat, TieredStorageResult, }, @@ -264,6 +263,81 @@ impl TieredAccountMeta for HotAccountMeta { } } +/// The struct that offers read APIs for accessing a hot account. +#[derive(PartialEq, Eq, Debug)] +pub struct HotAccount<'accounts_file, M: TieredAccountMeta> { + /// TieredAccountMeta + pub meta: &'accounts_file M, + /// The address of the account + pub address: &'accounts_file Pubkey, + /// The address of the account owner + pub owner: &'accounts_file Pubkey, + /// The index for accessing the account inside its belonging AccountsFile + pub index: IndexOffset, + /// The account block that contains this account. Note that this account + /// block may be shared with other accounts. + pub account_block: &'accounts_file [u8], +} + +impl<'accounts_file, M: TieredAccountMeta> HotAccount<'accounts_file, M> { + /// Returns the address of this account. + pub fn address(&self) -> &'accounts_file Pubkey { + self.address + } + + /// Returns the index to this account in its AccountsFile. + pub fn index(&self) -> IndexOffset { + self.index + } + + /// Returns the data associated to this account. + pub fn data(&self) -> &'accounts_file [u8] { + self.meta.account_data(self.account_block) + } +} + +impl<'accounts_file, M: TieredAccountMeta> ReadableAccount for HotAccount<'accounts_file, M> { + /// Returns the balance of the lamports of this account. + fn lamports(&self) -> u64 { + self.meta.lamports() + } + + /// Returns the address of the owner of this account. + fn owner(&self) -> &'accounts_file Pubkey { + self.owner + } + + /// Returns true if the data associated to this account is executable. + fn executable(&self) -> bool { + self.meta.flags().executable() + } + + /// Returns the epoch that this account will next owe rent by parsing + /// the specified account block. 
RENT_EXEMPT_RENT_EPOCH will be returned + /// if the account is rent-exempt. + /// + /// For a zero-lamport account, Epoch::default() will be returned to + /// default states of an AccountSharedData. + fn rent_epoch(&self) -> Epoch { + self.meta + .rent_epoch(self.account_block) + .unwrap_or(if self.lamports() != 0 { + RENT_EXEMPT_RENT_EPOCH + } else { + // While there is no valid-values for any fields of a zero + // lamport account, here we return Epoch::default() to + // match the default states of AccountSharedData. Otherwise, + // a hash mismatch will occur. + Epoch::default() + }) + } + + /// Returns the data associated to this account. + fn data(&self) -> &'accounts_file [u8] { + self.data() + } +} + /// The reader to a hot accounts file. #[derive(Debug)] pub struct HotStorageReader { @@ -437,7 +511,7 @@ impl HotStorageReader { let account_block = self.get_account_block(account_offset, index_offset)?; Ok(Some(( - StoredAccountMeta::Hot(TieredReadableAccount { + StoredAccountMeta::Hot(HotAccount { meta, address, owner, diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 8f1d2007182a5b..e3d169d4f6d99e 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -6,94 +6,13 @@ use { footer::{AccountMetaFormat, TieredStorageFooter}, hot::HotStorageReader, index::IndexOffset, - meta::TieredAccountMeta, TieredStorageResult, }, }, - solana_sdk::{ - account::ReadableAccount, pubkey::Pubkey, rent_collector::RENT_EXEMPT_RENT_EPOCH, - stake_history::Epoch, - }, + solana_sdk::pubkey::Pubkey, std::path::Path, }; -/// The struct that offers read APIs for accessing a TieredAccount. -#[derive(PartialEq, Eq, Debug)] -pub struct TieredReadableAccount<'accounts_file, M: TieredAccountMeta> { - /// TieredAccountMeta - pub meta: &'accounts_file M, - /// The address of the account - pub address: &'accounts_file Pubkey, - /// The address of the account owner - pub owner: &'accounts_file Pubkey, - /// The index for accessing the account inside its belonging AccountsFile - pub index: IndexOffset, - /// The account block that contains this account. Note that this account - /// block may be shared with other accounts. - pub account_block: &'accounts_file [u8], -} - -impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, M> { - /// Returns the address of this account. - pub fn address(&self) -> &'accounts_file Pubkey { - self.address - } - - /// Returns the index to this account in its AccountsFile. - pub fn index(&self) -> IndexOffset { - self.index - } - - /// Returns the data associated to this account. - pub fn data(&self) -> &'accounts_file [u8] { - self.meta.account_data(self.account_block) - } -} - -impl<'accounts_file, M: TieredAccountMeta> ReadableAccount - for TieredReadableAccount<'accounts_file, M> -{ - /// Returns the balance of the lamports of this account. - fn lamports(&self) -> u64 { - self.meta.lamports() - } - - /// Returns the address of the owner of this account. - fn owner(&self) -> &'accounts_file Pubkey { - self.owner - } - - /// Returns true if the data associated to this account is executable. - fn executable(&self) -> bool { - self.meta.flags().executable() - } - - /// Returns the epoch that this account will next owe rent by parsing - /// the specified account block. RENT_EXEMPT_RENT_EPOCH will be returned - /// if the account is rent-exempt. 
- /// - /// For a zero-lamport account, Epoch::default() will be returned to - /// default states of an AccountSharedData. - fn rent_epoch(&self) -> Epoch { - self.meta - .rent_epoch(self.account_block) - .unwrap_or(if self.lamports() != 0 { - RENT_EXEMPT_RENT_EPOCH - } else { - // While there is no valid-values for any fields of a zero - // lamport account, here we return Epoch::default() to - // match the default states of AccountSharedData. Otherwise, - // a hash mismatch will occur. - Epoch::default() - }) - } - - /// Returns the data associated to this account. - fn data(&self) -> &'accounts_file [u8] { - self.data() - } -} - -/// The reader of a tiered storage instance. #[derive(Debug)] pub enum TieredStorageReader { From 69b6d5a376d2bf4ccfe5e6a58bf629a19ea25478 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 13 Mar 2024 10:26:37 -0700 Subject: [PATCH 390/401] [TieredStorage] Remove the general-purpose TieredStorageWriter (#196) #### Problem tiered_storage/writer.rs was added when we planned to support multiple tiers in the tiered-storage (i.e., at least hot and cold). However, as we changed our plan to handle cold accounts as state-compressed accounts, we don't need a general-purpose tiered-storage writer at this moment. #### Summary of Changes Remove tiered_storage/writer.rs as we currently don't have plans to develop cold storage. #### Test Plan Existing tiered-storage tests. --- accounts-db/src/tiered_storage.rs | 1 - accounts-db/src/tiered_storage/writer.rs | 63 ------------------------ 2 files changed, 64 deletions(-) delete mode 100644 accounts-db/src/tiered_storage/writer.rs diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 2f8ebac65e3b57..e15adb388605c2 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -11,7 +11,6 @@ pub mod mmap_utils; pub mod owners; pub mod readable; mod test_utils; -pub mod writer; use { crate::{ diff --git a/accounts-db/src/tiered_storage/writer.rs b/accounts-db/src/tiered_storage/writer.rs deleted file mode 100644 index 113d331e4a15c4..00000000000000 --- a/accounts-db/src/tiered_storage/writer.rs +++ /dev/null @@ -1,63 +0,0 @@ -//!
docs/src/proposals/append-vec-storage.md - -use { - crate::{ - account_storage::meta::{StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo}, - accounts_hash::AccountHash, - storable_accounts::StorableAccounts, - tiered_storage::{ - error::TieredStorageError, file::TieredStorageFile, footer::TieredStorageFooter, - TieredStorageFormat, TieredStorageResult, - }, - }, - solana_sdk::account::ReadableAccount, - std::{borrow::Borrow, path::Path}, -}; - -#[derive(Debug)] -pub struct TieredStorageWriter<'format> { - storage: TieredStorageFile, - format: &'format TieredStorageFormat, -} - -impl<'format> TieredStorageWriter<'format> { - pub fn new( - file_path: impl AsRef, - format: &'format TieredStorageFormat, - ) -> TieredStorageResult { - Ok(Self { - storage: TieredStorageFile::new_writable(file_path)?, - format, - }) - } - - pub fn write_accounts< - 'a, - 'b, - T: ReadableAccount + Sync, - U: StorableAccounts<'a, T>, - V: Borrow, - >( - &self, - accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, - skip: usize, - ) -> TieredStorageResult> { - let footer = TieredStorageFooter { - account_meta_format: self.format.account_meta_format, - owners_block_format: self.format.owners_block_format, - account_block_format: self.format.account_block_format, - index_block_format: self.format.index_block_format, - account_entry_count: accounts - .accounts - .len() - .saturating_sub(skip) - .try_into() - .expect("num accounts <= u32::MAX"), - ..TieredStorageFooter::default() - }; - - footer.write_footer_block(&self.storage)?; - - Err(TieredStorageError::Unsupported()) - } -} From 926c5713fc629cd4670ea67d76f4c18c43f26ab4 Mon Sep 17 00:00:00 2001 From: anwayde <111067470+anwayde@users.noreply.github.com> Date: Wed, 13 Mar 2024 14:29:27 -0500 Subject: [PATCH 391/401] bench-tps: allow option to not set account data size on every transaction (#209) bench-tps: allow option to not set account data size --- bench-tps/src/bench.rs | 60 ++++++++++++++++++++++++++---------- bench-tps/src/cli.rs | 13 ++++++++ bench-tps/src/keypairs.rs | 3 ++ bench-tps/src/main.rs | 2 ++ bench-tps/src/send_batch.rs | 14 +++++---- bench-tps/tests/bench_tps.rs | 2 ++ dos/src/main.rs | 1 + 7 files changed, 72 insertions(+), 23 deletions(-) diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index bddce402ac6382..8b370786861cea 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -139,6 +139,7 @@ struct TransactionChunkGenerator<'a, 'b, T: ?Sized> { reclaim_lamports_back_to_source_account: bool, compute_unit_price: Option, instruction_padding_config: Option, + skip_tx_account_data_size: bool, } impl<'a, 'b, T> TransactionChunkGenerator<'a, 'b, T> @@ -153,6 +154,7 @@ where compute_unit_price: Option, instruction_padding_config: Option, num_conflict_groups: Option, + skip_tx_account_data_size: bool, ) -> Self { let account_chunks = if let Some(num_conflict_groups) = num_conflict_groups { KeypairChunks::new_with_conflict_groups(gen_keypairs, chunk_size, num_conflict_groups) @@ -170,6 +172,7 @@ where reclaim_lamports_back_to_source_account: false, compute_unit_price, instruction_padding_config, + skip_tx_account_data_size, } } @@ -195,6 +198,7 @@ where source_nonce_chunk, dest_nonce_chunk, self.reclaim_lamports_back_to_source_account, + self.skip_tx_account_data_size, &self.instruction_padding_config, ) } else { @@ -206,6 +210,7 @@ where blockhash.unwrap(), &self.instruction_padding_config, &self.compute_unit_price, + self.skip_tx_account_data_size, ) }; @@ -397,6 +402,7 @@ where sustained, 
target_slots_per_epoch, compute_unit_price, + skip_tx_account_data_size, use_durable_nonce, instruction_padding_config, num_conflict_groups, @@ -412,6 +418,7 @@ where compute_unit_price, instruction_padding_config, num_conflict_groups, + skip_tx_account_data_size, ); let first_tx_count = loop { @@ -538,6 +545,7 @@ fn generate_system_txs( blockhash: &Hash, instruction_padding_config: &Option, compute_unit_price: &Option, + skip_tx_account_data_size: bool, ) -> Vec { let pairs: Vec<_> = if !reclaim { source.iter().zip(dest.iter()).collect() @@ -575,6 +583,7 @@ fn generate_system_txs( *blockhash, instruction_padding_config, Some(**compute_unit_price), + skip_tx_account_data_size, ), Some(timestamp()), ) @@ -592,6 +601,7 @@ fn generate_system_txs( *blockhash, instruction_padding_config, None, + skip_tx_account_data_size, ), Some(timestamp()), ) @@ -607,6 +617,7 @@ fn transfer_with_compute_unit_price_and_padding( recent_blockhash: Hash, instruction_padding_config: &Option, compute_unit_price: Option, + skip_tx_account_data_size: bool, ) -> Transaction { let from_pubkey = from_keypair.pubkey(); let transfer_instruction = system_instruction::transfer(&from_pubkey, to, lamports); @@ -621,12 +632,15 @@ fn transfer_with_compute_unit_price_and_padding( } else { transfer_instruction }; - let mut instructions = vec![ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( - get_transaction_loaded_accounts_data_size(instruction_padding_config.is_some()), - ), - instruction, - ]; + let mut instructions = vec![]; + if !skip_tx_account_data_size { + instructions.push( + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + get_transaction_loaded_accounts_data_size(instruction_padding_config.is_some()), + ), + ) + } + instructions.push(instruction); if instruction_padding_config.is_some() { // By default, CU budget is DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT which is much larger than needed instructions.push(ComputeBudgetInstruction::set_compute_unit_limit( @@ -711,6 +725,7 @@ fn nonced_transfer_with_padding( nonce_account: &Pubkey, nonce_authority: &Keypair, nonce_hash: Hash, + skip_tx_account_data_size: bool, instruction_padding_config: &Option, ) -> Transaction { let from_pubkey = from_keypair.pubkey(); @@ -726,12 +741,15 @@ fn nonced_transfer_with_padding( } else { transfer_instruction }; - let instructions = vec![ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( - get_transaction_loaded_accounts_data_size(instruction_padding_config.is_some()), - ), - instruction, - ]; + let mut instructions = vec![]; + if !skip_tx_account_data_size { + instructions.push( + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + get_transaction_loaded_accounts_data_size(instruction_padding_config.is_some()), + ), + ) + } + instructions.push(instruction); let message = Message::new_with_nonce( instructions, Some(&from_pubkey), @@ -748,6 +766,7 @@ fn generate_nonced_system_txs, reclaim: bool, + skip_tx_account_data_size: bool, instruction_padding_config: &Option, ) -> Vec { let length = source.len(); @@ -768,6 +787,7 @@ fn generate_nonced_system_txs Result> { let rent = client.get_minimum_balance_for_rent_exemption(0)?; @@ -1059,6 +1081,7 @@ pub fn generate_and_fund_keypairs( keypairs: &[Keypair], extra: u64, lamports_per_account: u64, + skip_tx_account_data_size: bool, enable_padding: bool, ) -> Result<()> { let rent = client.get_minimum_balance_for_rent_exemption(0)?; @@ -1131,6 +1155,8 @@ pub fn fund_keypairs( return Err(BenchTpsError::AirdropFailure); } } + let 
data_size_limit = (!skip_tx_account_data_size) + .then(|| get_transaction_loaded_accounts_data_size(enable_padding)); fund_keys( client, @@ -1139,7 +1165,7 @@ pub fn fund_keypairs( total, max_fee, lamports_per_account, - get_transaction_loaded_accounts_data_size(enable_padding), + data_size_limit, ); } Ok(()) @@ -1181,7 +1207,7 @@ mod tests { let keypair_count = config.tx_count * config.keypair_multiplier; let keypairs = - generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20, false) + generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20, false, false) .unwrap(); do_bench_tps(client, config, keypairs, None); @@ -1197,7 +1223,7 @@ mod tests { let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); let keypairs = - generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false) + generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false, false) .unwrap(); for kp in &keypairs { @@ -1222,7 +1248,7 @@ mod tests { let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); let keypairs = - generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false) + generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false, false) .unwrap(); for kp in &keypairs { @@ -1239,7 +1265,7 @@ mod tests { let lamports = 10_000_000; let authority_keypairs = - generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false) + generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false, false) .unwrap(); let nonce_keypairs = generate_durable_nonce_accounts(client.clone(), &authority_keypairs); diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index e2ee75fc551400..1804dbbc454e02 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -69,6 +69,7 @@ pub struct Config { pub use_quic: bool, pub tpu_connection_pool_size: usize, pub compute_unit_price: Option, + pub skip_tx_account_data_size: bool, pub use_durable_nonce: bool, pub instruction_padding_config: Option, pub num_conflict_groups: Option, @@ -101,6 +102,7 @@ impl Default for Config { use_quic: DEFAULT_TPU_USE_QUIC, tpu_connection_pool_size: DEFAULT_TPU_CONNECTION_POOL_SIZE, compute_unit_price: None, + skip_tx_account_data_size: false, use_durable_nonce: false, instruction_padding_config: None, num_conflict_groups: None, @@ -358,6 +360,13 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .conflicts_with("compute_unit_price") .help("Sets random compute-unit-price in range [0..100] to transfer transactions"), ) + .arg( + Arg::with_name("skip_tx_account_data_size") + .long("skip-tx-account-data-size") + .takes_value(false) + .conflicts_with("instruction_padding_data_size") + .help("Skips setting the account data size for each transaction"), + ) .arg( Arg::with_name("use_durable_nonce") .long("use-durable-nonce") @@ -537,6 +546,10 @@ pub fn parse_args(matches: &ArgMatches) -> Result { args.compute_unit_price = Some(ComputeUnitPrice::Random); } + if matches.is_present("skip_tx_account_data_size") { + args.skip_tx_account_data_size = true; + } + if matches.is_present("use_durable_nonce") { args.use_durable_nonce = true; } diff --git a/bench-tps/src/keypairs.rs b/bench-tps/src/keypairs.rs index d5f839190bd638..177e15bf5fca5f 100644 --- a/bench-tps/src/keypairs.rs +++ b/bench-tps/src/keypairs.rs @@ -16,6 +16,7 @@ pub fn get_keypairs( num_lamports_per_account: u64, client_ids_and_stake_file: &str, read_from_client_file: bool, + skip_tx_account_data_size: bool, enable_padding: bool, 
) -> Vec<Keypair>
where
@@ -57,6 +58,7 @@
             &keypairs,
             keypairs.len().saturating_sub(keypair_count) as u64,
             last_balance,
+            skip_tx_account_data_size,
             enable_padding,
         )
         .unwrap_or_else(|e| {
@@ -70,6 +72,7 @@
             id,
             keypair_count,
             num_lamports_per_account,
+            skip_tx_account_data_size,
             enable_padding,
         )
         .unwrap_or_else(|e| {
diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs
index 1560b9346ed28c..fa0fc1509055e4 100644
--- a/bench-tps/src/main.rs
+++ b/bench-tps/src/main.rs
@@ -194,6 +194,7 @@ fn main() {
         external_client_type,
         use_quic,
         tpu_connection_pool_size,
+        skip_tx_account_data_size,
         compute_unit_price,
         use_durable_nonce,
         instruction_padding_config,
@@ -267,6 +268,7 @@ fn main() {
         *num_lamports_per_account,
         client_ids_and_stake_file,
         *read_from_client_file,
+        *skip_tx_account_data_size,
         instruction_padding_config.is_some(),
     );
diff --git a/bench-tps/src/send_batch.rs b/bench-tps/src/send_batch.rs
index 75079c72ab020a..6acd5bbb675719 100644
--- a/bench-tps/src/send_batch.rs
+++ b/bench-tps/src/send_batch.rs
@@ -66,7 +66,7 @@ pub fn fund_keys(
     total: u64,
     max_fee: u64,
     lamports_per_account: u64,
-    data_size_limit: u32,
+    data_size_limit: Option<u32>,
 ) {
     let mut funded: Vec<&Keypair> = vec![source];
     let mut funded_funds = total;
@@ -354,7 +354,7 @@ trait FundingTransactions<'a>: SendBatchTransactions<'a, FundingSigners<'a>> {
         client: &Arc<T>,
         to_fund: &FundingChunk<'a>,
         to_lamports: u64,
-        data_size_limit: u32,
+        data_size_limit: Option<u32>,
     );
 }
@@ -364,13 +364,15 @@ impl<'a> FundingTransactions<'a> for FundingContainer<'a> {
         client: &Arc<T>,
         to_fund: &FundingChunk<'a>,
         to_lamports: u64,
-        data_size_limit: u32,
+        data_size_limit: Option<u32>,
     ) {
         self.make(to_fund, |(k, t)| -> (FundingSigners<'a>, Transaction) {
             let mut instructions = system_instruction::transfer_many(&k.pubkey(), t);
-            instructions.push(
-                ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size_limit),
-            );
+            if let Some(data_size_limit) = data_size_limit {
+                instructions.push(
+                    ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size_limit),
+                );
+            }
             let message = Message::new(&instructions, Some(&k.pubkey()));
             (*k, Transaction::new_unsigned(message))
         });
diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs
index 2efdd6c8ff6ef4..7a2b0fe20a5b8d 100644
--- a/bench-tps/tests/bench_tps.rs
+++ b/bench-tps/tests/bench_tps.rs
@@ -106,6 +106,7 @@ fn test_bench_tps_local_cluster(config: Config) {
         keypair_count,
         lamports_per_account,
         false,
+        false,
     )
     .unwrap();
@@ -152,6 +153,7 @@ fn test_bench_tps_test_validator(config: Config) {
         keypair_count,
         lamports_per_account,
         false,
+        false,
     )
     .unwrap();
     let nonce_keypairs = if config.use_durable_nonce {
diff --git a/dos/src/main.rs b/dos/src/main.rs
index 055b1f4bb65d4c..577e4a2d067393 100644
--- a/dos/src/main.rs
+++ b/dos/src/main.rs
@@ -560,6 +560,7 @@ fn create_payers(
         size,
         1_000_000,
         false,
+        false,
     )
     .unwrap_or_else(|e| {
         eprintln!("Error could not fund keys: {e:?}");
From e93854cc33b1319636488bd0efb45c2426a03834 Mon Sep 17 00:00:00 2001
From: Jon C
Date: Wed, 13 Mar 2024 20:39:45 +0100
Subject: [PATCH 392/401] deps: Update base64 to 0.22 (#225)

* deps: Update base64 to 0.22

* Add changelog entry
---
 CHANGELOG.md            |  1 +
 Cargo.lock              | 36 +++++++++++++++++++++---------------
 Cargo.toml              |  2 +-
 programs/sbf/Cargo.lock | 34 ++++++++++++++++++++--------------
 4 files changed, 43 insertions(+), 30 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 779a1301802391..5803745a401afb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@
Release channels have their own copy of this changelog: ## [2.0.0] - Unreleased * Changes * `central-scheduler` as default option for `--block-production-method` (#34891) + * `solana-rpc-client-api`: `RpcFilterError` depends on `base64` version 0.22, so users may need to upgrade to `base64` version 0.22 ## [1.18.0] * Changes diff --git a/Cargo.lock b/Cargo.lock index 8555c174ad1cbf..4620b4411b4c90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -785,6 +785,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "bincode" version = "1.3.3" @@ -5315,7 +5321,7 @@ version = "2.0.0" dependencies = [ "Inflector", "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "bv", @@ -5818,7 +5824,7 @@ name = "solana-cli-output" version = "2.0.0" dependencies = [ "Inflector", - "base64 0.21.7", + "base64 0.22.0", "chrono", "clap 2.33.3", "console", @@ -5950,7 +5956,7 @@ name = "solana-core" version = "2.0.0" dependencies = [ "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "bytes", @@ -6191,7 +6197,7 @@ dependencies = [ name = "solana-genesis" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "clap 2.33.3", "itertools", @@ -6635,7 +6641,7 @@ dependencies = [ "ark-serialize", "array-bytes", "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bitflags 2.4.2", "blake3", @@ -6687,7 +6693,7 @@ name = "solana-program-runtime" version = "2.0.0" dependencies = [ "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "eager", "enum-iterator", @@ -6718,7 +6724,7 @@ version = "2.0.0" dependencies = [ "assert_matches", "async-trait", - "base64 0.21.7", + "base64 0.22.0", "bincode", "chrono-humanize", "crossbeam-channel", @@ -6824,7 +6830,7 @@ dependencies = [ name = "solana-rpc" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "crossbeam-channel", @@ -6886,7 +6892,7 @@ version = "2.0.0" dependencies = [ "assert_matches", "async-trait", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "crossbeam-channel", @@ -6913,7 +6919,7 @@ dependencies = [ name = "solana-rpc-client-api" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bs58", "jsonrpc-core", "reqwest", @@ -6980,7 +6986,7 @@ dependencies = [ "aquamarine", "arrayref", "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "blake3", "bv", @@ -7076,7 +7082,7 @@ version = "2.0.0" dependencies = [ "anyhow", "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bitflags 2.4.2", "borsh 1.2.1", @@ -7323,7 +7329,7 @@ dependencies = [ name = "solana-test-validator" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "crossbeam-channel", "log", @@ -7450,7 +7456,7 @@ name = "solana-transaction-status" version = "2.0.0" dependencies = [ "Inflector", - "base64 0.21.7", + "base64 0.22.0", "bincode", "borsh 0.10.3", "bs58", @@ -7683,7 +7689,7 @@ name = "solana-zk-token-sdk" version = "2.0.0" dependencies = [ "aes-gcm-siv", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bytemuck", "byteorder", diff --git a/Cargo.toml b/Cargo.toml index 89a373d295135d..496b7aa42bcd96 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -154,7 +154,7 
@@ async-mutex = "1.4.0" async-trait = "0.1.77" atty = "0.2.11" backoff = "0.4.0" -base64 = "0.21.7" +base64 = "0.22.0" bincode = "1.3.3" bitflags = { version = "2.4.2", features = ["serde"] } blake3 = "1.5.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index fadf7b3e28a5cc..3204776825622b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -615,6 +615,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "bincode" version = "1.3.3" @@ -4602,7 +4608,7 @@ name = "solana-account-decoder" version = "2.0.0" dependencies = [ "Inflector", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "bv", @@ -4835,7 +4841,7 @@ name = "solana-cli-output" version = "2.0.0" dependencies = [ "Inflector", - "base64 0.21.7", + "base64 0.22.0", "chrono", "clap 2.33.3", "console", @@ -4929,7 +4935,7 @@ dependencies = [ name = "solana-core" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "bytes", @@ -5384,7 +5390,7 @@ dependencies = [ "ark-ec", "ark-ff", "ark-serialize", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bitflags 2.4.2", "blake3", @@ -5433,7 +5439,7 @@ dependencies = [ name = "solana-program-runtime" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "eager", "enum-iterator", @@ -5461,7 +5467,7 @@ version = "2.0.0" dependencies = [ "assert_matches", "async-trait", - "base64 0.21.7", + "base64 0.22.0", "bincode", "chrono-humanize", "crossbeam-channel", @@ -5560,7 +5566,7 @@ dependencies = [ name = "solana-rpc" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "crossbeam-channel", @@ -5617,7 +5623,7 @@ name = "solana-rpc-client" version = "2.0.0" dependencies = [ "async-trait", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bs58", "indicatif", @@ -5640,7 +5646,7 @@ dependencies = [ name = "solana-rpc-client-api" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bs58", "jsonrpc-core", "reqwest", @@ -5673,7 +5679,7 @@ version = "2.0.0" dependencies = [ "aquamarine", "arrayref", - "base64 0.21.7", + "base64 0.22.0", "bincode", "blake3", "bv", @@ -6170,7 +6176,7 @@ name = "solana-sdk" version = "2.0.0" dependencies = [ "assert_matches", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bitflags 2.4.2", "borsh 1.2.1", @@ -6372,7 +6378,7 @@ dependencies = [ name = "solana-test-validator" version = "2.0.0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "bincode", "crossbeam-channel", "log", @@ -6438,7 +6444,7 @@ name = "solana-transaction-status" version = "2.0.0" dependencies = [ "Inflector", - "base64 0.21.7", + "base64 0.22.0", "bincode", "borsh 0.10.3", "bs58", @@ -6614,7 +6620,7 @@ name = "solana-zk-token-sdk" version = "2.0.0" dependencies = [ "aes-gcm-siv", - "base64 0.21.7", + "base64 0.22.0", "bincode", "bytemuck", "byteorder 1.5.0", From 151675b5ca924d76c5058db310d6bbdda76692ee Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Wed, 13 Mar 2024 13:26:54 -0700 Subject: [PATCH 393/401] update changelog and remove deprecated label on `gossip_service::get_client()` (#227) update changelog and remove deprecated label on get_client --- CHANGELOG.md | 1 + gossip/src/gossip_service.rs | 3 
+--
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5803745a401afb..c2898a3aab0d3c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,6 +37,7 @@ Release channels have their own copy of this changelog:
   * New program deployments default to the exact size of a program, instead of
     double the size. Program accounts must be extended with `solana program extend`
     before an upgrade if they need to accommodate larger programs.
+  * Interface for `gossip_service::get_client()` has changed. `gossip_service::get_multi_client()` has been removed.
 * Upgrade Notes
   * `solana-program` and `solana-sdk` default to support for Borsh v1, with
     limited backward compatibility for v0.10 and v0.9. Please upgrade to Borsh v1.
diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs
index 0bd4750e269a48..0bc258306edb32 100644
--- a/gossip/src/gossip_service.rs
+++ b/gossip/src/gossip_service.rs
@@ -197,8 +197,7 @@ pub fn discover(
     ))
 }
-/// Creates a ThinClient by selecting a valid node at random
-#[deprecated(since = "1.18.6", note = "Interface will change")]
+/// Creates a TpuClient by selecting a valid node at random
 pub fn get_client(
     nodes: &[ContactInfo],
     connection_cache: Arc<ConnectionCache>,
From 7c007ea737897aff2131a7b65029cb39d29d9cea Mon Sep 17 00:00:00 2001
From: Kirill Fomichev
Date: Wed, 13 Mar 2024 18:46:48 -0500
Subject: [PATCH 394/401] rpc: disable BigTable with header (#226)

* rpc: disable BigTable with header

* use is_some_and instead map+unwrap_or

Co-authored-by: Tyera

---------

Co-authored-by: Tyera
---
 rpc/src/rpc.rs         | 9 +++++++++
 rpc/src/rpc_service.rs | 9 ++++++++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 9c979ab1f5a6b2..f0856aeae4b1a2 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -213,6 +213,15 @@ pub struct JsonRpcRequestProcessor {
 }
 impl Metadata for JsonRpcRequestProcessor {}
+impl JsonRpcRequestProcessor {
+    pub fn clone_without_bigtable(&self) -> JsonRpcRequestProcessor {
+        Self {
+            bigtable_ledger_storage: None, // Disable BigTable
+            ..self.clone()
+        }
+    }
+}
+
 impl JsonRpcRequestProcessor {
     fn get_bank_with_config(&self, config: RpcContextConfig) -> Result<Arc<Bank>> {
         let RpcContextConfig {
diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs
index d8791ab6c3bf6b..303a1e94b223b2 100644
--- a/rpc/src/rpc_service.rs
+++ b/rpc/src/rpc_service.rs
@@ -525,7 +525,14 @@ impl JsonRpcService {
         );
         let server = ServerBuilder::with_meta_extractor(
             io,
-            move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
+            move |req: &hyper::Request<hyper::Body>| {
+                let xbigtable = req.headers().get("x-bigtable");
+                if xbigtable.is_some_and(|v| v == "disabled") {
+                    request_processor.clone_without_bigtable()
+                } else {
+                    request_processor.clone()
+                }
+            },
         )
         .event_loop_executor(runtime.handle().clone())
         .threads(1)
From 794cb2f85674bc3ac45c66b28df8ea2b6863d8c9 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Wed, 13 Mar 2024 18:47:55 -0500
Subject: [PATCH 395/401] allow FlushStats to accumulate (#215)

---
 accounts-db/src/accounts_db.rs | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index eab5ca33af417c..e706958af8d0f2 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -1721,6 +1721,14 @@ struct FlushStats {
     total_size: u64,
 }
+impl FlushStats {
+    fn accumulate(&mut self, other: &Self) {
+        saturating_add_assign!(self.num_flushed, other.num_flushed);
+
saturating_add_assign!(self.num_purged, other.num_purged); + saturating_add_assign!(self.total_size, other.total_size); + } +} + #[derive(Debug, Default)] struct LatestAccountsIndexRootsStats { roots_len: AtomicUsize, @@ -6078,9 +6086,7 @@ impl AccountsDb { if old_slot > max_flushed_root { if self.should_aggressively_flush_cache() { if let Some(stats) = self.flush_slot_cache(old_slot) { - flush_stats.num_flushed += stats.num_flushed; - flush_stats.num_purged += stats.num_purged; - flush_stats.total_size += stats.total_size; + flush_stats.accumulate(&stats); } } } else { From 72d6d78d05738c3976c37de8eb250281898b7c1c Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 14 Mar 2024 15:03:44 +0800 Subject: [PATCH 396/401] Add `reserved_account_keys` module to sdk (#84) --- sdk/src/feature_set.rs | 5 + sdk/src/lib.rs | 1 + sdk/src/reserved_account_keys.rs | 256 +++++++++++++++++++++++++++++++ 3 files changed, 262 insertions(+) create mode 100644 sdk/src/reserved_account_keys.rs diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 7d956bd13f405c..8536282cee8efe 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -736,6 +736,10 @@ pub mod allow_commission_decrease_at_any_time { solana_sdk::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); } +pub mod add_new_reserved_account_keys { + solana_sdk::declare_id!("8U4skmMVnF6k2kMvrWbQuRUT3qQSiTYpSjqmhmgfthZu"); +} + pub mod consume_blockstore_duplicate_proofs { solana_sdk::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ"); } @@ -955,6 +959,7 @@ lazy_static! { (drop_legacy_shreds::id(), "drops legacy shreds #34328"), (allow_commission_decrease_at_any_time::id(), "Allow commission decrease at any time in epoch #33843"), (consume_blockstore_duplicate_proofs::id(), "consume duplicate proofs from blockstore in consensus #34372"), + (add_new_reserved_account_keys::id(), "add new unwritable reserved accounts #34899"), (index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"), (merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for merkle root conflicts #34270"), (disable_bpf_loader_instructions::id(), "disable bpf loader management instructions #34194"), diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index ecc186f0494191..5b5c6acdcfe572 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -94,6 +94,7 @@ pub mod quic; pub mod recent_blockhashes_account; pub mod rent_collector; pub mod rent_debits; +pub mod reserved_account_keys; pub mod reward_info; pub mod reward_type; pub mod rpc_port; diff --git a/sdk/src/reserved_account_keys.rs b/sdk/src/reserved_account_keys.rs new file mode 100644 index 00000000000000..2102949b240f49 --- /dev/null +++ b/sdk/src/reserved_account_keys.rs @@ -0,0 +1,256 @@ +//! Collection of reserved account keys that cannot be write-locked by transactions. +//! New reserved account keys may be added as long as they specify a feature +//! gate that transitions the key into read-only at an epoch boundary. 
+
+#![cfg(feature = "full")]
+
+use {
+    crate::{
+        address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable,
+        compute_budget, config, ed25519_program, feature,
+        feature_set::{self, FeatureSet},
+        loader_v4, native_loader,
+        pubkey::Pubkey,
+        secp256k1_program, stake, system_program, sysvar, vote,
+    },
+    lazy_static::lazy_static,
+    std::collections::{HashMap, HashSet},
+};
+
+// Inline zk token program id since it isn't available in the sdk
+mod zk_token_proof_program {
+    solana_sdk::declare_id!("ZkTokenProof1111111111111111111111111111111");
+}
+
+/// `ReservedAccountKeys` holds the set of currently active/inactive
+/// account keys that are reserved by the protocol and may not be write-locked
+/// during transaction processing.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ReservedAccountKeys {
+    /// Set of currently active reserved account keys
+    pub active: HashSet<Pubkey>,
+    /// Set of currently inactive reserved account keys that will be moved to the
+    /// active set when their feature id is activated
+    inactive: HashMap<Pubkey, Pubkey>,
+}
+
+impl Default for ReservedAccountKeys {
+    fn default() -> Self {
+        Self::new(&RESERVED_ACCOUNTS)
+    }
+}
+
+impl ReservedAccountKeys {
+    /// Compute a set of active / inactive reserved account keys from a list of
+    /// keys with a designated feature id. If a reserved account key doesn't
+    /// designate a feature id, it's already activated and should be inserted
+    /// into the active set. If it does have a feature id, insert the key and
+    /// its feature id into the inactive map.
+    fn new(reserved_accounts: &[ReservedAccount]) -> Self {
+        Self {
+            active: reserved_accounts
+                .iter()
+                .filter(|reserved| reserved.feature_id.is_none())
+                .map(|reserved| reserved.key)
+                .collect(),
+            inactive: reserved_accounts
+                .iter()
+                .filter_map(|ReservedAccount { key, feature_id }| {
+                    feature_id.as_ref().map(|feature_id| (*key, *feature_id))
+                })
+                .collect(),
+        }
+    }
+
+    /// Compute a set with all reserved keys active, regardless of whether their
+    /// feature was activated. This is not to be used by the runtime. Useful for
+    /// off-chain utilities that need to filter out reserved accounts.
+    pub fn new_all_activated() -> Self {
+        Self {
+            active: Self::all_keys_iter().copied().collect(),
+            inactive: HashMap::default(),
+        }
+    }
+
+    /// Returns whether the specified key is reserved
+    pub fn is_reserved(&self, key: &Pubkey) -> bool {
+        self.active.contains(key)
+    }
+
+    /// Move inactive reserved account keys to the active set if their feature
+    /// is active.
+    pub fn update_active_set(&mut self, feature_set: &FeatureSet) {
+        self.inactive.retain(|reserved_key, feature_id| {
+            if feature_set.is_active(feature_id) {
+                self.active.insert(*reserved_key);
+                false
+            } else {
+                true
+            }
+        });
+    }
+
+    /// Return an iterator over all active / inactive reserved keys. This is not
+    /// to be used by the runtime. Useful for off-chain utilities that need to
+    /// filter out reserved accounts.
+    pub fn all_keys_iter() -> impl Iterator<Item = &'static Pubkey> {
+        RESERVED_ACCOUNTS
+            .iter()
+            .map(|reserved_key| &reserved_key.key)
+    }
+
+    /// Return an empty set of reserved keys for visibility when using in
+    /// tests where the dynamic reserved key set is not available
+    pub fn empty_key_set() -> HashSet<Pubkey> {
+        HashSet::default()
+    }
+}
+
+/// `ReservedAccount` represents a reserved account that will not be
+/// write-lockable by transactions. If a feature id is set, the account will
+/// become read-only only after the feature has been activated.
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+struct ReservedAccount {
+    key: Pubkey,
+    feature_id: Option<Pubkey>,
+}
+
+impl ReservedAccount {
+    fn new_pending(key: Pubkey, feature_id: Pubkey) -> Self {
+        Self {
+            key,
+            feature_id: Some(feature_id),
+        }
+    }
+
+    fn new_active(key: Pubkey) -> Self {
+        Self {
+            key,
+            feature_id: None,
+        }
+    }
+}
+
+// New reserved accounts should be added in alphabetical order and must specify
+// a feature id for activation. Reserved accounts cannot be removed from this
+// list without breaking consensus.
+lazy_static! {
+    static ref RESERVED_ACCOUNTS: Vec<ReservedAccount> = [
+        // builtin programs
+        ReservedAccount::new_pending(address_lookup_table::program::id(), feature_set::add_new_reserved_account_keys::id()),
+        ReservedAccount::new_active(bpf_loader::id()),
+        ReservedAccount::new_active(bpf_loader_deprecated::id()),
+        ReservedAccount::new_active(bpf_loader_upgradeable::id()),
+        ReservedAccount::new_pending(compute_budget::id(), feature_set::add_new_reserved_account_keys::id()),
+        ReservedAccount::new_active(config::program::id()),
+        ReservedAccount::new_pending(ed25519_program::id(), feature_set::add_new_reserved_account_keys::id()),
+        ReservedAccount::new_active(feature::id()),
+        ReservedAccount::new_pending(loader_v4::id(), feature_set::add_new_reserved_account_keys::id()),
+        ReservedAccount::new_pending(secp256k1_program::id(), feature_set::add_new_reserved_account_keys::id()),
+        #[allow(deprecated)]
+        ReservedAccount::new_active(stake::config::id()),
+        ReservedAccount::new_active(stake::program::id()),
+        ReservedAccount::new_active(system_program::id()),
+        ReservedAccount::new_active(vote::program::id()),
+        ReservedAccount::new_pending(zk_token_proof_program::id(), feature_set::add_new_reserved_account_keys::id()),
+
+        // sysvars
+        ReservedAccount::new_active(sysvar::clock::id()),
+        ReservedAccount::new_pending(sysvar::epoch_rewards::id(), feature_set::add_new_reserved_account_keys::id()),
+        ReservedAccount::new_active(sysvar::epoch_schedule::id()),
+        #[allow(deprecated)]
+        ReservedAccount::new_active(sysvar::fees::id()),
+        ReservedAccount::new_active(sysvar::instructions::id()),
+        ReservedAccount::new_pending(sysvar::last_restart_slot::id(), feature_set::add_new_reserved_account_keys::id()),
+        #[allow(deprecated)]
+        ReservedAccount::new_active(sysvar::recent_blockhashes::id()),
+        ReservedAccount::new_active(sysvar::rent::id()),
+        ReservedAccount::new_active(sysvar::rewards::id()),
+        ReservedAccount::new_active(sysvar::slot_hashes::id()),
+        ReservedAccount::new_active(sysvar::slot_history::id()),
+        ReservedAccount::new_active(sysvar::stake_history::id()),
+
+        // other
+        ReservedAccount::new_active(native_loader::id()),
+        ReservedAccount::new_pending(sysvar::id(), feature_set::add_new_reserved_account_keys::id()),
+    ].to_vec();
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        solana_program::{message::legacy::BUILTIN_PROGRAMS_KEYS, sysvar::ALL_IDS},
+    };
+
+    #[test]
+    fn test_is_reserved() {
+        let feature_id = Pubkey::new_unique();
+        let active_reserved_account = ReservedAccount::new_active(Pubkey::new_unique());
+        let pending_reserved_account =
+            ReservedAccount::new_pending(Pubkey::new_unique(), feature_id);
+        let reserved_account_keys =
+            ReservedAccountKeys::new(&[active_reserved_account, pending_reserved_account]);
+
+        assert!(
+            reserved_account_keys.is_reserved(&active_reserved_account.key),
+            "active reserved accounts should be inserted into the active set"
+        );
+        assert!(
+            !reserved_account_keys.is_reserved(&pending_reserved_account.key),
+            "pending
reserved accounts should NOT be inserted into the active set" + ); + } + + #[test] + fn test_update_active_set() { + let feature_ids = [Pubkey::new_unique(), Pubkey::new_unique()]; + let active_reserved_key = Pubkey::new_unique(); + let pending_reserved_keys = [Pubkey::new_unique(), Pubkey::new_unique()]; + let reserved_accounts = vec![ + ReservedAccount::new_active(active_reserved_key), + ReservedAccount::new_pending(pending_reserved_keys[0], feature_ids[0]), + ReservedAccount::new_pending(pending_reserved_keys[1], feature_ids[1]), + ]; + + let mut reserved_account_keys = ReservedAccountKeys::new(&reserved_accounts); + assert!(reserved_account_keys.is_reserved(&active_reserved_key)); + assert!(!reserved_account_keys.is_reserved(&pending_reserved_keys[0])); + assert!(!reserved_account_keys.is_reserved(&pending_reserved_keys[1])); + + // Updating the active set with a default feature set should be a no-op + let previous_reserved_account_keys = reserved_account_keys.clone(); + let mut feature_set = FeatureSet::default(); + reserved_account_keys.update_active_set(&feature_set); + assert_eq!(reserved_account_keys, previous_reserved_account_keys); + + // Updating the active set with an activated feature should also activate + // the corresponding reserved key from inactive to active + feature_set.active.insert(feature_ids[0], 0); + reserved_account_keys.update_active_set(&feature_set); + + assert!(reserved_account_keys.is_reserved(&active_reserved_key)); + assert!(reserved_account_keys.is_reserved(&pending_reserved_keys[0])); + assert!(!reserved_account_keys.is_reserved(&pending_reserved_keys[1])); + + // Update the active set again to ensure that the inactive map is + // properly retained + feature_set.active.insert(feature_ids[1], 0); + reserved_account_keys.update_active_set(&feature_set); + + assert!(reserved_account_keys.is_reserved(&active_reserved_key)); + assert!(reserved_account_keys.is_reserved(&pending_reserved_keys[0])); + assert!(reserved_account_keys.is_reserved(&pending_reserved_keys[1])); + } + + #[test] + fn test_static_list_compat() { + let mut static_set = HashSet::new(); + static_set.extend(ALL_IDS.iter().cloned()); + static_set.extend(BUILTIN_PROGRAMS_KEYS.iter().cloned()); + + let initial_active_set = ReservedAccountKeys::default().active; + + assert_eq!(initial_active_set, static_set); + } +} From 51dc7e6fb7589344497ce030950663c800685408 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 14 Mar 2024 20:35:33 +0800 Subject: [PATCH 397/401] [anza migration]: add 'agave=info' to default log level (#223) --- accounts-db/store-tool/src/main.rs | 2 +- bench-tps/src/main.rs | 2 +- cargo-registry/src/main.rs | 2 +- dos/src/main.rs | 2 +- faucet/src/bin/faucet.rs | 2 +- gossip/src/main.rs | 2 +- ledger-tool/src/main.rs | 2 +- logger/src/lib.rs | 7 +++++++ scripts/run.sh | 2 +- transaction-dos/src/main.rs | 2 +- validator/src/lib.rs | 5 ++--- watchtower/src/main.rs | 2 +- 12 files changed, 19 insertions(+), 13 deletions(-) diff --git a/accounts-db/store-tool/src/main.rs b/accounts-db/store-tool/src/main.rs index cb5838af4f21ad..86482feb2afda8 100644 --- a/accounts-db/store-tool/src/main.rs +++ b/accounts-db/store-tool/src/main.rs @@ -13,7 +13,7 @@ use { }; fn main() { - solana_logger::setup_with_default("solana=info"); + solana_logger::setup_with_default_filter(); let matches = App::new(crate_name!()) .about(crate_description!()) .version(solana_version::version!()) diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index fa0fc1509055e4..d3def39ed4d383 100644 --- 
a/bench-tps/src/main.rs
+++ b/bench-tps/src/main.rs
@@ -168,7 +168,7 @@ fn create_client(
 }
 fn main() {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);
     let matches = cli::build_args(solana_version::version!()).get_matches();
diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs
index 0bfc2c7f3ff004..317e86341927f7 100644
--- a/cargo-registry/src/main.rs
+++ b/cargo-registry/src/main.rs
@@ -263,7 +263,7 @@ impl CargoRegistryService {
 #[tokio::main]
 async fn main() {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     let client = Arc::new(Client::new().expect("Failed to get RPC Client instance"));
     let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client.port);
diff --git a/dos/src/main.rs b/dos/src/main.rs
index 577e4a2d067393..3bf7cce0e782cc 100644
--- a/dos/src/main.rs
+++ b/dos/src/main.rs
@@ -760,7 +760,7 @@ fn run_dos(
 }
 fn main() {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     let cmd_params = build_cli_parameters();
     let (nodes, client) = if !cmd_params.skip_gossip {
diff --git a/faucet/src/bin/faucet.rs b/faucet/src/bin/faucet.rs
index 8e45ef98155a54..56cc7542623185 100644
--- a/faucet/src/bin/faucet.rs
+++ b/faucet/src/bin/faucet.rs
@@ -19,7 +19,7 @@ use {
 async fn main() {
     let default_keypair = solana_cli_config::Config::default().keypair_path;
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     solana_metrics::set_panic_hook("faucet", /*version:*/ None);
     let matches = App::new(crate_name!())
         .about(crate_description!())
diff --git a/gossip/src/main.rs b/gossip/src/main.rs
index 226fab8d9d43a8..1f31195f431d36 100644
--- a/gossip/src/main.rs
+++ b/gossip/src/main.rs
@@ -326,7 +326,7 @@ fn process_rpc_url(
 }
 fn main() -> Result<(), Box<dyn error::Error>> {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     let matches = parse_matches();
     let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 94298623e953cd..4509e975cf10a1 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -555,7 +555,7 @@ fn main() {
     const DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN: usize = std::usize::MAX;
     const DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN: usize = std::usize::MAX;
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     let no_snapshot_arg = Arg::with_name("no_snapshot")
         .long("no-snapshot")
diff --git a/logger/src/lib.rs b/logger/src/lib.rs
index 6cc57e81c531f3..8d6a20662c2c29 100644
--- a/logger/src/lib.rs
+++ b/logger/src/lib.rs
@@ -10,6 +10,8 @@ lazy_static!
{
     Arc::new(RwLock::new(env_logger::Logger::from_default_env()));
 }
+pub const DEFAULT_FILTER: &str = "solana=info,agave=info";
+
 struct LoggerShim {}
 impl log::Log for LoggerShim {
@@ -49,6 +51,11 @@ pub fn setup_with_default(filter: &str) {
     replace_logger(logger);
 }
+// Configures logging with the `DEFAULT_FILTER` if RUST_LOG is not set
+pub fn setup_with_default_filter() {
+    setup_with_default(DEFAULT_FILTER);
+}
+
 // Configures logging with the default filter "error" if RUST_LOG is not set
 pub fn setup() {
     setup_with_default("error");
diff --git a/scripts/run.sh b/scripts/run.sh
index 2d8e1ec88ac450..70994c921f47ac 100755
--- a/scripts/run.sh
+++ b/scripts/run.sh
@@ -37,7 +37,7 @@ $ok || {
   exit 1
 }
-export RUST_LOG=${RUST_LOG:-solana=info,solana_runtime::message_processor=debug} # if RUST_LOG is unset, default to info
+export RUST_LOG=${RUST_LOG:-solana=info,agave=info,solana_runtime::message_processor=debug} # if RUST_LOG is unset, default to info
 export RUST_BACKTRACE=1
 dataDir=$PWD/config/"$(basename "$0" .sh)"
 ledgerDir=$PWD/config/ledger
diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs
index 94fecf5e6d5f73..dedbcdab27ef79 100644
--- a/transaction-dos/src/main.rs
+++ b/transaction-dos/src/main.rs
@@ -426,7 +426,7 @@ fn run_transactions_dos(
 }
 fn main() {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     let matches = App::new(crate_name!())
         .about(crate_description!())
         .version(solana_version::version!())
diff --git a/validator/src/lib.rs b/validator/src/lib.rs
index e1b9df96b9b03e..9ed2aeab6470a3 100644
--- a/validator/src/lib.rs
+++ b/validator/src/lib.rs
@@ -41,10 +41,9 @@ pub fn redirect_stderr_to_file(logfile: Option<String>) -> Option<JoinHandle<()>> {
         env::set_var("RUST_BACKTRACE", "1")
     }
-    let filter = "solana=info";
     match logfile {
         None => {
-            solana_logger::setup_with_default(filter);
+            solana_logger::setup_with_default_filter();
             None
         }
         Some(logfile) => {
@@ -58,7 +57,7 @@
                 exit(1);
             });
-            solana_logger::setup_with_default(filter);
+            solana_logger::setup_with_default_filter();
             redirect_stderr(&logfile);
             Some(
                 std::thread::Builder::new()
diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs
index 341b7903c0a33e..11dd70e27285dc 100644
--- a/watchtower/src/main.rs
+++ b/watchtower/src/main.rs
@@ -246,7 +246,7 @@ fn get_cluster_info(
 }
 fn main() -> Result<(), Box<dyn error::Error>> {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_default_filter();
     solana_metrics::set_panic_hook("watchtower", /*version:*/ None);
     let config = get_config();
From 75ef68ffe8ee360d27ac3aac3f32478d21f58d57 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 15 Mar 2024 01:42:32 +0800
Subject: [PATCH 398/401] build(deps): bump hidapi from 2.6.0 to 2.6.1 (#237)

Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 2.6.0 to 2.6.1.
- [Release notes](https://github.com/ruabmbua/hidapi-rs/releases)
- [Commits](https://github.com/ruabmbua/hidapi-rs/commits)

---
updated-dependencies:
- dependency-name: hidapi
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4620b4411b4c90..c35f20738a8c1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2618,9 +2618,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hidapi" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a722fb137d008dbf264f54612457f8eb6a299efbcb0138178964a0809035d74" +checksum = "9e58251020fe88fe0dae5ebcc1be92b4995214af84725b375d08354d0311c23c" dependencies = [ "cc", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 496b7aa42bcd96..c4802c1ba0fd5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -213,7 +213,7 @@ gethostname = "0.2.3" getrandom = "0.2.10" goauth = "0.13.1" hex = "0.4.3" -hidapi = { version = "2.6.0", default-features = false } +hidapi = { version = "2.6.1", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" From 6bcb77dcfa3544d43919aee8671802be0ae67adc Mon Sep 17 00:00:00 2001 From: Nagaprasad V R <81755170+Nagaprasadvr@users.noreply.github.com> Date: Thu, 14 Mar 2024 23:13:34 +0530 Subject: [PATCH 399/401] relax stake split destination check (#162) * relax stake split destination check * change stake_account error handling logic * fmt * modify logic * change error message when account is neither owned by stake program or system program * add a comment explaining nested ifs in stake_account error handling * fix typos in comments * update comment --- cli/src/stake.rs | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 45c6e12e072d4e..ac08fd3425dc65 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -56,6 +56,7 @@ use { }, stake_history::{Epoch, StakeHistory}, system_instruction::{self, SystemError}, + system_program, sysvar::{clock, stake_history}, transaction::Transaction, }, @@ -1980,15 +1981,26 @@ pub fn process_split_stake( let rent_exempt_reserve = if !sign_only { if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) { - let err_msg = if stake_account.owner == stake::program::id() { - format!("Stake account {split_stake_account_address} already exists") + if stake_account.owner == stake::program::id() { + return Err(CliError::BadParameter(format!( + "Stake account {split_stake_account_address} already exists" + )) + .into()); + } else if stake_account.owner == system_program::id() { + if !stake_account.data.is_empty() { + return Err(CliError::BadParameter(format!( + "Account {split_stake_account_address} has data and cannot be used to split stake" + )) + .into()); + } + // if `stake_account`'s owner is the system_program and its data is + // empty, `stake_account` is allowed to receive the stake split } else { - format!( - "Account {split_stake_account_address} already exists and is not a stake \ - account" - ) - }; - return Err(CliError::BadParameter(err_msg).into()); + return Err(CliError::BadParameter(format!( + "Account {split_stake_account_address} already exists and cannot be used to split stake" + )) + .into()); + } } let minimum_balance = From d49ceb0e3fc1077ffa62b5e95e67e10228e836f8 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Thu, 14 Mar 2024 11:22:52 -0700 Subject: [PATCH 400/401] Add in metrics for detecting Redundant Pulls (#199) --- 
gossip/src/cluster_info_metrics.rs |  5 ++++
 gossip/src/crds.rs                 | 41 +++++++++++++++++++++++-------
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs
index 0e474d3cf5284f..74dc0c43e9606e 100644
--- a/gossip/src/cluster_info_metrics.rs
+++ b/gossip/src/cluster_info_metrics.rs
@@ -315,6 +315,11 @@ pub(crate) fn submit_gossip_stats(
             stats.process_pull_response_timeout.clear(),
             i64
         ),
+        (
+            "num_redundant_pull_responses",
+            crds_stats.num_redundant_pull_responses,
+            i64
+        ),
         (
             "push_response_count",
             stats.push_response_count.clear(),
diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs
index 719bc138479096..dbb6c43c0356c0 100644
--- a/gossip/src/crds.rs
+++ b/gossip/src/crds.rs
@@ -115,6 +115,9 @@ pub(crate) struct CrdsDataStats {
 pub(crate) struct CrdsStats {
     pub(crate) pull: CrdsDataStats,
     pub(crate) push: CrdsDataStats,
+    /// number of times a message was first received via a PullResponse
+    /// and that message was later received via a PushMessage
+    pub(crate) num_redundant_pull_responses: u64,
 }
 /// This structure stores some local metadata associated with the CrdsValue
@@ -127,8 +130,10 @@ pub struct VersionedCrdsValue {
     pub(crate) local_timestamp: u64,
     /// value hash
     pub(crate) value_hash: Hash,
-    /// Number of times duplicates of this value are recevied from gossip push.
-    num_push_dups: u8,
+    /// None -> value upserted by GossipRoute::{LocalMessage,PullRequest}
+    /// Some(0) -> value upserted by GossipRoute::PullResponse
+    /// Some(k) if k > 0 -> value upserted by GossipRoute::PushMessage w/ k - 1 push duplicates
+    num_push_recv: Option<u8>,
 }
 #[derive(Clone, Copy, Default)]
@@ -147,14 +152,21 @@ impl Cursor {
     }
 }
 impl VersionedCrdsValue {
-    fn new(value: CrdsValue, cursor: Cursor, local_timestamp: u64) -> Self {
+    fn new(value: CrdsValue, cursor: Cursor, local_timestamp: u64, route: GossipRoute) -> Self {
         let value_hash = hash(&serialize(&value).unwrap());
+        let num_push_recv = match route {
+            GossipRoute::LocalMessage => None,
+            GossipRoute::PullRequest => None,
+            GossipRoute::PullResponse => Some(0),
+            GossipRoute::PushMessage(_) => Some(1),
+        };
+
         VersionedCrdsValue {
             ordinal: cursor.ordinal(),
             value,
             local_timestamp,
             value_hash,
-            num_push_dups: 0u8,
+            num_push_recv,
         }
     }
 }
@@ -222,7 +234,7 @@ impl Crds {
     ) -> Result<(), CrdsError> {
         let label = value.label();
         let pubkey = value.pubkey();
-        let value = VersionedCrdsValue::new(value, self.cursor, now);
+        let value = VersionedCrdsValue::new(value, self.cursor, now, route);
         match self.table.entry(label) {
             Entry::Vacant(entry) => {
                 self.stats.lock().unwrap().record_insert(&value, route);
@@ -303,8 +315,12 @@
             Err(CrdsError::InsertFailed)
         } else if matches!(route, GossipRoute::PushMessage(_)) {
             let entry = entry.get_mut();
-            entry.num_push_dups = entry.num_push_dups.saturating_add(1);
-            Err(CrdsError::DuplicatePush(entry.num_push_dups))
+            if entry.num_push_recv == Some(0) {
+                self.stats.lock().unwrap().num_redundant_pull_responses += 1;
+            }
+            let num_push_dups = entry.num_push_recv.unwrap_or_default();
+            entry.num_push_recv = Some(num_push_dups.saturating_add(1));
+            Err(CrdsError::DuplicatePush(num_push_dups))
         } else {
             Err(CrdsError::InsertFailed)
         }
@@ -1450,8 +1466,9 @@
     #[allow(clippy::neg_cmp_op_on_partial_ord)]
     fn test_equal() {
         let val = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ContactInfo::default()));
-        let v1 = VersionedCrdsValue::new(val.clone(), Cursor::default(), 1);
-        let v2 = VersionedCrdsValue::new(val,
Cursor::default(), 1); + let v1 = + VersionedCrdsValue::new(val.clone(), Cursor::default(), 1, GossipRoute::LocalMessage); + let v2 = VersionedCrdsValue::new(val, Cursor::default(), 1, GossipRoute::LocalMessage); assert_eq!(v1, v2); assert!(!(v1 != v2)); assert!(!overrides(&v1.value, &v2)); @@ -1467,6 +1484,7 @@ mod tests { ))), Cursor::default(), 1, // local_timestamp + GossipRoute::LocalMessage, ); let v2 = VersionedCrdsValue::new( { @@ -1476,6 +1494,7 @@ mod tests { }, Cursor::default(), 1, // local_timestamp + GossipRoute::LocalMessage, ); assert_eq!(v1.value.label(), v2.value.label()); @@ -1501,6 +1520,7 @@ mod tests { ))), Cursor::default(), 1, // local_timestamp + GossipRoute::LocalMessage, ); let v2 = VersionedCrdsValue::new( CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ContactInfo::new_localhost( @@ -1509,6 +1529,7 @@ mod tests { ))), Cursor::default(), 1, // local_timestamp + GossipRoute::LocalMessage, ); assert_eq!(v1.value.label(), v2.value.label()); assert!(overrides(&v1.value, &v2)); @@ -1527,6 +1548,7 @@ mod tests { ))), Cursor::default(), 1, // local_timestamp + GossipRoute::LocalMessage, ); let v2 = VersionedCrdsValue::new( CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ContactInfo::new_localhost( @@ -1535,6 +1557,7 @@ mod tests { ))), Cursor::default(), 1, // local_timestamp + GossipRoute::LocalMessage, ); assert_ne!(v1, v2); assert!(!(v1 == v2)); From b3fd87fe8189e7e278d882eb2cacafe1f9503d89 Mon Sep 17 00:00:00 2001 From: sakridge Date: Thu, 14 Mar 2024 19:43:59 +0100 Subject: [PATCH 401/401] Fix gossip contact trace (#241) --- gossip/src/cluster_info.rs | 59 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 3 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 7cddbdb5a963b1..471d768a101051 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -765,7 +765,7 @@ impl ClusterInfo { "" }, now.saturating_sub(last_updated), - node.pubkey(), + node.pubkey().to_string(), if let Some(node_version) = node_version { node_version.to_string() } else { @@ -827,7 +827,7 @@ impl ClusterInfo { .unwrap_or_else(|| String::from("none")), if node.pubkey() == &my_pubkey { "me" } else { "" }, now.saturating_sub(last_updated), - node.pubkey(), + node.pubkey().to_string(), if let Some(node_version) = node_version { node_version.to_string() } else { @@ -849,7 +849,7 @@ impl ClusterInfo { format!( "IP Address |Age(ms)| Node identifier \ | Version |Gossip|TPUvote| TPU |TPUfwd| TVU |TVU Q |ServeR|ShredVer\n\ - ------------------+-------+---------------------------------------\ + ------------------+-------+----------------------------------------------\ +---------+------+-------+------+------+------+------+------+--------\n\ {}\ Nodes: {}{}{}", @@ -4682,4 +4682,57 @@ mod tests { assert_eq!(heaviest_forks.len(), 1); assert_eq!(heaviest_forks[0].from, pubkey2); } + + #[test] + fn test_contact_trace() { + solana_logger::setup(); + let keypair43 = Arc::new( + Keypair::from_bytes(&[ + 198, 203, 8, 178, 196, 71, 119, 152, 31, 96, 221, 142, 115, 224, 45, 34, 173, 138, + 254, 39, 181, 238, 168, 70, 183, 47, 210, 91, 221, 179, 237, 153, 14, 58, 154, 59, + 67, 220, 235, 106, 241, 99, 4, 72, 60, 245, 53, 30, 225, 122, 145, 225, 8, 40, 30, + 174, 26, 228, 125, 127, 125, 21, 96, 28, + ]) + .unwrap(), + ); + let keypair44 = Arc::new( + Keypair::from_bytes(&[ + 66, 88, 3, 70, 228, 215, 125, 64, 130, 183, 180, 98, 22, 166, 201, 234, 89, 80, + 135, 24, 228, 35, 20, 52, 105, 130, 50, 51, 46, 229, 244, 108, 70, 57, 45, 247, 
57, + 177, 39, 126, 190, 238, 50, 96, 186, 208, 28, 168, 148, 56, 9, 106, 92, 213, 63, + 205, 252, 225, 244, 101, 77, 182, 4, 2, + ]) + .unwrap(), + ); + + let cluster_info44 = Arc::new({ + let mut node = Node::new_localhost_with_pubkey(&keypair44.pubkey()); + node.sockets.gossip = UdpSocket::bind("127.0.0.1:65534").unwrap(); + info!("{:?}", node); + ClusterInfo::new(node.info, keypair44.clone(), SocketAddrSpace::Unspecified) + }); + let cluster_info43 = Arc::new({ + let node = Node::new_localhost_with_pubkey(&keypair43.pubkey()); + ClusterInfo::new(node.info, keypair43.clone(), SocketAddrSpace::Unspecified) + }); + + assert_eq!(keypair43.pubkey().to_string().len(), 43); + assert_eq!(keypair44.pubkey().to_string().len(), 44); + + let trace = cluster_info44.contact_info_trace(); + info!("cluster:\n{}", trace); + assert_eq!(trace.len(), 431); + + let trace = cluster_info44.rpc_info_trace(); + info!("rpc:\n{}", trace); + assert_eq!(trace.len(), 335); + + let trace = cluster_info43.contact_info_trace(); + info!("cluster:\n{}", trace); + assert_eq!(trace.len(), 431); + + let trace = cluster_info43.rpc_info_trace(); + info!("rpc:\n{}", trace); + assert_eq!(trace.len(), 335); + } }