From 835a554927d120cdd6c2b0780c6276f17c708d41 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Mon, 24 Dec 2018 18:11:53 -0800 Subject: [PATCH 01/14] Refactoring for checkpoint and fork selection --- Cargo.toml | 2 +- src/accounts.rs | 347 ++---- src/bank.rs | 1248 ++++++-------------- src/bank_state.rs | 803 +++++++++++++ src/banking_stage.rs | 2 +- src/bloom.rs | 37 +- src/checkpoint.rs | 14 - src/checkpoints.rs | 141 +++ src/compute_leader_confirmation_service.rs | 11 +- src/crds_gossip_pull.rs | 2 +- src/entry.rs | 1 - src/forks.rs | 230 ++++ src/fullnode.rs | 2 +- src/last_id_queue.rs | 213 ++++ src/leader_scheduler.rs | 7 +- src/lib.rs | 7 +- src/poh_recorder.rs | 2 +- src/replay_stage.rs | 170 +-- src/rpc.rs | 33 +- src/rpc_pubsub.rs | 3 +- src/status_cache.rs | 236 ++++ src/status_deque.rs | 364 ------ src/tvu.rs | 7 +- tests/programs.rs | 6 +- 24 files changed, 2254 insertions(+), 1634 deletions(-) create mode 100644 src/bank_state.rs delete mode 100644 src/checkpoint.rs create mode 100644 src/checkpoints.rs create mode 100644 src/forks.rs create mode 100644 src/last_id_queue.rs create mode 100644 src/status_cache.rs delete mode 100644 src/status_deque.rs diff --git a/Cargo.toml b/Cargo.toml index e25daf03417fee..330e8b45f6b50b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,11 +29,11 @@ bs58 = "0.2.0" bv = { version = "0.11.0", features = ["serde"] } byteorder = "1.3.1" chrono = { version = "0.4.0", features = ["serde"] } -fnv = "1.0.6" hashbrown = "0.1.8" indexmap = "1.0" itertools = "0.8.0" libc = "0.2.48" +fnv = "1.0.6" log = "0.4.2" nix = "0.13.0" rand = "0.6.4" diff --git a/src/accounts.rs b/src/accounts.rs index de8e6b15dcd314..257c18d6c42a7f 100644 --- a/src/accounts.rs +++ b/src/accounts.rs @@ -1,8 +1,6 @@ use crate::bank::BankError; use crate::bank::Result; -use crate::checkpoint::Checkpoint; use crate::counter::Counter; -use crate::status_deque::{StatusDeque, StatusDequeError}; use bincode::serialize; use hashbrown::{HashMap, HashSet}; use 
log::Level; @@ -11,7 +9,7 @@ use solana_sdk::hash::{hash, Hash}; use solana_sdk::pubkey::Pubkey; use solana_sdk::transaction::Transaction; use std::collections::BTreeMap; -use std::collections::VecDeque; +use std::ops::Deref; use std::sync::atomic::AtomicUsize; use std::sync::{Mutex, RwLock}; @@ -23,6 +21,7 @@ pub struct ErrorCounters { pub account_not_found: usize, pub account_in_use: usize, pub last_id_not_found: usize, + pub last_id_too_old: usize, pub reserve_last_id: usize, pub insufficient_funds: usize, pub duplicate_signature: usize, @@ -35,9 +34,6 @@ pub struct AccountsDB { /// Mapping of known public keys/IDs to accounts pub accounts: HashMap, - /// list of prior states - checkpoints: VecDeque<(HashMap, u64)>, - /// The number of transactions the bank has processed without error since the /// start of the ledger. transaction_count: u64, @@ -55,7 +51,6 @@ impl Default for AccountsDB { fn default() -> Self { Self { accounts: HashMap::new(), - checkpoints: VecDeque::new(), transaction_count: 0, } } @@ -71,10 +66,6 @@ impl Default for Accounts { } impl AccountsDB { - pub fn keys(&self) -> Vec { - self.accounts.keys().cloned().collect() - } - pub fn hash_internal_state(&self) -> Hash { let mut ordered_accounts = BTreeMap::new(); @@ -87,22 +78,21 @@ impl AccountsDB { hash(&serialize(&ordered_accounts).unwrap()) } - fn load(&self, pubkey: &Pubkey) -> Option<&Account> { - if let Some(account) = self.accounts.get(pubkey) { - return Some(account); - } - - for (accounts, _) in &self.checkpoints { - if let Some(account) = accounts.get(pubkey) { - return Some(account); + fn load(checkpoints: &[U], pubkey: &Pubkey) -> Option + where + U: Deref, + { + for db in checkpoints { + if let Some(account) = db.accounts.get(pubkey) { + return Some(account.clone()); } } None } - - pub fn store(&mut self, pubkey: &Pubkey, account: &Account) { + /// purge == checkpoints.is_empty() + pub fn store(&mut self, purge: bool, pubkey: &Pubkey, account: &Account) { if account.tokens == 0 { - 
if self.checkpoints.is_empty() { + if purge { // purge if balance is 0 and no checkpoints self.accounts.remove(pubkey); } else { @@ -116,6 +106,7 @@ impl AccountsDB { pub fn store_accounts( &mut self, + purge: bool, txs: &[Transaction], res: &[Result<()>], loaded: &[Result<(InstructionAccounts, InstructionLoaders)>], @@ -126,65 +117,51 @@ impl AccountsDB { } let tx = &txs[i]; - let accs = raccs.as_ref().unwrap(); - for (key, account) in tx.account_keys.iter().zip(accs.0.iter()) { - self.store(key, account); + let acc = raccs.as_ref().unwrap(); + for (key, account) in tx.account_keys.iter().zip(acc.0.iter()) { + self.store(purge, key, account); } } } - - fn load_tx_accounts( - &self, + fn load_tx_accounts( + checkpoints: &[U], tx: &Transaction, - last_ids: &mut StatusDeque>, - max_age: usize, error_counters: &mut ErrorCounters, - ) -> Result> { + ) -> Result> + where + U: Deref, + { // Copy all the accounts if tx.signatures.is_empty() && tx.fee != 0 { Err(BankError::MissingSignatureForFee) - } else if tx.account_keys.is_empty() || self.load(&tx.account_keys[0]).is_none() { - error_counters.account_not_found += 1; - Err(BankError::AccountNotFound) - } else if self.load(&tx.account_keys[0]).unwrap().tokens < tx.fee { - error_counters.insufficient_funds += 1; - Err(BankError::InsufficientFundsForFee) } else { - if !last_ids.check_entry_id_age(tx.last_id, max_age) { - error_counters.last_id_not_found += 1; - return Err(BankError::LastIdNotFound); - } - // There is no way to predict what program will execute without an error // If a fee can pay for execution then the program will be scheduled - last_ids - .reserve_signature_with_last_id(&tx.last_id, &tx.signatures[0]) - .map_err(|err| match err { - StatusDequeError::LastIdNotFound => { - error_counters.reserve_last_id += 1; - BankError::LastIdNotFound - } - StatusDequeError::DuplicateSignature => { - error_counters.duplicate_signature += 1; - BankError::DuplicateSignature - } - })?; - - let mut called_accounts: Vec = tx 
- .account_keys - .iter() - .map(|key| self.load(key).cloned().unwrap_or_default()) - .collect(); - called_accounts[0].tokens -= tx.fee; - Ok(called_accounts) + let mut called_accounts: Vec = vec![]; + for key in &tx.account_keys { + called_accounts.push(Self::load(checkpoints, key).unwrap_or_default()); + } + if called_accounts[0].tokens == 0 { + error_counters.account_not_found += 1; + Err(BankError::AccountNotFound) + } else if called_accounts[0].tokens < tx.fee { + error_counters.insufficient_funds += 1; + Err(BankError::InsufficientFundsForFee) + } else { + called_accounts[0].tokens -= tx.fee; + Ok(called_accounts) + } } } - fn load_executable_accounts( - &self, + fn load_executable_accounts( + checkpoints: &[U], mut program_id: Pubkey, error_counters: &mut ErrorCounters, - ) -> Result> { + ) -> Result> + where + U: Deref, + { let mut accounts = Vec::new(); let mut depth = 0; loop { @@ -199,7 +176,7 @@ impl AccountsDB { } depth += 1; - let program = match self.load(&program_id) { + let program = match Self::load(checkpoints, &program_id) { Some(program) => program, None => { error_counters.account_not_found += 1; @@ -220,11 +197,14 @@ impl AccountsDB { } /// For each program_id in the transaction, load its loaders. 
- fn load_loaders( - &self, + fn load_loaders( + checkpoints: &[U], tx: &Transaction, error_counters: &mut ErrorCounters, - ) -> Result>> { + ) -> Result>> + where + U: Deref, + { tx.instructions .iter() .map(|ix| { @@ -233,25 +213,26 @@ impl AccountsDB { return Err(BankError::AccountNotFound); } let program_id = tx.program_ids[ix.program_ids_index as usize]; - self.load_executable_accounts(program_id, error_counters) + Self::load_executable_accounts(checkpoints, program_id, error_counters) }) .collect() } - fn load_accounts( - &self, + fn load_accounts( + checkpoints: &[U], txs: &[Transaction], - last_ids: &mut StatusDeque>, lock_results: Vec>, - max_age: usize, error_counters: &mut ErrorCounters, - ) -> Vec> { + ) -> Vec> + where + U: Deref, + { txs.iter() .zip(lock_results.into_iter()) .map(|etx| match etx { (tx, Ok(())) => { - let accounts = self.load_tx_accounts(tx, last_ids, max_age, error_counters)?; - let loaders = self.load_loaders(tx, error_counters)?; + let accounts = Self::load_tx_accounts(checkpoints, tx, error_counters)?; + let loaders = Self::load_loaders(checkpoints, tx, error_counters)?; Ok((accounts, loaders)) } (_, Err(e)) => Err(e), @@ -266,21 +247,36 @@ impl AccountsDB { pub fn transaction_count(&self) -> u64 { self.transaction_count } + pub fn account_values_slow(&self) -> Vec<(Pubkey, solana_sdk::account::Account)> { + self.accounts + .iter() + .map(|(x, y)| (x.clone(), y.clone())) + .collect() + } + fn merge(&mut self, other: Self) { + self.transaction_count += other.transaction_count; + self.accounts.extend(other.accounts) + } } impl Accounts { - pub fn keys(&self) -> Vec { - self.accounts_db.read().unwrap().keys() - } - - /// Slow because lock is held for 1 operation instead of many - pub fn load_slow(&self, pubkey: &Pubkey) -> Option { - self.accounts_db.read().unwrap().load(pubkey).cloned() + /// Slow because lock is held for 1 operation insted of many + pub fn load_slow(checkpoints: &[U], pubkey: &Pubkey) -> Option + where + U: Deref, + 
{ + let dbs: Vec<_> = checkpoints + .iter() + .map(|obj| obj.accounts_db.read().unwrap()) + .collect(); + AccountsDB::load(&dbs, pubkey) } - - /// Slow because lock is held for 1 operation instead of many - pub fn store_slow(&self, pubkey: &Pubkey, account: &Account) { - self.accounts_db.write().unwrap().store(pubkey, account) + /// Slow because lock is held for 1 operation insted of many + pub fn store_slow(&self, purge: bool, pubkey: &Pubkey, account: &Account) { + self.accounts_db + .write() + .unwrap() + .store(purge, pubkey, account) } fn lock_account( @@ -344,25 +340,25 @@ impl Accounts { .for_each(|(tx, result)| Self::unlock_account(tx, result, &mut account_locks)); } - pub fn load_accounts( - &self, + pub fn load_accounts( + checkpoints: &[U], txs: &[Transaction], - last_ids: &mut StatusDeque>, - lock_results: Vec>, - max_age: usize, + results: Vec>, error_counters: &mut ErrorCounters, - ) -> Vec> { - self.accounts_db.read().unwrap().load_accounts( - txs, - last_ids, - lock_results, - max_age, - error_counters, - ) + ) -> Vec> + where + U: Deref, + { + let dbs: Vec<_> = checkpoints + .iter() + .map(|obj| obj.accounts_db.read().unwrap()) + .collect(); + AccountsDB::load_accounts(&dbs, txs, results, error_counters) } pub fn store_accounts( &self, + purge: bool, txs: &[Transaction], res: &[Result<()>], loaded: &[Result<(InstructionAccounts, InstructionLoaders)>], @@ -370,7 +366,7 @@ impl Accounts { self.accounts_db .write() .unwrap() - .store_accounts(txs, res, loaded) + .store_accounts(purge, txs, res, loaded) } pub fn increment_transaction_count(&self, tx_count: usize) { @@ -383,59 +379,13 @@ impl Accounts { pub fn transaction_count(&self) -> u64 { self.accounts_db.read().unwrap().transaction_count() } - - pub fn checkpoint(&self) { - self.accounts_db.write().unwrap().checkpoint() - } - - pub fn rollback(&self) { - self.accounts_db.write().unwrap().rollback() - } - - pub fn purge(&self, depth: usize) { - self.accounts_db.write().unwrap().purge(depth) - } - - 
pub fn depth(&self) -> usize { - self.accounts_db.read().unwrap().depth() - } -} - -impl Checkpoint for AccountsDB { - fn checkpoint(&mut self) { - let mut accounts = HashMap::new(); - std::mem::swap(&mut self.accounts, &mut accounts); - - self.checkpoints - .push_front((accounts, self.transaction_count())); - } - - fn rollback(&mut self) { - let (accounts, transaction_count) = self.checkpoints.pop_front().unwrap(); - self.accounts = accounts; - self.transaction_count = transaction_count; - } - - fn purge(&mut self, depth: usize) { - fn merge(into: &mut HashMap, purge: &mut HashMap) { - purge.retain(|pubkey, _| !into.contains_key(pubkey)); - into.extend(purge.drain()); - into.retain(|_, account| account.tokens != 0); - } - - while self.depth() > depth { - let (mut purge, _) = self.checkpoints.pop_back().unwrap(); - - if let Some((into, _)) = self.checkpoints.back_mut() { - merge(into, &mut purge); - continue; - } - merge(&mut self.accounts, &mut purge); - } - } - - fn depth(&self) -> usize { - self.checkpoints.len() + /// accounts starts with an empty data structure for every fork + /// self is trunk, merge the fork into self + pub fn merge_into_trunk(&self, other: Self) { + assert!(other.account_locks.lock().unwrap().is_empty()); + let db = other.accounts_db.into_inner().unwrap(); + let mut mydb = self.accounts_db.write().unwrap(); + mydb.merge(db) } } @@ -455,18 +405,13 @@ mod tests { tx: Transaction, ka: &Vec<(Pubkey, Account)>, error_counters: &mut ErrorCounters, - max_age: usize, ) -> Vec> { let accounts = Accounts::default(); for ka in ka.iter() { - accounts.store_slow(&ka.0, &ka.1); + accounts.store_slow(true, &ka.0, &ka.1); } - let id = Default::default(); - let mut last_ids: StatusDeque> = StatusDeque::default(); - last_ids.register_tick(&id); - - accounts.load_accounts(&[tx], &mut last_ids, vec![Ok(())], max_age, error_counters) + Accounts::load_accounts(&[&accounts], &[tx], vec![Ok(())], error_counters) } fn assert_counters(error_counters: 
&ErrorCounters, expected: [usize; 8]) { @@ -480,34 +425,6 @@ mod tests { assert_eq!(error_counters.missing_signature_for_fee, expected[7]); } - #[test] - fn test_load_accounts_index_out_of_bounds() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); - let mut error_counters = ErrorCounters::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - - let account = Account::new(1, 1, Pubkey::default()); - accounts.push((key0, account)); - - let instructions = vec![Instruction::new(1, &(), vec![0, 1])]; - let tx = Transaction::new_with_instructions( - &[&keypair], - &[], // TODO this should contain a key, should fail - Hash::default(), - 0, - vec![solana_native_loader::id()], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); - - assert_counters(&error_counters, [1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!(loaded_accounts[0], Err(BankError::AccountNotFound)); - } - #[test] fn test_load_accounts_no_key() { let accounts: Vec<(Pubkey, Account)> = Vec::new(); @@ -523,7 +440,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [1, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -547,7 +464,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [1, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -579,45 +496,13 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [1, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); 
assert_eq!(loaded_accounts[0], Err(BankError::AccountNotFound)); } - #[test] - fn test_load_accounts_max_age() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); - let mut error_counters = ErrorCounters::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::new(&[5u8; 32]); - - let account = Account::new(1, 1, Pubkey::default()); - accounts.push((key0, account)); - - let account = Account::new(2, 1, Pubkey::default()); - accounts.push((key1, account)); - - let instructions = vec![Instruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_instructions( - &[&keypair], - &[], - Hash::default(), - 0, - vec![solana_native_loader::id()], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 0); - - assert_counters(&error_counters, [0, 0, 1, 0, 0, 0, 0, 0]); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!(loaded_accounts[0], Err(BankError::LastIdNotFound)); - } - #[test] fn test_load_accounts_insufficient_funds() { let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); @@ -639,7 +524,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [0, 0, 0, 0, 1, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -671,7 +556,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [0, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -743,7 +628,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [0, 0, 0, 0, 0, 0, 1, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -777,7 
+662,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [1, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -810,7 +695,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [1, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); @@ -859,7 +744,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters, 10); + let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); assert_counters(&error_counters, [0, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(loaded_accounts.len(), 1); diff --git a/src/bank.rs b/src/bank.rs index 6da5afad203a40..d928da932a6cd3 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -3,21 +3,18 @@ //! on behalf of the caller, and a low-level API for when they have //! already been signed and verified. 
-use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders}; -use crate::checkpoint::Checkpoint; -use crate::counter::Counter; -use crate::entry::Entry; -use crate::entry::EntrySlice; +use crate::bank_state::{BankCheckpoint, BankState}; +use crate::entry::{Entry, EntrySlice}; +use crate::forks::Forks; +//use crate::jsonrpc_macros::pubsub::Sink; use crate::leader_scheduler::LeaderScheduler; +use crate::leader_scheduler::TICKS_PER_BLOCK; use crate::mint::Mint; use crate::poh_recorder::PohRecorder; -use crate::runtime::{self, RuntimeError}; -use crate::status_deque::{Status, StatusDeque, MAX_ENTRY_IDS}; +//use crate::rpc::RpcSignatureStatus; use crate::storage_stage::StorageState; use bincode::deserialize; use itertools::Itertools; -use log::Level; -use rayon::prelude::*; use solana_native_loader; use solana_sdk::account::Account; use solana_sdk::bpf_loader; @@ -32,7 +29,6 @@ use solana_sdk::storage_program; use solana_sdk::system_instruction::SystemInstruction; use solana_sdk::system_program; use solana_sdk::system_transaction::SystemTransaction; -use solana_sdk::timing::duration_as_us; use solana_sdk::token_program; use solana_sdk::transaction::Transaction; use solana_sdk::vote_program; @@ -40,7 +36,6 @@ use std; use std::result; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; -use std::time::Instant; /// Reasons a transaction might be rejected. 
#[derive(Debug, PartialEq, Eq, Clone)] @@ -77,6 +72,12 @@ pub enum BankError { /// Transaction has a fee but has no signature present MissingSignatureForFee, + + //TODO: move into forks.rs + UnknownFork, + InvalidTrunk, + CheckpointNotFinalized, + CheckpointIsFinalized, } pub type Result = result::Result; @@ -88,7 +89,7 @@ pub trait BankSubscriptions { fn check_signature(&self, signature: &Signature, status: &Result<()>); } -struct LocalSubscriptions {} +pub struct LocalSubscriptions {} impl Default for LocalSubscriptions { fn default() -> Self { LocalSubscriptions {} @@ -102,53 +103,48 @@ impl BankSubscriptions for LocalSubscriptions { /// Manager for the state of all accounts and programs after processing its entries. pub struct Bank { - pub accounts: Accounts, - - /// FIFO queue of `last_id` items - last_ids: RwLock>>, + forks: RwLock, // The latest confirmation time for the network confirmation_time: AtomicUsize, + subscriptions: RwLock>>, + /// Tracks and updates the leader schedule based on the votes and account stakes /// processed by the bank pub leader_scheduler: Arc>, pub storage_state: StorageState, - - subscriptions: RwLock>>, } impl Default for Bank { fn default() -> Self { Bank { - accounts: Accounts::default(), - last_ids: RwLock::new(StatusDeque::default()), + forks: RwLock::new(Forks::default()), + subscriptions: RwLock::new(Box::new(Arc::new(LocalSubscriptions::default()))), confirmation_time: AtomicUsize::new(std::usize::MAX), leader_scheduler: Arc::new(RwLock::new(LeaderScheduler::default())), storage_state: StorageState::new(), - subscriptions: RwLock::new(Box::new(Arc::new(LocalSubscriptions::default()))), } } } impl Bank { - /// Create an Bank with built-in programs. - pub fn new_with_builtin_programs() -> Self { - let bank = Self::default(); - bank.add_builtin_programs(); - bank - } - /// Create an Bank using a deposit. 
- pub fn new_from_deposits(deposits: &[Payment]) -> Self { + fn new_from_deposits(deposits: &[Payment], trunk: u64, last_id: &Hash) -> Self { let bank = Self::default(); - for deposit in deposits { - let mut account = Account::default(); - account.tokens += deposit.tokens; + let accounts: Vec<_> = deposits + .iter() + .map(|deposit| { + let mut account = Account::default(); + account.tokens += deposit.tokens; + (deposit.to, account) + }) + .collect(); + let bank_checkpoint = BankCheckpoint::new_from_accounts(trunk, &accounts, last_id); - bank.accounts.store_slow(&deposit.to, &account); - } + bank_checkpoint.register_tick(last_id); + bank.forks.write().unwrap().init_trunk_fork(bank_checkpoint); bank.add_builtin_programs(); bank } @@ -158,34 +154,6 @@ impl Bank { *sub = subscriptions } - pub fn checkpoint(&self) { - self.accounts.checkpoint(); - self.last_ids.write().unwrap().checkpoint(); - } - pub fn purge(&self, depth: usize) { - self.accounts.purge(depth); - self.last_ids.write().unwrap().purge(depth); - } - - pub fn rollback(&self) { - let rolled_back_pubkeys: Vec = self.accounts.keys(); - self.accounts.rollback(); - - rolled_back_pubkeys.iter().for_each(|pubkey| { - if let Some(account) = self.accounts.load_slow(&pubkey) { - self.subscriptions - .read() - .unwrap() - .check_account(&pubkey, &account) - } - }); - - self.last_ids.write().unwrap().rollback(); - } - pub fn checkpoint_depth(&self) -> usize { - self.accounts.depth() - } - /// Create an Bank with only a Mint. Typically used by unit tests. 
pub fn new(mint: &Mint) -> Self { let mint_tokens = if mint.bootstrap_leader_id != Pubkey::default() { @@ -208,9 +176,20 @@ impl Bank { } else { vec![mint_deposit] }; - let bank = Self::new_from_deposits(&deposits); - bank.register_tick(&mint.last_id()); - bank + Self::new_from_deposits(&deposits, 0, &mint.last_id()) + } + + pub fn init_fork(&self, current: u64, last_id: &Hash, base: u64) -> Result<()> { + if self.forks.read().unwrap().is_active_fork(current) { + return Ok(()); + } + self.forks + .write() + .unwrap() + .init_fork(current, last_id, base) + } + fn live_fork(&self) -> BankState { + self.forks.read().unwrap().live_fork() } fn add_system_program(&self) { @@ -221,8 +200,9 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.accounts - .store_slow(&system_program::id(), &system_program_account); + self.live_fork() + .head() + .store_slow(false, &system_program::id(), &system_program_account); } fn add_builtin_programs(&self) { @@ -236,8 +216,9 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.accounts - .store_slow(&vote_program::id(), &vote_program_account); + self.live_fork() + .head() + .store_slow(false, &vote_program::id(), &vote_program_account); // Storage program let storage_program_account = Account { @@ -247,8 +228,9 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.accounts - .store_slow(&storage_program::id(), &storage_program_account); + self.live_fork() + .head() + .store_slow(false, &storage_program::id(), &storage_program_account); let storage_system_account = Account { tokens: 1, @@ -257,8 +239,11 @@ impl Bank { executable: false, loader: Pubkey::default(), }; - self.accounts - .store_slow(&storage_program::system_id(), &storage_system_account); + self.live_fork().head().store_slow( + false, + &storage_program::system_id(), + &storage_system_account, + ); // Bpf Loader let bpf_loader_account = Account { @@ -269,8 +254,9 @@ impl Bank { loader: 
solana_native_loader::id(), }; - self.accounts - .store_slow(&bpf_loader::id(), &bpf_loader_account); + self.live_fork() + .head() + .store_slow(false, &bpf_loader::id(), &bpf_loader_account); // Budget program let budget_program_account = Account { @@ -280,8 +266,9 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.accounts - .store_slow(&budget_program::id(), &budget_program_account); + self.live_fork() + .head() + .store_slow(false, &budget_program::id(), &budget_program_account); // Erc20 token program let erc20_account = Account { @@ -292,17 +279,18 @@ impl Bank { loader: solana_native_loader::id(), }; - self.accounts - .store_slow(&token_program::id(), &erc20_account); + self.live_fork() + .head() + .store_slow(false, &token_program::id(), &erc20_account); + } + + pub fn tpu_register_tick(&self, last_id: &Hash) { + self.live_fork().head().register_tick(last_id) } /// Return the last entry ID registered. pub fn last_id(&self) -> Hash { - self.last_ids - .read() - .unwrap() - .last_id - .expect("no last_id has been set") + self.live_fork().head().last_id() } pub fn get_pubkeys_for_entry_height(&self, entry_height: u64) -> Vec { @@ -337,20 +325,13 @@ impl Bank { Hash::default() } - /// Forget all signatures. Useful for benchmarking. 
- pub fn clear_signatures(&self) { - self.last_ids.write().unwrap().clear_signatures(); - } - - fn update_transaction_statuses(&self, txs: &[Transaction], res: &[Result<()>]) { - let mut last_ids = self.last_ids.write().unwrap(); - for (i, tx) in txs.iter().enumerate() { - last_ids.update_signature_status_with_last_id(&tx.signatures[0], &res[i], &tx.last_id); - self.subscriptions - .read() - .unwrap() - .check_signature(&tx.signatures[0], &res[i]); - } + /// Look through the last_ids and find all the valid ids + /// This is batched to avoid holding the lock for a significant amount of time + /// + /// Return a vec of tuple of (valid index, timestamp) + /// index is into the passed ids slice to avoid copying hashes + pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> { + self.live_fork().head().count_valid_ids(ids) } /// Looks through a list of tick heights and stakes, and finds the latest @@ -360,18 +341,12 @@ impl Bank { ticks_and_stakes: &mut [(u64, u64)], supermajority_stake: u64, ) -> Option { - let last_ids = self.last_ids.read().unwrap(); - last_ids.get_confirmation_timestamp(ticks_and_stakes, supermajority_stake) + self.live_fork() + .head() + .get_confirmation_timestamp(ticks_and_stakes, supermajority_stake) } - - /// Tell the bank which Entry IDs exist on the ledger. This function - /// assumes subsequent calls correspond to later entries, and will boot - /// the oldest ones once its internal cache is full. Once boot, the - /// bank will reject transactions using that `last_id`. - pub fn register_tick(&self, last_id: &Hash) { - let mut last_ids = self.last_ids.write().unwrap(); - inc_new_counter_info!("bank-register_tick-registered", 1); - last_ids.register_tick(last_id) + pub fn tick_height(&self) -> u64 { + self.live_fork().head().tick_height() } /// Process a Transaction. This is used for unit tests and simply calls the vector Bank::process_transactions method. 
@@ -386,362 +361,48 @@ impl Bank { } } - fn lock_accounts(&self, txs: &[Transaction]) -> Vec> { - self.accounts.lock_accounts(txs) - } - - fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) { - self.accounts.unlock_accounts(txs, results) - } - - pub fn process_and_record_transactions( - &self, - txs: &[Transaction], - poh: &PohRecorder, - ) -> Result<()> { - let now = Instant::now(); - // Once accounts are locked, other threads cannot encode transactions that will modify the - // same account state - let lock_results = self.lock_accounts(txs); - let lock_time = now.elapsed(); - - let now = Instant::now(); - // Use a shorter maximum age when adding transactions into the pipeline. This will reduce - // the likelihood of any single thread getting starved and processing old ids. - // TODO: Banking stage threads should be prioritized to complete faster then this queue - // expires. - let (loaded_accounts, results) = - self.load_and_execute_transactions(txs, lock_results, MAX_ENTRY_IDS as usize / 2); - let load_execute_time = now.elapsed(); - - let record_time = { - let now = Instant::now(); - self.record_transactions(txs, &results, poh)?; - now.elapsed() - }; - - let commit_time = { - let now = Instant::now(); - self.commit_transactions(txs, &loaded_accounts, &results); - now.elapsed() - }; - - let now = Instant::now(); - // Once the accounts are new transactions can enter the pipeline to process them - self.unlock_accounts(&txs, &results); - let unlock_time = now.elapsed(); - debug!( - "lock: {}us load_execute: {}us record: {}us commit: {}us unlock: {}us txs_len: {}", - duration_as_us(&lock_time), - duration_as_us(&load_execute_time), - duration_as_us(&record_time), - duration_as_us(&commit_time), - duration_as_us(&unlock_time), - txs.len(), - ); - Ok(()) - } - - fn record_transactions( - &self, - txs: &[Transaction], - results: &[Result<()>], - poh: &PohRecorder, - ) -> Result<()> { - let processed_transactions: Vec<_> = results - .iter() - 
.zip(txs.iter()) - .filter_map(|(r, x)| match r { - Ok(_) => Some(x.clone()), - Err(BankError::ProgramError(index, err)) => { - info!("program error {:?}, {:?}", index, err); - Some(x.clone()) - } - Err(ref e) => { - debug!("process transaction failed {:?}", e); - None - } - }) - .collect(); - debug!("processed: {} ", processed_transactions.len()); - // unlock all the accounts with errors which are filtered by the above `filter_map` - if !processed_transactions.is_empty() { - let hash = Transaction::hash(&processed_transactions); - // record and unlock will unlock all the successfull transactions - poh.record(hash, processed_transactions).map_err(|e| { - warn!("record failure: {:?}", e); - BankError::RecordFailure - })?; - } - Ok(()) - } - - fn load_accounts( - &self, - txs: &[Transaction], - lock_results: Vec>, - max_age: usize, - error_counters: &mut ErrorCounters, - ) -> Vec> { - let mut last_ids = self.last_ids.write().unwrap(); - self.accounts - .load_accounts(txs, &mut last_ids, lock_results, max_age, error_counters) - } - - #[allow(clippy::type_complexity)] - fn load_and_execute_transactions( - &self, - txs: &[Transaction], - lock_results: Vec>, - max_age: usize, - ) -> ( - Vec>, - Vec>, - ) { - debug!("processing transactions: {}", txs.len()); - let mut error_counters = ErrorCounters::default(); - let now = Instant::now(); - let mut loaded_accounts = - self.load_accounts(txs, lock_results, max_age, &mut error_counters); - let tick_height = self.tick_height(); - - let load_elapsed = now.elapsed(); - let now = Instant::now(); - let executed: Vec> = loaded_accounts - .iter_mut() - .zip(txs.iter()) - .map(|(accs, tx)| match accs { - Err(e) => Err(e.clone()), - Ok((ref mut accounts, ref mut loaders)) => { - runtime::execute_transaction(tx, loaders, accounts, tick_height).map_err( - |RuntimeError::ProgramError(index, err)| { - BankError::ProgramError(index, err) - }, - ) - } - }) - .collect(); - - let execution_elapsed = now.elapsed(); - - debug!( - "load: {}us 
execute: {}us txs_len={}", - duration_as_us(&load_elapsed), - duration_as_us(&execution_elapsed), - txs.len(), - ); - let mut tx_count = 0; - let mut err_count = 0; - for (r, tx) in executed.iter().zip(txs.iter()) { - if r.is_ok() { - tx_count += 1; - } else { - if err_count == 0 { - info!("tx error: {:?} {:?}", r, tx); - } - err_count += 1; - } - } - if err_count > 0 { - info!("{} errors of {} txs", err_count, err_count + tx_count); - inc_new_counter_info!( - "bank-process_transactions-account_not_found", - error_counters.account_not_found - ); - inc_new_counter_info!("bank-process_transactions-error_count", err_count); - } - - self.accounts.increment_transaction_count(tx_count); - - inc_new_counter_info!("bank-process_transactions-txs", tx_count); - if 0 != error_counters.last_id_not_found { - inc_new_counter_info!( - "bank-process_transactions-error-last_id_not_found", - error_counters.last_id_not_found - ); - } - if 0 != error_counters.reserve_last_id { - inc_new_counter_info!( - "bank-process_transactions-error-reserve_last_id", - error_counters.reserve_last_id - ); - } - if 0 != error_counters.duplicate_signature { - inc_new_counter_info!( - "bank-process_transactions-error-duplicate_signature", - error_counters.duplicate_signature - ); - } - if 0 != error_counters.insufficient_funds { - inc_new_counter_info!( - "bank-process_transactions-error-insufficient_funds", - error_counters.insufficient_funds - ); - } - (loaded_accounts, executed) + pub fn trunk_fork(&self) -> BankState { + self.forks.read().unwrap().trunk_fork() } - fn commit_transactions( - &self, - txs: &[Transaction], - loaded_accounts: &[Result<(InstructionAccounts, InstructionLoaders)>], - executed: &[Result<()>], - ) { - let now = Instant::now(); - self.accounts.store_accounts(txs, executed, loaded_accounts); - - // Check account subscriptions and send notifications - self.send_account_notifications(txs, executed, loaded_accounts); - - // once committed there is no way to unroll - let 
write_elapsed = now.elapsed(); - debug!( - "store: {}us txs_len={}", - duration_as_us(&write_elapsed), - txs.len(), - ); - self.update_transaction_statuses(txs, &executed); + pub fn bank_state(&self, fork: u64) -> Option { + self.forks.read().unwrap().bank_state(fork) } - /// Process a batch of transactions. #[must_use] - pub fn load_execute_and_commit_transactions( + pub fn process_and_record_transactions( &self, txs: &[Transaction], - lock_results: Vec>, - max_age: usize, - ) -> Vec> { - let (loaded_accounts, executed) = - self.load_and_execute_transactions(txs, lock_results, max_age); - - self.commit_transactions(txs, &loaded_accounts, &executed); - executed + poh: Option<&PohRecorder>, + ) -> Result<(Vec>)> { + let state = self.forks.read().unwrap().live_fork(); + //TODO: pass the pubsub to process_and_record + state.process_and_record_transactions(txs, poh) } - #[must_use] pub fn process_transactions(&self, txs: &[Transaction]) -> Vec> { - let lock_results = self.lock_accounts(txs); - let results = self.load_execute_and_commit_transactions(txs, lock_results, MAX_ENTRY_IDS); - self.unlock_accounts(txs, &results); - results - } - - pub fn process_entry(&self, entry: &Entry) -> Result<()> { - if !entry.is_tick() { - for result in self.process_transactions(&entry.transactions) { - match result { - // Entries that result in a ProgramError are still valid and are written in the - // ledger so map them to an ok return value - Err(BankError::ProgramError(_, _)) => Ok(()), - _ => result, - }?; - } - } else { - self.register_tick(&entry.id); - self.leader_scheduler - .write() - .unwrap() - .update_height(self.tick_height(), self); - } - - Ok(()) + self.process_and_record_transactions(txs, None) + .expect("record skipped") } /// Process an ordered list of entries. 
pub fn process_entries(&self, entries: &[Entry]) -> Result<()> { - self.par_process_entries(entries) + let state = self.forks.read().unwrap().live_fork(); + state.par_process_entries(entries) } - pub fn first_err(results: &[Result<()>]) -> Result<()> { - for r in results { - r.clone()?; - } - Ok(()) - } - - fn ignore_program_errors(results: Vec>) -> Vec> { - results - .into_iter() - .map(|result| match result { - // Entries that result in a ProgramError are still valid and are written in the - // ledger so map them to an ok return value - Err(BankError::ProgramError(index, err)) => { - info!("program error {:?}, {:?}", index, err); - inc_new_counter_info!("bank-ignore_program_err", 1); - Ok(()) - } - _ => result, - }) - .collect() - } - - fn par_execute_entries(&self, entries: &[(&Entry, Vec>)]) -> Result<()> { - inc_new_counter_info!("bank-par_execute_entries-count", entries.len()); - let results: Vec> = entries - .into_par_iter() - .map(|(e, lock_results)| { - let old_results = self.load_execute_and_commit_transactions( - &e.transactions, - lock_results.to_vec(), - MAX_ENTRY_IDS, - ); - let results = Bank::ignore_program_errors(old_results); - self.unlock_accounts(&e.transactions, &results); - Self::first_err(&results) - }) - .collect(); - Self::first_err(&results) - } - - /// process entries in parallel - /// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry - /// 2. Process the locked group in parallel - /// 3. 
Register the `Tick` if it's available, goto 1 - pub fn par_process_entries(&self, entries: &[Entry]) -> Result<()> { - // accumulator for entries that can be processed in parallel - let mut mt_group = vec![]; - for entry in entries { - if entry.is_tick() { - // if its a tick, execute the group and register the tick - self.par_execute_entries(&mt_group)?; - self.register_tick(&entry.id); - self.leader_scheduler - .write() - .unwrap() - .update_height(self.tick_height(), self); - mt_group = vec![]; - continue; - } - // try to lock the accounts - let lock_results = self.lock_accounts(&entry.transactions); - // if any of the locks error out - // execute the current group - if Self::first_err(&lock_results).is_err() { - self.par_execute_entries(&mt_group)?; - mt_group = vec![]; - //reset the lock and push the entry - self.unlock_accounts(&entry.transactions, &lock_results); - let lock_results = self.lock_accounts(&entry.transactions); - mt_group.push((entry, lock_results)); - } else { - // push the entry to the mt_group - mt_group.push((entry, lock_results)); - } - } - self.par_execute_entries(&mt_group)?; - Ok(()) + /// Process an ordered list of entries in a fork. + pub fn process_fork_entries(&self, fork: u64, entries: &[Entry]) -> Result<()> { + let state = self + .forks + .read() + .unwrap() + .bank_state(fork) + .ok_or(BankError::UnknownFork)?; + state.par_process_entries(entries) } - /// Process an ordered list of entries, populating a circular buffer "tail" /// as we go. - fn process_block(&self, entries: &[Entry]) -> Result<()> { - for entry in entries { - self.process_entry(entry)?; - } - - Ok(()) - } - /// Append entry blocks to the ledger, verifying them along the way. fn process_ledger_blocks( &self, @@ -759,15 +420,37 @@ impl Bank { // Ledger verification needs to be parallelized, but we can't pull the whole // thing into memory. We therefore chunk it. 
- for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) { - let block: Vec<_> = block.collect(); + // TODO: this is a broken way to group entries by block, the ledger shoudl correclty do + // this and present the data with the correct slot index for each fork + for (current, group) in &entries + .into_iter() + .group_by(|e| e.tick_height / TICKS_PER_BLOCK) + { + let block: Vec = group.collect(); - if !block.verify(&last_id) { + if !block.as_slice().verify(&last_id) { warn!("Ledger proof of history failed at entry: {}", entry_height); return Err(BankError::LedgerVerificationFailed); } - self.process_block(&block)?; + if !self.forks.read().unwrap().is_active_fork(current) { + let new = current; + let base = self.forks.read().unwrap().live_fork; + // create a new fork + self.init_fork(new, &block[0].id, base) + .expect("initializing fork for replay"); + //there is only one base, and its the current live fork + self.forks + .write() + .unwrap() + .merge_into_trunk(base, new) + .expect("merge into trunk"); + } else { + // only the first fork should be active at the start of the loop + // every block should be unique otherwise + assert_eq!(current, 0); + } + self.process_entries(&block)?; last_id = block.last().unwrap().id; entry_height += block.len() as u64; @@ -786,6 +469,16 @@ impl Bank { // which implies its id can be used as the ledger's seed. 
let entry0 = entries.next().expect("invalid ledger: empty"); + // initialize the first checkpoint + // entry id is used by the bank state to filter duplicate signatures + + self.forks + .write() + .unwrap() + .init_trunk_fork(BankCheckpoint::new(0, &entry0.id)); + + self.add_builtin_programs(); + // The second item in the ledger consists of a transaction with // two special instructions: // 1) The first is a special move instruction where the to and from @@ -827,12 +520,11 @@ impl Bank { { // 1) Deposit into the mint - let mut account = self - .accounts - .load_slow(&tx.account_keys[0]) - .unwrap_or_default(); + let mut account = self.get_account(&tx.account_keys[0]).unwrap_or_default(); account.tokens += mint_deposit - leader_payment; - self.accounts.store_slow(&tx.account_keys[0], &account); + self.live_fork() + .head() + .store_slow(false, &tx.account_keys[0], &account); trace!( "applied genesis payment {:?} => {:?}", mint_deposit - leader_payment, @@ -845,12 +537,11 @@ impl Bank { // mint itself), so we look at the third account key to find the first // leader id. 
let bootstrap_leader_id = tx.account_keys[2]; - let mut account = self - .accounts - .load_slow(&bootstrap_leader_id) - .unwrap_or_default(); + let mut account = self.get_account(&bootstrap_leader_id).unwrap_or_default(); account.tokens += leader_payment; - self.accounts.store_slow(&bootstrap_leader_id, &account); + self.live_fork() + .head() + .store_slow(false, &bootstrap_leader_id, &account); self.leader_scheduler.write().unwrap().bootstrap_leader = bootstrap_leader_id; @@ -897,39 +588,32 @@ impl Bank { } pub fn get_account(&self, pubkey: &Pubkey) -> Option { - self.accounts.load_slow(pubkey) + let state = self.live_fork(); + state.load_slow(pubkey) } + pub fn store_slow(&self, pubkey: &Pubkey, account: &Account) { + let state = self.live_fork(); + let purge = state.checkpoints.len() == 1; + state.head().store_slow(purge, pubkey, account) + } pub fn transaction_count(&self) -> u64 { - self.accounts.transaction_count() + self.live_fork().head().transaction_count() } - pub fn get_signature_status(&self, signature: &Signature) -> Option>> { - self.last_ids - .read() - .unwrap() - .get_signature_status(signature) + pub fn get_signature_status(&self, signature: &Signature) -> Option> { + self.live_fork().get_signature_status(signature) } pub fn has_signature(&self, signature: &Signature) -> bool { - self.last_ids.read().unwrap().has_signature(signature) - } - - pub fn get_signature( - &self, - last_id: &Hash, - signature: &Signature, - ) -> Option>> { - self.last_ids - .read() - .unwrap() - .get_signature(last_id, signature) + self.live_fork().has_signature(signature) } /// Hash the `accounts` HashMap. 
This represents a validator's interpretation /// of the delta of the ledger since the last vote and up to now pub fn hash_internal_state(&self) -> Hash { - self.accounts.hash_internal_state() + //TODO: this probably needs to iterate the checkpoints and update a merkle + self.live_fork().head().hash_internal_state() } pub fn confirmation_time(&self) -> usize { @@ -941,58 +625,33 @@ impl Bank { .store(confirmation, Ordering::Relaxed); } - fn send_account_notifications( - &self, - txs: &[Transaction], - res: &[Result<()>], - loaded: &[Result<(InstructionAccounts, InstructionLoaders)>], - ) { - for (i, raccs) in loaded.iter().enumerate() { - if res[i].is_err() || raccs.is_err() { - continue; - } - - let tx = &txs[i]; - let accs = raccs.as_ref().unwrap(); - for (key, account) in tx.account_keys.iter().zip(accs.0.iter()) { - self.subscriptions - .read() - .unwrap() - .check_account(&key, account); - } - } - } - pub fn get_current_leader(&self) -> Option<(Pubkey, u64)> { self.leader_scheduler .read() .unwrap() .get_scheduled_leader(self.tick_height() + 1) } - - pub fn tick_height(&self) -> u64 { - self.last_ids.read().unwrap().tick_height - } } #[cfg(test)] mod tests { use super::*; use crate::entry::{next_entries, next_entry, Entry}; + //use crate::jsonrpc_macros::pubsub::Subscriber; + //use crate::jsonrpc_pubsub::SubscriptionId; use crate::signature::GenKeys; - use crate::status_deque; - use crate::status_deque::StatusDequeError; use bincode::serialize; use hashbrown::HashSet; use solana_sdk::hash::hash; use solana_sdk::native_program::ProgramError; use solana_sdk::signature::Keypair; use solana_sdk::signature::KeypairUtil; - use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; + //use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::transaction::Instruction; use std; - use std::sync::mpsc::channel; + //use std::sync::mpsc::channel; + //use 
tokio::prelude::{Stream, Async}; #[test] fn test_bank_new() { @@ -1045,14 +704,11 @@ mod tests { assert_eq!(bank.get_balance(&mint.pubkey()), 0); assert_eq!(bank.get_balance(&key1), 1); assert_eq!(bank.get_balance(&key2), 0); - assert_eq!( - bank.get_signature(&t1.last_id, &t1.signatures[0]), - Some(Status::Complete(Ok(()))) - ); + assert_eq!(bank.get_signature_status(&t1.signatures[0]), Some(Ok(()))); // TODO: Transactions that fail to pay a fee could be dropped silently assert_eq!( - bank.get_signature(&t2.last_id, &t2.signatures[0]), - Some(Status::Complete(Err(BankError::AccountInUse))) + bank.get_signature_status(&t2.signatures[0]), + Some(Err(BankError::AccountInUse)) ); } @@ -1097,11 +753,11 @@ mod tests { assert_eq!(bank.get_balance(&key1), 0); assert_eq!(bank.get_balance(&key2), 0); assert_eq!( - bank.get_signature(&t1.last_id, &t1.signatures[0]), - Some(Status::Complete(Err(BankError::ProgramError( + bank.get_signature_status(&t1.signatures[0]), + Some(Err(BankError::ProgramError( 1, ProgramError::ResultWithNegativeTokens - )))) + ))) ); } @@ -1123,10 +779,7 @@ mod tests { assert_eq!(bank.get_balance(&mint.pubkey()), 0); assert_eq!(bank.get_balance(&key1), 1); assert_eq!(bank.get_balance(&key2), 1); - assert_eq!( - bank.get_signature(&t1.last_id, &t1.signatures[0]), - Some(Status::Complete(Ok(()))) - ); + assert_eq!(bank.get_signature_status(&t1.signatures[0]), Some(Ok(()))); } // TODO: This test demonstrates that fees are not paid when a program fails. @@ -1156,10 +809,10 @@ mod tests { assert!(bank.has_signature(&signature)); assert_matches!( bank.get_signature_status(&signature), - Some(Status::Complete(Err(BankError::ProgramError( + Some(Err(BankError::ProgramError( 0, ProgramError::ResultWithNegativeTokens - )))) + ))) ); // The tokens didn't move, but the from address paid the transaction fee. 
@@ -1351,7 +1004,6 @@ mod tests { fn test_process_ledger_simple() { let (ledger, pubkey) = create_sample_ledger(1); let bank = Bank::default(); - bank.add_system_program(); let (ledger_height, last_id) = bank.process_ledger(ledger).unwrap(); assert_eq!(bank.get_balance(&pubkey), 1); assert_eq!(ledger_height, 6); @@ -1372,10 +1024,8 @@ mod tests { let ledger1 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs); let bank0 = Bank::default(); - bank0.add_system_program(); bank0.process_ledger(ledger0).unwrap(); let bank1 = Bank::default(); - bank1.add_system_program(); bank1.process_ledger(ledger1).unwrap(); let initial_state = bank0.hash_internal_state(); @@ -1399,50 +1049,104 @@ mod tests { def_bank.set_confirmation_time(90); assert_eq!(def_bank.confirmation_time(), 90); } - #[test] - fn test_interleaving_locks() { - let mint = Mint::new(3); - let bank = Bank::new(&mint); - let alice = Keypair::new(); - let bob = Keypair::new(); - - let tx1 = Transaction::system_new(&mint.keypair(), alice.pubkey(), 1, mint.last_id()); - let pay_alice = vec![tx1]; - - let lock_result = bank.lock_accounts(&pay_alice); - let results_alice = - bank.load_execute_and_commit_transactions(&pay_alice, lock_result, MAX_ENTRY_IDS); - assert_eq!(results_alice[0], Ok(())); - - // try executing an interleaved transfer twice - assert_eq!( - bank.transfer(1, &mint.keypair(), bob.pubkey(), mint.last_id()), - Err(BankError::AccountInUse) - ); - // the second time should fail as well - // this verifies that `unlock_accounts` doesn't unlock `AccountInUse` accounts - assert_eq!( - bank.transfer(1, &mint.keypair(), bob.pubkey(), mint.last_id()), - Err(BankError::AccountInUse) - ); - - bank.unlock_accounts(&pay_alice, &results_alice); - - assert_matches!( - bank.transfer(2, &mint.keypair(), bob.pubkey(), mint.last_id()), - Ok(_) - ); - } + // #[test] + // fn test_bank_account_subscribe() { + // let mint = Mint::new(100); + // let bank = Bank::new(&mint); + // let alice = Keypair::new(); + // 
let bank_sub_id = Keypair::new().pubkey(); + // let last_id = bank.last_id(); + // let tx = Transaction::system_create( + // &mint.keypair(), + // alice.pubkey(), + // last_id, + // 1, + // 16, + // budget_program::id(), + // 0, + // ); + // bank.process_transaction(&tx).unwrap(); + + // let (subscriber, _id_receiver, mut transport_receiver) = + // Subscriber::new_test("accountNotification"); + // let sub_id = SubscriptionId::Number(0 as u64); + // let sink = subscriber.assign_id(sub_id.clone()).unwrap(); + // bank.add_account_subscription(bank_sub_id, alice.pubkey(), sink); + + // assert!(bank + // .subscriptions + // .accounts + // .write() + // .unwrap() + // .contains_key(&alice.pubkey())); + + // let account = bank.get_account(&alice.pubkey()).unwrap(); + // bank.check_account_subscriptions(&alice.pubkey(), &account); + // let string = transport_receiver.poll(); + // assert!(string.is_ok()); + // if let Async::Ready(Some(response)) = string.unwrap() { + // let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#); + // assert_eq!(expected, response); + // } + + // bank.remove_account_subscription(&bank_sub_id, &alice.pubkey()); + // assert!(!bank + // .subscriptions + // .accounts + // .write() + // .unwrap() + // .contains_key(&alice.pubkey())); + // } + //#[test] + //fn test_bank_signature_subscribe() { + // let mint = Mint::new(100); + // let bank = Bank::new(&mint); + // let alice = Keypair::new(); + // let bank_sub_id = Keypair::new().pubkey(); + // let last_id = bank.last_id(); + // let tx = Transaction::system_move(&mint.keypair(), alice.pubkey(), 20, last_id, 0); + // let signature = tx.signatures[0]; + // bank.process_transaction(&tx).unwrap(); + + // let 
(subscriber, _id_receiver, mut transport_receiver) = + // Subscriber::new_test("signatureNotification"); + // let sub_id = SubscriptionId::Number(0 as u64); + // let sink = subscriber.assign_id(sub_id.clone()).unwrap(); + // bank.add_signature_subscription(bank_sub_id, signature, sink); + + // assert!(bank + // .subscriptions + // .signatures + // .write() + // .unwrap() + // .contains_key(&signature)); + + // bank.check_signature_subscriptions(&signature, RpcSignatureStatus::Confirmed); + // let string = transport_receiver.poll(); + // assert!(string.is_ok()); + // if let Async::Ready(Some(response)) = string.unwrap() { + // let expected = format!(r#"{{"jsonrpc":"2.0","method":"signatureNotification","params":{{"result":"Confirmed","subscription":0}}}}"#); + // assert_eq!(expected, response); + // } + + // bank.remove_signature_subscription(&bank_sub_id, &signature); + // assert!(!bank + // .subscriptions + // .signatures + // .write() + // .unwrap() + // .contains_key(&signature)); + //} #[test] fn test_first_err() { - assert_eq!(Bank::first_err(&[Ok(())]), Ok(())); + assert_eq!(BankCheckpoint::first_err(&[Ok(())]), Ok(())); assert_eq!( - Bank::first_err(&[Ok(()), Err(BankError::DuplicateSignature)]), + BankCheckpoint::first_err(&[Ok(()), Err(BankError::DuplicateSignature)]), Err(BankError::DuplicateSignature) ); assert_eq!( - Bank::first_err(&[ + BankCheckpoint::first_err(&[ Ok(()), Err(BankError::DuplicateSignature), Err(BankError::AccountInUse) @@ -1450,7 +1154,7 @@ mod tests { Err(BankError::DuplicateSignature) ); assert_eq!( - Bank::first_err(&[ + BankCheckpoint::first_err(&[ Ok(()), Err(BankError::AccountInUse), Err(BankError::DuplicateSignature) @@ -1458,7 +1162,7 @@ mod tests { Err(BankError::AccountInUse) ); assert_eq!( - Bank::first_err(&[ + BankCheckpoint::first_err(&[ Err(BankError::AccountInUse), Ok(()), Err(BankError::DuplicateSignature) @@ -1473,7 +1177,7 @@ mod tests { // ensure bank can process a tick let tick = next_entry(&mint.last_id(), 1, 
vec![]); - assert_eq!(bank.par_process_entries(&[tick.clone()]), Ok(())); + assert_eq!(bank.process_entries(&[tick.clone()]), Ok(())); assert_eq!(bank.last_id(), tick.id); } #[test] @@ -1490,7 +1194,7 @@ mod tests { let entry_1 = next_entry(&last_id, 1, vec![tx]); let tx = Transaction::system_new(&mint.keypair(), keypair2.pubkey(), 2, bank.last_id()); let entry_2 = next_entry(&entry_1.id, 1, vec![tx]); - assert_eq!(bank.par_process_entries(&[entry_1, entry_2]), Ok(())); + assert_eq!(bank.process_entries(&[entry_1, entry_2]), Ok(())); assert_eq!(bank.get_balance(&keypair1.pubkey()), 2); assert_eq!(bank.get_balance(&keypair2.pubkey()), 2); assert_eq!(bank.last_id(), last_id); @@ -1539,7 +1243,7 @@ mod tests { ); assert_eq!( - bank.par_process_entries(&[entry_1_to_mint, entry_2_to_3_mint_to_1]), + bank.process_entries(&[entry_1_to_mint, entry_2_to_3_mint_to_1]), Ok(()) ); @@ -1568,7 +1272,7 @@ mod tests { let entry_1 = next_entry(&last_id, 1, vec![tx]); let tx = Transaction::system_new(&keypair2, keypair4.pubkey(), 1, bank.last_id()); let entry_2 = next_entry(&entry_1.id, 1, vec![tx]); - assert_eq!(bank.par_process_entries(&[entry_1, entry_2]), Ok(())); + assert_eq!(bank.process_entries(&[entry_1, entry_2]), Ok(())); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); assert_eq!(bank.get_balance(&keypair4.pubkey()), 1); assert_eq!(bank.last_id(), last_id); @@ -1582,7 +1286,7 @@ mod tests { let keypair3 = Keypair::new(); let keypair4 = Keypair::new(); - //load accounts + // load accounts with 1 each let tx = Transaction::system_new(&mint.keypair(), keypair1.pubkey(), 1, bank.last_id()); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = Transaction::system_new(&mint.keypair(), keypair2.pubkey(), 1, bank.last_id()); @@ -1597,15 +1301,17 @@ mod tests { let tx = Transaction::system_new(&keypair1, keypair4.pubkey(), 1, tick.id); let entry_2 = next_entry(&tick.id, 1, vec![tx]); assert_eq!( - bank.par_process_entries(&[entry_1.clone(), tick.clone(), entry_2]), + 
bank.process_entries(&[entry_1.clone(), tick.clone(), entry_2.clone()]), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); assert_eq!(bank.get_balance(&keypair4.pubkey()), 1); assert_eq!(bank.last_id(), tick.id); - // ensure that errors are returned + // ensure that an error is returned for an empty account (keypair2) + let tx = Transaction::system_new(&keypair2, keypair3.pubkey(), 1, tick.id); + let entry_3 = next_entry(&entry_2.id, 1, vec![tx]); assert_eq!( - bank.par_process_entries(&[entry_1]), + bank.process_entries(&[entry_3]), Err(BankError::AccountNotFound) ); } @@ -1671,60 +1377,60 @@ mod tests { assert!(ids.into_iter().all(move |id| unique.insert(id))); } - #[test] - fn test_bank_purge() { - let alice = Mint::new(10_000); - let bank = Bank::new(&alice); - let bob = Keypair::new(); - let charlie = Keypair::new(); - - // bob should have 500 - bank.transfer(500, &alice.keypair(), bob.pubkey(), alice.last_id()) - .unwrap(); - assert_eq!(bank.get_balance(&bob.pubkey()), 500); - - bank.transfer(500, &alice.keypair(), charlie.pubkey(), alice.last_id()) - .unwrap(); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - - bank.checkpoint(); - bank.checkpoint(); - assert_eq!(bank.checkpoint_depth(), 2); - assert_eq!(bank.get_balance(&bob.pubkey()), 500); - assert_eq!(bank.get_balance(&alice.pubkey()), 9_000); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 2); - - // transfer money back, so bob has zero - bank.transfer(500, &bob, alice.keypair().pubkey(), alice.last_id()) - .unwrap(); - // this has to be stored as zero in the top accounts hashmap ;) - assert!(bank.accounts.load_slow(&bob.pubkey()).is_some()); - assert_eq!(bank.get_balance(&bob.pubkey()), 0); - // double-checks - assert_eq!(bank.get_balance(&alice.pubkey()), 9_500); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 3); - bank.purge(1); - - assert_eq!(bank.get_balance(&bob.pubkey()), 0); - // 
double-checks - assert_eq!(bank.get_balance(&alice.pubkey()), 9_500); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 3); - assert_eq!(bank.checkpoint_depth(), 1); - - bank.purge(0); - - // bob should still have 0, alice should have 10_000 - assert_eq!(bank.get_balance(&bob.pubkey()), 0); - assert!(bank.accounts.load_slow(&bob.pubkey()).is_none()); - // double-checks - assert_eq!(bank.get_balance(&alice.pubkey()), 9_500); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 3); - assert_eq!(bank.checkpoint_depth(), 0); - } + //#[test] + //fn test_bank_purge() { + // let alice = Mint::new(10_000); + // let bank = Bank::new(&alice); + // let bob = Keypair::new(); + // let charlie = Keypair::new(); + + // // bob should have 500 + // bank.transfer(500, &alice.keypair(), bob.pubkey(), alice.last_id()) + // .unwrap(); + // assert_eq!(bank.get_balance(&bob.pubkey()), 500); + + // bank.transfer(500, &alice.keypair(), charlie.pubkey(), alice.last_id()) + // .unwrap(); + // assert_eq!(bank.get_balance(&charlie.pubkey()), 500); + + // bank.checkpoint(); + // bank.checkpoint(); + // assert_eq!(bank.checkpoint_depth(), 2); + // assert_eq!(bank.get_balance(&bob.pubkey()), 500); + // assert_eq!(bank.get_balance(&alice.pubkey()), 9_000); + // assert_eq!(bank.get_balance(&charlie.pubkey()), 500); + // assert_eq!(bank.transaction_count(), 2); + + // // transfer money back, so bob has zero + // bank.transfer(500, &bob, alice.keypair().pubkey(), alice.last_id()) + // .unwrap(); + // // this has to be stored as zero in the top accounts hashmap ;) + // assert!(bank.accounts.load_slow(&bob.pubkey()).is_some()); + // assert_eq!(bank.get_balance(&bob.pubkey()), 0); + // // double-checks + // assert_eq!(bank.get_balance(&alice.pubkey()), 9_500); + // assert_eq!(bank.get_balance(&charlie.pubkey()), 500); + // assert_eq!(bank.transaction_count(), 3); + // bank.purge(1); + + // 
assert_eq!(bank.get_balance(&bob.pubkey()), 0); + // // double-checks + // assert_eq!(bank.get_balance(&alice.pubkey()), 9_500); + // assert_eq!(bank.get_balance(&charlie.pubkey()), 500); + // assert_eq!(bank.transaction_count(), 3); + // assert_eq!(bank.checkpoint_depth(), 1); + + // bank.purge(0); + + // // bob should still have 0, alice should have 10_000 + // assert_eq!(bank.get_balance(&bob.pubkey()), 0); + // assert!(bank.accounts.load_slow(&bob.pubkey()).is_none()); + // // double-checks + // assert_eq!(bank.get_balance(&alice.pubkey()), 9_500); + // assert_eq!(bank.get_balance(&charlie.pubkey()), 500); + // assert_eq!(bank.transaction_count(), 3); + // assert_eq!(bank.checkpoint_depth(), 0); + //} #[test] fn test_bank_checkpoint_zero_balance() { @@ -1737,7 +1443,7 @@ mod tests { bank.transfer(500, &alice.keypair(), bob.pubkey(), alice.last_id()) .unwrap(); assert_eq!(bank.get_balance(&bob.pubkey()), 500); - assert_eq!(bank.checkpoint_depth(), 0); + assert_eq!(bank.live_fork().checkpoint_depth(), 1); let account = bank.get_account(&alice.pubkey()).unwrap(); let default_account = Account::default(); @@ -1746,8 +1452,13 @@ mod tests { assert_eq!(account.executable, default_account.executable); assert_eq!(account.loader, default_account.loader); - bank.checkpoint(); - assert_eq!(bank.checkpoint_depth(), 1); + let base = bank.live_fork().head().fork_id(); + let last_id = hash(alice.last_id().as_ref()); + bank.live_fork().head().finalize(); + assert_eq!(bank.init_fork(base + 1, &alice.last_id(), base), Ok(())); + assert_eq!(bank.live_fork().head().fork_id(), base + 1); + bank.live_fork().head().register_tick(&last_id); + assert_eq!(bank.live_fork().checkpoint_depth(), 2); // charlie should have 500, alice should have 0 bank.transfer(500, &alice.keypair(), charlie.pubkey(), alice.last_id()) @@ -1763,249 +1474,54 @@ mod tests { assert_eq!(account.loader, default_account.loader); } - fn reserve_signature_with_last_id_test( - bank: &Bank, - sig: &Signature, - 
last_id: &Hash, - ) -> status_deque::Result<()> { - let mut last_ids = bank.last_ids.write().unwrap(); - last_ids.reserve_signature_with_last_id(last_id, sig) - } + // #[test] + // fn test_bank_storage() { + // solana_logger::setup(); + // let alice = Mint::new(1000); + // let bank = Bank::new(&alice); - #[test] - fn test_bank_checkpoint_rollback() { - let alice = Mint::new(10_000); - let bank = Bank::new(&alice); - let bob = Keypair::new(); - let charlie = Keypair::new(); + // let bob = Keypair::new(); + // let jack = Keypair::new(); + // let jill = Keypair::new(); - // bob should have 500 - bank.transfer(500, &alice.keypair(), bob.pubkey(), alice.last_id()) - .unwrap(); - assert_eq!(bank.get_balance(&bob.pubkey()), 500); - bank.transfer(500, &alice.keypair(), charlie.pubkey(), alice.last_id()) - .unwrap(); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.checkpoint_depth(), 0); + // let x = 42; + // let last_id = hash(&[x]); + // let x2 = x * 2; + // let storage_last_id = hash(&[x2]); - bank.checkpoint(); - bank.checkpoint(); - assert_eq!(bank.checkpoint_depth(), 2); - assert_eq!(bank.get_balance(&bob.pubkey()), 500); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 2); + // bank.tpu_register_tick(&last_id); - // transfer money back, so bob has zero - bank.transfer(500, &bob, alice.keypair().pubkey(), alice.last_id()) - .unwrap(); - // this has to be stored as zero in the top accounts hashmap ;) - assert_eq!(bank.get_balance(&bob.pubkey()), 0); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 3); - bank.rollback(); + // bank.transfer(10, &alice.keypair(), jill.pubkey(), last_id) + // .unwrap(); - // bob should have 500 again - assert_eq!(bank.get_balance(&bob.pubkey()), 500); - assert_eq!(bank.get_balance(&charlie.pubkey()), 500); - assert_eq!(bank.transaction_count(), 2); - assert_eq!(bank.checkpoint_depth(), 1); + // bank.transfer(10, 
&alice.keypair(), bob.pubkey(), last_id) + // .unwrap(); + // bank.transfer(10, &alice.keypair(), jack.pubkey(), last_id) + // .unwrap(); - let signature = Signature::default(); - for i in 0..MAX_ENTRY_IDS + 1 { - let last_id = hash(&serialize(&i).unwrap()); // Unique hash - bank.register_tick(&last_id); - } - assert_eq!(bank.tick_height(), MAX_ENTRY_IDS as u64 + 2); - assert_eq!( - reserve_signature_with_last_id_test(&bank, &signature, &alice.last_id()), - Err(StatusDequeError::LastIdNotFound) - ); - bank.rollback(); - assert_eq!(bank.tick_height(), 1); - assert_eq!( - reserve_signature_with_last_id_test(&bank, &signature, &alice.last_id()), - Ok(()) - ); - bank.checkpoint(); - assert_eq!( - reserve_signature_with_last_id_test(&bank, &signature, &alice.last_id()), - Err(StatusDequeError::DuplicateSignature) - ); - } - - #[test] - #[should_panic] - fn test_bank_rollback_panic() { - let alice = Mint::new(10_000); - let bank = Bank::new(&alice); - bank.rollback(); - } - - #[test] - fn test_bank_record_transactions() { - let mint = Mint::new(10_000); - let bank = Arc::new(Bank::new(&mint)); - let (entry_sender, entry_receiver) = channel(); - let poh_recorder = PohRecorder::new(bank.clone(), entry_sender, bank.last_id(), None); - let pubkey = Keypair::new().pubkey(); - - let transactions = vec![ - Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), - Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), - ]; - - let mut results = vec![Ok(()), Ok(())]; - bank.record_transactions(&transactions, &results, &poh_recorder) - .unwrap(); - let entries = entry_receiver.recv().unwrap(); - assert_eq!(entries[0].transactions.len(), transactions.len()); - - // ProgramErrors should still be recorded - results[0] = Err(BankError::ProgramError( - 1, - ProgramError::ResultWithNegativeTokens, - )); - bank.record_transactions(&transactions, &results, &poh_recorder) - .unwrap(); - let entries = entry_receiver.recv().unwrap(); - 
assert_eq!(entries[0].transactions.len(), transactions.len()); - - // Other BankErrors should not be recorded - results[0] = Err(BankError::AccountNotFound); - bank.record_transactions(&transactions, &results, &poh_recorder) - .unwrap(); - let entries = entry_receiver.recv().unwrap(); - assert_eq!(entries[0].transactions.len(), transactions.len() - 1); - } - - #[test] - fn test_bank_ignore_program_errors() { - let expected_results = vec![Ok(()), Ok(())]; - let results = vec![Ok(()), Ok(())]; - let updated_results = Bank::ignore_program_errors(results); - assert_eq!(updated_results, expected_results); - - let results = vec![ - Err(BankError::ProgramError( - 1, - ProgramError::ResultWithNegativeTokens, - )), - Ok(()), - ]; - let updated_results = Bank::ignore_program_errors(results); - assert_eq!(updated_results, expected_results); - - // Other BankErrors should not be ignored - let results = vec![Err(BankError::AccountNotFound), Ok(())]; - let updated_results = Bank::ignore_program_errors(results); - assert_ne!(updated_results, expected_results); - } - - #[test] - fn test_bank_storage() { - solana_logger::setup(); - let alice = Mint::new(1000); - let bank = Bank::new(&alice); - - let bob = Keypair::new(); - let jack = Keypair::new(); - let jill = Keypair::new(); - - let x = 42; - let last_id = hash(&[x]); - let x2 = x * 2; - let storage_last_id = hash(&[x2]); + // let tx = Transaction::storage_new_advertise_last_id( + // &bob, + // storage_last_id, + // last_id, + // ENTRIES_PER_SEGMENT, + // ); - bank.register_tick(&last_id); + // assert!(bank.process_transaction(&tx).is_ok()); - bank.transfer(10, &alice.keypair(), jill.pubkey(), last_id) - .unwrap(); - - bank.transfer(10, &alice.keypair(), bob.pubkey(), last_id) - .unwrap(); - bank.transfer(10, &alice.keypair(), jack.pubkey(), last_id) - .unwrap(); - - let tx = Transaction::storage_new_advertise_last_id( - &bob, - storage_last_id, - last_id, - ENTRIES_PER_SEGMENT, - ); - - 
assert!(bank.process_transaction(&tx).is_ok()); - - let entry_height = 0; - - let tx = Transaction::storage_new_mining_proof( - &jack, - Hash::default(), - last_id, - entry_height, - Signature::default(), - ); + // let entry_height = 0; - assert!(bank.process_transaction(&tx).is_ok()); + // let tx = Transaction::storage_new_mining_proof( + // &jack, + // Hash::default(), + // last_id, + // entry_height, + // Signature::default(), + // ); - assert_eq!(bank.get_storage_entry_height(), ENTRIES_PER_SEGMENT); - assert_eq!(bank.get_storage_last_id(), storage_last_id); - assert_eq!(bank.get_pubkeys_for_entry_height(0), vec![]); - } - - #[test] - fn test_bank_process_and_record_transactions() { - let mint = Mint::new(10_000); - let bank = Arc::new(Bank::new(&mint)); - let pubkey = Keypair::new().pubkey(); - - let transactions = vec![Transaction::system_move( - &mint.keypair(), - pubkey, - 1, - mint.last_id(), - 0, - )]; - - let (entry_sender, entry_receiver) = channel(); - let mut poh_recorder = PohRecorder::new( - bank.clone(), - entry_sender, - bank.last_id(), - Some(bank.tick_height() + 1), - ); - - bank.process_and_record_transactions(&transactions, &poh_recorder) - .unwrap(); - poh_recorder.tick().unwrap(); - - let mut need_tick = true; - // read entries until I find mine, might be ticks... 
- while need_tick { - let entries = entry_receiver.recv().unwrap(); - for entry in entries { - if !entry.is_tick() { - assert_eq!(entry.transactions.len(), transactions.len()); - assert_eq!(bank.get_balance(&pubkey), 1); - } else { - need_tick = false; - } - } - } - - let transactions = vec![Transaction::system_move( - &mint.keypair(), - pubkey, - 2, - mint.last_id(), - 0, - )]; - - assert_eq!( - bank.process_and_record_transactions(&transactions, &poh_recorder), - Err(BankError::RecordFailure) - ); - - assert_eq!(bank.get_balance(&pubkey), 1); - } + // assert!(bank.process_transaction(&tx).is_ok()); + // assert_eq!(bank.get_storage_entry_height(), ENTRIES_PER_SEGMENT); + // assert_eq!(bank.get_storage_last_id(), storage_last_id); + // assert_eq!(bank.get_pubkeys_for_entry_height(0), vec![]); + // } } diff --git a/src/bank_state.rs b/src/bank_state.rs new file mode 100644 index 00000000000000..ffeea45cd4cbde --- /dev/null +++ b/src/bank_state.rs @@ -0,0 +1,803 @@ +use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders}; +use crate::bank::{BankError, Result}; +use crate::counter::Counter; +use crate::entry::Entry; +use crate::last_id_queue::{LastIdQueue, MAX_ENTRY_IDS}; +use crate::leader_scheduler::TICKS_PER_BLOCK; +use crate::poh_recorder::PohRecorder; +use crate::runtime::{self, RuntimeError}; +use crate::status_cache::StatusCache; +use log::Level; +use rayon::prelude::*; +use solana_native_loader; +use solana_sdk::account::Account; +use solana_sdk::hash::Hash; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::signature::Signature; +use solana_sdk::timing::duration_as_us; +use solana_sdk::transaction::Transaction; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::sync::RwLock; +use std::time::Instant; + +pub struct BankCheckpoint { + /// accounts database + pub accounts: Accounts, + /// entries + entry_q: RwLock, + /// status cache + status_cache: RwLock, + finalized: AtomicBool, + fork_id: 
AtomicUsize, +} + +impl std::fmt::Debug for BankCheckpoint { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "BankCheckpoint {{ fork_id: {} }}", self.fork_id()) + } +} + +impl BankCheckpoint { + // last_id id is used by the status_cache to filter duplicate signatures + pub fn new(fork_id: u64, last_id: &Hash) -> Self { + BankCheckpoint { + accounts: Accounts::default(), + entry_q: RwLock::new(LastIdQueue::default()), + status_cache: RwLock::new(StatusCache::new(last_id)), + finalized: AtomicBool::new(false), + fork_id: AtomicUsize::new(fork_id as usize), + } + } + /// Create an Bank using a deposit. + pub fn new_from_accounts(fork: u64, accounts: &[(Pubkey, Account)], last_id: &Hash) -> Self { + let bank_state = BankCheckpoint::new(fork, last_id); + for (to, account) in accounts { + bank_state.accounts.store_slow(false, &to, &account); + } + bank_state + } + pub fn store_slow(&self, purge: bool, pubkey: &Pubkey, account: &Account) { + self.accounts.store_slow(purge, pubkey, account) + } + + /// Forget all signatures. Useful for benchmarking. + pub fn clear_signatures(&self) { + self.entry_q.write().unwrap().clear(); + self.status_cache.write().unwrap().clear(); + } + /// Return the last entry ID registered. 
+ pub fn last_id(&self) -> Hash { + self.entry_q + .read() + .unwrap() + .last_id + .expect("no last_id has been set") + } + + pub fn transaction_count(&self) -> u64 { + self.accounts.transaction_count() + } + pub fn finalize(&self) { + self.finalized.store(true, Ordering::Relaxed); + } + pub fn finalized(&self) -> bool { + self.finalized.load(Ordering::Relaxed) + } + + /// Look through the last_ids and find all the valid ids + /// This is batched to avoid holding the lock for a significant amount of time + /// + /// Return a vec of tuple of (valid index, timestamp) + /// index is into the passed ids slice to avoid copying hashes + pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> { + let entry_q = self.entry_q.read().unwrap(); + entry_q.count_valid_ids(ids) + } + + /// Looks through a list of tick heights and stakes, and finds the latest + /// tick that has achieved finality + pub fn get_confirmation_timestamp( + &self, + ticks_and_stakes: &mut [(u64, u64)], + supermajority_stake: u64, + ) -> Option { + let entry_q = self.entry_q.read().unwrap(); + entry_q.get_confirmation_timestamp(ticks_and_stakes, supermajority_stake) + } + pub fn get_signature_status(&self, signature: &Signature) -> Option> { + self.status_cache + .read() + .unwrap() + .get_signature_status(signature) + } + pub fn has_signature(&self, signature: &Signature) -> bool { + self.status_cache.read().unwrap().has_signature(signature) + } + + pub fn tick_height(&self) -> u64 { + self.entry_q.read().unwrap().tick_height + } + + /// Tell the bank which Entry IDs exist on the ledger. This function + /// assumes subsequent calls correspond to later entries, and will boot + /// the oldest ones once its internal cache is full. Once boot, the + /// bank will reject transactions using that `last_id`. 
+ pub fn register_tick(&self, last_id: &Hash) { + let mut entry_q = self.entry_q.write().unwrap(); + inc_new_counter_info!("bank-register_tick-registered", 1); + entry_q.register_tick(last_id) + } + fn lock_accounts(&self, txs: &[Transaction]) -> Vec> { + self.accounts.lock_accounts(txs) + } + fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) { + self.accounts.unlock_accounts(txs, results) + } + + /// Check the transactions last_id age. + fn check_last_id_age( + &self, + txs: &[Transaction], + max_age: usize, + results: Vec>, + error_counters: &mut ErrorCounters, + ) -> Vec> { + let entry_q = self.entry_q.read().unwrap(); + txs.iter() + .zip(results.into_iter()) + .map(|etx| match etx { + (tx, Ok(())) if entry_q.check_entry_id_age(tx.last_id, max_age) => Ok(()), + (_, Ok(())) => { + error_counters.last_id_too_old += 1; + Err(BankError::LastIdNotFound) + } + (_, Err(e)) => Err(e), + }) + .collect() + } + + /// Add signature to the current checkpoint + fn add_signatures(&self, txs: &[Transaction], results: &[Result<()>]) { + let mut status_cache = self.status_cache.write().unwrap(); + for (i, tx) in txs.iter().enumerate() { + match results[i] { + Err(BankError::LastIdNotFound) => (), + Err(BankError::DuplicateSignature) => (), + Err(BankError::AccountNotFound) => (), + _ => status_cache.add(&tx.signatures[0]), + } + } + } + + pub fn first_err(results: &[Result<()>]) -> Result<()> { + for r in results { + r.clone()?; + } + Ok(()) + } + + fn update_transaction_statuses(&self, txs: &[Transaction], res: &[Result<()>]) { + let mut status_cache = self.status_cache.write().unwrap(); + for (i, tx) in txs.iter().enumerate() { + match res[i] { + Ok(_) => (), + Err(BankError::LastIdNotFound) => (), + Err(BankError::DuplicateSignature) => (), + Err(BankError::AccountNotFound) => (), + _ => status_cache + .save_failure_status(&tx.signatures[0], res[i].clone().err().unwrap()), + } + if res[i].is_err() {} + } + } + pub fn hash_internal_state(&self) -> Hash { + 
self.accounts.hash_internal_state() + } + + pub fn fork_id(&self) -> u64 { + self.fork_id.load(Ordering::Relaxed) as u64 + } + /// create a new fork for the bank state + pub fn fork(&self, fork_id: u64, last_id: &Hash) -> Self { + Self { + accounts: Accounts::default(), + entry_q: RwLock::new(self.entry_q.read().unwrap().fork()), + status_cache: RwLock::new(StatusCache::new(last_id)), + finalized: AtomicBool::new(false), + fork_id: AtomicUsize::new(fork_id as usize), + } + } + /// consume the checkpoint into the trunk state + /// self becomes the new trunk and its fork_id is updated + pub fn merge_into_trunk(&self, other: Self) { + let (accounts, entry_q, status_cache, fork_id) = { + ( + other.accounts, + other.entry_q, + other.status_cache, + other.fork_id, + ) + }; + self.accounts.merge_into_trunk(accounts); + self.entry_q + .write() + .unwrap() + .merge_into_trunk(entry_q.into_inner().unwrap()); + self.status_cache + .write() + .unwrap() + .merge_into_trunk(status_cache.into_inner().unwrap()); + self.fork_id + .store(fork_id.load(Ordering::Relaxed), Ordering::Relaxed); + } +} + +pub struct BankState { + pub checkpoints: Vec>, +} + +impl BankState { + pub fn head(&self) -> &Arc { + self.checkpoints + .first() + .expect("at least 1 checkpoint needs to be available for the state") + } + pub fn load_slow(&self, pubkey: &Pubkey) -> Option { + let accounts: Vec<&Accounts> = self.checkpoints.iter().map(|c| &c.accounts).collect(); + Accounts::load_slow(&accounts, pubkey) + } + + fn load_accounts( + &self, + txs: &[Transaction], + results: Vec>, + error_counters: &mut ErrorCounters, + ) -> Vec> { + let accounts: Vec<&Accounts> = self.checkpoints.iter().map(|c| &c.accounts).collect(); + Accounts::load_accounts(&accounts, txs, results, error_counters) + } + + /// Look through all the checkpoints to check for a duplicate signature + fn check_duplicate_signatures( + &self, + txs: &[Transaction], + mut results: Vec>, + error_counters: &mut ErrorCounters, + ) -> Vec> { + for c 
in &self.checkpoints { + let status_cache = c.status_cache.read().unwrap(); + for (i, tx) in txs.iter().enumerate() { + if results[i] == Ok(()) && status_cache.has_signature(&tx.signatures[0]) { + results[i] = Err(BankError::DuplicateSignature); + error_counters.duplicate_signature += 1; + } + } + } + results + } + + /// Process a batch of transactions. + #[allow(clippy::type_complexity)] + fn load_and_execute_transactions( + &self, + txs: &[Transaction], + lock_results: Vec>, + max_age: usize, + ) -> ( + Vec>, + Vec>, + ) { + let head = &self.checkpoints[0]; + debug!("processing transactions: {}", txs.len()); + let mut error_counters = ErrorCounters::default(); + let now = Instant::now(); + let results = head.check_last_id_age(txs, max_age, lock_results, &mut error_counters); + let results = self.check_duplicate_signatures(txs, results, &mut error_counters); + head.add_signatures(txs, &results); + let mut loaded_accounts = self.load_accounts(txs, results, &mut error_counters); + let tick_height = head.tick_height(); + + let load_elapsed = now.elapsed(); + let now = Instant::now(); + let executed: Vec> = loaded_accounts + .iter_mut() + .zip(txs.iter()) + .map(|(accs, tx)| match accs { + Err(e) => Err(e.clone()), + Ok((ref mut accounts, ref mut loaders)) => { + runtime::execute_transaction(tx, loaders, accounts, tick_height).map_err( + |RuntimeError::ProgramError(index, err)| { + BankError::ProgramError(index, err) + }, + ) + } + }) + .collect(); + + let execution_elapsed = now.elapsed(); + + debug!( + "load: {}us execute: {}us txs_len={}", + duration_as_us(&load_elapsed), + duration_as_us(&execution_elapsed), + txs.len(), + ); + let mut tx_count = 0; + let mut err_count = 0; + for (r, tx) in executed.iter().zip(txs.iter()) { + if r.is_ok() { + tx_count += 1; + } else { + if err_count == 0 { + info!("tx error: {:?} {:?}", r, tx); + } + err_count += 1; + } + } + if err_count > 0 { + info!("{} errors of {} txs", err_count, err_count + tx_count); + 
inc_new_counter_info!( + "bank-process_transactions-account_not_found", + error_counters.account_not_found + ); + inc_new_counter_info!("bank-process_transactions-error_count", err_count); + } + + head.accounts.increment_transaction_count(tx_count); + + inc_new_counter_info!("bank-process_transactions-txs", tx_count); + if 0 != error_counters.last_id_not_found { + inc_new_counter_info!( + "bank-process_transactions-error-last_id_not_found", + error_counters.last_id_not_found + ); + } + if 0 != error_counters.reserve_last_id { + inc_new_counter_info!( + "bank-process_transactions-error-reserve_last_id", + error_counters.reserve_last_id + ); + } + if 0 != error_counters.duplicate_signature { + inc_new_counter_info!( + "bank-process_transactions-error-duplicate_signature", + error_counters.duplicate_signature + ); + } + if 0 != error_counters.insufficient_funds { + inc_new_counter_info!( + "bank-process_transactions-error-insufficient_funds", + error_counters.insufficient_funds + ); + } + (loaded_accounts, executed) + } + + fn commit_transactions( + &self, + txs: &[Transaction], + loaded_accounts: &[Result<(InstructionAccounts, InstructionLoaders)>], + executed: &[Result<()>], + ) { + let head = &self.checkpoints[0]; + let now = Instant::now(); + let purge = self.checkpoints.len() == 1; + head.accounts + .store_accounts(purge, txs, executed, loaded_accounts); + + // TODO: Thread subscriptions here + // // Check account subscriptions and send notifications + // if let Some(subs) = maybe_subs { + // subs.send_account_notifications(txs, executed, loaded_accounts); + // } + + // once committed there is no way to unroll + let write_elapsed = now.elapsed(); + debug!( + "store: {}us txs_len={}", + duration_as_us(&write_elapsed), + txs.len(), + ); + head.update_transaction_statuses(txs, &executed); + } + + /// Process a batch of transactions. 
+ #[must_use] + pub fn load_execute_and_commit_transactions( + &self, + txs: &[Transaction], + lock_results: Vec>, + max_age: usize, + ) -> Vec> { + let (loaded_accounts, executed) = + self.load_and_execute_transactions(txs, lock_results, max_age); + + self.commit_transactions(txs, &loaded_accounts, &executed); + executed + } + + pub fn process_and_record_transactions( + &self, + txs: &[Transaction], + recorder: Option<&PohRecorder>, + ) -> Result<(Vec>)> { + let now = Instant::now(); + // Once accounts are locked, other threads cannot encode transactions that will modify the + // same account state + let head = &self.checkpoints[0]; + let lock_results = head.lock_accounts(txs); + let lock_time = now.elapsed(); + + let now = Instant::now(); + // Use a shorter maximum age when adding transactions into the pipeline. This will reduce + // the likelihood of any single thread getting starved and processing old ids. + // TODO: Banking stage threads should be prioritized to complete faster then this queue + // expires. 
+ let (loaded_accounts, results) = + self.load_and_execute_transactions(txs, lock_results, MAX_ENTRY_IDS as usize / 2); + let load_execute_time = now.elapsed(); + + let record_time = { + let now = Instant::now(); + if let Some(poh) = recorder { + Self::record_transactions(txs, &results, poh)?; + } + now.elapsed() + }; + + let commit_time = { + let now = Instant::now(); + self.commit_transactions(txs, &loaded_accounts, &results); + now.elapsed() + }; + + let now = Instant::now(); + // Once the accounts are new transactions can enter the pipeline to process them + head.unlock_accounts(&txs, &results); + let unlock_time = now.elapsed(); + debug!( + "lock: {}us load_execute: {}us record: {}us commit: {}us unlock: {}us txs_len: {}", + duration_as_us(&lock_time), + duration_as_us(&load_execute_time), + duration_as_us(&record_time), + duration_as_us(&commit_time), + duration_as_us(&unlock_time), + txs.len(), + ); + Ok(results) + } + fn ignore_program_errors(results: Vec>) -> Vec> { + results + .into_iter() + .map(|result| match result { + // Entries that result in a ProgramError are still valid and are written in the + // ledger so map them to an ok return value + Err(BankError::ProgramError(index, err)) => { + info!("program error {:?}, {:?}", index, err); + inc_new_counter_info!("bank-ignore_program_err", 1); + Ok(()) + } + _ => result, + }) + .collect() + } + pub fn par_execute_entries(&self, entries: &[(&Entry, Vec>)]) -> Result<()> { + let head = &self.checkpoints[0]; + inc_new_counter_info!("bank-par_execute_entries-count", entries.len()); + let results: Vec> = entries + .into_par_iter() + .map(|(e, locks)| { + // Fork sanity check + //TODO: this sanity check needs to be fixed once forks contain the PoH ticks that + //connect them to the previous fork. We need a way to identify the fork from the + //entry itself, or have that information passed through. 
+ assert_eq!(e.tick_height / TICKS_PER_BLOCK, head.fork_id()); + let results = self.load_execute_and_commit_transactions( + &e.transactions, + locks.to_vec(), + MAX_ENTRY_IDS, + ); + let results = Bank::ignore_program_errors(old_results); + head.unlock_accounts(&e.transactions, &results); + BankCheckpoint::first_err(&results) + }) + .collect(); + BankCheckpoint::first_err(&results) + } + + /// process entries in parallel + /// The entries must be for the same checkpoint + /// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry + /// 2. Process the locked group in parallel + /// 3. Register the `Tick` if it's available, goto 1 + pub fn par_process_entries(&self, entries: &[Entry]) -> Result<()> { + let head = &self.checkpoints[0]; + inc_new_counter_info!("bank-par_process_entries-count", entries.len()); + // accumulator for entries that can be processed in parallel + let mut mt_group = vec![]; + for entry in entries { + if entry.is_tick() { + // if its a tick, execute the group and register the tick + self.par_execute_entries(&mt_group)?; + head.register_tick(&entry.id); + + mt_group = vec![]; + continue; + } + // try to lock the accounts + let locked = head.lock_accounts(&entry.transactions); + // if any of the locks error out + // execute the current group + if BankCheckpoint::first_err(&locked).is_err() { + self.par_execute_entries(&mt_group)?; + mt_group = vec![]; + //reset the lock and push the entry + head.unlock_accounts(&entry.transactions, &locked); + let locked = head.lock_accounts(&entry.transactions); + mt_group.push((entry, locked)); + } else { + // push the entry to the mt_group + mt_group.push((entry, locked)); + } + } + self.par_execute_entries(&mt_group)?; + + if !entries.is_empty() { + let finish = entries.last().unwrap().tick_height; + // Fork sanity check + //TODO: same as the other fork sanity check + assert_eq!(finish / TICKS_PER_BLOCK, head.fork_id()); + if (finish + 1) / TICKS_PER_BLOCK != head.fork_id() { + 
head.finalize(); + } + } + Ok(()) + } + + fn record_transactions( + txs: &[Transaction], + results: &[Result<()>], + poh: &PohRecorder, + ) -> Result<()> { + let processed_transactions: Vec<_> = results + .iter() + .zip(txs.iter()) + .filter_map(|(r, x)| match r { + Ok(_) => Some(x.clone()), + Err(ref e) => { + debug!("process transaction failed {:?}", e); + None + } + }) + .collect(); + // unlock all the accounts with errors which are filtered by the above `filter_map` + if !processed_transactions.is_empty() { + let hash = Transaction::hash(&processed_transactions); + debug!("processed ok: {} {}", processed_transactions.len(), hash); + // record and unlock will unlock all the successfull transactions + poh.record(hash, processed_transactions).map_err(|e| { + warn!("record failure: {:?}", e); + BankError::RecordFailure + })?; + } + Ok(()) + } + pub fn get_signature_status(&self, sig: &Signature) -> Option> { + let checkpoints: Vec<_> = self + .checkpoints + .iter() + .map(|c| c.status_cache.read().unwrap()) + .collect(); + StatusCache::get_signature_status_all(&checkpoints, sig) + } + pub fn has_signature(&self, sig: &Signature) -> bool { + let checkpoints: Vec<_> = self + .checkpoints + .iter() + .map(|c| c.status_cache.read().unwrap()) + .collect(); + StatusCache::has_signature_all(&checkpoints, sig) + } + pub fn checkpoint_depth(&self) -> usize { + self.checkpoints.len() + } +} + +#[cfg(test)] +mod test { + use super::*; + use solana_sdk::signature::Keypair; + use solana_sdk::signature::KeypairUtil; + use solana_sdk::system_program; + use solana_sdk::system_transaction::SystemTransaction; + + /// Create, sign, and process a Transaction from `keypair` to `to` of + /// `n` tokens where `last_id` is the last Entry ID observed by the client. 
+ pub fn transfer( + bank: &BankState, + n: u64, + keypair: &Keypair, + to: Pubkey, + last_id: Hash, + ) -> Result { + let tx = Transaction::system_new(keypair, to, n, last_id); + let signature = tx.signatures[0]; + let e = bank.process_and_record_transactions(&[tx], None)?; + match &e[0] { + Ok(_) => Ok(signature), + Err(e) => Err(e.clone()), + } + } + + fn new_state(mint: &Keypair, tokens: u64, last_id: &Hash) -> BankState { + let accounts = [(mint.pubkey(), Account::new(tokens, 0, Pubkey::default()))]; + let bank = Arc::new(BankCheckpoint::new_from_accounts(0, &accounts, &last_id)); + BankState { + checkpoints: vec![bank], + } + } + + fn add_system_program(checkpoint: &BankCheckpoint) { + let system_program_account = Account { + tokens: 1, + owner: system_program::id(), + userdata: b"solana_system_program".to_vec(), + executable: true, + loader: solana_native_loader::id(), + }; + checkpoint.store_slow(false, &system_program::id(), &system_program_account); + } + + #[test] + fn test_interleaving_locks() { + let last_id = Hash::default(); + let mint = Keypair::new(); + let alice = Keypair::new(); + let bob = Keypair::new(); + let bank = new_state(&mint, 3, &last_id); + bank.head().register_tick(&last_id); + add_system_program(bank.head()); + + let tx1 = Transaction::system_new(&mint, alice.pubkey(), 1, last_id); + let pay_alice = vec![tx1]; + + let locked_alice = bank.head().lock_accounts(&pay_alice); + assert!(locked_alice[0].is_ok()); + let results_alice = + bank.load_execute_and_commit_transactions(&pay_alice, locked_alice, MAX_ENTRY_IDS); + assert_eq!(results_alice[0], Ok(())); + + // try executing an interleaved transfer twice + assert_eq!( + transfer(&bank, 1, &mint, bob.pubkey(), last_id), + Err(BankError::AccountInUse) + ); + // the second time should fail as well + // this verifies that `unlock_accounts` doesn't unlock `AccountInUse` accounts + assert_eq!( + transfer(&bank, 1, &mint, bob.pubkey(), last_id), + Err(BankError::AccountInUse) + ); + + 
bank.head().unlock_accounts(&pay_alice, &results_alice); + + assert_matches!(transfer(&bank, 2, &mint, bob.pubkey(), last_id), Ok(_)); + } + #[test] + fn test_bank_ignore_program_errors() { + let expected_results = vec![Ok(()), Ok(())]; + let results = vec![Ok(()), Ok(())]; + let updated_results = BankState::ignore_program_errors(results); + assert_eq!(updated_results, expected_results); + + let results = vec![ + Err(BankError::ProgramError( + 1, + ProgramError::ResultWithNegativeTokens, + )), + Ok(()), + ]; + let updated_results = BankState::ignore_program_errors(results); + assert_eq!(updated_results, expected_results); + + // Other BankErrors should not be ignored + let results = vec![Err(BankError::AccountNotFound), Ok(())]; + let updated_results = BankState::ignore_program_errors(results); + assert_ne!(updated_results, expected_results); + } + + //#[test] + //fn test_bank_record_transactions() { + // let mint = Mint::new(10_000); + // let bank = Arc::new(Bank::new(&mint)); + // let (entry_sender, entry_receiver) = channel(); + // let poh_recorder = PohRecorder::new(bank.clone(), entry_sender, bank.last_id(), None); + // let pubkey = Keypair::new().pubkey(); + + // let transactions = vec![ + // Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), + // Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), + // ]; + + // let mut results = vec![Ok(()), Ok(())]; + // BankStater::record_transactions(&transactions, &results, &poh_recorder) + // .unwrap(); + // let entries = entry_receiver.recv().unwrap(); + // assert_eq!(entries[0].transactions.len(), transactions.len()); + + // // ProgramErrors should still be recorded + // results[0] = Err(BankError::ProgramError( + // 1, + // ProgramError::ResultWithNegativeTokens, + // )); + // BankState::record_transactions(&transactions, &results, &poh_recorder) + // .unwrap(); + // let entries = entry_receiver.recv().unwrap(); + // assert_eq!(entries[0].transactions.len(), 
transactions.len()); + + // // Other BankErrors should not be recorded + // results[0] = Err(BankError::AccountNotFound); + // BankState::record_transactions(&transactions, &results, &poh_recorder) + // .unwrap(); + // let entries = entry_receiver.recv().unwrap(); + // assert_eq!(entries[0].transactions.len(), transactions.len() - 1); + //} + // + // #[test] + // fn test_bank_process_and_record_transactions() { + // let mint = Mint::new(10_000); + // let bank = Arc::new(Bank::new(&mint)); + // let pubkey = Keypair::new().pubkey(); + + // let transactions = vec![Transaction::system_move( + // &mint.keypair(), + // pubkey, + // 1, + // mint.last_id(), + // 0, + // )]; + + // let (entry_sender, entry_receiver) = channel(); + // let mut poh_recorder = PohRecorder::new( + // bank.clone(), + // entry_sender, + // bank.last_id(), + // Some(bank.tick_height() + 1), + // ); + + // bank.process_and_record_transactions(&transactions, &poh_recorder) + // .unwrap(); + // poh_recorder.tick().unwrap(); + + // let mut need_tick = true; + // // read entries until I find mine, might be ticks... 
+ // while need_tick { + // let entries = entry_receiver.recv().unwrap(); + // for entry in entries { + // if !entry.is_tick() { + // assert_eq!(entry.transactions.len(), transactions.len()); + // assert_eq!(bank.get_balance(&pubkey), 1); + // } else { + // need_tick = false; + // } + // } + // } + + // let transactions = vec![Transaction::system_move( + // &mint.keypair(), + // pubkey, + // 2, + // mint.last_id(), + // 0, + // )]; + + // assert_eq!( + // bank.process_and_record_transactions(&transactions, &poh_recorder), + // Err(BankError::RecordFailure) + // ); + + // assert_eq!(bank.get_balance(&pubkey), 1); + // } + +} diff --git a/src/banking_stage.rs b/src/banking_stage.rs index 3ad63e8d033f2b..bc825fbd4ad27e 100644 --- a/src/banking_stage.rs +++ b/src/banking_stage.rs @@ -155,7 +155,7 @@ impl BankingStage { while chunk_start != transactions.len() { let chunk_end = chunk_start + Entry::num_will_fit(&transactions[chunk_start..]); - bank.process_and_record_transactions(&transactions[chunk_start..chunk_end], poh)?; + bank.process_and_record_transactions(&transactions[chunk_start..chunk_end], Some(poh))?; chunk_start = chunk_end; } diff --git a/src/bloom.rs b/src/bloom.rs index f81f0730f2d1f9..c6b943b6bdc845 100644 --- a/src/bloom.rs +++ b/src/bloom.rs @@ -45,7 +45,7 @@ impl Bloom { key.hash_at_index(k) % self.bits.len() } pub fn clear(&mut self) { - self.bits.clear(); + self.bits = BitVec::new_fill(false, self.bits.len()); } pub fn add(&mut self, key: &T) { for k in &self.keys { @@ -53,7 +53,7 @@ impl Bloom { self.bits.set(pos, true); } } - pub fn contains(&mut self, key: &T) -> bool { + pub fn contains(&self, key: &T) -> bool { for k in &self.keys { let pos = self.pos(key, *k); if !self.bits.get(pos) { @@ -64,30 +64,6 @@ impl Bloom { } } -//fn to_slice(v: u64) -> [u8; 8] { -// [ -// v as u8, -// (v >> 8) as u8, -// (v >> 16) as u8, -// (v >> 24) as u8, -// (v >> 32) as u8, -// (v >> 40) as u8, -// (v >> 48) as u8, -// (v >> 56) as u8, -// ] -//} - -//fn 
from_slice(v: &[u8]) -> u64 { -// u64::from(v[0]) -// | u64::from(v[1]) << 8 -// | u64::from(v[2]) << 16 -// | u64::from(v[3]) << 24 -// | u64::from(v[4]) << 32 -// | u64::from(v[5]) << 40 -// | u64::from(v[6]) << 48 -// | u64::from(v[7]) << 56 -//} -// fn slice_hash(slice: &[u8], hash_index: u64) -> u64 { let mut hasher = FnvHasher::with_key(hash_index); hasher.write(slice); @@ -104,15 +80,6 @@ impl> BloomHashIndex for T { mod test { use super::*; use solana_sdk::hash::{hash, Hash}; - // #[test] - // fn test_slice() { - // assert_eq!(from_slice(&to_slice(10)), 10); - // assert_eq!(from_slice(&to_slice(0x7fff7fff)), 0x7fff7fff); - // assert_eq!( - // from_slice(&to_slice(0x7fff7fff7fff7fff)), - // 0x7fff7fff7fff7fff - // ); - // } #[test] fn test_bloom_filter() { diff --git a/src/checkpoint.rs b/src/checkpoint.rs deleted file mode 100644 index 9a5355f7469edc..00000000000000 --- a/src/checkpoint.rs +++ /dev/null @@ -1,14 +0,0 @@ -pub trait Checkpoint { - /// add a checkpoint to this data at current state - fn checkpoint(&mut self); - - /// rollback to previous state, panics if no prior checkpoint - fn rollback(&mut self); - - /// cull checkpoints to depth, that is depth of zero means - /// no checkpoints, only current state - fn purge(&mut self, depth: usize); - - /// returns the number of checkpoints - fn depth(&self) -> usize; -} diff --git a/src/checkpoints.rs b/src/checkpoints.rs new file mode 100644 index 00000000000000..f0a832411d12c7 --- /dev/null +++ b/src/checkpoints.rs @@ -0,0 +1,141 @@ +//! Simple data structure to keep track of checkpointed state. It stores a map of forks to a type +//! and parent forks. +//! +//! `latest` forks is a set of all the forks with no children. +//! +//! A trunk is the latest fork that is a parent of all the `latest` forks. If consensus works correctly, then latest should be pruned such that only one trunk exists within N links. 
+ +use hashbrown::{HashMap, HashSet}; +use std::collections::VecDeque; + +pub struct Checkpoints { + /// Stores a map from fork to a T and a parent fork + pub checkpoints: HashMap, + /// The latest forks that have been added + pub latest: HashSet, +} + +impl Checkpoints { + pub fn is_empty(&self) -> bool { + self.checkpoints.is_empty() + } + pub fn load(&self, fork: u64) -> Option<&(T, u64)> { + self.checkpoints.get(&fork) + } + pub fn store(&mut self, fork: u64, data: T, trunk: u64) { + self.latest.remove(&trunk); + self.latest.insert(fork); + self.insert(fork, data, trunk); + } + pub fn insert(&mut self, fork: u64, data: T, trunk: u64) { + self.checkpoints.insert(fork, (data, trunk)); + } + /// Given a base fork, and a maximum number, collect all the + /// forks starting from the base fork backwards + pub fn collect(&self, num: usize, mut base: u64) -> Vec<(u64, &T)> { + let mut rv = vec![]; + loop { + if rv.len() == num { + break; + } + if let Some((val, next)) = self.load(base) { + rv.push((base, val)); + base = *next; + } else { + break; + } + } + rv + } + + ///invert the dag + pub fn invert(&self) -> HashMap> { + let mut idag = HashMap::new(); + for (k, (_, v)) in &self.checkpoints { + idag.entry(*v).or_insert(HashSet::new()).insert(*k); + } + idag + } + + ///create a new Checkpoints tree that only derives from the trunk + pub fn prune(&self, trunk: u64, inverse: &HashMap>) -> Self { + let mut new = Self::default(); + // simple BFS + let mut queue = VecDeque::new(); + queue.push_back(trunk); + loop { + if queue.is_empty() { + break; + } + let trunk = queue.pop_front().unwrap(); + let (data, prev) = self.load(trunk).expect("load from inverse").clone(); + new.store(trunk, data.clone(), prev); + if let Some(children) = inverse.get(&trunk) { + let mut next = children.into_iter().map(|x| *x).collect(); + queue.append(&mut next); + } + } + new + } +} + +impl Default for Checkpoints { + fn default() -> Self { + Self { + checkpoints: HashMap::new(), + latest: 
HashSet::new(), + } + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_new() { + let cp: Checkpoints = Checkpoints::default(); + assert!(cp.is_empty()); + } + + #[test] + fn test_load_store() { + let mut cp: Checkpoints = Checkpoints::default(); + assert_eq!(cp.load(1), None); + cp.store(1, true, 0); + assert_eq!(cp.load(1), Some(&(true, 0))); + } + #[test] + fn test_collect() { + let mut cp: Checkpoints = Checkpoints::default(); + assert_eq!(cp.load(1), None); + cp.store(1, true, 0); + assert_eq!(cp.collect(0, 1), vec![]); + assert_eq!(cp.collect(1, 1), vec![(1, &true)]); + } + #[test] + fn test_invert() { + let mut cp: Checkpoints = Checkpoints::default(); + assert_eq!(cp.load(1), None); + cp.store(1, true, 0); + cp.store(2, true, 0); + let inverse = cp.invert(); + assert_eq!(inverse.len(), 1); + assert_eq!(inverse[&0].len(), 2); + let list: Vec = inverse[&0].iter().cloned().collect(); + assert_eq!(list, vec![1, 2]); + } + #[test] + fn test_prune() { + let mut cp: Checkpoints = Checkpoints::default(); + assert_eq!(cp.load(1), None); + cp.store(1, true, 0); + cp.store(2, true, 0); + cp.store(3, true, 1); + let inverse = cp.invert(); + let pruned = cp.prune(1, &inverse); + assert_eq!(pruned.load(0), None); + assert_eq!(pruned.load(1), Some(&(true, 0))); + assert_eq!(pruned.load(2), None); + assert_eq!(pruned.load(3), Some(&(true, 1))); + } +} diff --git a/src/compute_leader_confirmation_service.rs b/src/compute_leader_confirmation_service.rs index 73d8b66cad228a..084b5dd8b30d14 100644 --- a/src/compute_leader_confirmation_service.rs +++ b/src/compute_leader_confirmation_service.rs @@ -38,11 +38,10 @@ impl ComputeLeaderConfirmationService { // Hold an accounts_db read lock as briefly as possible, just long enough to collect all // the vote states - let vote_states: Vec = bank - .accounts - .accounts_db - .read() - .unwrap() + // TODO: do we use trunk or live fork here? 
+ let state = bank.trunk_fork(); + let accounts = state.head().accounts.accounts_db.read().unwrap(); + let vote_states: Vec = accounts .accounts .values() .filter_map(|account| { @@ -181,7 +180,7 @@ pub mod tests { let ids: Vec<_> = (0..10) .map(|i| { let last_id = hash(&serialize(&i).unwrap()); // Unique hash - bank.register_tick(&last_id); + bank.tpu_register_tick(&last_id); // sleep to get a different timestamp in the bank sleep(Duration::from_millis(1)); last_id diff --git a/src/crds_gossip_pull.rs b/src/crds_gossip_pull.rs index ee1e71a78cddd7..918cc81a119475 100644 --- a/src/crds_gossip_pull.rs +++ b/src/crds_gossip_pull.rs @@ -371,7 +371,7 @@ mod test { // there is a chance of a false positive with bloom filters // assert that purged value is still in the set // chance of 30 consecutive false positives is 0.1^30 - let mut filter = node.build_crds_filter(&node_crds); + let filter = node.build_crds_filter(&node_crds); assert!(filter.contains(&value_hash)); } diff --git a/src/entry.rs b/src/entry.rs index e469c3f1b9afe1..78eb1594b2a8b5 100644 --- a/src/entry.rs +++ b/src/entry.rs @@ -207,7 +207,6 @@ impl Entry { } true } - pub fn is_tick(&self) -> bool { self.transactions.is_empty() } diff --git a/src/forks.rs b/src/forks.rs new file mode 100644 index 00000000000000..119d3e3c473bf7 --- /dev/null +++ b/src/forks.rs @@ -0,0 +1,230 @@ +/// This module tracks the forks in the bank +use crate::bank_state::{BankCheckpoint, BankState}; +use std::sync::Arc; +//TODO: own module error +use crate::bank::{BankError, Result}; +use crate::checkpoints::Checkpoints; +use solana_sdk::hash::Hash; +use std; + +const ROLLBACK_DEPTH: usize = 32usize; + +#[derive(Default)] +pub struct Forks { + pub checkpoints: Checkpoints>, + + /// Last fork to be initialized + /// This should be the last fork to be replayed or the TPU fork + pub live_fork: u64, + + /// Fork that is trunk + pub trunk_fork: u64, +} + +impl Forks { + pub fn live_fork(&self) -> BankState { + 
self.bank_state(self.live_fork).expect("live fork") + } + pub fn trunk_fork(&self) -> BankState { + self.bank_state(self.trunk_fork).expect("trunk fork") + } + + pub fn bank_state(&self, fork: u64) -> Option { + let cp: Vec<_> = self + .checkpoints + .collect(ROLLBACK_DEPTH, fork) + .into_iter() + .map(|x| x.1) + .cloned() + .collect(); + if cp.is_empty() { + None + } else { + Some(BankState { checkpoints: cp }) + } + } + /// Collapse the bottom two checkpoints. + /// The tree is computed from the `leaf` to the `trunk` + /// The leaf is the last possible fork, it should have no descendants. + /// The expected oldest fork must be the trunk + /// The direct child of the trunk that leads the leaf becomes the new trunk. + /// The forks that are not a decendant of the new trunk -> leaf path are pruned. + /// live_fork is the leaf. + /// trunk_fork is the new trunk. + /// Return the new trunk id. + pub fn merge_into_trunk(&mut self, trunk: u64, leaf: u64) -> Result { + // `old` trunk, should have `trunk` as its fork_id + // `new` trunk is a direct decendant of old and has new_trunk_id as its fork_id + // new is merged into old + // and old is swapped into the checkpoint under new_trunk_id + let (old_trunk, new_trunk, new_trunk_id) = { + let states = self.checkpoints.collect(ROLLBACK_DEPTH + 1, leaf); + let leaf_id = states.first().map(|x| x.0).ok_or(BankError::UnknownFork)?; + assert_eq!(leaf_id, leaf); + let trunk_id = states.last().map(|x| x.0).ok_or(BankError::UnknownFork)?; + if trunk_id != trunk { + return Err(BankError::InvalidTrunk); + } + let len = states.len(); + let old_trunk = states[len - 1].clone(); + let new_trunk = states[len - 2].clone(); + if !new_trunk.1.finalized() { + println!("new_trunk id {}", new_trunk.1.fork_id()); + return Err(BankError::CheckpointNotFinalized); + } + if !old_trunk.1.finalized() { + println!("old id {}", old_trunk.1.fork_id()); + return Err(BankError::CheckpointNotFinalized); + } + //stupid sanity checks + 
assert_eq!(new_trunk.1.fork_id(), new_trunk.0); + assert_eq!(old_trunk.1.fork_id(), old_trunk.0); + (old_trunk.1.clone(), new_trunk.1.clone(), new_trunk.0) + }; + let idag = self.checkpoints.invert(); + let new_checkpoints = self.checkpoints.prune(new_trunk_id, &idag); + let old_trunk_id = old_trunk.fork_id(); + self.checkpoints = new_checkpoints; + self.trunk_fork = new_trunk_id; + self.live_fork = leaf; + // old should have been pruned + assert!(self.checkpoints.load(old_trunk_id).is_none()); + // new_trunk id should be in the new tree + assert!(!self.checkpoints.load(new_trunk_id).is_none()); + + // swap in the old instance under the new_trunk id + // this should be the last external ref to `new_trunk` + self.checkpoints + .insert(new_trunk_id, old_trunk.clone(), old_trunk_id); + + // merge all the new changes into the old instance under the new id + // this should consume `new` + // new should have no other references + let new_trunk: BankCheckpoint = Arc::try_unwrap(new_trunk).unwrap(); + old_trunk.merge_into_trunk(new_trunk); + assert_eq!(old_trunk.fork_id(), new_trunk_id); + Ok(new_trunk_id) + } + + /// Initialize the first trunk + pub fn init_trunk_fork(&mut self, checkpoint: BankCheckpoint) { + assert!(self.checkpoints.is_empty()); + self.live_fork = checkpoint.fork_id(); + self.trunk_fork = checkpoint.fork_id(); + //TODO: using u64::MAX as the impossible checkpoint + //this should be a None instead + self.checkpoints + .store(self.live_fork, Arc::new(checkpoint), std::u64::MAX); + } + + pub fn is_active_fork(&self, fork: u64) -> bool { + if let Some(state) = self.checkpoints.load(fork) { + !state.0.finalized() && self.live_fork == fork + } else { + false + } + } + /// Initialize the `current` fork that is a direct descendant of the `base` fork. 
+ pub fn init_fork(&mut self, current: u64, last_id: &Hash, base: u64) -> Result<()> { + if let Some(state) = self.checkpoints.load(base) { + if !state.0.finalized() { + return Err(BankError::CheckpointNotFinalized); + } + let new = state.0.fork(current, last_id); + self.checkpoints.store(current, Arc::new(new), base); + self.live_fork = current; + Ok(()) + } else { + return Err(BankError::UnknownFork); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::bank_state::BankCheckpoint; + use solana_sdk::hash::hash; + + #[test] + fn forks_init_trunk() { + let mut forks = Forks::default(); + let cp = BankCheckpoint::new(0, &Hash::default()); + forks.init_trunk_fork(cp); + assert!(forks.is_active_fork(0)); + assert_eq!(forks.trunk_fork().checkpoints.len(), 1); + assert_eq!(forks.trunk_fork().head().fork_id(), 0); + assert_eq!(forks.live_fork().head().fork_id(), 0); + } + + #[test] + fn forks_init_fork() { + let mut forks = Forks::default(); + let last_id = Hash::default(); + let cp = BankCheckpoint::new(0, &last_id); + cp.register_tick(&last_id); + forks.init_trunk_fork(cp); + let last_id = hash(last_id.as_ref()); + assert_eq!(forks.init_fork(1, &last_id, 1), Err(BankError::UnknownFork)); + assert_eq!( + forks.init_fork(1, &last_id, 0), + Err(BankError::CheckpointNotFinalized) + ); + forks.trunk_fork().head().finalize(); + assert_eq!(forks.init_fork(1, &last_id, 0), Ok(())); + + assert_eq!(forks.trunk_fork().head().fork_id(), 0); + assert_eq!(forks.live_fork().head().fork_id(), 1); + assert_eq!(forks.live_fork().checkpoints.len(), 2); + } + + #[test] + fn forks_merge() { + let mut forks = Forks::default(); + let last_id = Hash::default(); + let cp = BankCheckpoint::new(0, &last_id); + cp.register_tick(&last_id); + forks.init_trunk_fork(cp); + let last_id = hash(last_id.as_ref()); + forks.trunk_fork().head().finalize(); + assert_eq!(forks.init_fork(1, &last_id, 0), Ok(())); + forks.live_fork().head().register_tick(&last_id); + 
forks.live_fork().head().finalize(); + assert_eq!(forks.merge_into_trunk(0, 1), Ok(1)); + + assert_eq!(forks.live_fork().checkpoints.len(), 1); + assert_eq!(forks.trunk_fork().head().fork_id(), 1); + assert_eq!(forks.live_fork().head().fork_id(), 1); + } + #[test] + fn forks_merge_prune() { + let mut forks = Forks::default(); + let last_id = Hash::default(); + let cp = BankCheckpoint::new(0, &last_id); + cp.register_tick(&last_id); + forks.init_trunk_fork(cp); + let last_id = hash(last_id.as_ref()); + forks.trunk_fork().head().finalize(); + assert_eq!(forks.init_fork(1, &last_id, 0), Ok(())); + assert_eq!(forks.bank_state(1).unwrap().checkpoints.len(), 2); + forks.bank_state(1).unwrap().head().register_tick(&last_id); + + // add a fork 2 to be pruned + // fork 2 connects to 0 + let last_id = hash(last_id.as_ref()); + assert_eq!(forks.init_fork(2, &last_id, 0), Ok(())); + assert_eq!(forks.bank_state(2).unwrap().checkpoints.len(), 2); + forks.bank_state(2).unwrap().head().register_tick(&last_id); + + forks.bank_state(1).unwrap().head().finalize(); + // fork 1 is the new trunk, only forks that are descendant from 1 are valid + assert_eq!(forks.merge_into_trunk(0, 1), Ok(1)); + + // fork 2 is gone since it does not connect to 1 + assert!(forks.bank_state(2).is_none()); + + assert_eq!(forks.live_fork().checkpoints.len(), 1); + assert_eq!(forks.trunk_fork().head().fork_id(), 1); + assert_eq!(forks.live_fork().head().fork_id(), 1); + } +} diff --git a/src/fullnode.rs b/src/fullnode.rs index edec109626c5ad..57e8aefa0fc598 100644 --- a/src/fullnode.rs +++ b/src/fullnode.rs @@ -569,7 +569,7 @@ impl Fullnode { db_ledger: &DbLedger, leader_scheduler: Arc>, ) -> (Bank, u64, Hash) { - let mut bank = Bank::new_with_builtin_programs(); + let mut bank = Bank::default(); bank.leader_scheduler = leader_scheduler; let now = Instant::now(); diff --git a/src/last_id_queue.rs b/src/last_id_queue.rs new file mode 100644 index 00000000000000..995084d869fa2a --- /dev/null +++ 
b/src/last_id_queue.rs @@ -0,0 +1,213 @@ +use crate::poh_service::NUM_TICKS_PER_SECOND; +use hashbrown::HashMap; +use solana_sdk::hash::Hash; +use solana_sdk::timing::timestamp; + +/// The number of most recent `last_id` values that the bank will track the signatures +/// of. Once the bank discards a `last_id`, it will reject any transactions that use +/// that `last_id` in a transaction. Lowering this value reduces memory consumption, +/// but requires clients to update its `last_id` more frequently. Raising the value +/// lengthens the time a client must wait to be certain a missing transaction will +/// not be processed by the network. +pub const MAX_ENTRY_IDS: usize = NUM_TICKS_PER_SECOND * 120; + +#[derive(Debug, PartialEq, Eq, Clone)] +struct LastIdEntry { + timestamp: u64, + tick_height: u64, +} + +/// Low memory overhead, so can be cloned for every checkpoint +#[derive(Clone)] +pub struct LastIdQueue { + /// updated whenever an id is registered, at each tick ;) + pub tick_height: u64, + + /// last tick to be registered + pub last_id: Option, + + entries: HashMap, +} +impl Default for LastIdQueue { + fn default() -> Self { + Self { + entries: HashMap::new(), + tick_height: 0, + last_id: None, + } + } +} + +impl LastIdQueue { + /// Check if the age of the entry_id is within the max_age + /// return false for any entries with an age equal to or above max_age + pub fn check_entry_id_age(&self, entry_id: Hash, max_age: usize) -> bool { + let entry = self.entries.get(&entry_id); + match entry { + Some(entry) => self.tick_height - entry.tick_height < max_age as u64, + _ => false, + } + } + /// check if entry is valid + pub fn check_entry(&self, entry_id: Hash) -> bool { + self.entries.get(&entry_id).is_some() + } + /// Tell the bank which Entry IDs exist on the ledger. This function + /// assumes subsequent calls correspond to later entries, and will boot + /// the oldest ones once its internal cache is full. 
Once booted, the + /// bank will reject transactions using that `last_id`. + pub fn register_tick(&mut self, last_id: &Hash) { + self.tick_height += 1; + let tick_height = self.tick_height; + + // this clean up can be deferred until sigs gets larger + // because we verify entry.nth every place we check for validity + if self.entries.len() >= MAX_ENTRY_IDS as usize { + self.entries + .retain(|_, entry| tick_height - entry.tick_height <= MAX_ENTRY_IDS as u64); + } + + self.entries.insert( + *last_id, + LastIdEntry { + tick_height, + timestamp: timestamp(), + }, + ); + + self.last_id = Some(*last_id); + } + + /// Looks through a list of tick heights and stakes, and finds the latest + /// tick that has achieved confirmation + pub fn get_confirmation_timestamp( + &self, + ticks_and_stakes: &mut [(u64, u64)], + supermajority_stake: u64, + ) -> Option { + // Sort by tick height + ticks_and_stakes.sort_by(|a, b| a.0.cmp(&b.0)); + let current_tick_height = self.tick_height; + let mut total = 0; + for (tick_height, stake) in ticks_and_stakes.iter() { + if ((current_tick_height - tick_height) as usize) < MAX_ENTRY_IDS { + total += stake; + if total > supermajority_stake { + return self.tick_height_to_timestamp(*tick_height); + } + } + } + None + } + + /// Maps a tick height to a timestamp + fn tick_height_to_timestamp(&self, tick_height: u64) -> Option { + for entry in self.entries.values() { + if entry.tick_height == tick_height { + return Some(entry.timestamp); + } + } + None + } + + /// Look through the last_ids and find all the valid ids + /// This is batched to avoid holding the lock for a significant amount of time + /// + /// Return a vec of tuple of (valid index, timestamp) + /// index is into the passed ids slice to avoid copying hashes + pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> { + let mut ret = Vec::new(); + for (i, id) in ids.iter().enumerate() { + if let Some(entry) = self.entries.get(id) { + if self.tick_height - entry.tick_height < 
MAX_ENTRY_IDS as u64 { + ret.push((i, entry.timestamp)); + } + } + } + ret + } + pub fn clear(&mut self) { + self.entries = HashMap::new(); + self.tick_height = 0; + self.last_id = None; + } + /// fork for LastIdQueue is a simple clone + pub fn fork(&self) -> Self { + Self { + entries: self.entries.clone(), + tick_height: self.tick_height.clone(), + last_id: self.last_id.clone(), + } + } + /// merge for entryq is a swap + pub fn merge_into_trunk(&mut self, other: Self) { + let (entries, tick_height, last_id) = { (other.entries, other.tick_height, other.last_id) }; + self.entries = entries; + self.tick_height = tick_height; + self.last_id = last_id; + } +} +#[cfg(test)] +mod tests { + use super::*; + use bincode::serialize; + use solana_sdk::hash::hash; + + #[test] + fn test_count_valid_ids() { + let first_id = Hash::default(); + let mut entry_queue = LastIdQueue::default(); + entry_queue.register_tick(&first_id); + let ids: Vec<_> = (0..MAX_ENTRY_IDS) + .map(|i| { + let last_id = hash(&serialize(&i).unwrap()); // Unique hash + entry_queue.register_tick(&last_id); + last_id + }) + .collect(); + assert_eq!(entry_queue.count_valid_ids(&[]).len(), 0); + assert_eq!(entry_queue.count_valid_ids(&[first_id]).len(), 0); + for (i, id) in entry_queue.count_valid_ids(&ids).iter().enumerate() { + assert_eq!(id.0, i); + } + } + + #[test] + fn test_register_tick() { + let last_id = Hash::default(); + let mut entry_queue = LastIdQueue::default(); + assert!(!entry_queue.check_entry(last_id)); + entry_queue.register_tick(&last_id); + assert!(entry_queue.check_entry(last_id)); + } + #[test] + fn test_reject_old_last_id() { + let last_id = Hash::default(); + let mut entry_queue = LastIdQueue::default(); + for i in 0..MAX_ENTRY_IDS { + let last_id = hash(&serialize(&i).unwrap()); // Unique hash + entry_queue.register_tick(&last_id); + } + // Assert we're no longer able to use the oldest entry ID. 
+ assert!(!entry_queue.check_entry(last_id)); + } + #[test] + fn test_fork() { + let last_id = Hash::default(); + let mut first = LastIdQueue::default(); + assert!(!first.check_entry(last_id)); + first.register_tick(&last_id); + let second = first.fork(); + assert!(second.check_entry(last_id)); + } + #[test] + fn test_merge() { + let last_id = Hash::default(); + let mut first = LastIdQueue::default(); + assert!(!first.check_entry(last_id)); + let mut second = first.fork(); + second.register_tick(&last_id); + first.merge_into_trunk(second); + assert!(first.check_entry(last_id)); + } +} diff --git a/src/leader_scheduler.rs b/src/leader_scheduler.rs index 0ca6fcd5d88378..6d2ac970739f07 100644 --- a/src/leader_scheduler.rs +++ b/src/leader_scheduler.rs @@ -321,13 +321,14 @@ impl LeaderScheduler { let lower_bound = height.saturating_sub(self.active_window_length); { - let accounts = bank.accounts.accounts_db.read().unwrap(); + let state = bank.trunk_fork(); + let accounts = state.head().accounts.accounts_db.read().unwrap(); // TODO: iterate through checkpoints, too accounts .accounts - .values() - .filter_map(|account| { + .iter() + .flat_map(|(_, account)| { if vote_program::check_id(&account.owner) { if let Ok(vote_state) = VoteProgram::deserialize(&account.userdata) { return vote_state diff --git a/src/lib.rs b/src/lib.rs index 24c88634bf51be..192471c9de8f44 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,6 +11,7 @@ pub mod counter; pub mod accounts; pub mod bank; +pub mod bank_state; pub mod banking_stage; pub mod blob_fetch_stage; pub mod bloom; @@ -19,7 +20,7 @@ pub mod broadcast_service; pub mod chacha; #[cfg(all(feature = "chacha", feature = "cuda"))] pub mod chacha_cuda; -pub mod checkpoint; +pub mod checkpoints; pub mod client; pub mod crds; pub mod crds_gossip; @@ -27,6 +28,8 @@ pub mod crds_gossip_error; pub mod crds_gossip_pull; pub mod crds_gossip_push; pub mod crds_value; +pub mod forks; +pub mod last_id_queue; #[macro_use] pub mod contact_info; pub mod 
cluster_info; @@ -60,7 +63,7 @@ pub mod service; pub mod signature; pub mod sigverify; pub mod sigverify_stage; -pub mod status_deque; +pub mod status_cache; pub mod storage_stage; pub mod streamer; pub mod test_tx; diff --git a/src/poh_recorder.rs b/src/poh_recorder.rs index 11e3282be035f9..e021cb37f56cd2 100644 --- a/src/poh_recorder.rs +++ b/src/poh_recorder.rs @@ -106,7 +106,7 @@ impl PohRecorder { id: tick.id, transactions: vec![], }; - self.bank.register_tick(&tick.id); + self.bank.tpu_register_tick(&tick.id); self.sender.send(vec![tick])?; Ok(()) } diff --git a/src/replay_stage.rs b/src/replay_stage.rs index ef62756dc81d19..db25e9f8b43969 100644 --- a/src/replay_stage.rs +++ b/src/replay_stage.rs @@ -3,6 +3,7 @@ use crate::bank::Bank; use crate::cluster_info::ClusterInfo; use crate::counter::Counter; +use crate::entry::Entry; use crate::entry::{EntryReceiver, EntrySender}; use solana_sdk::hash::Hash; @@ -13,6 +14,7 @@ use crate::result::{Error, Result}; use crate::service::Service; use crate::streamer::{responder, BlobSender}; use crate::vote_signer_proxy::VoteSignerProxy; +//use crate::vote_stage::send_validator_vote; use log::Level; use solana_metrics::{influxdb, submit}; use solana_sdk::signature::{Keypair, KeypairUtil}; @@ -57,13 +59,35 @@ pub struct ReplayStage { } impl ReplayStage { + //TODO: This should be handled by the ledger, and this implementaiton is intentionally stupid. 
+ //returns a vec of blocks where a block contains + //* entries + //* current block index + //* base block index + //The entries are at most to the block height + fn entries_to_blocks(entries: Vec) -> Vec<(Vec, u64, u64)> { + let mut blocks = vec![]; + for e in entries { + let current = e.tick_height / TICKS_PER_BLOCK; + let prev = current - 1; + if blocks.is_empty() { + blocks.push((vec![], current, prev)); + } + if blocks.last().unwrap().1 != current { + blocks.push((vec![], current, prev)); + } + blocks.last_mut().unwrap().0.push(e); + } + blocks + } + /// Process entry blobs, already in order #[allow(clippy::too_many_arguments)] fn process_entries( bank: &Arc, cluster_info: &Arc>, window_receiver: &EntryReceiver, - keypair: &Arc, + _keypair: &Arc, vote_signer: Option<&Arc>, vote_blob_sender: Option<&BlobSender>, ledger_entry_sender: &EntrySender, @@ -86,8 +110,6 @@ impl ReplayStage { .to_owned(), ); - let mut res = Ok(()); - let mut num_entries_to_write = entries.len(); let now = Instant::now(); if !entries.as_slice().verify(last_entry_id) { inc_new_counter_info!("replicate_stage-verify-fail", entries.len()); @@ -101,90 +123,90 @@ impl ReplayStage { let (current_leader, _) = bank .get_current_leader() .expect("Scheduled leader should be calculated by this point"); - let my_id = keypair.pubkey(); // Next vote tick is ceiling of (current tick/ticks per block) - let mut num_ticks_to_next_vote = TICKS_PER_BLOCK - (bank.tick_height() % TICKS_PER_BLOCK); - let mut start_entry_index = 0; - for (i, entry) in entries.iter().enumerate() { - inc_new_counter_info!("replicate-stage_bank-tick", bank.tick_height() as usize); - if entry.is_tick() { - num_ticks_to_next_vote -= 1; + let start_block = bank.tick_height() / TICKS_PER_BLOCK; + let blocks = Self::entries_to_blocks(entries); + for (entries, current_slot, base_slot) in blocks { + let now = Instant::now(); + //TODO: skip forks that cannot be voted on due to lockouts + if !entries.as_slice().verify(last_entry_id) { + 
inc_new_counter_info!("replicate_stage-verify-fail", entries.len()); + return Err(Error::BlobError(BlobError::VerificationFailed)); } inc_new_counter_info!( - "replicate-stage_tick-to-vote", - num_ticks_to_next_vote as usize + "replicate_stage-verify-duration", + duration_as_ms(&now.elapsed()) as usize ); - // If it's the last entry in the vector, i will be vec len - 1. - // If we don't process the entry now, the for loop will exit and the entry - // will be dropped. - if 0 == num_ticks_to_next_vote || (i + 1) == entries.len() { - res = bank.process_entries(&entries[start_entry_index..=i]); - - if res.is_err() { - // TODO: This will return early from the first entry that has an erroneous - // transaction, instead of processing the rest of the entries in the vector - // of received entries. This is in line with previous behavior when - // bank.process_entries() was used to process the entries, but doesn't solve the - // issue that the bank state was still changed, leading to inconsistencies with the - // leader as the leader currently should not be publishing erroneous transactions - inc_new_counter_info!( - "replicate-stage_failed_process_entries", - (i - start_entry_index) - ); - - break; - } - - if 0 == num_ticks_to_next_vote { - if let Some(signer) = vote_signer { + if bank.bank_state(current_slot).is_none() { + let tick = &entries[0]; + bank.init_fork(current_slot, &tick.id, base_slot) + .expect("init_checkpoint"); + } + let res = bank.process_fork_entries(current_slot, &entries); + if res.is_err() { + inc_new_counter_info!("replicate-stage_failed_process_entries", 1); + } + inc_new_counter_info!( + "replicate-transactions", + entries.iter().map(|x| x.transactions.len()).sum() + ); + let entries_len = entries.len() as u64; + if entries_len != 0 { + ledger_entry_sender.send(entries)?; + } + *entry_height += entries_len; + } + // do this at the trunk crossover + bank.leader_scheduler + .write() + .unwrap() + .update_height(bank.tick_height(), bank); + let now = 
Instant::now(); + let end_block = bank.tick_height() / TICKS_PER_BLOCK; + // TODO: make this work + // let options = vec![]; + // for c in bank.live_forks() { + // let state = bank.bank_state(c); + // if is_valid_vote(state.entry_height()) { + // options.push(state); + // } + // } + // options.sort_by(|o| o.compute_network_lockout()); + // if options.last().is_none() { + // return; + // } + // let new_leaf = options.last().unwrap(); + // let new_trunk = bank.merge_trunk(old_trunk, new_leaf); + // if old_trunk < new_leader_scheduler && new_trunk >= new_leader_scheduler { + // // If the schedule is computed every 100 periods + // // this computes the schedule when trunk is >= 100 + // // this schedule is active at 200 + // leader_scheduler + // .write() + // .unwrap() + // .generate_future_schedule(new_trunk); + // } + // vote(options.last()); + + if end_block != start_block { + if let Some(signer) = vote_signer { if let Some(sender) = vote_blob_sender { signer .send_validator_vote(bank, &cluster_info, sender) .unwrap(); } - } - } - let (scheduled_leader, _) = bank - .get_current_leader() - .expect("Scheduled leader should be calculated by this point"); - - // TODO: Remove this soon once we boot the leader from ClusterInfo - if scheduled_leader != current_leader { - cluster_info.write().unwrap().set_leader(scheduled_leader); - } - - if my_id == scheduled_leader { - num_entries_to_write = i + 1; - break; - } - start_entry_index = i + 1; - num_ticks_to_next_vote = TICKS_PER_BLOCK; - } + } } + let (scheduled_leader, _) = bank + .get_current_leader() + .expect("Scheduled leader should be calculated by this point"); - // If leader rotation happened, only write the entries up to leader rotation. 
- entries.truncate(num_entries_to_write); - *last_entry_id = entries - .last() - .expect("Entries cannot be empty at this point") - .id; - - inc_new_counter_info!( - "replicate-transactions", - entries.iter().map(|x| x.transactions.len()).sum() - ); - - let entries_len = entries.len() as u64; - // TODO: In line with previous behavior, this will write all the entries even if - // an error occurred processing one of the entries (causing the rest of the entries to - // not be processed). - if entries_len != 0 { - ledger_entry_sender.send(entries)?; + // TODO: Remove this soon once we boot the leader from ClusterInfo + if scheduled_leader != current_leader { + cluster_info.write().unwrap().set_leader(scheduled_leader); } - *entry_height += entries_len; - res?; inc_new_counter_info!( "replicate_stage-duration", duration_as_ms(&now.elapsed()) as usize @@ -290,13 +312,13 @@ mod test { use crate::leader_scheduler::{ make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig, }; + use solana_sdk::hash::Hash; use crate::packet::BlobError; use crate::replay_stage::{ReplayStage, ReplayStageReturnType}; use crate::result::Error; use crate::service::Service; use crate::vote_signer_proxy::VoteSignerProxy; - use solana_sdk::hash::Hash; use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_vote_signer::rpc::LocalVoteSigner; use std::fs::remove_dir_all; diff --git a/src/rpc.rs b/src/rpc.rs index 775f324daca0f3..1a1eb50307161f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -6,7 +6,6 @@ use crate::jsonrpc_core::*; use crate::jsonrpc_http_server::*; use crate::packet::PACKET_DATA_SIZE; use crate::service::Service; -use crate::status_deque::Status; use bincode::{deserialize, serialize}; use bs58; use solana_drone::drone::request_airdrop_transaction; @@ -221,26 +220,14 @@ impl RpcSol for RpcSolImpl { .get_signature_status(signature); let status = { - if res.is_none() { - RpcSignatureStatus::SignatureNotFound - } else { - match res.unwrap() { - Status::Reserved => { - // Report 
SignatureReserved as SignatureNotFound as SignatureReserved is - // transitory while the bank processes the associated transaction. - RpcSignatureStatus::SignatureNotFound - } - Status::Complete(res) => match res { - Ok(_) => RpcSignatureStatus::Confirmed, - Err(BankError::AccountInUse) => RpcSignatureStatus::AccountInUse, - Err(BankError::ProgramError(_, _)) => { - RpcSignatureStatus::ProgramRuntimeError - } - Err(err) => { - trace!("mapping {:?} to GenericFailure", err); - RpcSignatureStatus::GenericFailure - } - }, + match res { + None => RpcSignatureStatus::SignatureNotFound, + Some(Ok(_)) => RpcSignatureStatus::Confirmed, + Some(Err(BankError::AccountInUse)) => RpcSignatureStatus::AccountInUse, + Some(Err(BankError::ProgramError(_, _))) => RpcSignatureStatus::ProgramRuntimeError, + Some(Err(err)) => { + trace!("mapping {:?} to GenericFailure", err); + RpcSignatureStatus::GenericFailure } } }; @@ -289,7 +276,7 @@ impl RpcSol for RpcSolImpl { .unwrap() .get_signature_status(signature); - if signature_status == Some(Status::Complete(Ok(()))) { + if signature_status == Some(Ok(())) { info!("airdrop signature ok"); return Ok(bs58::encode(signature).into_string()); } else if now.elapsed().as_secs() > 5 { @@ -379,7 +366,7 @@ impl JsonRpcRequestProcessor { let id = self.bank.last_id(); Ok(bs58::encode(id).into_string()) } - pub fn get_signature_status(&self, signature: Signature) -> Option>> { + pub fn get_signature_status(&self, signature: Signature) -> Option> { self.bank.get_signature_status(&signature) } fn get_transaction_count(&self) -> Result { diff --git a/src/rpc_pubsub.rs b/src/rpc_pubsub.rs index e32a7c8b98ab0d..d3f02d023ad8cb 100644 --- a/src/rpc_pubsub.rs +++ b/src/rpc_pubsub.rs @@ -10,7 +10,6 @@ use crate::jsonrpc_pubsub::{PubSubHandler, Session, SubscriptionId}; use crate::jsonrpc_ws_server::{RequestContext, Sender, ServerBuilder}; use crate::rpc::RpcSignatureStatus; use crate::service::Service; -use crate::status_deque::Status; use bs58; use 
solana_sdk::account::Account; use solana_sdk::pubkey::Pubkey; @@ -341,7 +340,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl { } match status.unwrap() { - Status::Complete(Ok(_)) => { + Ok(_) => { sink.notify(Ok(RpcSignatureStatus::Confirmed)) .wait() .unwrap(); diff --git a/src/status_cache.rs b/src/status_cache.rs new file mode 100644 index 00000000000000..236850ce749a53 --- /dev/null +++ b/src/status_cache.rs @@ -0,0 +1,236 @@ +use crate::bank::{BankError, Result}; +use crate::bloom::{Bloom, BloomHashIndex}; +use crate::last_id_queue::MAX_ENTRY_IDS; +use hashbrown::HashMap; +use solana_sdk::hash::Hash; +use solana_sdk::signature::Signature; +use std::collections::VecDeque; +use std::ops::{Deref, DerefMut}; + +type FailureMap = HashMap; + +pub struct StatusCache { + /// all signatures seen at this checkpoint + signatures: Bloom, + + /// failures + failures: FailureMap, + + /// Merges are empty unless this is the trunk checkpoint which cannot be unrolled + merges: VecDeque, +} + +impl StatusCache { + pub fn new(last_id: &Hash) -> Self { + let keys = (0..27) + .into_iter() + .map(|i| last_id.hash_at_index(i)) + .collect(); + Self { + signatures: Bloom::new(38340234, keys), + failures: HashMap::new(), + merges: VecDeque::new(), + } + } + fn has_signature_merged(&self, sig: &Signature) -> bool { + for c in &self.merges { + if c.has_signature(sig) { + return true; + } + } + return false; + } + /// test if a signature is known + pub fn has_signature(&self, sig: &Signature) -> bool { + self.signatures.contains(&sig) || self.has_signature_merged(sig) + } + /// add a signature + pub fn add(&mut self, sig: &Signature) { + // any mutable cache is "live" and should not be merged into + // since it cannot be a valid trunk checkpoint + assert!(self.merges.is_empty()); + + self.signatures.add(&sig) + } + /// Save an error status for a signature + pub fn save_failure_status(&mut self, sig: &Signature, err: BankError) { + assert!(self.has_signature(sig), "sig not found with err 
{:?}", err); + // any mutable cache is "live" and should not be merged into + // since it cannot be a valid trunk checkpoint + assert!(self.merges.is_empty()); + self.failures.insert(*sig, err); + } + /// Forget all signatures. Useful for benchmarking. + pub fn clear(&mut self) { + self.failures.clear(); + self.signatures.clear(); + } + fn get_signature_status_merged(&self, sig: &Signature) -> Option> { + for c in &self.merges { + if c.has_signature(sig) { + return c.get_signature_status(sig); + } + } + None + } + pub fn get_signature_status(&self, sig: &Signature) -> Option> { + if let Some(res) = self.failures.get(sig) { + return Some(Err(res.clone())); + } else if self.signatures.contains(sig) { + return Some(Ok(())); + } + self.get_signature_status_merged(sig) + } + /// like accounts, status cache starts with an new data structure for every checkpoint + /// so only merge is implemented + /// but the merges maintains a history + pub fn merge_into_trunk(&mut self, other: Self) { + // merges should be empty for every other checkpoint accept the trunk + // which cannot be rolled back + assert!(other.merges.is_empty()); + self.merges.push_front(other); + if self.merges.len() > MAX_ENTRY_IDS { + //TODO check if this is the right size ^ + self.merges.pop_back(); + } + } + pub fn get_signature_status_all( + checkpoints: &[U], + signature: &Signature, + ) -> Option> + where + U: Deref, + { + for c in checkpoints { + if let Some(status) = c.get_signature_status(signature) { + return Some(status); + } + } + None + } + pub fn has_signature_all(checkpoints: &[U], signature: &Signature) -> bool + where + U: Deref, + { + for c in checkpoints { + if c.has_signature(signature) { + return true; + } + } + false + } + pub fn clear_all(checkpoints: &mut [U]) -> bool + where + U: DerefMut, + { + for c in checkpoints.iter_mut() { + c.clear(); + } + false + } +} +#[cfg(test)] +mod tests { + use super::*; + //use bincode::serialize; + use solana_sdk::hash::hash; + #[test] + fn 
test_has_signature() { + let sig = Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut status_cache = StatusCache::new(&last_id); + assert_eq!(status_cache.has_signature(&sig), false); + assert_eq!(status_cache.get_signature_status(&sig), None,); + status_cache.add(&sig); + assert_eq!(status_cache.has_signature(&sig), true); + assert_eq!(status_cache.get_signature_status(&sig), Some(Ok(())),); + } + + #[test] + fn test_has_signature_checkpoint() { + let sig = Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut first = StatusCache::new(&last_id); + first.add(&sig); + assert_eq!(first.get_signature_status(&sig), Some(Ok(()))); + let last_id = hash(last_id.as_ref()); + let second = StatusCache::new(&last_id); + let checkpoints = [&second, &first]; + assert_eq!( + StatusCache::get_signature_status_all(&checkpoints, &sig), + Some(Ok(())), + ); + assert!(StatusCache::has_signature_all(&checkpoints, &sig)); + } + + #[test] + fn test_has_signature_merged1() { + let sig = Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut first = StatusCache::new(&last_id); + first.add(&sig); + assert_eq!(first.get_signature_status(&sig), Some(Ok(()))); + let last_id = hash(last_id.as_ref()); + let second = StatusCache::new(&last_id); + first.merge_into_trunk(second); + assert_eq!(first.get_signature_status(&sig), Some(Ok(())),); + assert!(first.has_signature(&sig)); + } + + #[test] + fn test_has_signature_merged2() { + let sig = Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut first = StatusCache::new(&last_id); + first.add(&sig); + assert_eq!(first.get_signature_status(&sig), Some(Ok(()))); + let last_id = hash(last_id.as_ref()); + let mut second = StatusCache::new(&last_id); + second.merge_into_trunk(first); + assert_eq!(second.get_signature_status(&sig), Some(Ok(())),); + assert!(second.has_signature(&sig)); + } + + #[test] + fn test_failure_status() { + let sig = 
Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut first = StatusCache::new(&last_id); + first.add(&sig); + first.save_failure_status(&sig, BankError::DuplicateSignature); + assert_eq!(first.has_signature(&sig), true); + assert_eq!( + first.get_signature_status(&sig), + Some(Err(BankError::DuplicateSignature)), + ); + } + + #[test] + fn test_clear_signatures() { + let sig = Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut first = StatusCache::new(&last_id); + first.add(&sig); + assert_eq!(first.has_signature(&sig), true); + first.save_failure_status(&sig, BankError::DuplicateSignature); + assert_eq!( + first.get_signature_status(&sig), + Some(Err(BankError::DuplicateSignature)), + ); + first.clear(); + assert_eq!(first.has_signature(&sig), false); + assert_eq!(first.get_signature_status(&sig), None,); + } + #[test] + fn test_clear_signatures_all() { + let sig = Default::default(); + let last_id = hash(Hash::default().as_ref()); + let mut first = StatusCache::new(&last_id); + first.add(&sig); + assert_eq!(first.has_signature(&sig), true); + let mut second = StatusCache::new(&last_id); + let mut checkpoints = [&mut second, &mut first]; + StatusCache::clear_all(&mut checkpoints); + assert_eq!(StatusCache::has_signature_all(&checkpoints, &sig), false); + } +} diff --git a/src/status_deque.rs b/src/status_deque.rs deleted file mode 100644 index 8d975ad914b49c..00000000000000 --- a/src/status_deque.rs +++ /dev/null @@ -1,364 +0,0 @@ -use crate::checkpoint::Checkpoint; -use crate::poh_service::NUM_TICKS_PER_SECOND; -use hashbrown::HashMap; -use solana_sdk::hash::Hash; -use solana_sdk::signature::Signature; -use solana_sdk::timing::timestamp; -use std::collections::VecDeque; -use std::result; - -/// The number of most recent `last_id` values that the bank will track the signatures -/// of. Once the bank discards a `last_id`, it will reject any transactions that use -/// that `last_id` in a transaction. 
Lowering this value reduces memory consumption, -/// but requires clients to update its `last_id` more frequently. Raising the value -/// lengthens the time a client must wait to be certain a missing transaction will -/// not be processed by the network. -pub const MAX_ENTRY_IDS: usize = NUM_TICKS_PER_SECOND * 120; - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum Status { - Reserved, - Complete(T), -} - -type StatusMap = HashMap>; -type StatusEntryMap = HashMap>; - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum StatusDequeError { - /// The `Signature` has been seen before. This can occur under normal operation - /// when a UDP packet is duplicated, as a user error from a client not updating - /// its `last_id`, or as a double-spend attack. - DuplicateSignature, - - /// The bank has not seen the given `last_id` or the transaction is too old and - /// the `last_id` has been discarded. - LastIdNotFound, -} - -pub type Result = result::Result; - -/// a record of a tick, from register_tick -#[derive(Clone)] -struct StatusEntry { - /// when the id was registered, according to network time - tick_height: u64, - - /// timestamp when this id was registered, used for stats/confirmation - timestamp: u64, - - /// a map of signature status, used for duplicate detection - statuses: StatusMap, -} - -pub struct StatusDeque { - /// A FIFO queue of `last_id` items, where each item is a set of signatures - /// that have been processed using that `last_id`. Rejected `last_id` - /// values are so old that the `last_id` has been pulled out of the queue. - - /// updated whenever an id is registered, at each tick ;) - pub tick_height: u64, - - /// last tick to be registered - pub last_id: Option, - - /// Mapping of hashes to signature sets along with timestamp and what tick_height - /// was when the id was added. 
The bank uses this data to - /// reject transactions with signatures it's seen before and to reject - /// transactions that are too old (nth is too small) - entries: StatusEntryMap, - - checkpoints: VecDeque<(u64, Option, StatusEntryMap)>, -} - -impl Default for StatusDeque { - fn default() -> Self { - Self { - tick_height: 0, - last_id: None, - entries: HashMap::new(), - checkpoints: VecDeque::new(), - } - } -} - -impl Checkpoint for StatusDeque { - fn checkpoint(&mut self) { - self.checkpoints - .push_front((self.tick_height, self.last_id, self.entries.clone())); - } - fn rollback(&mut self) { - let (tick_height, last_id, entries) = self.checkpoints.pop_front().unwrap(); - self.tick_height = tick_height; - self.last_id = last_id; - self.entries = entries; - } - fn purge(&mut self, depth: usize) { - while self.depth() > depth { - self.checkpoints.pop_back().unwrap(); - } - } - fn depth(&self) -> usize { - self.checkpoints.len() - } -} - -impl StatusDeque { - pub fn update_signature_status_with_last_id( - &mut self, - signature: &Signature, - result: &T, - last_id: &Hash, - ) { - if let Some(entry) = self.entries.get_mut(last_id) { - entry - .statuses - .insert(*signature, Status::Complete(result.clone())); - } - } - pub fn reserve_signature_with_last_id( - &mut self, - last_id: &Hash, - sig: &Signature, - ) -> Result<()> { - if let Some(entry) = self.entries.get_mut(last_id) { - if self.tick_height - entry.tick_height < MAX_ENTRY_IDS as u64 { - return Self::reserve_signature(&mut entry.statuses, sig); - } - } - Err(StatusDequeError::LastIdNotFound) - } - - /// Store the given signature. The bank will reject any transaction with the same signature. - fn reserve_signature(statuses: &mut StatusMap, signature: &Signature) -> Result<()> { - if let Some(_result) = statuses.get(signature) { - return Err(StatusDequeError::DuplicateSignature); - } - statuses.insert(*signature, Status::Reserved); - Ok(()) - } - - /// Forget all signatures. Useful for benchmarking. 
- pub fn clear_signatures(&mut self) { - for entry in &mut self.entries.values_mut() { - entry.statuses.clear(); - } - } - - /// Check if the age of the entry_id is within the max_age - /// return false for any entries with an age equal to or above max_age - pub fn check_entry_id_age(&self, entry_id: Hash, max_age: usize) -> bool { - let entry = self.entries.get(&entry_id); - - match entry { - Some(entry) => self.tick_height - entry.tick_height < max_age as u64, - _ => false, - } - } - /// Tell the bank which Entry IDs exist on the ledger. This function - /// assumes subsequent calls correspond to later entries, and will boot - /// the oldest ones once its internal cache is full. Once boot, the - /// bank will reject transactions using that `last_id`. - pub fn register_tick(&mut self, last_id: &Hash) { - self.tick_height += 1; - let tick_height = self.tick_height; - - // this clean up can be deferred until sigs gets larger - // because we verify entry.nth every place we check for validity - if self.entries.len() >= MAX_ENTRY_IDS as usize { - self.entries - .retain(|_, entry| tick_height - entry.tick_height <= MAX_ENTRY_IDS as u64); - } - - self.entries.insert( - *last_id, - StatusEntry { - tick_height, - timestamp: timestamp(), - statuses: HashMap::new(), - }, - ); - - self.last_id = Some(*last_id); - } - - /// Looks through a list of tick heights and stakes, and finds the latest - /// tick that has achieved confirmation - pub fn get_confirmation_timestamp( - &self, - ticks_and_stakes: &mut [(u64, u64)], - supermajority_stake: u64, - ) -> Option { - // Sort by tick height - ticks_and_stakes.sort_by(|a, b| b.0.cmp(&a.0)); - let current_tick_height = self.tick_height; - let mut total = 0; - for (tick_height, stake) in ticks_and_stakes.iter() { - if ((current_tick_height - tick_height) as usize) < MAX_ENTRY_IDS { - total += stake; - if total > supermajority_stake { - return self.tick_height_to_timestamp(*tick_height); - } - } - } - None - } - - /// Maps a tick height 
to a timestamp - fn tick_height_to_timestamp(&self, tick_height: u64) -> Option { - for entry in self.entries.values() { - if entry.tick_height == tick_height { - return Some(entry.timestamp); - } - } - None - } - - pub fn get_signature_status(&self, signature: &Signature) -> Option> { - for entry in self.entries.values() { - if let Some(res) = entry.statuses.get(signature) { - return Some(res.clone()); - } - } - None - } - pub fn has_signature(&self, signature: &Signature) -> bool { - self.get_signature_status(signature).is_some() - } - - pub fn get_signature(&self, last_id: &Hash, signature: &Signature) -> Option> { - self.entries - .get(last_id) - .and_then(|entry| entry.statuses.get(signature).cloned()) - } -} -#[cfg(test)] -mod tests { - use super::*; - use bincode::serialize; - use solana_sdk::hash::hash; - #[test] - fn test_duplicate_transaction_signature() { - let sig = Default::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - status_deque.register_tick(&last_id); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &sig), - Ok(()) - ); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &sig), - Err(StatusDequeError::DuplicateSignature) - ); - } - - #[test] - fn test_duplicate_transaction_signature_checkpoint() { - let sig = Default::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - status_deque.register_tick(&last_id); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &sig), - Ok(()) - ); - status_deque.checkpoint(); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &sig), - Err(StatusDequeError::DuplicateSignature) - ); - } - - #[test] - fn test_clear_signatures() { - let signature = Signature::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - status_deque.register_tick(&last_id); - 
status_deque - .reserve_signature_with_last_id(&last_id, &signature) - .unwrap(); - status_deque.clear_signatures(); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &signature), - Ok(()) - ); - } - - #[test] - fn test_clear_signatures_checkpoint() { - let signature = Signature::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - status_deque.register_tick(&last_id); - status_deque - .reserve_signature_with_last_id(&last_id, &signature) - .unwrap(); - status_deque.checkpoint(); - status_deque.clear_signatures(); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &signature), - Ok(()) - ); - } - - #[test] - fn test_get_signature_status() { - let signature = Signature::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - status_deque.register_tick(&last_id); - status_deque - .reserve_signature_with_last_id(&last_id, &signature) - .expect("reserve signature"); - assert_eq!( - status_deque.get_signature_status(&signature), - Some(Status::Reserved) - ); - } - - #[test] - fn test_register_tick() { - let signature = Signature::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &signature), - Err(StatusDequeError::LastIdNotFound) - ); - status_deque.register_tick(&last_id); - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &signature), - Ok(()) - ); - } - - #[test] - fn test_has_signature() { - let signature = Signature::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - status_deque.register_tick(&last_id); - status_deque - .reserve_signature_with_last_id(&last_id, &signature) - .expect("reserve signature"); - assert!(status_deque.has_signature(&signature)); - } - - #[test] - fn 
test_reject_old_last_id() { - let signature = Signature::default(); - let last_id = Default::default(); - let mut status_deque: StatusDeque<()> = StatusDeque::default(); - for i in 0..MAX_ENTRY_IDS { - let last_id = hash(&serialize(&i).unwrap()); // Unique hash - status_deque.register_tick(&last_id); - } - // Assert we're no longer able to use the oldest entry ID. - assert_eq!( - status_deque.reserve_signature_with_last_id(&last_id, &signature), - Err(StatusDequeError::LastIdNotFound) - ); - } -} diff --git a/src/tvu.rs b/src/tvu.rs index 75b52963a77e18..559d2df69c719b 100644 --- a/src/tvu.rs +++ b/src/tvu.rs @@ -300,7 +300,8 @@ pub mod tests { for i in 0..num_transfers { let entry0 = Entry::new(&cur_hash, 0, i, vec![]); cur_hash = entry0.id; - bank.register_tick(&cur_hash); + //TODO: fix these + bank.tpu_register_tick(&cur_hash); let entry_tick0 = Entry::new(&cur_hash, 0, i + 1, vec![]); cur_hash = entry_tick0.id; @@ -310,11 +311,11 @@ pub mod tests { transfer_amount, cur_hash, ); - bank.register_tick(&cur_hash); + bank.tpu_register_tick(&cur_hash); let entry_tick1 = Entry::new(&cur_hash, 0, i + 1, vec![]); cur_hash = entry_tick1.id; let entry1 = Entry::new(&cur_hash, 0, i + num_transfers, vec![tx0]); - bank.register_tick(&entry1.id); + bank.tpu_register_tick(&entry1.id); let entry_tick2 = Entry::new(&entry1.id, 0, i + 1, vec![]); cur_hash = entry_tick2.id; diff --git a/tests/programs.rs b/tests/programs.rs index 036272cd2c1d86..1dfbcda42afa58 100644 --- a/tests/programs.rs +++ b/tests/programs.rs @@ -3,7 +3,6 @@ use solana_native_loader; use solana::bank::Bank; use solana::mint::Mint; -use solana::status_deque::Status; #[cfg(feature = "bpf_c")] use solana_sdk::bpf_loader; use solana_sdk::loader_transaction::LoaderTransaction; @@ -39,10 +38,7 @@ fn create_bpf_path(name: &str) -> PathBuf { fn check_tx_results(bank: &Bank, tx: &Transaction, result: Vec>) { assert_eq!(result.len(), 1); assert_eq!(result[0], Ok(())); - assert_eq!( - bank.get_signature(&tx.last_id, 
&tx.signatures[0]), - Some(Status::Complete(Ok(()))) - ); + assert_eq!(bank.get_signature_status(&tx.signatures[0]), Some(Ok(()))); } struct Loader { From c0bc83b063f179ff3f4f7af86810f76360bd09ff Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Thu, 24 Jan 2019 17:35:39 -0800 Subject: [PATCH 02/14] rebasing --- src/bank_state.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bank_state.rs b/src/bank_state.rs index ffeea45cd4cbde..9ce8c999b3e554 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -1,6 +1,7 @@ use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders}; use crate::bank::{BankError, Result}; use crate::counter::Counter; +use solana_sdk::native_program::ProgramError; use crate::entry::Entry; use crate::last_id_queue::{LastIdQueue, MAX_ENTRY_IDS}; use crate::leader_scheduler::TICKS_PER_BLOCK; @@ -498,7 +499,7 @@ impl BankState { locks.to_vec(), MAX_ENTRY_IDS, ); - let results = Bank::ignore_program_errors(old_results); + let results = BankState::ignore_program_errors(results); head.unlock_accounts(&e.transactions, &results); BankCheckpoint::first_err(&results) }) From 25ad5d1b945d2373c066959f12e45e74a772113c Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Thu, 24 Jan 2019 17:59:15 -0800 Subject: [PATCH 03/14] make lock/unlock more balanced and fix the record exit --- src/bank.rs | 3 +- src/bank_state.rs | 78 ++++++++++++++++++++------------------------- src/replay_stage.rs | 14 ++++---- 3 files changed, 42 insertions(+), 53 deletions(-) diff --git a/src/bank.rs b/src/bank.rs index d928da932a6cd3..c279e1cfc73fe2 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -401,8 +401,7 @@ impl Bank { .ok_or(BankError::UnknownFork)?; state.par_process_entries(entries) } - /// Process an ordered list of entries, populating a circular buffer "tail" - /// as we go. + /// Process an ordered list of entries /// Append entry blocks to the ledger, verifying them along the way. 
fn process_ledger_blocks( &self, diff --git a/src/bank_state.rs b/src/bank_state.rs index 9ce8c999b3e554..8f413f228fcd63 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -1,7 +1,6 @@ use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders}; use crate::bank::{BankError, Result}; use crate::counter::Counter; -use solana_sdk::native_program::ProgramError; use crate::entry::Entry; use crate::last_id_queue::{LastIdQueue, MAX_ENTRY_IDS}; use crate::leader_scheduler::TICKS_PER_BLOCK; @@ -141,19 +140,19 @@ impl BankCheckpoint { &self, txs: &[Transaction], max_age: usize, - results: Vec>, + results: &[Result<()>], error_counters: &mut ErrorCounters, ) -> Vec> { let entry_q = self.entry_q.read().unwrap(); txs.iter() - .zip(results.into_iter()) + .zip(results.iter()) .map(|etx| match etx { (tx, Ok(())) if entry_q.check_entry_id_age(tx.last_id, max_age) => Ok(()), (_, Ok(())) => { error_counters.last_id_too_old += 1; Err(BankError::LastIdNotFound) } - (_, Err(e)) => Err(e), + (_, Err(e)) => Err(e.clone()), }) .collect() } @@ -283,7 +282,7 @@ impl BankState { fn load_and_execute_transactions( &self, txs: &[Transaction], - lock_results: Vec>, + lock_results: &[Result<()>], max_age: usize, ) -> ( Vec>, @@ -405,17 +404,20 @@ impl BankState { /// Process a batch of transactions. 
#[must_use] - pub fn load_execute_and_commit_transactions( + pub fn load_execute_record_commit( &self, txs: &[Transaction], - lock_results: Vec>, + recorder: Option<&PohRecorder>, + lock_results: &[Result<()>], max_age: usize, - ) -> Vec> { + ) -> Result>> { let (loaded_accounts, executed) = self.load_and_execute_transactions(txs, lock_results, max_age); - + if let Some(poh) = recorder { + Self::record_transactions(txs, &executed, poh)?; + } self.commit_transactions(txs, &loaded_accounts, &executed); - executed + Ok(executed) } pub fn process_and_record_transactions( @@ -435,38 +437,26 @@ impl BankState { // the likelihood of any single thread getting starved and processing old ids. // TODO: Banking stage threads should be prioritized to complete faster then this queue // expires. - let (loaded_accounts, results) = - self.load_and_execute_transactions(txs, lock_results, MAX_ENTRY_IDS as usize / 2); - let load_execute_time = now.elapsed(); - - let record_time = { - let now = Instant::now(); - if let Some(poh) = recorder { - Self::record_transactions(txs, &results, poh)?; - } - now.elapsed() - }; - - let commit_time = { - let now = Instant::now(); - self.commit_transactions(txs, &loaded_accounts, &results); - now.elapsed() - }; + let results = self.load_execute_record_commit( + txs, + recorder, + &lock_results, + MAX_ENTRY_IDS as usize / 2, + ); + let lerc_time = now.elapsed(); let now = Instant::now(); // Once the accounts are new transactions can enter the pipeline to process them - head.unlock_accounts(&txs, &results); + head.unlock_accounts(&txs, &lock_results); let unlock_time = now.elapsed(); debug!( - "lock: {}us load_execute: {}us record: {}us commit: {}us unlock: {}us txs_len: {}", + "lock: {}us LERC: {}us unlock: {}us txs_len: {}", duration_as_us(&lock_time), - duration_as_us(&load_execute_time), - duration_as_us(&record_time), - duration_as_us(&commit_time), + duration_as_us(&lerc_time), duration_as_us(&unlock_time), txs.len(), ); - Ok(results) + results } 
fn ignore_program_errors(results: Vec>) -> Vec> { results @@ -482,7 +472,7 @@ impl BankState { _ => result, }) .collect() - } + } pub fn par_execute_entries(&self, entries: &[(&Entry, Vec>)]) -> Result<()> { let head = &self.checkpoints[0]; inc_new_counter_info!("bank-par_execute_entries-count", entries.len()); @@ -494,13 +484,11 @@ impl BankState { //connect them to the previous fork. We need a way to identify the fork from the //entry itself, or have that information passed through. assert_eq!(e.tick_height / TICKS_PER_BLOCK, head.fork_id()); - let results = self.load_execute_and_commit_transactions( - &e.transactions, - locks.to_vec(), - MAX_ENTRY_IDS, - ); + let results = self + .load_execute_record_commit(&e.transactions, None, locks, MAX_ENTRY_IDS) + .expect("no record failures"); let results = BankState::ignore_program_errors(results); - head.unlock_accounts(&e.transactions, &results); + head.unlock_accounts(&e.transactions, &locks); BankCheckpoint::first_err(&results) }) .collect(); @@ -608,6 +596,7 @@ impl BankState { #[cfg(test)] mod test { use super::*; + use solana_sdk::native_program::ProgramError; use solana_sdk::signature::Keypair; use solana_sdk::signature::KeypairUtil; use solana_sdk::system_program; @@ -665,8 +654,9 @@ mod test { let locked_alice = bank.head().lock_accounts(&pay_alice); assert!(locked_alice[0].is_ok()); - let results_alice = - bank.load_execute_and_commit_transactions(&pay_alice, locked_alice, MAX_ENTRY_IDS); + let results_alice = bank + .load_execute_record_commit(&pay_alice, None, &locked_alice, MAX_ENTRY_IDS) + .unwrap(); assert_eq!(results_alice[0], Ok(())); // try executing an interleaved transfer twice @@ -681,7 +671,7 @@ mod test { Err(BankError::AccountInUse) ); - bank.head().unlock_accounts(&pay_alice, &results_alice); + bank.head().unlock_accounts(&pay_alice, &locked_alice); assert_matches!(transfer(&bank, 2, &mint, bob.pubkey(), last_id), Ok(_)); } @@ -707,7 +697,7 @@ mod test { let updated_results = 
BankState::ignore_program_errors(results); assert_ne!(updated_results, expected_results); } - + //#[test] //fn test_bank_record_transactions() { // let mint = Mint::new(10_000); diff --git a/src/replay_stage.rs b/src/replay_stage.rs index db25e9f8b43969..c6c19f20c10584 100644 --- a/src/replay_stage.rs +++ b/src/replay_stage.rs @@ -190,13 +190,13 @@ impl ReplayStage { // vote(options.last()); if end_block != start_block { - if let Some(signer) = vote_signer { - if let Some(sender) = vote_blob_sender { - signer - .send_validator_vote(bank, &cluster_info, sender) - .unwrap(); - } - } + if let Some(signer) = vote_signer { + if let Some(sender) = vote_blob_sender { + signer + .send_validator_vote(bank, &cluster_info, sender) + .unwrap(); + } + } } let (scheduled_leader, _) = bank .get_current_leader() From d2e41fe1117d39f5fc20d8cdbb8e9c71bc4e8d64 Mon Sep 17 00:00:00 2001 From: Rob Walker Date: Thu, 24 Jan 2019 18:13:53 -0800 Subject: [PATCH 04/14] clippy, format, other nits --- ledger-tool/src/main.rs | 4 ++-- src/accounts.rs | 5 +---- src/bank_state.rs | 6 +++--- src/checkpoints.rs | 2 +- src/forks.rs | 4 ++-- src/last_id_queue.rs | 4 ++-- src/replay_stage.rs | 14 +++++++------- src/status_cache.rs | 9 +++------ 8 files changed, 21 insertions(+), 27 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 1e0d8af0724c80..d0ffde38965f1e 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -111,7 +111,7 @@ fn main() { ); exit(1); } - let bank = Bank::new_with_builtin_programs(); + let bank = Bank::default(); { let genesis = entries.by_ref().take(NUM_GENESIS_ENTRIES); if let Err(e) = bank.process_ledger(genesis) { @@ -139,7 +139,7 @@ fn main() { last_id = entry.id; num_entries += 1; - if let Err(e) = bank.process_entry(&entry) { + if let Err(e) = bank.process_entries(&[entry]) { eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e); if !matches.is_present("continue") { exit(1); diff --git a/src/accounts.rs b/src/accounts.rs 
index 257c18d6c42a7f..a41ec5dcf66cc3 100644 --- a/src/accounts.rs +++ b/src/accounts.rs @@ -248,10 +248,7 @@ impl AccountsDB { self.transaction_count } pub fn account_values_slow(&self) -> Vec<(Pubkey, solana_sdk::account::Account)> { - self.accounts - .iter() - .map(|(x, y)| (x.clone(), y.clone())) - .collect() + self.accounts.iter().map(|(x, y)| (*x, y.clone())).collect() } fn merge(&mut self, other: Self) { self.transaction_count += other.transaction_count; diff --git a/src/bank_state.rs b/src/bank_state.rs index 9ce8c999b3e554..76c8d2e6c3cf93 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -1,7 +1,6 @@ use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders}; use crate::bank::{BankError, Result}; use crate::counter::Counter; -use solana_sdk::native_program::ProgramError; use crate::entry::Entry; use crate::last_id_queue::{LastIdQueue, MAX_ENTRY_IDS}; use crate::leader_scheduler::TICKS_PER_BLOCK; @@ -482,7 +481,7 @@ impl BankState { _ => result, }) .collect() - } + } pub fn par_execute_entries(&self, entries: &[(&Entry, Vec>)]) -> Result<()> { let head = &self.checkpoints[0]; inc_new_counter_info!("bank-par_execute_entries-count", entries.len()); @@ -608,6 +607,7 @@ impl BankState { #[cfg(test)] mod test { use super::*; + use solana_sdk::native_program::ProgramError; use solana_sdk::signature::Keypair; use solana_sdk::signature::KeypairUtil; use solana_sdk::system_program; @@ -707,7 +707,7 @@ mod test { let updated_results = BankState::ignore_program_errors(results); assert_ne!(updated_results, expected_results); } - + //#[test] //fn test_bank_record_transactions() { // let mint = Mint::new(10_000); diff --git a/src/checkpoints.rs b/src/checkpoints.rs index f0a832411d12c7..cca8659d2312f1 100644 --- a/src/checkpoints.rs +++ b/src/checkpoints.rs @@ -71,7 +71,7 @@ impl Checkpoints { let (data, prev) = self.load(trunk).expect("load from inverse").clone(); new.store(trunk, data.clone(), prev); if let Some(children) = 
inverse.get(&trunk) { - let mut next = children.into_iter().map(|x| *x).collect(); + let mut next = children.into_iter().cloned().collect(); queue.append(&mut next); } } diff --git a/src/forks.rs b/src/forks.rs index 119d3e3c473bf7..f871289094ec9b 100644 --- a/src/forks.rs +++ b/src/forks.rs @@ -66,8 +66,8 @@ impl Forks { return Err(BankError::InvalidTrunk); } let len = states.len(); - let old_trunk = states[len - 1].clone(); - let new_trunk = states[len - 2].clone(); + let old_trunk = states[len - 1]; + let new_trunk = states[len - 2]; if !new_trunk.1.finalized() { println!("new_trunk id {}", new_trunk.1.fork_id()); return Err(BankError::CheckpointNotFinalized); diff --git a/src/last_id_queue.rs b/src/last_id_queue.rs index 995084d869fa2a..f8b020e4cf40a5 100644 --- a/src/last_id_queue.rs +++ b/src/last_id_queue.rs @@ -135,8 +135,8 @@ impl LastIdQueue { pub fn fork(&self) -> Self { Self { entries: self.entries.clone(), - tick_height: self.tick_height.clone(), - last_id: self.last_id.clone(), + tick_height: self.tick_height, + last_id: self.last_id, } } /// merge for entryq is a swap diff --git a/src/replay_stage.rs b/src/replay_stage.rs index db25e9f8b43969..c6c19f20c10584 100644 --- a/src/replay_stage.rs +++ b/src/replay_stage.rs @@ -190,13 +190,13 @@ impl ReplayStage { // vote(options.last()); if end_block != start_block { - if let Some(signer) = vote_signer { - if let Some(sender) = vote_blob_sender { - signer - .send_validator_vote(bank, &cluster_info, sender) - .unwrap(); - } - } + if let Some(signer) = vote_signer { + if let Some(sender) = vote_blob_sender { + signer + .send_validator_vote(bank, &cluster_info, sender) + .unwrap(); + } + } } let (scheduled_leader, _) = bank .get_current_leader() diff --git a/src/status_cache.rs b/src/status_cache.rs index 236850ce749a53..b783f8962c71d1 100644 --- a/src/status_cache.rs +++ b/src/status_cache.rs @@ -22,12 +22,9 @@ pub struct StatusCache { impl StatusCache { pub fn new(last_id: &Hash) -> Self { - let keys = 
(0..27) - .into_iter() - .map(|i| last_id.hash_at_index(i)) - .collect(); + let keys = (0..27).map(|i| last_id.hash_at_index(i)).collect(); Self { - signatures: Bloom::new(38340234, keys), + signatures: Bloom::new(38_340_234, keys), failures: HashMap::new(), merges: VecDeque::new(), } @@ -38,7 +35,7 @@ impl StatusCache { return true; } } - return false; + false } /// test if a signature is known pub fn has_signature(&self, sig: &Signature) -> bool { From 0812242963b57201bf4504da19243aa00e7a44df Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 05:54:39 -0800 Subject: [PATCH 05/14] more tests --- src/bank.rs | 36 +++++++++++++++++++++++++++++++++++- src/bank_state.rs | 41 ++--------------------------------------- 2 files changed, 37 insertions(+), 40 deletions(-) diff --git a/src/bank.rs b/src/bank.rs index c279e1cfc73fe2..f084b4bf89fe1a 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -649,7 +649,7 @@ mod tests { use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::transaction::Instruction; use std; - //use std::sync::mpsc::channel; + use std::sync::mpsc::channel; //use tokio::prelude::{Stream, Async}; #[test] @@ -1473,6 +1473,40 @@ mod tests { assert_eq!(account.loader, default_account.loader); } + #[test] + fn test_bank_record_transactions() { + let mint = Mint::new(10_000); + let bank = Arc::new(Bank::new(&mint)); + let (entry_sender, entry_receiver) = channel(); + let poh_recorder = PohRecorder::new(bank.clone(), entry_sender, bank.last_id(), None); + let pubkey = Keypair::new().pubkey(); + + let transactions = vec![ + Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), + Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), + ]; + + let mut results = vec![Ok(()), Ok(())]; + BankState::record_transactions(&transactions, &results, &poh_recorder).unwrap(); + let entries = entry_receiver.recv().unwrap(); + assert_eq!(entries[0].transactions.len(), transactions.len()); + + // ProgramErrors 
should still be recorded + results[0] = Err(BankError::ProgramError( + 1, + ProgramError::ResultWithNegativeTokens, + )); + BankState::record_transactions(&transactions, &results, &poh_recorder).unwrap(); + let entries = entry_receiver.recv().unwrap(); + assert_eq!(entries[0].transactions.len(), transactions.len()); + + // Other BankErrors should not be recorded + results[0] = Err(BankError::AccountNotFound); + BankState::record_transactions(&transactions, &results, &poh_recorder).unwrap(); + let entries = entry_receiver.recv().unwrap(); + assert_eq!(entries[0].transactions.len(), transactions.len() - 1); + } + // #[test] // fn test_bank_storage() { // solana_logger::setup(); diff --git a/src/bank_state.rs b/src/bank_state.rs index 8f413f228fcd63..c1d5e69c272594 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -544,7 +544,7 @@ impl BankState { Ok(()) } - fn record_transactions( + pub fn record_transactions( txs: &[Transaction], results: &[Result<()>], poh: &PohRecorder, @@ -553,7 +553,7 @@ impl BankState { .iter() .zip(txs.iter()) .filter_map(|(r, x)| match r { - Ok(_) => Some(x.clone()), + Ok(_) | Err(BankError::ProgramError(_, _)) => Some(x.clone()), Err(ref e) => { debug!("process transaction failed {:?}", e); None @@ -698,43 +698,6 @@ mod test { assert_ne!(updated_results, expected_results); } - //#[test] - //fn test_bank_record_transactions() { - // let mint = Mint::new(10_000); - // let bank = Arc::new(Bank::new(&mint)); - // let (entry_sender, entry_receiver) = channel(); - // let poh_recorder = PohRecorder::new(bank.clone(), entry_sender, bank.last_id(), None); - // let pubkey = Keypair::new().pubkey(); - - // let transactions = vec![ - // Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), - // Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), - // ]; - - // let mut results = vec![Ok(()), Ok(())]; - // BankStater::record_transactions(&transactions, &results, &poh_recorder) - // .unwrap(); - // let 
entries = entry_receiver.recv().unwrap(); - // assert_eq!(entries[0].transactions.len(), transactions.len()); - - // // ProgramErrors should still be recorded - // results[0] = Err(BankError::ProgramError( - // 1, - // ProgramError::ResultWithNegativeTokens, - // )); - // BankState::record_transactions(&transactions, &results, &poh_recorder) - // .unwrap(); - // let entries = entry_receiver.recv().unwrap(); - // assert_eq!(entries[0].transactions.len(), transactions.len()); - - // // Other BankErrors should not be recorded - // results[0] = Err(BankError::AccountNotFound); - // BankState::record_transactions(&transactions, &results, &poh_recorder) - // .unwrap(); - // let entries = entry_receiver.recv().unwrap(); - // assert_eq!(entries[0].transactions.len(), transactions.len() - 1); - //} - // // #[test] // fn test_bank_process_and_record_transactions() { // let mint = Mint::new(10_000); From 719f3aae588b5f7e9e9de09125eb832133df71f1 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 05:57:53 -0800 Subject: [PATCH 06/14] update --- src/bank.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++++ src/bank_state.rs | 56 ----------------------------------------------- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/src/bank.rs b/src/bank.rs index f084b4bf89fe1a..256364d77c37f9 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -1507,6 +1507,62 @@ mod tests { assert_eq!(entries[0].transactions.len(), transactions.len() - 1); } + #[test] + fn test_bank_process_and_record_transactions() { + let mint = Mint::new(10_000); + let bank = Arc::new(Bank::new(&mint)); + let pubkey = Keypair::new().pubkey(); + + let transactions = vec![Transaction::system_move( + &mint.keypair(), + pubkey, + 1, + mint.last_id(), + 0, + )]; + + let (entry_sender, entry_receiver) = channel(); + let mut poh_recorder = PohRecorder::new( + bank.clone(), + entry_sender, + bank.last_id(), + Some(bank.tick_height() + 1), + ); + + 
bank.process_and_record_transactions(&transactions, Some(&poh_recorder)) + .unwrap(); + poh_recorder.tick().unwrap(); + + let mut need_tick = true; + // read entries until I find mine, might be ticks... + while need_tick { + let entries = entry_receiver.recv().unwrap(); + for entry in entries { + if !entry.is_tick() { + assert_eq!(entry.transactions.len(), transactions.len()); + assert_eq!(bank.get_balance(&pubkey), 1); + } else { + need_tick = false; + } + } + } + + let transactions = vec![Transaction::system_move( + &mint.keypair(), + pubkey, + 2, + mint.last_id(), + 0, + )]; + + assert_eq!( + bank.process_and_record_transactions(&transactions, Some(&poh_recorder)), + Err(BankError::RecordFailure) + ); + + assert_eq!(bank.get_balance(&pubkey), 1); + } + // #[test] // fn test_bank_storage() { // solana_logger::setup(); diff --git a/src/bank_state.rs b/src/bank_state.rs index c1d5e69c272594..1e27b394ddbf63 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -698,60 +698,4 @@ mod test { assert_ne!(updated_results, expected_results); } - // #[test] - // fn test_bank_process_and_record_transactions() { - // let mint = Mint::new(10_000); - // let bank = Arc::new(Bank::new(&mint)); - // let pubkey = Keypair::new().pubkey(); - - // let transactions = vec![Transaction::system_move( - // &mint.keypair(), - // pubkey, - // 1, - // mint.last_id(), - // 0, - // )]; - - // let (entry_sender, entry_receiver) = channel(); - // let mut poh_recorder = PohRecorder::new( - // bank.clone(), - // entry_sender, - // bank.last_id(), - // Some(bank.tick_height() + 1), - // ); - - // bank.process_and_record_transactions(&transactions, &poh_recorder) - // .unwrap(); - // poh_recorder.tick().unwrap(); - - // let mut need_tick = true; - // // read entries until I find mine, might be ticks... 
- // while need_tick { - // let entries = entry_receiver.recv().unwrap(); - // for entry in entries { - // if !entry.is_tick() { - // assert_eq!(entry.transactions.len(), transactions.len()); - // assert_eq!(bank.get_balance(&pubkey), 1); - // } else { - // need_tick = false; - // } - // } - // } - - // let transactions = vec![Transaction::system_move( - // &mint.keypair(), - // pubkey, - // 2, - // mint.last_id(), - // 0, - // )]; - - // assert_eq!( - // bank.process_and_record_transactions(&transactions, &poh_recorder), - // Err(BankError::RecordFailure) - // ); - - // assert_eq!(bank.get_balance(&pubkey), 1); - // } - } From f17879dfc4f65faa1b411bc10c60e93753587dd8 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 06:44:40 -0800 Subject: [PATCH 07/14] cleanup --- Cargo.toml | 2 +- src/accounts.rs | 4 +- src/bank.rs | 102 +++++++------- src/bank_state.rs | 12 +- src/compute_leader_confirmation_service.rs | 4 +- src/forks.rs | 152 ++++++++++----------- src/last_id_queue.rs | 4 +- src/leader_scheduler.rs | 2 +- src/status_cache.rs | 18 +-- 9 files changed, 154 insertions(+), 146 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 330e8b45f6b50b..e25daf03417fee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,11 +29,11 @@ bs58 = "0.2.0" bv = { version = "0.11.0", features = ["serde"] } byteorder = "1.3.1" chrono = { version = "0.4.0", features = ["serde"] } +fnv = "1.0.6" hashbrown = "0.1.8" indexmap = "1.0" itertools = "0.8.0" libc = "0.2.48" -fnv = "1.0.6" log = "0.4.2" nix = "0.13.0" rand = "0.6.4" diff --git a/src/accounts.rs b/src/accounts.rs index 257c18d6c42a7f..2fc868469de42c 100644 --- a/src/accounts.rs +++ b/src/accounts.rs @@ -380,8 +380,8 @@ impl Accounts { self.accounts_db.read().unwrap().transaction_count() } /// accounts starts with an empty data structure for every fork - /// self is trunk, merge the fork into self - pub fn merge_into_trunk(&self, other: Self) { + /// self is root, merge the fork into self + pub fn 
merge_into_root(&self, other: Self) { assert!(other.account_locks.lock().unwrap().is_empty()); let db = other.accounts_db.into_inner().unwrap(); let mut mydb = self.accounts_db.write().unwrap(); diff --git a/src/bank.rs b/src/bank.rs index 256364d77c37f9..4219cc4859c635 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -131,20 +131,22 @@ impl Default for Bank { impl Bank { /// Create an Bank using a deposit. - fn new_from_deposits(deposits: &[Payment], trunk: u64, last_id: &Hash) -> Self { + fn new_from_deposits(deposits: &[Payment], root: u64, last_id: &Hash) -> Self { let bank = Self::default(); let accounts: Vec<_> = deposits .iter() .map(|deposit| { - let mut account = Account::default(); - account.tokens += deposit.tokens; + let account = Account::new(deposit.tokens, 0, Default::default()); (deposit.to, account) }) .collect(); - let bank_checkpoint = BankCheckpoint::new_from_accounts(trunk, &accounts, last_id); + let bank_checkpoint = BankCheckpoint::new_from_accounts(root, &accounts, last_id); bank_checkpoint.register_tick(last_id); - bank.forks.write().unwrap().init_trunk_fork(bank_checkpoint); + bank.forks + .write() + .unwrap() + .init_root_bank_state(bank_checkpoint); bank.add_builtin_programs(); bank } @@ -188,8 +190,8 @@ impl Bank { .unwrap() .init_fork(current, last_id, base) } - fn live_fork(&self) -> BankState { - self.forks.read().unwrap().live_fork() + fn live_bank_state(&self) -> BankState { + self.forks.read().unwrap().live_bank_state() } fn add_system_program(&self) { @@ -200,9 +202,11 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.live_fork() - .head() - .store_slow(false, &system_program::id(), &system_program_account); + self.live_bank_state().head().store_slow( + false, + &system_program::id(), + &system_program_account, + ); } fn add_builtin_programs(&self) { @@ -216,7 +220,7 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.live_fork() + self.live_bank_state() .head() 
.store_slow(false, &vote_program::id(), &vote_program_account); @@ -228,9 +232,11 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.live_fork() - .head() - .store_slow(false, &storage_program::id(), &storage_program_account); + self.live_bank_state().head().store_slow( + false, + &storage_program::id(), + &storage_program_account, + ); let storage_system_account = Account { tokens: 1, @@ -239,7 +245,7 @@ impl Bank { executable: false, loader: Pubkey::default(), }; - self.live_fork().head().store_slow( + self.live_bank_state().head().store_slow( false, &storage_program::system_id(), &storage_system_account, @@ -254,7 +260,7 @@ impl Bank { loader: solana_native_loader::id(), }; - self.live_fork() + self.live_bank_state() .head() .store_slow(false, &bpf_loader::id(), &bpf_loader_account); @@ -266,9 +272,11 @@ impl Bank { executable: true, loader: solana_native_loader::id(), }; - self.live_fork() - .head() - .store_slow(false, &budget_program::id(), &budget_program_account); + self.live_bank_state().head().store_slow( + false, + &budget_program::id(), + &budget_program_account, + ); // Erc20 token program let erc20_account = Account { @@ -279,18 +287,18 @@ impl Bank { loader: solana_native_loader::id(), }; - self.live_fork() + self.live_bank_state() .head() .store_slow(false, &token_program::id(), &erc20_account); } pub fn tpu_register_tick(&self, last_id: &Hash) { - self.live_fork().head().register_tick(last_id) + self.live_bank_state().head().register_tick(last_id) } /// Return the last entry ID registered. 
pub fn last_id(&self) -> Hash { - self.live_fork().head().last_id() + self.live_bank_state().head().last_id() } pub fn get_pubkeys_for_entry_height(&self, entry_height: u64) -> Vec { @@ -331,7 +339,7 @@ impl Bank { /// Return a vec of tuple of (valid index, timestamp) /// index is into the passed ids slice to avoid copying hashes pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> { - self.live_fork().head().count_valid_ids(ids) + self.live_bank_state().head().count_valid_ids(ids) } /// Looks through a list of tick heights and stakes, and finds the latest @@ -341,12 +349,12 @@ impl Bank { ticks_and_stakes: &mut [(u64, u64)], supermajority_stake: u64, ) -> Option { - self.live_fork() + self.live_bank_state() .head() .get_confirmation_timestamp(ticks_and_stakes, supermajority_stake) } pub fn tick_height(&self) -> u64 { - self.live_fork().head().tick_height() + self.live_bank_state().head().tick_height() } /// Process a Transaction. This is used for unit tests and simply calls the vector Bank::process_transactions method. @@ -361,8 +369,8 @@ impl Bank { } } - pub fn trunk_fork(&self) -> BankState { - self.forks.read().unwrap().trunk_fork() + pub fn root_bank_state(&self) -> BankState { + self.forks.read().unwrap().root_bank_state() } pub fn bank_state(&self, fork: u64) -> Option { @@ -375,7 +383,7 @@ impl Bank { txs: &[Transaction], poh: Option<&PohRecorder>, ) -> Result<(Vec>)> { - let state = self.forks.read().unwrap().live_fork(); + let state = self.forks.read().unwrap().live_bank_state(); //TODO: pass the pubsub to process_and_record state.process_and_record_transactions(txs, poh) } @@ -387,7 +395,7 @@ impl Bank { /// Process an ordered list of entries. 
pub fn process_entries(&self, entries: &[Entry]) -> Result<()> { - let state = self.forks.read().unwrap().live_fork(); + let state = self.forks.read().unwrap().live_bank_state(); state.par_process_entries(entries) } @@ -434,7 +442,7 @@ impl Bank { if !self.forks.read().unwrap().is_active_fork(current) { let new = current; - let base = self.forks.read().unwrap().live_fork; + let base = self.forks.read().unwrap().live_bank_state; // create a new fork self.init_fork(new, &block[0].id, base) .expect("initializing fork for replay"); @@ -442,8 +450,8 @@ impl Bank { self.forks .write() .unwrap() - .merge_into_trunk(base, new) - .expect("merge into trunk"); + .merge_into_root(base, new) + .expect("merge into root"); } else { // only the first fork should be active at the start of the loop // every block should be unique otherwise @@ -474,7 +482,7 @@ impl Bank { self.forks .write() .unwrap() - .init_trunk_fork(BankCheckpoint::new(0, &entry0.id)); + .init_root_bank_state(BankCheckpoint::new(0, &entry0.id)); self.add_builtin_programs(); @@ -521,7 +529,7 @@ impl Bank { // 1) Deposit into the mint let mut account = self.get_account(&tx.account_keys[0]).unwrap_or_default(); account.tokens += mint_deposit - leader_payment; - self.live_fork() + self.live_bank_state() .head() .store_slow(false, &tx.account_keys[0], &account); trace!( @@ -538,7 +546,7 @@ impl Bank { let bootstrap_leader_id = tx.account_keys[2]; let mut account = self.get_account(&bootstrap_leader_id).unwrap_or_default(); account.tokens += leader_payment; - self.live_fork() + self.live_bank_state() .head() .store_slow(false, &bootstrap_leader_id, &account); @@ -587,32 +595,32 @@ impl Bank { } pub fn get_account(&self, pubkey: &Pubkey) -> Option { - let state = self.live_fork(); + let state = self.live_bank_state(); state.load_slow(pubkey) } pub fn store_slow(&self, pubkey: &Pubkey, account: &Account) { - let state = self.live_fork(); + let state = self.live_bank_state(); let purge = state.checkpoints.len() == 1; 
state.head().store_slow(purge, pubkey, account) } pub fn transaction_count(&self) -> u64 { - self.live_fork().head().transaction_count() + self.live_bank_state().head().transaction_count() } pub fn get_signature_status(&self, signature: &Signature) -> Option> { - self.live_fork().get_signature_status(signature) + self.live_bank_state().get_signature_status(signature) } pub fn has_signature(&self, signature: &Signature) -> bool { - self.live_fork().has_signature(signature) + self.live_bank_state().has_signature(signature) } /// Hash the `accounts` HashMap. This represents a validator's interpretation /// of the delta of the ledger since the last vote and up to now pub fn hash_internal_state(&self) -> Hash { //TODO: this probably needs to iterate the checkpoints and update a merkle - self.live_fork().head().hash_internal_state() + self.live_bank_state().head().hash_internal_state() } pub fn confirmation_time(&self) -> usize { @@ -1442,7 +1450,7 @@ mod tests { bank.transfer(500, &alice.keypair(), bob.pubkey(), alice.last_id()) .unwrap(); assert_eq!(bank.get_balance(&bob.pubkey()), 500); - assert_eq!(bank.live_fork().checkpoint_depth(), 1); + assert_eq!(bank.live_bank_state().checkpoint_depth(), 1); let account = bank.get_account(&alice.pubkey()).unwrap(); let default_account = Account::default(); @@ -1451,13 +1459,13 @@ mod tests { assert_eq!(account.executable, default_account.executable); assert_eq!(account.loader, default_account.loader); - let base = bank.live_fork().head().fork_id(); + let base = bank.live_bank_state().head().fork_id(); let last_id = hash(alice.last_id().as_ref()); - bank.live_fork().head().finalize(); + bank.live_bank_state().head().finalize(); assert_eq!(bank.init_fork(base + 1, &alice.last_id(), base), Ok(())); - assert_eq!(bank.live_fork().head().fork_id(), base + 1); - bank.live_fork().head().register_tick(&last_id); - assert_eq!(bank.live_fork().checkpoint_depth(), 2); + assert_eq!(bank.live_bank_state().head().fork_id(), base + 1); + 
bank.live_bank_state().head().register_tick(&last_id); + assert_eq!(bank.live_bank_state().checkpoint_depth(), 2); // charlie should have 500, alice should have 0 bank.transfer(500, &alice.keypair(), charlie.pubkey(), alice.last_id()) diff --git a/src/bank_state.rs b/src/bank_state.rs index 1e27b394ddbf63..4e2182029e2255 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -208,9 +208,9 @@ impl BankCheckpoint { fork_id: AtomicUsize::new(fork_id as usize), } } - /// consume the checkpoint into the trunk state - /// self becomes the new trunk and its fork_id is updated - pub fn merge_into_trunk(&self, other: Self) { + /// consume the checkpoint into the root state + /// self becomes the new root and its fork_id is updated + pub fn merge_into_root(&self, other: Self) { let (accounts, entry_q, status_cache, fork_id) = { ( other.accounts, @@ -219,15 +219,15 @@ impl BankCheckpoint { other.fork_id, ) }; - self.accounts.merge_into_trunk(accounts); + self.accounts.merge_into_root(accounts); self.entry_q .write() .unwrap() - .merge_into_trunk(entry_q.into_inner().unwrap()); + .merge_into_root(entry_q.into_inner().unwrap()); self.status_cache .write() .unwrap() - .merge_into_trunk(status_cache.into_inner().unwrap()); + .merge_into_root(status_cache.into_inner().unwrap()); self.fork_id .store(fork_id.load(Ordering::Relaxed), Ordering::Relaxed); } diff --git a/src/compute_leader_confirmation_service.rs b/src/compute_leader_confirmation_service.rs index 084b5dd8b30d14..7bf25be7de3c2e 100644 --- a/src/compute_leader_confirmation_service.rs +++ b/src/compute_leader_confirmation_service.rs @@ -38,8 +38,8 @@ impl ComputeLeaderConfirmationService { // Hold an accounts_db read lock as briefly as possible, just long enough to collect all // the vote states - // TODO: do we use trunk or live fork here? - let state = bank.trunk_fork(); + // TODO: do we use root or live fork here? 
+ let state = bank.root_bank_state(); let accounts = state.head().accounts.accounts_db.read().unwrap(); let vote_states: Vec = accounts .accounts diff --git a/src/forks.rs b/src/forks.rs index 119d3e3c473bf7..3a70c2b8462db4 100644 --- a/src/forks.rs +++ b/src/forks.rs @@ -15,18 +15,18 @@ pub struct Forks { /// Last fork to be initialized /// This should be the last fork to be replayed or the TPU fork - pub live_fork: u64, + pub live_bank_state: u64, - /// Fork that is trunk - pub trunk_fork: u64, + /// Fork that is root + pub root_bank_state: u64, } impl Forks { - pub fn live_fork(&self) -> BankState { - self.bank_state(self.live_fork).expect("live fork") + pub fn live_bank_state(&self) -> BankState { + self.bank_state(self.live_bank_state).expect("live fork") } - pub fn trunk_fork(&self) -> BankState { - self.bank_state(self.trunk_fork).expect("trunk fork") + pub fn root_bank_state(&self) -> BankState { + self.bank_state(self.root_bank_state).expect("root fork") } pub fn bank_state(&self, fork: u64) -> Option { @@ -44,82 +44,82 @@ impl Forks { } } /// Collapse the bottom two checkpoints. - /// The tree is computed from the `leaf` to the `trunk` + /// The tree is computed from the `leaf` to the `root` /// The leaf is the last possible fork, it should have no descendants. - /// The expected oldest fork must be the trunk - /// The direct child of the trunk that leads the leaf becomes the new trunk. - /// The forks that are not a decendant of the new trunk -> leaf path are pruned. - /// live_fork is the leaf. - /// trunk_fork is the new trunk. - /// Return the new trunk id. - pub fn merge_into_trunk(&mut self, trunk: u64, leaf: u64) -> Result { - // `old` trunk, should have `trunk` as its fork_id - // `new` trunk is a direct decendant of old and has new_trunk_id as its fork_id + /// The expected oldest fork must be the root + /// The direct child of the root that leads the leaf becomes the new root. 
+ /// The forks that are not a decendant of the new root -> leaf path are pruned. + /// live_bank_state is the leaf. + /// root_bank_state is the new root. + /// Return the new root id. + pub fn merge_into_root(&mut self, root: u64, leaf: u64) -> Result { + // `old` root, should have `root` as its fork_id + // `new` root is a direct decendant of old and has new_root_id as its fork_id // new is merged into old - // and old is swapped into the checkpoint under new_trunk_id - let (old_trunk, new_trunk, new_trunk_id) = { + // and old is swapped into the checkpoint under new_root_id + let (old_root, new_root, new_root_id) = { let states = self.checkpoints.collect(ROLLBACK_DEPTH + 1, leaf); let leaf_id = states.first().map(|x| x.0).ok_or(BankError::UnknownFork)?; assert_eq!(leaf_id, leaf); - let trunk_id = states.last().map(|x| x.0).ok_or(BankError::UnknownFork)?; - if trunk_id != trunk { + let root_id = states.last().map(|x| x.0).ok_or(BankError::UnknownFork)?; + if root_id != root { return Err(BankError::InvalidTrunk); } let len = states.len(); - let old_trunk = states[len - 1].clone(); - let new_trunk = states[len - 2].clone(); - if !new_trunk.1.finalized() { - println!("new_trunk id {}", new_trunk.1.fork_id()); + let old_root = states[len - 1].clone(); + let new_root = states[len - 2].clone(); + if !new_root.1.finalized() { + println!("new_root id {}", new_root.1.fork_id()); return Err(BankError::CheckpointNotFinalized); } - if !old_trunk.1.finalized() { - println!("old id {}", old_trunk.1.fork_id()); + if !old_root.1.finalized() { + println!("old id {}", old_root.1.fork_id()); return Err(BankError::CheckpointNotFinalized); } //stupid sanity checks - assert_eq!(new_trunk.1.fork_id(), new_trunk.0); - assert_eq!(old_trunk.1.fork_id(), old_trunk.0); - (old_trunk.1.clone(), new_trunk.1.clone(), new_trunk.0) + assert_eq!(new_root.1.fork_id(), new_root.0); + assert_eq!(old_root.1.fork_id(), old_root.0); + (old_root.1.clone(), new_root.1.clone(), new_root.0) }; let idag = 
self.checkpoints.invert(); - let new_checkpoints = self.checkpoints.prune(new_trunk_id, &idag); - let old_trunk_id = old_trunk.fork_id(); + let new_checkpoints = self.checkpoints.prune(new_root_id, &idag); + let old_root_id = old_root.fork_id(); self.checkpoints = new_checkpoints; - self.trunk_fork = new_trunk_id; - self.live_fork = leaf; + self.root_bank_state = new_root_id; + self.live_bank_state = leaf; // old should have been pruned - assert!(self.checkpoints.load(old_trunk_id).is_none()); - // new_trunk id should be in the new tree - assert!(!self.checkpoints.load(new_trunk_id).is_none()); + assert!(self.checkpoints.load(old_root_id).is_none()); + // new_root id should be in the new tree + assert!(!self.checkpoints.load(new_root_id).is_none()); - // swap in the old instance under the new_trunk id - // this should be the last external ref to `new_trunk` + // swap in the old instance under the new_root id + // this should be the last external ref to `new_root` self.checkpoints - .insert(new_trunk_id, old_trunk.clone(), old_trunk_id); + .insert(new_root_id, old_root.clone(), old_root_id); // merge all the new changes into the old instance under the new id // this should consume `new` // new should have no other references - let new_trunk: BankCheckpoint = Arc::try_unwrap(new_trunk).unwrap(); - old_trunk.merge_into_trunk(new_trunk); - assert_eq!(old_trunk.fork_id(), new_trunk_id); - Ok(new_trunk_id) + let new_root: BankCheckpoint = Arc::try_unwrap(new_root).unwrap(); + old_root.merge_into_root(new_root); + assert_eq!(old_root.fork_id(), new_root_id); + Ok(new_root_id) } - /// Initialize the first trunk - pub fn init_trunk_fork(&mut self, checkpoint: BankCheckpoint) { + /// Initialize the first root + pub fn init_root_bank_state(&mut self, checkpoint: BankCheckpoint) { assert!(self.checkpoints.is_empty()); - self.live_fork = checkpoint.fork_id(); - self.trunk_fork = checkpoint.fork_id(); + self.live_bank_state = checkpoint.fork_id(); + self.root_bank_state = 
checkpoint.fork_id(); //TODO: using u64::MAX as the impossible checkpoint //this should be a None instead self.checkpoints - .store(self.live_fork, Arc::new(checkpoint), std::u64::MAX); + .store(self.live_bank_state, Arc::new(checkpoint), std::u64::MAX); } pub fn is_active_fork(&self, fork: u64) -> bool { if let Some(state) = self.checkpoints.load(fork) { - !state.0.finalized() && self.live_fork == fork + !state.0.finalized() && self.live_bank_state == fork } else { false } @@ -132,7 +132,7 @@ impl Forks { } let new = state.0.fork(current, last_id); self.checkpoints.store(current, Arc::new(new), base); - self.live_fork = current; + self.live_bank_state = current; Ok(()) } else { return Err(BankError::UnknownFork); @@ -147,14 +147,14 @@ mod tests { use solana_sdk::hash::hash; #[test] - fn forks_init_trunk() { + fn forks_init_root() { let mut forks = Forks::default(); let cp = BankCheckpoint::new(0, &Hash::default()); - forks.init_trunk_fork(cp); + forks.init_root_bank_state(cp); assert!(forks.is_active_fork(0)); - assert_eq!(forks.trunk_fork().checkpoints.len(), 1); - assert_eq!(forks.trunk_fork().head().fork_id(), 0); - assert_eq!(forks.live_fork().head().fork_id(), 0); + assert_eq!(forks.root_bank_state().checkpoints.len(), 1); + assert_eq!(forks.root_bank_state().head().fork_id(), 0); + assert_eq!(forks.live_bank_state().head().fork_id(), 0); } #[test] @@ -163,19 +163,19 @@ mod tests { let last_id = Hash::default(); let cp = BankCheckpoint::new(0, &last_id); cp.register_tick(&last_id); - forks.init_trunk_fork(cp); + forks.init_root_bank_state(cp); let last_id = hash(last_id.as_ref()); assert_eq!(forks.init_fork(1, &last_id, 1), Err(BankError::UnknownFork)); assert_eq!( forks.init_fork(1, &last_id, 0), Err(BankError::CheckpointNotFinalized) ); - forks.trunk_fork().head().finalize(); + forks.root_bank_state().head().finalize(); assert_eq!(forks.init_fork(1, &last_id, 0), Ok(())); - assert_eq!(forks.trunk_fork().head().fork_id(), 0); - 
assert_eq!(forks.live_fork().head().fork_id(), 1); - assert_eq!(forks.live_fork().checkpoints.len(), 2); + assert_eq!(forks.root_bank_state().head().fork_id(), 0); + assert_eq!(forks.live_bank_state().head().fork_id(), 1); + assert_eq!(forks.live_bank_state().checkpoints.len(), 2); } #[test] @@ -184,17 +184,17 @@ mod tests { let last_id = Hash::default(); let cp = BankCheckpoint::new(0, &last_id); cp.register_tick(&last_id); - forks.init_trunk_fork(cp); + forks.init_root_bank_state(cp); let last_id = hash(last_id.as_ref()); - forks.trunk_fork().head().finalize(); + forks.root_bank_state().head().finalize(); assert_eq!(forks.init_fork(1, &last_id, 0), Ok(())); - forks.live_fork().head().register_tick(&last_id); - forks.live_fork().head().finalize(); - assert_eq!(forks.merge_into_trunk(0, 1), Ok(1)); + forks.live_bank_state().head().register_tick(&last_id); + forks.live_bank_state().head().finalize(); + assert_eq!(forks.merge_into_root(0, 1), Ok(1)); - assert_eq!(forks.live_fork().checkpoints.len(), 1); - assert_eq!(forks.trunk_fork().head().fork_id(), 1); - assert_eq!(forks.live_fork().head().fork_id(), 1); + assert_eq!(forks.live_bank_state().checkpoints.len(), 1); + assert_eq!(forks.root_bank_state().head().fork_id(), 1); + assert_eq!(forks.live_bank_state().head().fork_id(), 1); } #[test] fn forks_merge_prune() { @@ -202,9 +202,9 @@ mod tests { let last_id = Hash::default(); let cp = BankCheckpoint::new(0, &last_id); cp.register_tick(&last_id); - forks.init_trunk_fork(cp); + forks.init_root_bank_state(cp); let last_id = hash(last_id.as_ref()); - forks.trunk_fork().head().finalize(); + forks.root_bank_state().head().finalize(); assert_eq!(forks.init_fork(1, &last_id, 0), Ok(())); assert_eq!(forks.bank_state(1).unwrap().checkpoints.len(), 2); forks.bank_state(1).unwrap().head().register_tick(&last_id); @@ -217,14 +217,14 @@ mod tests { forks.bank_state(2).unwrap().head().register_tick(&last_id); forks.bank_state(1).unwrap().head().finalize(); - // fork 1 is the new 
trunk, only forks that are descendant from 1 are valid - assert_eq!(forks.merge_into_trunk(0, 1), Ok(1)); + // fork 1 is the new root, only forks that are descendant from 1 are valid + assert_eq!(forks.merge_into_root(0, 1), Ok(1)); // fork 2 is gone since it does not connect to 1 assert!(forks.bank_state(2).is_none()); - assert_eq!(forks.live_fork().checkpoints.len(), 1); - assert_eq!(forks.trunk_fork().head().fork_id(), 1); - assert_eq!(forks.live_fork().head().fork_id(), 1); + assert_eq!(forks.live_bank_state().checkpoints.len(), 1); + assert_eq!(forks.root_bank_state().head().fork_id(), 1); + assert_eq!(forks.live_bank_state().head().fork_id(), 1); } } diff --git a/src/last_id_queue.rs b/src/last_id_queue.rs index 995084d869fa2a..4553126b39df5c 100644 --- a/src/last_id_queue.rs +++ b/src/last_id_queue.rs @@ -140,7 +140,7 @@ impl LastIdQueue { } } /// merge for entryq is a swap - pub fn merge_into_trunk(&mut self, other: Self) { + pub fn merge_into_root(&mut self, other: Self) { let (entries, tick_height, last_id) = { (other.entries, other.tick_height, other.last_id) }; self.entries = entries; self.tick_height = tick_height; @@ -207,7 +207,7 @@ mod tests { assert!(!first.check_entry(last_id)); let mut second = first.fork(); second.register_tick(&last_id); - first.merge_into_trunk(second); + first.merge_into_root(second); assert!(first.check_entry(last_id)); } } diff --git a/src/leader_scheduler.rs b/src/leader_scheduler.rs index 6d2ac970739f07..7fb12d47497edb 100644 --- a/src/leader_scheduler.rs +++ b/src/leader_scheduler.rs @@ -321,7 +321,7 @@ impl LeaderScheduler { let lower_bound = height.saturating_sub(self.active_window_length); { - let state = bank.trunk_fork(); + let state = bank.root_bank_state(); let accounts = state.head().accounts.accounts_db.read().unwrap(); // TODO: iterate through checkpoints, too diff --git a/src/status_cache.rs b/src/status_cache.rs index 236850ce749a53..018ce959386bd8 100644 --- a/src/status_cache.rs +++ b/src/status_cache.rs @@ 
-16,7 +16,7 @@ pub struct StatusCache { /// failures failures: FailureMap, - /// Merges are empty unless this is the trunk checkpoint which cannot be unrolled + /// Merges are empty unless this is the root checkpoint which cannot be unrolled merges: VecDeque, } @@ -27,7 +27,7 @@ impl StatusCache { .map(|i| last_id.hash_at_index(i)) .collect(); Self { - signatures: Bloom::new(38340234, keys), + signatures: Bloom::new(38_340_234, keys), failures: HashMap::new(), merges: VecDeque::new(), } @@ -38,7 +38,7 @@ impl StatusCache { return true; } } - return false; + false } /// test if a signature is known pub fn has_signature(&self, sig: &Signature) -> bool { @@ -47,7 +47,7 @@ impl StatusCache { /// add a signature pub fn add(&mut self, sig: &Signature) { // any mutable cache is "live" and should not be merged into - // since it cannot be a valid trunk checkpoint + // since it cannot be a valid root checkpoint assert!(self.merges.is_empty()); self.signatures.add(&sig) @@ -56,7 +56,7 @@ impl StatusCache { pub fn save_failure_status(&mut self, sig: &Signature, err: BankError) { assert!(self.has_signature(sig), "sig not found with err {:?}", err); // any mutable cache is "live" and should not be merged into - // since it cannot be a valid trunk checkpoint + // since it cannot be a valid root checkpoint assert!(self.merges.is_empty()); self.failures.insert(*sig, err); } @@ -84,8 +84,8 @@ impl StatusCache { /// like accounts, status cache starts with an new data structure for every checkpoint /// so only merge is implemented /// but the merges maintains a history - pub fn merge_into_trunk(&mut self, other: Self) { - // merges should be empty for every other checkpoint accept the trunk + pub fn merge_into_root(&mut self, other: Self) { + // merges should be empty for every other checkpoint accept the root // which cannot be rolled back assert!(other.merges.is_empty()); self.merges.push_front(other); @@ -172,7 +172,7 @@ mod tests { assert_eq!(first.get_signature_status(&sig), 
Some(Ok(()))); let last_id = hash(last_id.as_ref()); let second = StatusCache::new(&last_id); - first.merge_into_trunk(second); + first.merge_into_root(second); assert_eq!(first.get_signature_status(&sig), Some(Ok(())),); assert!(first.has_signature(&sig)); } @@ -186,7 +186,7 @@ mod tests { assert_eq!(first.get_signature_status(&sig), Some(Ok(()))); let last_id = hash(last_id.as_ref()); let mut second = StatusCache::new(&last_id); - second.merge_into_trunk(first); + second.merge_into_root(first); assert_eq!(second.get_signature_status(&sig), Some(Ok(())),); assert!(second.has_signature(&sig)); } From 4c429c983638c55722ff5ca41cd4b3972f103d79 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 07:12:57 -0800 Subject: [PATCH 08/14] track record us --- src/bank.rs | 14 +++++++------- src/bank_state.rs | 6 ++++++ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/bank.rs b/src/bank.rs index 4219cc4859c635..f30f4bf7908845 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -653,7 +653,7 @@ mod tests { use solana_sdk::native_program::ProgramError; use solana_sdk::signature::Keypair; use solana_sdk::signature::KeypairUtil; - //use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; + use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::transaction::Instruction; use std; @@ -1603,7 +1603,7 @@ mod tests { // ENTRIES_PER_SEGMENT, // ); - // assert!(bank.process_transaction(&tx).is_ok()); + // assert_eq!(bank.process_transaction(&tx), Ok(())); // let entry_height = 0; @@ -1615,10 +1615,10 @@ mod tests { // Signature::default(), // ); - // assert!(bank.process_transaction(&tx).is_ok()); + // assert_eq!(bank.process_transaction(&tx), Ok(())); - // assert_eq!(bank.get_storage_entry_height(), ENTRIES_PER_SEGMENT); - // assert_eq!(bank.get_storage_last_id(), storage_last_id); - // assert_eq!(bank.get_pubkeys_for_entry_height(0), vec![]); - 
// } + // assert_eq!(bank.get_storage_entry_height(), ENTRIES_PER_SEGMENT); + // assert_eq!(bank.get_storage_last_id(), storage_last_id); + // assert_eq!(bank.get_pubkeys_for_entry_height(0), vec![]); + // } } diff --git a/src/bank_state.rs b/src/bank_state.rs index 4e2182029e2255..58c25c5a71b0d0 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -549,6 +549,7 @@ impl BankState { results: &[Result<()>], poh: &PohRecorder, ) -> Result<()> { + let now = Instant::now(); let processed_transactions: Vec<_> = results .iter() .zip(txs.iter()) @@ -570,6 +571,11 @@ impl BankState { BankError::RecordFailure })?; } + debug!( + "record: {}us txs_len={}", + duration_as_us(&now.elapsed()), + txs.len(), + ); Ok(()) } pub fn get_signature_status(&self, sig: &Signature) -> Option> { From 1ae8a3eaed9fa9bb504c8fac4a3211f2dbb307ce Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 07:13:25 -0800 Subject: [PATCH 09/14] comments --- src/bank.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bank.rs b/src/bank.rs index f30f4bf7908845..145c512ea69e80 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -653,7 +653,7 @@ mod tests { use solana_sdk::native_program::ProgramError; use solana_sdk::signature::Keypair; use solana_sdk::signature::KeypairUtil; - use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; + //use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::transaction::Instruction; use std; From 349852150c852bef4da8b49eca6324c3c3bbf5be Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 14:16:52 -0800 Subject: [PATCH 10/14] fixed accounts test --- src/accounts.rs | 5 ++++- src/replay_stage.rs | 24 ++++++++++++++---------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/accounts.rs b/src/accounts.rs index 2fc868469de42c..d19eecc02775e7 100644 --- a/src/accounts.rs +++ b/src/accounts.rs @@ 
-141,7 +141,10 @@ impl AccountsDB { for key in &tx.account_keys { called_accounts.push(Self::load(checkpoints, key).unwrap_or_default()); } - if called_accounts[0].tokens == 0 { + if called_accounts.is_empty() { + error_counters.account_not_found += 1; + Err(BankError::AccountNotFound) + } else if called_accounts[0].tokens == 0 { error_counters.account_not_found += 1; Err(BankError::AccountNotFound) } else if called_accounts[0].tokens < tx.fee { diff --git a/src/replay_stage.rs b/src/replay_stage.rs index c6c19f20c10584..3e187448e3a3c9 100644 --- a/src/replay_stage.rs +++ b/src/replay_stage.rs @@ -87,13 +87,14 @@ impl ReplayStage { bank: &Arc, cluster_info: &Arc>, window_receiver: &EntryReceiver, - _keypair: &Arc, + keypair: &Arc, vote_signer: Option<&Arc>, vote_blob_sender: Option<&BlobSender>, ledger_entry_sender: &EntrySender, entry_height: &mut u64, last_entry_id: &mut Hash, ) -> Result<()> { + let my_id = keypair.pubkey(); let timer = Duration::new(1, 0); //coalesce all the available entries into a single vote let mut entries = window_receiver.recv_timeout(timer)?; @@ -156,6 +157,18 @@ impl ReplayStage { ledger_entry_sender.send(entries)?; } *entry_height += entries_len; + let (scheduled_leader, _) = bank + .get_current_leader() + .expect("Scheduled leader should be calculated by this point"); + + // TODO: Remove this soon once we boot the leader from ClusterInfo + if scheduled_leader != current_leader { + cluster_info.write().unwrap().set_leader(scheduled_leader); + } + + if my_id == scheduled_leader { + break; + } } // do this at the trunk crossover bank.leader_scheduler @@ -198,15 +211,6 @@ impl ReplayStage { } } } - let (scheduled_leader, _) = bank - .get_current_leader() - .expect("Scheduled leader should be calculated by this point"); - - // TODO: Remove this soon once we boot the leader from ClusterInfo - if scheduled_leader != current_leader { - cluster_info.write().unwrap().set_leader(scheduled_leader); - } - inc_new_counter_info!( 
"replicate_stage-duration", duration_as_ms(&now.elapsed()) as usize From c80d91944caea97dd8009099ef47ddea5f2c3136 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 15:08:33 -0800 Subject: [PATCH 11/14] purge docs --- src/accounts.rs | 3 ++- src/bank_state.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/accounts.rs b/src/accounts.rs index d19eecc02775e7..9cc6ec671ff892 100644 --- a/src/accounts.rs +++ b/src/accounts.rs @@ -89,7 +89,8 @@ impl AccountsDB { } None } - /// purge == checkpoints.is_empty() + /// Store the account update. If the update is to delete the account because the token balance + /// is 0, purge needs to be set to true for the delete to occur in place. pub fn store(&mut self, purge: bool, pubkey: &Pubkey, account: &Account) { if account.tokens == 0 { if purge { diff --git a/src/bank_state.rs b/src/bank_state.rs index 58c25c5a71b0d0..eab853fb6cca0b 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -382,6 +382,7 @@ impl BankState { ) { let head = &self.checkpoints[0]; let now = Instant::now(); + // purge if there is only 1 checkpoint and its this one. 
let purge = self.checkpoints.len() == 1; head.accounts .store_accounts(purge, txs, executed, loaded_accounts); From d45ef8609225f6ce119dbccc2198f4d5000b23a4 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Fri, 25 Jan 2019 16:16:08 -0800 Subject: [PATCH 12/14] fixed tests --- src/replay_stage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/replay_stage.rs b/src/replay_stage.rs index 3e187448e3a3c9..87a766b2e49414 100644 --- a/src/replay_stage.rs +++ b/src/replay_stage.rs @@ -69,7 +69,7 @@ impl ReplayStage { let mut blocks = vec![]; for e in entries { let current = e.tick_height / TICKS_PER_BLOCK; - let prev = current - 1; + let prev = if current == 0 { 0 } else { current - 1 }; if blocks.is_empty() { blocks.push((vec![], current, prev)); } From b901821bbd007d1647796580a7b9e079a2ec83f9 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Mon, 28 Jan 2019 12:02:15 -0800 Subject: [PATCH 13/14] storage test --- ledger-tool/src/main.rs | 2 +- src/bank.rs | 104 ++++++++++++++++++++-------------------- 2 files changed, 53 insertions(+), 53 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 1e0d8af0724c80..19f91e44a5b8ed 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -111,7 +111,7 @@ fn main() { ); exit(1); } - let bank = Bank::new_with_builtin_programs(); + let bank = Bank::default(); { let genesis = entries.by_ref().take(NUM_GENESIS_ENTRIES); if let Err(e) = bank.process_ledger(genesis) { diff --git a/src/bank.rs b/src/bank.rs index 145c512ea69e80..dad474f91d9bcd 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -653,12 +653,12 @@ mod tests { use solana_sdk::native_program::ProgramError; use solana_sdk::signature::Keypair; use solana_sdk::signature::KeypairUtil; - //use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; + use solana_sdk::storage_program::{StorageTransaction, ENTRIES_PER_SEGMENT}; use solana_sdk::system_transaction::SystemTransaction; use 
solana_sdk::transaction::Instruction; use std; use std::sync::mpsc::channel; - //use tokio::prelude::{Stream, Async}; + use tokio::prelude::{Stream, Async}; #[test] fn test_bank_new() { @@ -1571,54 +1571,54 @@ mod tests { assert_eq!(bank.get_balance(&pubkey), 1); } - // #[test] - // fn test_bank_storage() { - // solana_logger::setup(); - // let alice = Mint::new(1000); - // let bank = Bank::new(&alice); - - // let bob = Keypair::new(); - // let jack = Keypair::new(); - // let jill = Keypair::new(); - - // let x = 42; - // let last_id = hash(&[x]); - // let x2 = x * 2; - // let storage_last_id = hash(&[x2]); - - // bank.tpu_register_tick(&last_id); - - // bank.transfer(10, &alice.keypair(), jill.pubkey(), last_id) - // .unwrap(); - - // bank.transfer(10, &alice.keypair(), bob.pubkey(), last_id) - // .unwrap(); - // bank.transfer(10, &alice.keypair(), jack.pubkey(), last_id) - // .unwrap(); - - // let tx = Transaction::storage_new_advertise_last_id( - // &bob, - // storage_last_id, - // last_id, - // ENTRIES_PER_SEGMENT, - // ); - - // assert_eq!(bank.process_transaction(&tx), Ok(())); - - // let entry_height = 0; - - // let tx = Transaction::storage_new_mining_proof( - // &jack, - // Hash::default(), - // last_id, - // entry_height, - // Signature::default(), - // ); - - // assert_eq!(bank.process_transaction(&tx), Ok(())); - - // assert_eq!(bank.get_storage_entry_height(), ENTRIES_PER_SEGMENT); - // assert_eq!(bank.get_storage_last_id(), storage_last_id); - // assert_eq!(bank.get_pubkeys_for_entry_height(0), vec![]); - // } + #[test] + fn test_bank_storage() { + solana_logger::setup(); + let alice = Mint::new(1000); + let bank = Bank::new(&alice); + + let bob = Keypair::new(); + let jack = Keypair::new(); + let jill = Keypair::new(); + + let x = 42; + let last_id = hash(&[x]); + let x2 = x * 2; + let storage_last_id = hash(&[x2]); + + bank.tpu_register_tick(&last_id); + + bank.transfer(10, &alice.keypair(), jill.pubkey(), last_id) + .unwrap(); + + bank.transfer(10, 
&alice.keypair(), bob.pubkey(), last_id) + .unwrap(); + bank.transfer(10, &alice.keypair(), jack.pubkey(), last_id) + .unwrap(); + + let tx = Transaction::storage_new_advertise_last_id( + &bob, + storage_last_id, + last_id, + ENTRIES_PER_SEGMENT, + ); + + assert_eq!(bank.process_transaction(&tx), Ok(())); + + let entry_height = 0; + + let tx = Transaction::storage_new_mining_proof( + &jack, + Hash::default(), + last_id, + entry_height, + Signature::default(), + ); + + assert_eq!(bank.process_transaction(&tx), Ok(())); + + assert_eq!(bank.get_storage_entry_height(), ENTRIES_PER_SEGMENT); + assert_eq!(bank.get_storage_last_id(), storage_last_id); + assert_eq!(bank.get_pubkeys_for_entry_height(0), vec![]); + } } From d5eec9b6cb874a05a85f34e928d52e8ec09a43b3 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Mon, 28 Jan 2019 12:19:43 -0800 Subject: [PATCH 14/14] cleanup --- src/bank_state.rs | 92 ----------------------------------------------- 1 file changed, 92 deletions(-) diff --git a/src/bank_state.rs b/src/bank_state.rs index 917ca111e407fe..a7351d8a90f05a 100644 --- a/src/bank_state.rs +++ b/src/bank_state.rs @@ -704,96 +704,4 @@ mod test { let updated_results = BankState::ignore_program_errors(results); assert_ne!(updated_results, expected_results); } - //#[test] - //fn test_bank_record_transactions() { - // let mint = Mint::new(10_000); - // let bank = Arc::new(Bank::new(&mint)); - // let (entry_sender, entry_receiver) = channel(); - // let poh_recorder = PohRecorder::new(bank.clone(), entry_sender, bank.last_id(), None); - // let pubkey = Keypair::new().pubkey(); - - // let transactions = vec![ - // Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), - // Transaction::system_move(&mint.keypair(), pubkey, 1, mint.last_id(), 0), - // ]; - - // let mut results = vec![Ok(()), Ok(())]; - // BankStater::record_transactions(&transactions, &results, &poh_recorder) - // .unwrap(); - // let entries = entry_receiver.recv().unwrap(); - // 
assert_eq!(entries[0].transactions.len(), transactions.len()); - - // // ProgramErrors should still be recorded - // results[0] = Err(BankError::ProgramError( - // 1, - // ProgramError::ResultWithNegativeTokens, - // )); - // BankState::record_transactions(&transactions, &results, &poh_recorder) - // .unwrap(); - // let entries = entry_receiver.recv().unwrap(); - // assert_eq!(entries[0].transactions.len(), transactions.len()); - - // // Other BankErrors should not be recorded - // results[0] = Err(BankError::AccountNotFound); - // BankState::record_transactions(&transactions, &results, &poh_recorder) - // .unwrap(); - // let entries = entry_receiver.recv().unwrap(); - // assert_eq!(entries[0].transactions.len(), transactions.len() - 1); - //} - // - // #[test] - // fn test_bank_process_and_record_transactions() { - // let mint = Mint::new(10_000); - // let bank = Arc::new(Bank::new(&mint)); - // let pubkey = Keypair::new().pubkey(); - - // let transactions = vec![Transaction::system_move( - // &mint.keypair(), - // pubkey, - // 1, - // mint.last_id(), - // 0, - // )]; - - // let (entry_sender, entry_receiver) = channel(); - // let mut poh_recorder = PohRecorder::new( - // bank.clone(), - // entry_sender, - // bank.last_id(), - // Some(bank.tick_height() + 1), - // ); - - // bank.process_and_record_transactions(&transactions, &poh_recorder) - // .unwrap(); - // poh_recorder.tick().unwrap(); - - // let mut need_tick = true; - // // read entries until I find mine, might be ticks... 
- // while need_tick { - // let entries = entry_receiver.recv().unwrap(); - // for entry in entries { - // if !entry.is_tick() { - // assert_eq!(entry.transactions.len(), transactions.len()); - // assert_eq!(bank.get_balance(&pubkey), 1); - // } else { - // need_tick = false; - // } - // } - // } - - // let transactions = vec![Transaction::system_move( - // &mint.keypair(), - // pubkey, - // 2, - // mint.last_id(), - // 0, - // )]; - - // assert_eq!( - // bank.process_and_record_transactions(&transactions, &poh_recorder), - // Err(BankError::RecordFailure) - // ); - - // assert_eq!(bank.get_balance(&pubkey), 1); - // } }