From 0b9482d69a3f5c30d8895dc7d4a1de4a3b59165f Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 13:34:08 +0900 Subject: [PATCH 01/28] Switch AccountsIndex.account_maps from HashMap to BTreeMap --- runtime/src/accounts_index.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index beba960baa93d3..96eefa672b6b37 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -1,7 +1,7 @@ use solana_sdk::{clock::Slot, pubkey::Pubkey}; use std::sync::atomic::{AtomicU64, Ordering}; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, sync::{RwLock, RwLockReadGuard}, }; @@ -14,7 +14,7 @@ type AccountMapEntry = (AtomicU64, RwLock>); #[derive(Debug, Default)] pub struct AccountsIndex { - pub account_maps: HashMap>, + pub account_maps: BTreeMap>, pub roots: HashSet, pub uncleaned_roots: HashSet, From e464209a0deac6d99bcb51d49c673027bb162cd3 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 16 Apr 2020 13:36:31 +0900 Subject: [PATCH 02/28] Introduce eager rent collection --- runtime/src/accounts.rs | 19 ++++++++ runtime/src/accounts_db.rs | 40 +++++++++++++++++ runtime/src/accounts_index.rs | 17 +++++++ runtime/src/bank.rs | 85 ++++++++++++++++++++++++++++++++++- 4 files changed, 160 insertions(+), 1 deletion(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index c156b3418dd613..0a7ea991e459f9 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -476,6 +476,25 @@ impl Accounts { ) } + pub fn load_to_collect_rent_eargerly>( + &self, + ancestors: &HashMap, + range: R, + ) -> Vec<(Pubkey, Account)> { + self.accounts_db.scan_accounts_under_range( + ancestors, + range, + |collector: &mut Vec<(Pubkey, Account)>, option| { + if let Some(data) = option + .filter(|(_, account, _)| account.lamports != 0) + .map(|(pubkey, account, _slot)| (*pubkey, account)) + { + collector.push(data) + } + }, + ) + } + /// Slow because lock is held for 1 operation instead of many pub fn store_slow(&self, slot: Slot, pubkey: &Pubkey, account: &Account) { self.accounts_db.store(slot, &[(pubkey, account)]); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 1de4f45d840d62..522db4ed5f6f9f 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1113,6 +1113,46 @@ impl AccountsDB { collector } + pub fn scan_accounts_under_range( + &self, + ancestors: &HashMap, + range: R, + scan_func: F, + ) -> A + where + F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (), + A: Default, + R: std::ops::RangeBounds, + { + let mut collector = A::default(); + let accounts_index = self.accounts_index.read().unwrap(); + let storage = self.storage.read().unwrap(); + accounts_index.scan_accounts_under_range( + ancestors, + range, + |pubkey, (account_info, slot)| { + scan_func( + &mut collector, + storage + .0 + .get(&slot) + .and_then(|storage_map| storage_map.get(&account_info.store_id)) + .and_then(|store| { + Some( + store + .accounts + .get_account(account_info.offset)? 
+ .0 + .clone_account(), + ) + }) + .map(|account| (pubkey, account, slot)), + ) + }, + ); + collector + } + /// Scan a specific slot through all the account storage in parallel with sequential read // PERF: Sequentially read each storage entry in parallel pub fn scan_account_storage(&self, slot: Slot, scan_func: F) -> Vec diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 96eefa672b6b37..079f41cd387af8 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -34,6 +34,23 @@ impl AccountsIndex { } } + pub fn scan_accounts_under_range( + &self, + ancestors: &HashMap, + range: R, + mut func: F, + ) where + F: FnMut(&Pubkey, (&T, Slot)) -> (), + R: std::ops::RangeBounds, + { + for (pubkey, list) in self.account_maps.range(range) { + let list_r = &list.1.read().unwrap(); + if let Some(index) = self.latest_slot(ancestors, &list_r) { + func(pubkey, (&list_r[index].1, list_r[index].0)); + } + } + } + fn get_rooted_entries(&self, slice: SlotSlice) -> SlotList { slice .iter() diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index addd71145a25bc..385040f57676bf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -26,7 +26,7 @@ use crate::{ transaction_utils::OrderedIterator, }; use bincode::{deserialize_from, serialize_into}; -use byteorder::{ByteOrder, LittleEndian}; +use byteorder::{BigEndian, ByteOrder, LittleEndian}; use itertools::Itertools; use log::*; use serde::{Deserialize, Serialize}; @@ -799,6 +799,7 @@ impl Bank { if *hash == Hash::default() { // finish up any deferred changes to account state + self.collect_rent_eagerly(); // update the docs self.collect_fees(); self.distribute_rent(); self.update_slot_history(); @@ -1647,6 +1648,88 @@ impl Bank { } } + fn collect_rent_by_range( + &self, + start_slot_index: Slot, + end_slot_index: Slot, + subrange_count: Slot, + ) { + // pubkey (= account address, including derived ones?) distribution should be uniform? + error!( + "ryoqun: {}, {}, {}", + start_slot_index, end_slot_index, subrange_count + ); + + let start_key_prefix = if start_slot_index == 0 && end_slot_index == 0 { + 0 + } else { + (start_slot_index + 1) * (Slot::max_value() / subrange_count) + }; + + let end_key_prefix = if end_slot_index + 1 == subrange_count { + Slot::max_value() + } else { + (end_slot_index + 1) * (Slot::max_value() / subrange_count) - 1 + }; + + let mut start_pubkey = [0x00u8; 32]; + let mut end_pubkey = [0xffu8; 32]; + BigEndian::write_u64(&mut start_pubkey[..], start_key_prefix); + BigEndian::write_u64(&mut end_pubkey[..], end_key_prefix); + // special case parent_slot is in previous current_epoch + error!( + "ryoqun: ({}-{})/{}, {:064b} {:064b} {:?} {:?}", + start_slot_index, + end_slot_index, + subrange_count, + start_slot_index, + start_key_prefix, + start_pubkey, + end_pubkey + ); + // should be an inclusive range (a closed interval) like this: + // [0xgg00-0xhhff], [0xii00-0xjjff], ... (where 0xii00 == 0xhhff + 1) + let subrange = Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey); + + let accounts = self + .rc + .accounts + .load_to_collect_rent_eargerly(&self.ancestors, subrange); + let account_count = accounts.len(); + + // parallelize? 
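+        // Note: the range load above walks accounts_index.account_maps with a
+        // BTreeMap range scan over the computed pubkey subrange (the
+        // HashMap -> BTreeMap switch in PATCH 01/28 is what makes this possible).
+        // The loop below then charges rent to each loaded account and rewrites
+        // it, even when the collected rent is zero, so that stale AppendVec
+        // entries can eventually be purged.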
+ let mut rent = 0; + for (pubkey, mut account) in accounts { + rent += self.rent_collector.update(&pubkey, &mut account); + // to purge old AppendVec, store even regardless rent is zero or not + self.store_account(&pubkey, &account); + } + self.collected_rent.fetch_add(rent, Ordering::Relaxed); + + error!( + "ryoqun: collected rent eagerly: {} from {} accounts", + rent, account_count + ); + } + + fn collect_rent_eagerly(&self) { + let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot()); + let (parent_epoch, mut parent_slot_index) = + self.get_epoch_and_slot_index(self.parent_slot()); + + if parent_epoch < current_epoch { + if current_slot_index > 0 { + let parent_slot_count = self.get_slots_in_epoch(parent_epoch); + let last_slot_index = parent_slot_count - 1; + self.collect_rent_by_range(parent_slot_index, last_slot_index, parent_slot_count); + } + parent_slot_index = 0; + } + + let current_slot_count = self.get_slots_in_epoch(current_epoch); + self.collect_rent_by_range(parent_slot_index, current_slot_index, current_slot_count); + } + /// Process a batch of transactions. #[must_use] pub fn load_execute_and_commit_transactions( From 06a3e3e534d1b9d92e0f088c9c69335679e78832 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 21 Apr 2020 17:09:20 +0900 Subject: [PATCH 03/28] Start to add tests --- runtime/src/bank.rs | 73 +++++++++++++++++++++++++++++++++++++-------- sdk/src/clock.rs | 6 ++++ 2 files changed, 67 insertions(+), 12 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 385040f57676bf..6de5e01a6b0088 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -37,8 +37,8 @@ use solana_metrics::{ use solana_sdk::{ account::Account, clock::{ - get_segment_from_slot, Epoch, Slot, UnixTimestamp, MAX_PROCESSING_AGE, - MAX_RECENT_BLOCKHASHES, + get_segment_from_slot, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, + MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, }, epoch_schedule::EpochSchedule, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -1652,24 +1652,24 @@ impl Bank { &self, start_slot_index: Slot, end_slot_index: Slot, - subrange_count: Slot, + partition_count: Slot, ) { // pubkey (= account address, including derived ones?) distribution should be uniform? 
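+        // The equal-width prefix arithmetic below assumes that account
+        // addresses (ed25519 public keys and hash-derived addresses) are
+        // effectively uniformly distributed over the 32-byte key space;
+        // otherwise the per-slot scan work would not be evenly spread.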
error!( "ryoqun: {}, {}, {}", - start_slot_index, end_slot_index, subrange_count + start_slot_index, end_slot_index, partition_count ); let start_key_prefix = if start_slot_index == 0 && end_slot_index == 0 { 0 } else { - (start_slot_index + 1) * (Slot::max_value() / subrange_count) + (start_slot_index + 1) * (Slot::max_value() / partition_count) }; - let end_key_prefix = if end_slot_index + 1 == subrange_count { + let end_key_prefix = if end_slot_index + 1 == partition_count { Slot::max_value() } else { - (end_slot_index + 1) * (Slot::max_value() / subrange_count) - 1 + (end_slot_index + 1) * (Slot::max_value() / partition_count) - 1 }; let mut start_pubkey = [0x00u8; 32]; @@ -1681,7 +1681,7 @@ impl Bank { "ryoqun: ({}-{})/{}, {:064b} {:064b} {:?} {:?}", start_slot_index, end_slot_index, - subrange_count, + partition_count, start_slot_index, start_key_prefix, start_pubkey, @@ -1712,22 +1712,34 @@ impl Bank { ); } - fn collect_rent_eagerly(&self) { + fn eager_rent_ranges_for_epochs(&self) -> Vec<(SlotIndex, SlotIndex, SlotCount)> { let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot()); let (parent_epoch, mut parent_slot_index) = self.get_epoch_and_slot_index(self.parent_slot()); + let mut ranges = vec![]; + if parent_epoch < current_epoch { if current_slot_index > 0 { let parent_slot_count = self.get_slots_in_epoch(parent_epoch); let last_slot_index = parent_slot_count - 1; - self.collect_rent_by_range(parent_slot_index, last_slot_index, parent_slot_count); + ranges.push((parent_slot_index, last_slot_index, parent_slot_count)); } parent_slot_index = 0; } let current_slot_count = self.get_slots_in_epoch(current_epoch); - self.collect_rent_by_range(parent_slot_index, current_slot_index, current_slot_count); + ranges.push((parent_slot_index, current_slot_index, current_slot_count)); + + ranges + } + + fn collect_rent_eagerly(&self) { + for (start_slot_index, end_slot_index, partition_count) in + self.eager_rent_ranges_for_epochs() + { + self.collect_rent_by_range(start_slot_index, end_slot_index, partition_count); + } } /// Process a batch of transactions. 
@@ -2269,7 +2281,7 @@ impl Bank { /// /// ( slot/slots_per_epoch, slot % slots_per_epoch ) /// - pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (u64, u64) { + pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, SlotIndex) { self.epoch_schedule.get_epoch_and_slot_index(slot) } @@ -3309,6 +3321,43 @@ mod tests { assert_eq!(bank.collected_rent.load(Ordering::Relaxed), rent_collected); } + #[test] + fn test_rent_eager_across_epoch_without_gap() { + let (genesis_config, _mint_keypair) = create_genesis_config(1); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 32)]); + for _ in 2..32 { + bank = Arc::new(new_from_parent(&bank)); + } + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(30, 31, 32)]); + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 64)]); + } + + #[test] + fn test_rent_eager_across_epoch_with_gap() { + let (genesis_config, _mint_keypair) = create_genesis_config(1); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 32)]); + for _ in 2..15 { + bank = Arc::new(new_from_parent(&bank)); + } + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(13, 14, 32)]); + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 49)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(14, 31, 32), (0, 17, 64)] + ); + } + #[test] fn test_bank_update_rewards() { // create a bank that ticks really slowly... diff --git a/sdk/src/clock.rs b/sdk/src/clock.rs index 22b36606c6804f..4a64b7120f4da2 100644 --- a/sdk/src/clock.rs +++ b/sdk/src/clock.rs @@ -85,6 +85,12 @@ pub type Segment = u64; /// some number of Slots. pub type Epoch = u64; +/// SlotIndex is an index to the slots of a epoch +pub type SlotIndex = u64; + +/// SlotCount is the number of slots in a epoch +pub type SlotCount = u64; + /// UnixTimestamp is an approximate measure of real-world time, /// expressed as Unix time (ie. seconds since the Unix epoch) pub type UnixTimestamp = i64; From 96502581ff75a04c1e29a0c3494e3a85536f9f2d Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 21 Apr 2020 19:19:01 +0900 Subject: [PATCH 04/28] Avoid too short eager rent collection cycles --- runtime/src/bank.rs | 364 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 323 insertions(+), 41 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6de5e01a6b0088..be604a2aeca189 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -38,7 +38,7 @@ use solana_sdk::{ account::Account, clock::{ get_segment_from_slot, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, - MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, + DEFAULT_TICKS_PER_SECOND, MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, SECONDS_PER_DAY, }, epoch_schedule::EpochSchedule, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -79,6 +79,13 @@ pub type BankSlotDelta = SlotDelta>; type TransactionAccountRefCells = Vec>>; type TransactionLoaderRefCells = Vec)>>; +// eager rent collection cycle is composed of number of tiny pubkey ranges to scan +// the whole pubkey value domain. 
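+// A PartitionIndex identifies one pubkey subrange within a collection cycle
+// and a PartitionCount is the number of subranges making up that cycle; for
+// normal-length epochs the count equals the slots in the epoch, so each new
+// bank with no skipped slots covers exactly one subrange.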
+type PartitionIndex = u64; +type PartitionCount = u64; + +type EpochCount = u64; + #[derive(Default)] pub struct BankRc { /// where all the Accounts are stored @@ -1650,26 +1657,53 @@ impl Bank { fn collect_rent_by_range( &self, - start_slot_index: Slot, - end_slot_index: Slot, - partition_count: Slot, + range_start: PartitionIndex, + range_end: PartitionIndex, + partition_count: PartitionCount, ) { - // pubkey (= account address, including derived ones?) distribution should be uniform? + let subrange = Self::pubkey_range_by_partition(range_start, range_end, partition_count); + + let accounts = self + .rc + .accounts + .load_to_collect_rent_eargerly(&self.ancestors, subrange); + let account_count = accounts.len(); + + // parallelize? + let mut rent = 0; + for (pubkey, mut account) in accounts { + rent += self.rent_collector.update(&pubkey, &mut account); + // to purge old AppendVec, store even regardless rent is zero or not + self.store_account(&pubkey, &account); + } + self.collected_rent.fetch_add(rent, Ordering::Relaxed); + error!( - "ryoqun: {}, {}, {}", - start_slot_index, end_slot_index, partition_count + "ryoqun: collected rent eagerly: {} from {} accounts", + rent, account_count ); + datapoint_info!("collect_rent_eagerly", ("accounts", account_count, i64)); + } - let start_key_prefix = if start_slot_index == 0 && end_slot_index == 0 { + fn pubkey_range_by_partition( + start_index: PartitionIndex, + end_index: PartitionIndex, + partition_count: PartitionCount, + ) -> std::ops::RangeInclusive { + // pubkey (= account address, including derived ones?) distribution should be uniform? + error!("ryoqun: {}, {}, {}", start_index, end_index, end_index); + + let partition_width = Slot::max_value() / partition_count; + let start_key_prefix = if start_index == 0 && end_index == 0 { 0 } else { - (start_slot_index + 1) * (Slot::max_value() / partition_count) + (start_index + 1) * partition_width }; - let end_key_prefix = if end_slot_index + 1 == partition_count { + let end_key_prefix = if end_index + 1 == partition_count { Slot::max_value() } else { - (end_slot_index + 1) * (Slot::max_value() / partition_count) - 1 + (end_index + 1) * partition_width - 1 }; let mut start_pubkey = [0x00u8; 32]; @@ -1679,37 +1713,17 @@ impl Bank { // special case parent_slot is in previous current_epoch error!( "ryoqun: ({}-{})/{}, {:064b} {:064b} {:?} {:?}", - start_slot_index, - end_slot_index, + start_index, + end_index, partition_count, - start_slot_index, + start_index, start_key_prefix, start_pubkey, end_pubkey ); // should be an inclusive range (a closed interval) like this: // [0xgg00-0xhhff], [0xii00-0xjjff], ... (where 0xii00 == 0xhhff + 1) - let subrange = Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey); - - let accounts = self - .rc - .accounts - .load_to_collect_rent_eargerly(&self.ancestors, subrange); - let account_count = accounts.len(); - - // parallelize? 
- let mut rent = 0; - for (pubkey, mut account) in accounts { - rent += self.rent_collector.update(&pubkey, &mut account); - // to purge old AppendVec, store even regardless rent is zero or not - self.store_account(&pubkey, &account); - } - self.collected_rent.fetch_add(rent, Ordering::Relaxed); - - error!( - "ryoqun: collected rent eagerly: {} from {} accounts", - rent, account_count - ); + Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey) } fn eager_rent_ranges_for_epochs(&self) -> Vec<(SlotIndex, SlotIndex, SlotCount)> { @@ -1721,25 +1735,116 @@ impl Bank { if parent_epoch < current_epoch { if current_slot_index > 0 { - let parent_slot_count = self.get_slots_in_epoch(parent_epoch); - let last_slot_index = parent_slot_count - 1; - ranges.push((parent_slot_index, last_slot_index, parent_slot_count)); + let last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1; + ranges.push(self.partition_in_collection_cycle( + parent_slot_index, + last_slot_index, + parent_epoch, + )); } parent_slot_index = 0; } - let current_slot_count = self.get_slots_in_epoch(current_epoch); - ranges.push((parent_slot_index, current_slot_index, current_slot_count)); + ranges.push(self.partition_in_collection_cycle( + parent_slot_index, + current_slot_index, + current_epoch, + )); ranges } + fn partition_in_collection_cycle( + &self, + start_slot_index: SlotIndex, + end_slot_index: SlotIndex, + current_epoch: Epoch, + ) -> (PartitionIndex, PartitionIndex, PartitionCount) { + let slot_count_in_two_day: SlotCount = + 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; + let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); + let first_normal_epoch = self.epoch_schedule.first_normal_epoch; + let is_in_longer_cycle = Self::use_longer_collection_cycle( + current_epoch, + first_normal_epoch, + slot_count_per_epoch, + slot_count_in_two_day, + ); + let (epoch_count_per_cycle, base_epoch, partition_count) = if !is_in_longer_cycle { + (1, 0, slot_count_per_epoch) + } else { + // Given short epochs, it's too costly to collect rent eagerly within an epoch, so lower the frequency of + // it. + // these logics aren't strictly eager rent collection anymore; should only used for + // development/performance + // purpose not under OperationMode::Stable!!!! + let epoch_count_in_cycle = slot_count_in_two_day / slot_count_per_epoch; + ( + epoch_count_in_cycle, + first_normal_epoch, + slot_count_per_epoch * epoch_count_in_cycle, + ) + }; + + // use common code-path for both very-likely and very-unlikely for the sake of minimized + // risk of any mis-calculation instead of neligilbe faster computation per slot for the + // likely case. 
+ let mut start_partition_index = Self::partition_index_in_collection_cycle( + start_slot_index, + current_epoch, + base_epoch, + epoch_count_per_cycle, + slot_count_per_epoch, + ); + //error!("ryoqun: {}, {}, {}", start_slot_index, end_slot_index, start_slot_in + if (start_slot_index == end_slot_index + || (start_slot_index == 0 && (end_slot_index - start_slot_index > 1))) + && start_partition_index > 0 + && is_in_longer_cycle + { + start_partition_index -= 1; + } + let end_partition_index = Self::partition_index_in_collection_cycle( + end_slot_index, + current_epoch, + base_epoch, + epoch_count_per_cycle, + slot_count_per_epoch, + ); + + (start_partition_index, end_partition_index, partition_count) + } + + fn partition_index_in_collection_cycle( + slot_index_in_epoch: SlotIndex, + current_epoch: Epoch, + base_epoch: Epoch, + epoch_count_per_cycle: EpochCount, + slot_count_per_epoch: SlotCount, + ) -> PartitionIndex { + let epoch_offset = current_epoch - base_epoch; + let epoch_index_in_cycle = epoch_offset % epoch_count_per_cycle; + slot_index_in_epoch + epoch_index_in_cycle * slot_count_per_epoch + } + + fn use_longer_collection_cycle( + current_epoch: Epoch, + first_normal_epoch: Epoch, + slot_count_per_epoch: SlotCount, + slot_count_in_two_day: SlotCount, + ) -> bool { + current_epoch >= first_normal_epoch && slot_count_per_epoch < slot_count_in_two_day + } + fn collect_rent_eagerly(&self) { + let mut measure = Measure::start("collect_rent_eagerly-ms"); for (start_slot_index, end_slot_index, partition_count) in self.eager_rent_ranges_for_epochs() { self.collect_rent_by_range(start_slot_index, end_slot_index, partition_count); } + measure.stop(); + inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize); } /// Process a batch of transactions. 
@@ -2417,7 +2522,7 @@ mod tests { use solana_sdk::{ account::KeyedAccount, account_utils::StateMut, - clock::DEFAULT_TICKS_PER_SLOT, + clock::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT}, epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, genesis_config::create_genesis_config, instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError}, @@ -3358,6 +3463,183 @@ mod tests { ); } + #[test] + fn test_rent_eager_across_epoch_without_gap_under_longer_cycle() { + let leader_pubkey = Pubkey::new_rand(); + let leader_lamports = 3; + let mut genesis_config = + create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + + const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64; + const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; + genesis_config.epoch_schedule = + EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432000); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 1)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + + for _ in 2..32 { + bank = Arc::new(new_from_parent(&bank)); + } + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 31)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(30, 31, 432000)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(31, 32, 432000)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 1)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(32, 33, 432000)]); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1000)); + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1001)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (31, 9)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(1000, 1001, 432000)] + ); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431998)); + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431999)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13499, 31)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(431998, 431999, 432000)] + ); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 1)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + } + + #[test] + fn test_rent_eager_across_epoch_with_gap_under_longer_cycle() { + let leader_pubkey = Pubkey::new_rand(); + let 
leader_lamports = 3; + let mut genesis_config = + create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + + const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64; + const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; + genesis_config.epoch_schedule = + EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432000); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 1)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + + for _ in 2..19 { + bank = Arc::new(new_from_parent(&bank)); + } + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 18)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(17, 18, 432000)]); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 44)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 12)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(18, 31, 432000), (31, 44, 432000)] + ); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 13)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(44, 45, 432000)]); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431993)); + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 432011)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 11)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(431993, 431999, 432000), (0, 11, 432000)] + ); + } + + #[test] + fn test_rent_eager_without_warmup_epochs_under_longer_cycle() { + let leader_pubkey = Pubkey::new_rand(); + let leader_lamports = 3; + let mut genesis_config = + create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + + const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8; + const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; + genesis_config.epoch_schedule = + EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, true); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432000); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.epoch_schedule.first_normal_epoch, 3); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222)); + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 223)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(126, 127, 128)]); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 0)); + 
assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 431872)]); + assert_eq!(431872 % bank.get_slots_in_epoch(bank.epoch()), 0); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 1)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 431872)]); + + bank = Arc::new(Bank::new_from_parent( + &bank, + &Pubkey::default(), + 431872 + 223 - 1, + )); + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1689, 255)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(431870, 431871, 431872)] + ); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1690, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 431872)]); + } + #[test] fn test_bank_update_rewards() { // create a bank that ticks really slowly... From 43013f26990b56edebbb17d4b11ec9d3bef4f9d1 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 23 Apr 2020 14:56:47 +0900 Subject: [PATCH 05/28] Add more tests --- runtime/src/bank.rs | 156 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 135 insertions(+), 21 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index be604a2aeca189..07095526aa6b12 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1678,10 +1678,6 @@ impl Bank { } self.collected_rent.fetch_add(rent, Ordering::Relaxed); - error!( - "ryoqun: collected rent eagerly: {} from {} accounts", - rent, account_count - ); datapoint_info!("collect_rent_eagerly", ("accounts", account_count, i64)); } @@ -1691,7 +1687,6 @@ impl Bank { partition_count: PartitionCount, ) -> std::ops::RangeInclusive { // pubkey (= account address, including derived ones?) distribution should be uniform? - error!("ryoqun: {}, {}, {}", start_index, end_index, end_index); let partition_width = Slot::max_value() / partition_count; let start_key_prefix = if start_index == 0 && end_index == 0 { @@ -1711,13 +1706,11 @@ impl Bank { BigEndian::write_u64(&mut start_pubkey[..], start_key_prefix); BigEndian::write_u64(&mut end_pubkey[..], end_key_prefix); // special case parent_slot is in previous current_epoch - error!( - "ryoqun: ({}-{})/{}, {:064b} {:064b} {:?} {:?}", + trace!( + "pubkey_range_by_partition: ({}-{})/{}: {:02x?}-{:02x?}", start_index, end_index, partition_count, - start_index, - start_key_prefix, start_pubkey, end_pubkey ); @@ -1796,14 +1789,6 @@ impl Bank { epoch_count_per_cycle, slot_count_per_epoch, ); - //error!("ryoqun: {}, {}, {}", start_slot_index, end_slot_index, start_slot_in - if (start_slot_index == end_slot_index - || (start_slot_index == 0 && (end_slot_index - start_slot_index > 1))) - && start_partition_index > 0 - && is_in_longer_cycle - { - start_partition_index -= 1; - } let end_partition_index = Self::partition_index_in_collection_cycle( end_slot_index, current_epoch, @@ -1812,9 +1797,29 @@ impl Bank { slot_count_per_epoch, ); + // do special handling... 
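+        // (annotation, assumed intent) when slots were skipped across an epoch
+        // boundary inside a multi-epoch collection cycle, pull the start of the
+        // range back by one partition so the partition sitting right on the
+        // boundary is not left uncollected.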
+ let is_across_epoch_boundary = Self::across_epoch_boundary_in_collection_cycle( + start_slot_index, + end_slot_index, + start_partition_index, + ); + if is_in_longer_cycle && is_across_epoch_boundary { + start_partition_index -= 1; + } + (start_partition_index, end_partition_index, partition_count) } + fn across_epoch_boundary_in_collection_cycle( + start_slot_index: SlotIndex, + end_slot_index: SlotIndex, + start_partition_index: PartitionIndex, + ) -> bool { + (start_slot_index == end_slot_index + || (start_slot_index == 0 && (end_slot_index - start_slot_index > 1))) + && start_partition_index > 0 + } + fn partition_index_in_collection_cycle( slot_index_in_epoch: SlotIndex, current_epoch: Epoch, @@ -1838,10 +1843,8 @@ impl Bank { fn collect_rent_eagerly(&self) { let mut measure = Measure::start("collect_rent_eagerly-ms"); - for (start_slot_index, end_slot_index, partition_count) in - self.eager_rent_ranges_for_epochs() - { - self.collect_rent_by_range(start_slot_index, end_slot_index, partition_count); + for (start_index, end_index, partition_count) in self.eager_rent_ranges_for_epochs() { + self.collect_rent_by_range(start_index, end_index, partition_count); } measure.stop(); inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize); @@ -3640,6 +3643,117 @@ mod tests { assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 431872)]); } + #[test] + fn test_rent_eager_pubkey_range_minimal() { + let range = Bank::pubkey_range_by_partition(0, 0, 1); + assert_eq!( + range, + Pubkey::new_from_array([0x00; 32])..=Pubkey::new_from_array([0xff; 32]) + ); + } + + #[test] + fn test_rent_eager_pubkey_range_dividable() { + let range = Bank::pubkey_range_by_partition(0, 0, 2); + assert_eq!( + range, + Pubkey::new_from_array([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + ]) + ..=Pubkey::new_from_array([ + 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + ]) + ); + + let range = Bank::pubkey_range_by_partition(0, 1, 2); + assert_eq!( + range, + Pubkey::new_from_array([ + 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + ]) + ..=Pubkey::new_from_array([ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + ]) + ); + } + + #[test] + fn test_rent_eager_pubkey_range_not_dividable() { + solana_logger::setup(); + + let range = Bank::pubkey_range_by_partition(0, 0, 3); + assert_eq!( + range, + Pubkey::new_from_array([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + ]) + ..=Pubkey::new_from_array([ + 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + ]) + ); + + let range = Bank::pubkey_range_by_partition(0, 1, 3); + assert_eq!( + range, + Pubkey::new_from_array([ + 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + ]) + ..=Pubkey::new_from_array([ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa9, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + ]) + ); + + let range = Bank::pubkey_range_by_partition(1, 2, 3); + assert_eq!( + range, + Pubkey::new_from_array([ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + ]) + ..=Pubkey::new_from_array([ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + ]) + ); + } + + #[test] + fn test_rent_eager_pubkey_range_gap() { + solana_logger::setup(); + let range = Bank::pubkey_range_by_partition(120, 1023, 12345); + assert_eq!( + range, + Pubkey::new_from_array([ + 0x02, 0x82, 0x5a, 0x89, 0xd1, 0xac, 0x58, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + ]) + ..=Pubkey::new_from_array([ + 0x15, 0x3c, 0x1d, 0xf1, 0xc6, 0x39, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + ]) + ); + } + #[test] fn test_bank_update_rewards() { // create a bank that ticks really slowly... From 8487ef543027310f51e84e49d754b6f7645ecb32 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 23 Apr 2020 18:23:43 +0900 Subject: [PATCH 06/28] Add more tests... 
--- runtime/src/bank.rs | 94 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 07095526aa6b12..661b7e4f815e35 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3754,6 +3754,100 @@ mod tests { ); } + #[test] + fn test_rent_eager_collect_rent_by_range() { + solana_logger::setup(); + + let (genesis_config, _mint_keypair) = create_genesis_config(1); + + let rent_due_pubkey = Pubkey::new_rand(); + let rent_exempt_pubkey = Pubkey::new_rand(); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + let little_lamports = 1234; + let large_lamports = 123456789; + let rent_collected = 22; + + bank.store_account( + &rent_due_pubkey, + &Account::new(little_lamports, 0, &Pubkey::default()), + ); + bank.store_account( + &rent_exempt_pubkey, + &Account::new(large_lamports, 0, &Pubkey::default()), + ); + + let genesis_slot = 0; + let some_slot = 1000; + let ancestors = vec![(some_slot, 0), (0, 1)].into_iter().collect(); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), some_slot)); + + assert_eq!(bank.collected_rent.load(Ordering::Relaxed), 0); + assert_eq!( + bank.get_account(&rent_due_pubkey).unwrap().lamports, + little_lamports + ); + assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch, 0); + { + let accounts_index = bank.rc.accounts.accounts_db.accounts_index.read().unwrap(); + let (accounts, _) = accounts_index.get(&rent_due_pubkey, &ancestors).unwrap(); + assert_eq!( + accounts + .iter() + .map(|(slot, _)| *slot) + .collect::>(), + vec![genesis_slot] + ); + let (accounts, _) = accounts_index.get(&rent_exempt_pubkey, &ancestors).unwrap(); + assert_eq!( + accounts + .iter() + .map(|(slot, _)| *slot) + .collect::>(), + vec![genesis_slot] + ); + } + + bank.collect_rent_by_range(0, 0, 1); // all range + + // unrelated 1-lamport account exists + assert_eq!( + bank.collected_rent.load(Ordering::Relaxed), + rent_collected + 1 + ); + assert_eq!( + bank.get_account(&rent_due_pubkey).unwrap().lamports, + little_lamports - rent_collected + ); + assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch, 6); + assert_eq!( + bank.get_account(&rent_exempt_pubkey).unwrap().lamports, + large_lamports + ); + assert_eq!(bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch, 6); + + { + let accounts_index = bank.rc.accounts.accounts_db.accounts_index.read().unwrap(); + let (accounts, _) = accounts_index.get(&rent_due_pubkey, &ancestors).unwrap(); + assert_eq!( + accounts + .iter() + .map(|(slot, _)| *slot) + .collect::>(), + vec![genesis_slot, some_slot] + ); + let (accounts, _) = accounts_index.get(&rent_exempt_pubkey, &ancestors).unwrap(); + assert_eq!( + accounts + .iter() + .map(|(slot, _)| *slot) + .collect::>(), + vec![genesis_slot, some_slot] + ); + } + } + #[test] fn test_bank_update_rewards() { // create a bank that ticks really slowly... From a348b6f81053cc075088c6614cb98491472e4d97 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 23 Apr 2020 23:54:24 +0900 Subject: [PATCH 07/28] Refacotr!!!!!! 
--- runtime/src/accounts.rs | 40 +++++++++++--------- runtime/src/accounts_db.rs | 69 ++++++++++++++++------------------- runtime/src/accounts_index.rs | 32 ++++++++-------- 3 files changed, 69 insertions(+), 72 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 0a7ea991e459f9..6bcd23146370eb 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -27,6 +27,7 @@ use solana_sdk::{ use std::{ collections::{HashMap, HashSet}, io::{BufReader, Error as IOError, Read}, + ops::RangeBounds, path::{Path, PathBuf}, sync::{Arc, Mutex, RwLock}, }; @@ -455,6 +456,19 @@ impl Accounts { } } + fn load_while_filtering bool>( + collector: &mut Vec<(Pubkey, Account)>, + option: Option<(&Pubkey, Account, Slot)>, + filter: F, + ) { + if let Some(data) = option + .filter(|(_, account, _)| filter(account)) + .map(|(pubkey, account, _slot)| (*pubkey, account)) + { + collector.push(data) + } + } + pub fn load_by_program( &self, ancestors: &Ancestors, @@ -463,34 +477,24 @@ impl Accounts { self.accounts_db.scan_accounts( ancestors, |collector: &mut Vec<(Pubkey, Account)>, option| { - if let Some(data) = option - .filter(|(_, account, _)| { - (program_id.is_none() || Some(&account.owner) == program_id) - && account.lamports != 0 - }) - .map(|(pubkey, account, _slot)| (*pubkey, account)) - { - collector.push(data) - } + Self::load_while_filtering(collector, option, |account| { + (program_id.is_none() || Some(&account.owner) == program_id) + && account.lamports != 0 + }) }, ) } - pub fn load_to_collect_rent_eargerly>( + pub fn load_to_collect_rent_eargerly>( &self, - ancestors: &HashMap, + ancestors: &Ancestors, range: R, ) -> Vec<(Pubkey, Account)> { - self.accounts_db.scan_accounts_under_range( + self.accounts_db.range_scan_accounts( ancestors, range, |collector: &mut Vec<(Pubkey, Account)>, option| { - if let Some(data) = option - .filter(|(_, account, _)| account.lamports != 0) - .map(|(pubkey, account, _slot)| (*pubkey, account)) - { - collector.push(data) - } + Self::load_while_filtering(collector, option, |account| account.lamports != 0) }, ) } diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 522db4ed5f6f9f..9fcfe31806ef36 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -173,6 +173,25 @@ impl<'a> Serialize for AccountStorageSerialize<'a> { #[derive(Clone, Default, Debug)] pub struct AccountStorage(pub HashMap); + +impl AccountStorage { + fn scan_accounts(&self, account_info: &AccountInfo, slot: Slot) -> Option<(Account, Slot)> { + self.0 + .get(&slot) + .and_then(|storage_map| storage_map.get(&account_info.store_id)) + .and_then(|store| { + Some( + store + .accounts + .get_account(account_info.offset)? + .0 + .clone_account(), + ) + }) + .map(|account| (account, slot)) + } +} + impl<'de> Deserialize<'de> for AccountStorage { fn deserialize(deserializer: D) -> Result where @@ -1095,27 +1114,16 @@ impl AccountsDB { scan_func( &mut collector, storage - .0 - .get(&slot) - .and_then(|storage_map| storage_map.get(&account_info.store_id)) - .and_then(|store| { - Some( - store - .accounts - .get_account(account_info.offset)? 
- .0 - .clone_account(), - ) - }) - .map(|account| (pubkey, account, slot)), + .scan_accounts(account_info, slot) + .map(|(account, slot)| (pubkey, account, slot)), ) }); collector } - pub fn scan_accounts_under_range( + pub fn range_scan_accounts( &self, - ancestors: &HashMap, + ancestors: &Ancestors, range: R, scan_func: F, ) -> A @@ -1127,29 +1135,14 @@ impl AccountsDB { let mut collector = A::default(); let accounts_index = self.accounts_index.read().unwrap(); let storage = self.storage.read().unwrap(); - accounts_index.scan_accounts_under_range( - ancestors, - range, - |pubkey, (account_info, slot)| { - scan_func( - &mut collector, - storage - .0 - .get(&slot) - .and_then(|storage_map| storage_map.get(&account_info.store_id)) - .and_then(|store| { - Some( - store - .accounts - .get_account(account_info.offset)? - .0 - .clone_account(), - ) - }) - .map(|account| (pubkey, account, slot)), - ) - }, - ); + accounts_index.range_scan_accounts(ancestors, range, |pubkey, (account_info, slot)| { + scan_func( + &mut collector, + storage + .scan_accounts(account_info, slot) + .map(|(account, slot)| (pubkey, account, slot)), + ) + }); collector } diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 079f41cd387af8..2a38cbfe084bc5 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -2,6 +2,7 @@ use solana_sdk::{clock::Slot, pubkey::Pubkey}; use std::sync::atomic::{AtomicU64, Ordering}; use std::{ collections::{BTreeMap, HashMap, HashSet}, + ops::RangeBounds, sync::{RwLock, RwLockReadGuard}, }; @@ -20,13 +21,14 @@ pub struct AccountsIndex { pub uncleaned_roots: HashSet, } -impl AccountsIndex { +impl<'a, T: 'a + Clone> AccountsIndex { /// call func with every pubkey and index visible from a given set of ancestors - pub fn scan_accounts(&self, ancestors: &Ancestors, mut func: F) + pub fn do_scan_accounts(&self, ancestors: &Ancestors, mut func: F, iter: I) where F: FnMut(&Pubkey, (&T, Slot)) -> (), + I: Iterator)>, { - for (pubkey, list) in self.account_maps.iter() { + for (pubkey, list) in iter { let list_r = &list.1.read().unwrap(); if let Some(index) = self.latest_slot(ancestors, &list_r) { func(pubkey, (&list_r[index].1, list_r[index].0)); @@ -34,21 +36,19 @@ impl AccountsIndex { } } - pub fn scan_accounts_under_range( - &self, - ancestors: &HashMap, - range: R, - mut func: F, - ) where + pub fn scan_accounts(&self, ancestors: &Ancestors, func: F) + where F: FnMut(&Pubkey, (&T, Slot)) -> (), - R: std::ops::RangeBounds, { - for (pubkey, list) in self.account_maps.range(range) { - let list_r = &list.1.read().unwrap(); - if let Some(index) = self.latest_slot(ancestors, &list_r) { - func(pubkey, (&list_r[index].1, list_r[index].0)); - } - } + self.do_scan_accounts(ancestors, func, self.account_maps.iter()); + } + + pub fn range_scan_accounts(&self, ancestors: &Ancestors, range: R, func: F) + where + F: FnMut(&Pubkey, (&T, Slot)) -> (), + R: RangeBounds, + { + self.do_scan_accounts(ancestors, func, self.account_maps.range(range)); } fn get_rooted_entries(&self, slice: SlotSlice) -> SlotList { From c78d339199e69f85e0fda1f1662820df796f66b0 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 24 Apr 2020 01:36:29 +0900 Subject: [PATCH 08/28] Refactoring follow up --- runtime/src/accounts.rs | 1 + runtime/src/accounts_db.rs | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 6bcd23146370eb..6f9fe72ce3aa54 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ 
-494,6 +494,7 @@ impl Accounts { ancestors, range, |collector: &mut Vec<(Pubkey, Account)>, option| { + // THINK ABOUT ZERO LAMPORTS FILTERING RAMIFICATION Self::load_while_filtering(collector, option, |account| account.lamports != 0) }, ) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 9fcfe31806ef36..ea6d24340647b8 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -47,6 +47,7 @@ use std::{ collections::{HashMap, HashSet}, fmt, io::{BufReader, Cursor, Error as IOError, ErrorKind, Read, Result as IOResult}, + ops::RangeBounds, path::{Path, PathBuf}, sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, sync::{Arc, Mutex, RwLock}, @@ -1130,7 +1131,7 @@ impl AccountsDB { where F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (), A: Default, - R: std::ops::RangeBounds, + R: RangeBounds, { let mut collector = A::default(); let accounts_index = self.accounts_index.read().unwrap(); From 3f41af3ad52409c8f125de80e9a2e53da60839ce Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 24 Apr 2020 01:59:12 +0900 Subject: [PATCH 09/28] More tiny cleanups --- runtime/src/accounts_index.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 2a38cbfe084bc5..110721023ecbea 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -22,8 +22,7 @@ pub struct AccountsIndex { } impl<'a, T: 'a + Clone> AccountsIndex { - /// call func with every pubkey and index visible from a given set of ancestors - pub fn do_scan_accounts(&self, ancestors: &Ancestors, mut func: F, iter: I) + fn do_scan_accounts(&self, ancestors: &Ancestors, mut func: F, iter: I) where F: FnMut(&Pubkey, (&T, Slot)) -> (), I: Iterator)>, @@ -36,6 +35,7 @@ impl<'a, T: 'a + Clone> AccountsIndex { } } + /// call func with every pubkey and index visible from a given set of ancestors pub fn scan_accounts(&self, ancestors: &Ancestors, func: F) where F: FnMut(&Pubkey, (&T, Slot)) -> (), @@ -43,6 +43,7 @@ impl<'a, T: 'a + Clone> AccountsIndex { self.do_scan_accounts(ancestors, func, self.account_maps.iter()); } + /// call func with every pubkey and index visible from a given set of ancestors with range pub fn range_scan_accounts(&self, ancestors: &Ancestors, range: R, func: F) where F: FnMut(&Pubkey, (&T, Slot)) -> (), From 7d059689a8e86a82031fdad66119e31be4b8290d Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 24 Apr 2020 14:55:17 +0900 Subject: [PATCH 10/28] Don't rewrite 0-lamport accounts to be deterministic --- runtime/src/accounts.rs | 10 +-- runtime/src/bank.rs | 190 +++++++++++++++++++++++++++++----------- 2 files changed, 142 insertions(+), 58 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 6f9fe72ce3aa54..f02b1706128578 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -462,7 +462,9 @@ impl Accounts { filter: F, ) { if let Some(data) = option - .filter(|(_, account, _)| filter(account)) + // Don't ever load zero lamport accounts into runtime because + // the existence of zero-lamport accounts are never deterministic!! 
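+            // Whether a zero-lamport entry is still present depends on when
+            // account clean-up last purged it, so letting such accounts reach
+            // the rent collector could make the resulting bank hash differ
+            // between nodes (see test_rent_eager_collect_rent_zero_lamport_deterministic).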
+ .filter(|(_, account, _)| account.lamports > 0 && filter(account)) .map(|(pubkey, account, _slot)| (*pubkey, account)) { collector.push(data) @@ -478,8 +480,7 @@ impl Accounts { ancestors, |collector: &mut Vec<(Pubkey, Account)>, option| { Self::load_while_filtering(collector, option, |account| { - (program_id.is_none() || Some(&account.owner) == program_id) - && account.lamports != 0 + program_id.is_none() || Some(&account.owner) == program_id }) }, ) @@ -494,8 +495,7 @@ impl Accounts { ancestors, range, |collector: &mut Vec<(Pubkey, Account)>, option| { - // THINK ABOUT ZERO LAMPORTS FILTERING RAMIFICATION - Self::load_while_filtering(collector, option, |account| account.lamports != 0) + Self::load_while_filtering(collector, option, |_| true) }, ) } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 661b7e4f815e35..70800751e12fab 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1673,7 +1673,8 @@ impl Bank { let mut rent = 0; for (pubkey, mut account) in accounts { rent += self.rent_collector.update(&pubkey, &mut account); - // to purge old AppendVec, store even regardless rent is zero or not + // Store all of them unconditionally to purge old AppendVec, + // even if collected rent is 0 (= not updated). self.store_account(&pubkey, &account); } self.collected_rent.fetch_add(rent, Ordering::Relaxed); @@ -1747,12 +1748,32 @@ impl Bank { ranges } + fn determine_collection_cycle_params( + is_in_longer_cycle: bool, + slot_count_in_two_day: SlotCount, + slot_count_per_epoch: SlotCount, + first_normal_epoch: Epoch, + ) -> (EpochCount, Epoch, PartitionCount) { + if !is_in_longer_cycle { + (1, 0, slot_count_per_epoch) + } else { + let epoch_count_in_cycle = slot_count_in_two_day / slot_count_per_epoch; + ( + epoch_count_in_cycle, + first_normal_epoch, + slot_count_per_epoch * epoch_count_in_cycle, + ) + } + } + fn partition_in_collection_cycle( &self, start_slot_index: SlotIndex, end_slot_index: SlotIndex, current_epoch: Epoch, ) -> (PartitionIndex, PartitionIndex, PartitionCount) { + // Assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect rent + // eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally. let slot_count_in_two_day: SlotCount = 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); @@ -1763,21 +1784,13 @@ impl Bank { slot_count_per_epoch, slot_count_in_two_day, ); - let (epoch_count_per_cycle, base_epoch, partition_count) = if !is_in_longer_cycle { - (1, 0, slot_count_per_epoch) - } else { - // Given short epochs, it's too costly to collect rent eagerly within an epoch, so lower the frequency of - // it. - // these logics aren't strictly eager rent collection anymore; should only used for - // development/performance - // purpose not under OperationMode::Stable!!!! 
- let epoch_count_in_cycle = slot_count_in_two_day / slot_count_per_epoch; - ( - epoch_count_in_cycle, + let (epoch_count_per_cycle, base_epoch, partition_count) = + Self::determine_collection_cycle_params( + is_in_longer_cycle, + slot_count_in_two_day, + slot_count_per_epoch, first_normal_epoch, - slot_count_per_epoch * epoch_count_in_cycle, - ) - }; + ); // use common code-path for both very-likely and very-unlikely for the sake of minimized // risk of any mis-calculation instead of neligilbe faster computation per slot for the @@ -1838,6 +1851,11 @@ impl Bank { slot_count_per_epoch: SlotCount, slot_count_in_two_day: SlotCount, ) -> bool { + // Given short epochs, it's too costly to collect rent eagerly + // within an epoch, so lower the frequency of it. + // These logic is't strictly eager anymore and should only be used + // for development/performance purpose. + // Absolutely not under OperationMode::Stable!!!! current_epoch >= first_normal_epoch && slot_count_per_epoch < slot_count_in_two_day } @@ -2516,6 +2534,7 @@ mod tests { use super::*; use crate::{ accounts_db::{get_temp_accounts_paths, tests::copy_append_vecs}, + accounts_index::AncestorList, genesis_utils::{ create_genesis_config_with_leader, GenesisConfigInfo, BOOTSTRAP_VALIDATOR_LAMPORTS, }, @@ -3754,20 +3773,37 @@ mod tests { ); } + impl Bank { + fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &AncestorList) -> Vec { + let accounts_index = self.rc.accounts.accounts_db.accounts_index.read().unwrap(); + let (accounts, _) = accounts_index.get(&pubkey, &ancestors).unwrap(); + accounts + .iter() + .map(|(slot, _)| *slot) + .collect::>() + } + } + #[test] fn test_rent_eager_collect_rent_by_range() { solana_logger::setup(); let (genesis_config, _mint_keypair) = create_genesis_config(1); + let zero_lamport_pubkey = Pubkey::new_rand(); let rent_due_pubkey = Pubkey::new_rand(); let rent_exempt_pubkey = Pubkey::new_rand(); let mut bank = Arc::new(Bank::new(&genesis_config)); + let zero_lamports = 0; let little_lamports = 1234; let large_lamports = 123456789; let rent_collected = 22; + bank.store_account( + &zero_lamport_pubkey, + &Account::new(zero_lamports, 0, &Pubkey::default()), + ); bank.store_account( &rent_due_pubkey, &Account::new(little_lamports, 0, &Pubkey::default()), @@ -3789,25 +3825,18 @@ mod tests { little_lamports ); assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch, 0); - { - let accounts_index = bank.rc.accounts.accounts_db.accounts_index.read().unwrap(); - let (accounts, _) = accounts_index.get(&rent_due_pubkey, &ancestors).unwrap(); - assert_eq!( - accounts - .iter() - .map(|(slot, _)| *slot) - .collect::>(), - vec![genesis_slot] - ); - let (accounts, _) = accounts_index.get(&rent_exempt_pubkey, &ancestors).unwrap(); - assert_eq!( - accounts - .iter() - .map(|(slot, _)| *slot) - .collect::>(), - vec![genesis_slot] - ); - } + assert_eq!( + bank.slots_by_pubkey(&rent_due_pubkey, &ancestors), + vec![genesis_slot] + ); + assert_eq!( + bank.slots_by_pubkey(&rent_exempt_pubkey, &ancestors), + vec![genesis_slot] + ); + assert_eq!( + bank.slots_by_pubkey(&zero_lamport_pubkey, &ancestors), + vec![genesis_slot] + ); bank.collect_rent_by_range(0, 0, 1); // all range @@ -3826,26 +3855,81 @@ mod tests { large_lamports ); assert_eq!(bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch, 6); + assert_eq!( + bank.slots_by_pubkey(&rent_due_pubkey, &ancestors), + vec![genesis_slot, some_slot] + ); + assert_eq!( + bank.slots_by_pubkey(&rent_exempt_pubkey, &ancestors), + vec![genesis_slot, some_slot] + ); + 
assert_eq!( + bank.slots_by_pubkey(&zero_lamport_pubkey, &ancestors), + vec![genesis_slot] + ); + } - { - let accounts_index = bank.rc.accounts.accounts_db.accounts_index.read().unwrap(); - let (accounts, _) = accounts_index.get(&rent_due_pubkey, &ancestors).unwrap(); - assert_eq!( - accounts - .iter() - .map(|(slot, _)| *slot) - .collect::>(), - vec![genesis_slot, some_slot] - ); - let (accounts, _) = accounts_index.get(&rent_exempt_pubkey, &ancestors).unwrap(); - assert_eq!( - accounts - .iter() - .map(|(slot, _)| *slot) - .collect::>(), - vec![genesis_slot, some_slot] - ); - } + #[test] + fn test_rent_eager_collect_rent_zero_lamport_deterministic() { + solana_logger::setup(); + + let (genesis_config, _mint_keypair) = create_genesis_config(1); + + let zero_lamport_pubkey = Pubkey::new_rand(); + + let genesis_bank1 = Arc::new(Bank::new(&genesis_config)); + let genesis_bank2 = Arc::new(Bank::new(&genesis_config)); + let bank1_with_zero = Arc::new(new_from_parent(&genesis_bank1)); + let bank1_without_zero = Arc::new(new_from_parent(&genesis_bank2)); + let zero_lamports = 0; + + let account = Account::new(zero_lamports, 0, &Pubkey::default()); + bank1_with_zero.store_account(&zero_lamport_pubkey, &account); + bank1_without_zero.store_account(&zero_lamport_pubkey, &account); + + bank1_without_zero + .rc + .accounts + .accounts_db + .accounts_index + .write() + .unwrap() + .add_root(genesis_bank1.slot() + 1); + bank1_without_zero + .rc + .accounts + .accounts_db + .accounts_index + .write() + .unwrap() + .purge(&zero_lamport_pubkey); + + let some_slot = 1000; + let bank2_with_zero = Arc::new(Bank::new_from_parent( + &bank1_with_zero, + &Pubkey::default(), + some_slot, + )); + let bank2_without_zero = Arc::new(Bank::new_from_parent( + &bank1_without_zero, + &Pubkey::default(), + some_slot, + )); + let hash1_with_zero = bank1_with_zero.hash(); + let hash1_without_zero = bank1_without_zero.hash(); + assert_eq!(hash1_with_zero, hash1_without_zero); + assert_ne!(hash1_with_zero, Hash::default()); + + bank2_with_zero.collect_rent_by_range(0, 0, 1); // all range + bank2_without_zero.collect_rent_by_range(0, 0, 1); // all range + + bank2_with_zero.freeze(); + let hash2_with_zero = bank2_with_zero.hash(); + bank2_without_zero.freeze(); + let hash2_without_zero = bank2_without_zero.hash(); + + assert_eq!(hash2_with_zero, hash2_without_zero); + assert_ne!(hash2_with_zero, Hash::default()); } #[test] From 4d936475090ae91ca65574b5ce569aad2d6e82f0 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 24 Apr 2020 22:48:01 +0900 Subject: [PATCH 11/28] Refactor a bit --- runtime/src/bank.rs | 95 +++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 50 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 70800751e12fab..b8cdc9c85f156e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -79,10 +79,12 @@ pub type BankSlotDelta = SlotDelta>; type TransactionAccountRefCells = Vec>>; type TransactionLoaderRefCells = Vec)>>; -// eager rent collection cycle is composed of number of tiny pubkey ranges to scan -// the whole pubkey value domain. +// Eager rent collection repeats in cyclic manner. +// Each cycle is composed of number of tiny pubkey subranges +// to scan, which is always multiple of the number of slots in epoch. 
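+// The RentCollectionCycleParams tuple below packs, in order: the epoch being
+// collected, that epoch's slot count, whether the cycle spans multiple epochs,
+// the base epoch the cycle counts from, the number of epochs per cycle, and
+// the total partition count of the cycle.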
type PartitionIndex = u64; type PartitionCount = u64; +type RentCollectionCycleParams = (Epoch, SlotCount, bool, Epoch, EpochCount, PartitionCount); type EpochCount = u64; @@ -1749,19 +1751,42 @@ impl Bank { } fn determine_collection_cycle_params( - is_in_longer_cycle: bool, - slot_count_in_two_day: SlotCount, + &self, + current_epoch: Epoch, slot_count_per_epoch: SlotCount, - first_normal_epoch: Epoch, - ) -> (EpochCount, Epoch, PartitionCount) { + ) -> RentCollectionCycleParams { + // Assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect rent + // eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally. + let slot_count_in_two_day: SlotCount = + 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; + let first_normal_epoch = self.epoch_schedule.first_normal_epoch; + let is_in_longer_cycle = Self::use_longer_collection_cycle( + current_epoch, + first_normal_epoch, + slot_count_per_epoch, + slot_count_in_two_day, + ); + if !is_in_longer_cycle { - (1, 0, slot_count_per_epoch) + ( + current_epoch, + slot_count_per_epoch, + false, + 0, + 1, + slot_count_per_epoch, + ) } else { let epoch_count_in_cycle = slot_count_in_two_day / slot_count_per_epoch; + let partition_count = slot_count_per_epoch * epoch_count_in_cycle; + ( - epoch_count_in_cycle, + current_epoch, + slot_count_per_epoch, + true, first_normal_epoch, - slot_count_per_epoch * epoch_count_in_cycle, + epoch_count_in_cycle, + partition_count, ) } } @@ -1772,46 +1797,21 @@ impl Bank { end_slot_index: SlotIndex, current_epoch: Epoch, ) -> (PartitionIndex, PartitionIndex, PartitionCount) { - // Assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect rent - // eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally. - let slot_count_in_two_day: SlotCount = - 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); - let first_normal_epoch = self.epoch_schedule.first_normal_epoch; - let is_in_longer_cycle = Self::use_longer_collection_cycle( - current_epoch, - first_normal_epoch, - slot_count_per_epoch, - slot_count_in_two_day, - ); - let (epoch_count_per_cycle, base_epoch, partition_count) = - Self::determine_collection_cycle_params( - is_in_longer_cycle, - slot_count_in_two_day, - slot_count_per_epoch, - first_normal_epoch, - ); + let cycle_params = + self.determine_collection_cycle_params(current_epoch, slot_count_per_epoch); + let (_, _, is_in_longer_cycle, _, _, partition_count) = cycle_params; // use common code-path for both very-likely and very-unlikely for the sake of minimized // risk of any mis-calculation instead of neligilbe faster computation per slot for the // likely case. - let mut start_partition_index = Self::partition_index_in_collection_cycle( - start_slot_index, - current_epoch, - base_epoch, - epoch_count_per_cycle, - slot_count_per_epoch, - ); - let end_partition_index = Self::partition_index_in_collection_cycle( - end_slot_index, - current_epoch, - base_epoch, - epoch_count_per_cycle, - slot_count_per_epoch, - ); + let mut start_partition_index = + Self::partition_index_in_collection_cycle(start_slot_index, cycle_params); + let end_partition_index = + Self::partition_index_in_collection_cycle(end_slot_index, cycle_params); // do special handling... 
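// Standalone sketch (illustrative names, not part of the patch) of what
// partition_index_in_collection_cycle computes: epochs are laid out back to
// back inside a multi-epoch cycle, so a slot index of some epoch maps onto a
// partition index of that cycle.
fn partition_index(
    slot_index_in_epoch: u64,
    current_epoch: u64,
    base_epoch: u64,
    epoch_count_per_cycle: u64,
    slot_count_per_epoch: u64,
) -> u64 {
    let epoch_index_in_cycle = (current_epoch - base_epoch) % epoch_count_per_cycle;
    slot_index_in_epoch + epoch_index_in_cycle * slot_count_per_epoch
}

fn main() {
    // 8 slots per epoch and 4 epochs per cycle give 32 partitions per cycle.
    assert_eq!(partition_index(0, 3, 3, 4, 8), 0); // first slot of the cycle
    assert_eq!(partition_index(5, 4, 3, 4, 8), 13); // second epoch, slot 5
    assert_eq!(partition_index(7, 6, 3, 4, 8), 31); // last slot of the cycle
}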
- let is_across_epoch_boundary = Self::across_epoch_boundary_in_collection_cycle( + let is_across_epoch_boundary = Self::across_gapped_epoch_boundary_in_collection_cycle( start_slot_index, end_slot_index, start_partition_index, @@ -1823,22 +1823,17 @@ impl Bank { (start_partition_index, end_partition_index, partition_count) } - fn across_epoch_boundary_in_collection_cycle( + fn across_gapped_epoch_boundary_in_collection_cycle( start_slot_index: SlotIndex, end_slot_index: SlotIndex, start_partition_index: PartitionIndex, ) -> bool { - (start_slot_index == end_slot_index - || (start_slot_index == 0 && (end_slot_index - start_slot_index > 1))) - && start_partition_index > 0 + start_slot_index == 0 && end_slot_index != 1 && start_partition_index > 0 } fn partition_index_in_collection_cycle( slot_index_in_epoch: SlotIndex, - current_epoch: Epoch, - base_epoch: Epoch, - epoch_count_per_cycle: EpochCount, - slot_count_per_epoch: SlotCount, + (current_epoch, slot_count_per_epoch, _, base_epoch, epoch_count_per_cycle, _): RentCollectionCycleParams, ) -> PartitionIndex { let epoch_offset = current_epoch - base_epoch; let epoch_index_in_cycle = epoch_offset % epoch_count_per_cycle; From aec99a1a4cdf5b932af462402faad37c082fdf36 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sat, 25 Apr 2020 00:31:26 +0900 Subject: [PATCH 12/28] Do hard fork, restore tests, and perf. mitigation --- ledger/src/bank_forks_utils.rs | 1 + ledger/src/snapshot_utils.rs | 6 +- runtime/src/bank.rs | 201 +++++++++++++++++++++++++++++---- sdk/src/hash.rs | 8 ++ 4 files changed, 194 insertions(+), 22 deletions(-) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 7b65fb5f1615b4..43c7686f2f7d0e 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -60,6 +60,7 @@ pub fn load( &snapshot_config.snapshot_path, &archive_filename, compression, + genesis_config, ) .expect("Load from snapshot failed"); diff --git a/ledger/src/snapshot_utils.rs b/ledger/src/snapshot_utils.rs index 11c50c9d792248..9d58d8de7da411 100644 --- a/ledger/src/snapshot_utils.rs +++ b/ledger/src/snapshot_utils.rs @@ -15,7 +15,7 @@ use solana_runtime::{ MAX_SNAPSHOT_DATA_FILE_SIZE, }, }; -use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}; +use solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey}; use std::{ cmp::Ordering, fs::{self, File}, @@ -451,6 +451,7 @@ pub fn bank_from_archive>( snapshot_path: &PathBuf, snapshot_tar: P, compression: CompressionType, + genesis_config: &GenesisConfig, ) -> Result { // Untar the snapshot into a temp directory under `snapshot_config.snapshot_path()` let unpack_dir = tempfile::tempdir_in(snapshot_path)?; @@ -470,6 +471,7 @@ pub fn bank_from_archive>( frozen_account_pubkeys, &unpacked_snapshots_dir, unpacked_accounts_dir, + genesis_config, )?; if !bank.verify_snapshot_bank() { @@ -615,6 +617,7 @@ fn rebuild_bank_from_snapshots
<P>
( frozen_account_pubkeys: &[Pubkey], unpacked_snapshots_dir: &PathBuf, append_vecs_path: P, + genesis_config: &GenesisConfig, ) -> Result where P: AsRef, @@ -643,6 +646,7 @@ where ))); } }; + bank.operating_mode = Some(genesis_config.operating_mode); info!("Rebuilding accounts..."); let rc = bank::BankRc::from_stream( account_paths, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b8cdc9c85f156e..7eeddb22a71a55 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -42,7 +42,7 @@ use solana_sdk::{ }, epoch_schedule::EpochSchedule, fee_calculator::{FeeCalculator, FeeRateGovernor}, - genesis_config::GenesisConfig, + genesis_config::{GenesisConfig, OperatingMode}, hard_forks::HardForks, hash::{extend_and_hash, hashv, Hash}, incinerator, @@ -74,6 +74,12 @@ pub const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5; +pub const MAINNET_BETA_GENESIS_HASH: Hash = Hash::new_from_array([ + 69, 41, 105, 152, 166, 248, 226, 167, 132, 219, 93, 159, 149, 225, 143, 194, 63, 112, 68, 26, + 16, 57, 68, 104, 1, 8, 152, 121, 176, 140, 126, 240, +]); +const EAGER_RENT_COLLECTION_START_EPOCH: Epoch = 30; + type BankStatusCache = StatusCache>; pub type BankSlotDelta = SlotDelta>; type TransactionAccountRefCells = Vec>>; @@ -367,6 +373,15 @@ pub struct Bank { #[serde(skip)] pub skip_drop: AtomicBool, + + #[serde(skip)] + pub operating_mode: Option, + + #[serde(skip)] + pub genesis_hash: Option, + + #[serde(skip)] + pub lazy_rent_collection: AtomicBool, } impl Default for BlockhashQueue { @@ -386,6 +401,8 @@ impl Bank { frozen_account_pubkeys: &[Pubkey], ) -> Self { let mut bank = Self::default(); + bank.operating_mode = Some(genesis_config.operating_mode); + bank.genesis_hash = Some(genesis_config.hash()); bank.ancestors.insert(bank.slot(), 0); bank.rc.accounts = Arc::new(Accounts::new(paths)); @@ -479,6 +496,11 @@ impl Bank { last_vote_sync: AtomicU64::new(parent.last_vote_sync.load(Ordering::Relaxed)), rewards: None, skip_drop: AtomicBool::new(false), + operating_mode: parent.operating_mode, + genesis_hash: parent.genesis_hash, + lazy_rent_collection: AtomicBool::new( + parent.lazy_rent_collection.load(Ordering::Relaxed), + ), }; datapoint_info!( @@ -1689,8 +1711,6 @@ impl Bank { end_index: PartitionIndex, partition_count: PartitionCount, ) -> std::ops::RangeInclusive { - // pubkey (= account address, including derived ones?) distribution should be uniform? 
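// Standalone sketch of the prefix arithmetic used here to carve the 32-byte
// pubkey space into inclusive per-partition ranges; the helper name and the
// byte-array return type are illustrative (the real method returns a
// RangeInclusive of Pubkey values).
fn prefix_range(start_index: u64, end_index: u64, partition_count: u64) -> ([u8; 32], [u8; 32]) {
    let partition_width = u64::max_value() / partition_count;
    // The start index is treated as exclusive: the range begins where partition
    // `start_index` ends, except for the (0, 0) special case covering the bottom.
    let start_prefix = if start_index == 0 && end_index == 0 {
        0
    } else {
        (start_index + 1) * partition_width
    };
    // The last partition is padded up to the very top of the u64 prefix space.
    let end_prefix = if end_index + 1 == partition_count {
        u64::max_value()
    } else {
        (end_index + 1) * partition_width - 1
    };
    let mut start_key = [0x00u8; 32];
    let mut end_key = [0xffu8; 32];
    start_key[..8].copy_from_slice(&start_prefix.to_be_bytes());
    end_key[..8].copy_from_slice(&end_prefix.to_be_bytes());
    (start_key, end_key)
}

fn main() {
    // One partition spans the whole key space...
    assert_eq!(prefix_range(0, 0, 1), ([0x00u8; 32], [0xffu8; 32]));
    // ...while two partitions split it at the u64-prefix midpoint.
    let (_, first_half_end) = prefix_range(0, 0, 2);
    assert_eq!(first_half_end[0], 0x7f);
    let (second_half_start, _) = prefix_range(0, 1, 2);
    assert_eq!(second_half_start[0], 0x7f);
}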
- let partition_width = Slot::max_value() / partition_count; let start_key_prefix = if start_index == 0 && end_index == 0 { 0 @@ -1708,7 +1728,6 @@ impl Bank { let mut end_pubkey = [0xffu8; 32]; BigEndian::write_u64(&mut start_pubkey[..], start_key_prefix); BigEndian::write_u64(&mut end_pubkey[..], end_key_prefix); - // special case parent_slot is in previous current_epoch trace!( "pubkey_range_by_partition: ({}-{})/{}: {:02x?}-{:02x?}", start_index, @@ -1729,6 +1748,36 @@ impl Bank { let mut ranges = vec![]; + let slot_count_per_normal_epoch = + self.get_slots_in_epoch(self.epoch_schedule.first_normal_epoch); + let slot_count_in_two_day: SlotCount = + 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; + let is_in_constant_cycle = + self.use_constant_collection_cycle(slot_count_per_normal_epoch, slot_count_in_two_day); + if is_in_constant_cycle { + let parent_cycle = self.parent_slot() / slot_count_in_two_day; + let current_cycle = self.slot() / slot_count_in_two_day; + let mut parent_cycle_index = self.parent_slot() % slot_count_in_two_day; + let current_cycle_index = self.slot() % slot_count_in_two_day; + if parent_cycle < current_cycle { + if current_cycle_index > 0 { + ranges.push(( + parent_cycle_index, + slot_count_in_two_day - 1, + slot_count_in_two_day, + )); + } + parent_cycle_index = 0; + } + + ranges.push(( + parent_cycle_index, + current_cycle_index, + slot_count_in_two_day, + )); + return ranges; + } + if parent_epoch < current_epoch { if current_slot_index > 0 { let last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1; @@ -1760,10 +1809,10 @@ impl Bank { let slot_count_in_two_day: SlotCount = 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; let first_normal_epoch = self.epoch_schedule.first_normal_epoch; - let is_in_longer_cycle = Self::use_longer_collection_cycle( + let slot_count_per_normal_epoch = self.get_slots_in_epoch(first_normal_epoch); + let is_in_longer_cycle = self.use_longer_collection_cycle( current_epoch, - first_normal_epoch, - slot_count_per_epoch, + slot_count_per_normal_epoch, slot_count_in_two_day, ); @@ -1833,36 +1882,65 @@ impl Bank { fn partition_index_in_collection_cycle( slot_index_in_epoch: SlotIndex, - (current_epoch, slot_count_per_epoch, _, base_epoch, epoch_count_per_cycle, _): RentCollectionCycleParams, + ( + current_epoch, + slot_count_per_epoch, + _, + base_epoch, + epoch_count_per_cycle, + _, + ): RentCollectionCycleParams, ) -> PartitionIndex { let epoch_offset = current_epoch - base_epoch; let epoch_index_in_cycle = epoch_offset % epoch_count_per_cycle; slot_index_in_epoch + epoch_index_in_cycle * slot_count_per_epoch } + // Given short epochs, it's too costly to collect rent eagerly + // within an epoch, so lower the frequency of it. + // These logic is't strictly eager anymore and should only be used + // for development/performance purpose. + // Absolutely not under OperationMode::Stable!!!! 
fn use_longer_collection_cycle( + &self, current_epoch: Epoch, - first_normal_epoch: Epoch, - slot_count_per_epoch: SlotCount, + slot_count_per_normal_epoch: SlotCount, + slot_count_in_two_day: SlotCount, + ) -> bool { + let first_normal_epoch = self.epoch_schedule.first_normal_epoch; + current_epoch >= first_normal_epoch && slot_count_per_normal_epoch < slot_count_in_two_day + } + + fn use_constant_collection_cycle( + &self, + slot_count_per_normal_epoch: SlotCount, slot_count_in_two_day: SlotCount, ) -> bool { - // Given short epochs, it's too costly to collect rent eagerly - // within an epoch, so lower the frequency of it. - // These logic is't strictly eager anymore and should only be used - // for development/performance purpose. - // Absolutely not under OperationMode::Stable!!!! - current_epoch >= first_normal_epoch && slot_count_per_epoch < slot_count_in_two_day + self.operating_mode.unwrap() != OperatingMode::Stable + && slot_count_per_normal_epoch < slot_count_in_two_day } fn collect_rent_eagerly(&self) { let mut measure = Measure::start("collect_rent_eagerly-ms"); - for (start_index, end_index, partition_count) in self.eager_rent_ranges_for_epochs() { - self.collect_rent_by_range(start_index, end_index, partition_count); + if self.enable_eager_rent_collection() { + for (start_index, end_index, partition_count) in self.eager_rent_ranges_for_epochs() { + self.collect_rent_by_range(start_index, end_index, partition_count); + } } measure.stop(); inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize); } + fn enable_eager_rent_collection(&self) -> bool { + if self.lazy_rent_collection.load(Ordering::Relaxed) { + return false; + } + + !(self.operating_mode.unwrap() == OperatingMode::Stable + && self.genesis_hash.unwrap() == MAINNET_BETA_GENESIS_HASH + && self.epoch() < EAGER_RENT_COLLECTION_START_EPOCH) + } + /// Process a batch of transactions. #[must_use] pub fn load_execute_and_commit_transactions( @@ -3271,6 +3349,7 @@ mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_rent_complex() { + solana_logger::setup(); let mock_program_id = Pubkey::new(&[2u8; 32]); let (mut genesis_config, _mint_keypair) = create_genesis_config(10); @@ -3285,7 +3364,13 @@ mod tests { burn_percent: 10, }; - let root_bank = Arc::new(Bank::new(&genesis_config)); + let root_bank = Bank::new(&genesis_config); + // until we completely transition to the eager rent collection, + // we must ensure lazy rent collection doens't get broken! 
+ root_bank + .lazy_rent_collection + .store(true, Ordering::Relaxed); + let root_bank = Arc::new(root_bank); let bank = create_child_bank_for_rent_test(&root_bank, &genesis_config, mock_program_id); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -3486,6 +3571,7 @@ mod tests { let leader_lamports = 3; let mut genesis_config = create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + genesis_config.operating_mode = OperatingMode::Stable; const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64; const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; @@ -3555,6 +3641,7 @@ mod tests { let leader_lamports = 3; let mut genesis_config = create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + genesis_config.operating_mode = OperatingMode::Stable; const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64; const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; @@ -3603,11 +3690,12 @@ mod tests { } #[test] - fn test_rent_eager_without_warmup_epochs_under_longer_cycle() { + fn test_rent_eager_with_warmup_epochs_under_longer_cycle() { let leader_pubkey = Pubkey::new_rand(); let leader_lamports = 3; let mut genesis_config = create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + genesis_config.operating_mode = OperatingMode::Stable; const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8; const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; @@ -3622,7 +3710,7 @@ mod tests { assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222)); - bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 223)); + bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127)); assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(126, 127, 128)]); @@ -3657,6 +3745,77 @@ mod tests { assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 431872)]); } + #[test] + fn test_rent_eager_under_longer_cycle_for_developemnt() { + solana_logger::setup(); + let leader_pubkey = Pubkey::new_rand(); + let leader_lamports = 3; + let mut genesis_config = + create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config; + + const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8; + const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3; + genesis_config.epoch_schedule = + EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, true); + + let mut bank = Arc::new(Bank::new(&genesis_config)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); + assert_eq!(bank.epoch_schedule.first_normal_epoch, 3); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222)); + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(222, 223, 432000)] + ); + + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 0)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(223, 224, 432000)] + ); + + bank = 
Arc::new(new_from_parent(&bank)); + assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); + assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 1)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(224, 225, 432000)] + ); + + bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 432000 - 2)); + bank = Arc::new(new_from_parent(&bank)); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(431998, 431999, 432000)] + ); + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + bank = Arc::new(new_from_parent(&bank)); + assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + + bank = Arc::new(Bank::new_from_parent( + &bank, + &Pubkey::default(), + 864000 - 20, + )); + bank = Arc::new(Bank::new_from_parent( + &bank, + &Pubkey::default(), + 864000 + 39, + )); + assert_eq!( + bank.eager_rent_ranges_for_epochs(), + vec![(431980, 431999, 432000), (0, 39, 432000)] + ); + } + #[test] fn test_rent_eager_pubkey_range_minimal() { let range = Bank::pubkey_range_by_partition(0, 0, 1); diff --git a/sdk/src/hash.rs b/sdk/src/hash.rs index c255d61768b660..46f1a371bb5384 100644 --- a/sdk/src/hash.rs +++ b/sdk/src/hash.rs @@ -78,6 +78,14 @@ impl Hash { pub fn new(hash_slice: &[u8]) -> Self { Hash(<[u8; HASH_BYTES]>::try_from(hash_slice).unwrap()) } + + pub const fn new_from_array(hash_array: [u8; HASH_BYTES]) -> Self { + Self(hash_array) + } + + pub fn to_bytes(self) -> [u8; HASH_BYTES] { + self.0 + } } /// Return a Sha256 hash for the given data. From 691f8f06c1e24e8c6f26210b571ca69e87d3797e Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sat, 25 Apr 2020 18:03:53 +0900 Subject: [PATCH 13/28] Fix build... --- core/tests/bank_forks.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/tests/bank_forks.rs b/core/tests/bank_forks.rs index 572674d331d9f7..7b2fd1e6062a81 100644 --- a/core/tests/bank_forks.rs +++ b/core/tests/bank_forks.rs @@ -20,6 +20,7 @@ mod tests { }; use solana_sdk::{ clock::Slot, + genesis_config::GenesisConfig, hash::hashv, pubkey::Pubkey, signature::{Keypair, Signer}, @@ -95,6 +96,7 @@ mod tests { &CompressionType::Bzip2, ), CompressionType::Bzip2, + &GenesisConfig::default(), ) .unwrap(); From 5ea7b3851bccc0168167ce8111aa8d68daecad6a Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sat, 25 Apr 2020 23:21:38 +0900 Subject: [PATCH 14/28] Refactor and add switch over for testnet (TdS) --- runtime/src/bank.rs | 231 ++++++++++++++++++++++++-------------------- 1 file changed, 127 insertions(+), 104 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7eeddb22a71a55..b38d30926b8973 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -78,7 +78,13 @@ pub const MAINNET_BETA_GENESIS_HASH: Hash = Hash::new_from_array([ 69, 41, 105, 152, 166, 248, 226, 167, 132, 219, 93, 159, 149, 225, 143, 194, 63, 112, 68, 26, 16, 57, 68, 104, 1, 8, 152, 121, 176, 140, 126, 240, ]); -const EAGER_RENT_COLLECTION_START_EPOCH: Epoch = 30; +const MAINNET_BETA_EAGER_RENT_COLLECTION_START_EPOCH: Epoch = 30; // TENTATIVE!!!! + +pub const TESTNET_GENESIS_HASH: Hash = Hash::new_from_array([ + 58, 19, 46, 206, 16, 48, 94, 193, 131, 7, 37, 80, 47, 162, 183, 231, 235, 129, 87, 233, 18, 61, + 76, 31, 101, 74, 113, 120, 113, 97, 220, 33, +]); +const TESTNET_EAGER_RENT_COLLECTION_START_EPOCH: Epoch = 40; // TENTATIVE!!!! 
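// Standalone sketch of the cluster gating implemented by
// enable_eager_rent_collection in this series: on the existing clusters, eager
// collection stays off until the (tentative) start epochs above, and tests can
// force the old lazy behavior explicitly. The enum and parameter names here are
// illustrative stand-ins, not the patch's own API.
#[derive(Clone, Copy, PartialEq)]
enum Mode {
    Stable,      // mainnet-beta
    Preview,     // testnet (TdS)
    Development, // everything else
}

fn eager_collection_enabled(
    mode: Mode,
    genesis_matches_known_cluster: bool,
    epoch: u64,
    start_epoch: u64,
    lazy_override: bool,
) -> bool {
    if lazy_override {
        return false;
    }
    let not_yet_for_existing_clusters = (mode == Mode::Stable || mode == Mode::Preview)
        && genesis_matches_known_cluster
        && epoch < start_epoch;
    !not_yet_for_existing_clusters
}

fn main() {
    // Mainnet-beta before its start epoch: keep collecting lazily.
    assert!(!eager_collection_enabled(Mode::Stable, true, 10, 30, false));
    // Once the start epoch is reached, eager collection turns on.
    assert!(eager_collection_enabled(Mode::Stable, true, 30, 30, false));
    // Development clusters are eager from the beginning.
    assert!(eager_collection_enabled(Mode::Development, false, 0, 30, false));
}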
type BankStatusCache = StatusCache>; pub type BankSlotDelta = SlotDelta>; @@ -89,8 +95,16 @@ type TransactionLoaderRefCells = Vec)>>; // Each cycle is composed of number of tiny pubkey subranges // to scan, which is always multiple of the number of slots in epoch. type PartitionIndex = u64; -type PartitionCount = u64; -type RentCollectionCycleParams = (Epoch, SlotCount, bool, Epoch, EpochCount, PartitionCount); +type PartitionsPerCycle = u64; +type Partition = (PartitionIndex, PartitionIndex, PartitionsPerCycle); +type RentCollectionCycleParams = ( + Epoch, + SlotCount, + bool, + Epoch, + EpochCount, + PartitionsPerCycle, +); type EpochCount = u64; @@ -1679,12 +1693,7 @@ impl Bank { } } - fn collect_rent_by_range( - &self, - range_start: PartitionIndex, - range_end: PartitionIndex, - partition_count: PartitionCount, - ) { + fn collect_rent_in_partition(&self, (range_start, range_end, partition_count): Partition) { let subrange = Self::pubkey_range_by_partition(range_start, range_end, partition_count); let accounts = self @@ -1709,7 +1718,7 @@ impl Bank { fn pubkey_range_by_partition( start_index: PartitionIndex, end_index: PartitionIndex, - partition_count: PartitionCount, + partition_count: PartitionsPerCycle, ) -> std::ops::RangeInclusive { let partition_width = Slot::max_value() / partition_count; let start_key_prefix = if start_index == 0 && end_index == 0 { @@ -1741,47 +1750,42 @@ impl Bank { Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey) } - fn eager_rent_ranges_for_epochs(&self) -> Vec<(SlotIndex, SlotIndex, SlotCount)> { - let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot()); - let (parent_epoch, mut parent_slot_index) = - self.get_epoch_and_slot_index(self.parent_slot()); + fn constant_cycle_partitions(&self, slot_count_in_two_day: SlotCount) -> Vec { + let parent_cycle = self.parent_slot() / slot_count_in_two_day; + let current_cycle = self.slot() / slot_count_in_two_day; + let mut parent_cycle_index = self.parent_slot() % slot_count_in_two_day; + let current_cycle_index = self.slot() % slot_count_in_two_day; + let mut partitions = vec![]; + if parent_cycle < current_cycle { + if current_cycle_index > 0 { + partitions.push(( + parent_cycle_index, + slot_count_in_two_day - 1, + slot_count_in_two_day, + )); + } + parent_cycle_index = 0; + } - let mut ranges = vec![]; + partitions.push(( + parent_cycle_index, + current_cycle_index, + slot_count_in_two_day, + )); - let slot_count_per_normal_epoch = - self.get_slots_in_epoch(self.epoch_schedule.first_normal_epoch); - let slot_count_in_two_day: SlotCount = - 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; - let is_in_constant_cycle = - self.use_constant_collection_cycle(slot_count_per_normal_epoch, slot_count_in_two_day); - if is_in_constant_cycle { - let parent_cycle = self.parent_slot() / slot_count_in_two_day; - let current_cycle = self.slot() / slot_count_in_two_day; - let mut parent_cycle_index = self.parent_slot() % slot_count_in_two_day; - let current_cycle_index = self.slot() % slot_count_in_two_day; - if parent_cycle < current_cycle { - if current_cycle_index > 0 { - ranges.push(( - parent_cycle_index, - slot_count_in_two_day - 1, - slot_count_in_two_day, - )); - } - parent_cycle_index = 0; - } + partitions + } - ranges.push(( - parent_cycle_index, - current_cycle_index, - slot_count_in_two_day, - )); - return ranges; - } + fn normal_cycle_partitions(&self) -> Vec { + let (current_epoch, current_slot_index) = 
self.get_epoch_and_slot_index(self.slot()); + let (parent_epoch, mut parent_slot_index) = + self.get_epoch_and_slot_index(self.parent_slot()); + let mut partitions = vec![]; if parent_epoch < current_epoch { if current_slot_index > 0 { let last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1; - ranges.push(self.partition_in_collection_cycle( + partitions.push(self.partition_in_collection_cycle( parent_slot_index, last_slot_index, parent_epoch, @@ -1790,13 +1794,28 @@ impl Bank { parent_slot_index = 0; } - ranges.push(self.partition_in_collection_cycle( + partitions.push(self.partition_in_collection_cycle( parent_slot_index, current_slot_index, current_epoch, )); - ranges + partitions + } + + fn rent_collection_partitions(&self) -> Vec { + let slot_count_per_normal_epoch = + self.get_slots_in_epoch(self.epoch_schedule.first_normal_epoch); + let slot_count_in_two_day: SlotCount = + 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; + let is_in_constant_cycle = + self.use_constant_collection_cycle(slot_count_per_normal_epoch, slot_count_in_two_day); + + if !is_in_constant_cycle { + self.normal_cycle_partitions() + } else { + self.constant_cycle_partitions(slot_count_in_two_day) + } } fn determine_collection_cycle_params( @@ -1845,7 +1864,7 @@ impl Bank { start_slot_index: SlotIndex, end_slot_index: SlotIndex, current_epoch: Epoch, - ) -> (PartitionIndex, PartitionIndex, PartitionCount) { + ) -> Partition { let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); let cycle_params = self.determine_collection_cycle_params(current_epoch, slot_count_per_epoch); @@ -1916,16 +1935,18 @@ impl Bank { slot_count_per_normal_epoch: SlotCount, slot_count_in_two_day: SlotCount, ) -> bool { - self.operating_mode.unwrap() != OperatingMode::Stable + self.operating_mode() != OperatingMode::Stable && slot_count_per_normal_epoch < slot_count_in_two_day } fn collect_rent_eagerly(&self) { + if !self.enable_eager_rent_collection() { + return; + } + let mut measure = Measure::start("collect_rent_eagerly-ms"); - if self.enable_eager_rent_collection() { - for (start_index, end_index, partition_count) in self.eager_rent_ranges_for_epochs() { - self.collect_rent_by_range(start_index, end_index, partition_count); - } + for partition in self.rent_collection_partitions() { + self.collect_rent_in_partition(partition); } measure.stop(); inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize); @@ -1936,9 +1957,20 @@ impl Bank { return false; } - !(self.operating_mode.unwrap() == OperatingMode::Stable + let not_yet_for_existing_clusters = (self.operating_mode() == OperatingMode::Stable && self.genesis_hash.unwrap() == MAINNET_BETA_GENESIS_HASH - && self.epoch() < EAGER_RENT_COLLECTION_START_EPOCH) + && self.epoch() < MAINNET_BETA_EAGER_RENT_COLLECTION_START_EPOCH) + || (self.operating_mode() == OperatingMode::Preview + && self.genesis_hash.unwrap() == TESTNET_GENESIS_HASH + && self.epoch() < TESTNET_EAGER_RENT_COLLECTION_START_EPOCH); + + !not_yet_for_existing_clusters + } + + fn operating_mode(&self) -> OperatingMode { + // unwrap is safe; self.operating_mode is ensured to be Some() always... + // we only using Option here for ABI compatibility... + self.operating_mode.unwrap() } /// Process a batch of transactions. 
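// Standalone sketch of the constant_cycle_partitions logic above: when the
// parent and current slots straddle a cycle boundary, the remainder of the old
// cycle is scanned first and the new cycle restarts at index 0. The cycle
// length of 432_000 matches the two-day figure used elsewhere in this series;
// names are illustrative.
fn cycle_partitions(parent_slot: u64, current_slot: u64, cycle_len: u64) -> Vec<(u64, u64, u64)> {
    let parent_cycle = parent_slot / cycle_len;
    let current_cycle = current_slot / cycle_len;
    let mut parent_index = parent_slot % cycle_len;
    let current_index = current_slot % cycle_len;

    let mut partitions = vec![];
    if parent_cycle < current_cycle {
        // Finish the previous cycle before restarting from index 0.
        if current_index > 0 {
            partitions.push((parent_index, cycle_len - 1, cycle_len));
        }
        parent_index = 0;
    }
    partitions.push((parent_index, current_index, cycle_len));
    partitions
}

fn main() {
    // Mirrors the expectation of the development longer-cycle test in this
    // series: jumping from slot 864_000 - 20 to 864_000 + 39 crosses a cycle
    // boundary and yields two partitions.
    assert_eq!(
        cycle_partitions(864_000 - 20, 864_000 + 39, 432_000),
        vec![(431_980, 431_999, 432_000), (0, 39, 432_000)]
    );
}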
@@ -3533,16 +3565,16 @@ mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(1); let mut bank = Arc::new(Bank::new(&genesis_config)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); bank = Arc::new(new_from_parent(&bank)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 32)]); for _ in 2..32 { bank = Arc::new(new_from_parent(&bank)); } - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(30, 31, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(30, 31, 32)]); bank = Arc::new(new_from_parent(&bank)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 64)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 64)]); } #[test] @@ -3550,17 +3582,17 @@ mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(1); let mut bank = Arc::new(Bank::new(&genesis_config)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); bank = Arc::new(new_from_parent(&bank)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 32)]); for _ in 2..15 { bank = Arc::new(new_from_parent(&bank)); } - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(13, 14, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(13, 14, 32)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 49)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(14, 31, 32), (0, 17, 64)] ); } @@ -3582,36 +3614,36 @@ mod tests { assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432000); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 1)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432000)]); for _ in 2..32 { bank = Arc::new(new_from_parent(&bank)); } assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 31)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(30, 31, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(30, 31, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(31, 32, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(31, 32, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 1)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(32, 33, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(32, 33, 432000)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1000)); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1001)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); 
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (31, 9)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(1000, 1001, 432000)] ); @@ -3620,19 +3652,19 @@ mod tests { assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13499, 31)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(431998, 431999, 432000)] ); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 1)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432000)]); } #[test] @@ -3652,39 +3684,39 @@ mod tests { assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432000); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 1)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432000)]); for _ in 2..19 { bank = Arc::new(new_from_parent(&bank)); } assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 18)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(17, 18, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(17, 18, 432000)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 44)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 12)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(18, 31, 432000), (31, 44, 432000)] ); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 13)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(44, 45, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(44, 45, 432000)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431993)); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 432011)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 11)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(431993, 431999, 432000), (0, 11, 432000)] ); } @@ -3707,24 +3739,24 @@ mod tests { assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.epoch_schedule.first_normal_epoch, 3); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 32)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222)); bank = 
Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(126, 127, 128)]); + assert_eq!(bank.rent_collection_partitions(), vec![(126, 127, 128)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 431872)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 431872)]); assert_eq!(431872 % bank.get_slots_in_epoch(bank.epoch()), 0); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 1)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 431872)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 431872)]); bank = Arc::new(Bank::new_from_parent( &bank, @@ -3735,14 +3767,14 @@ mod tests { assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1689, 255)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(431870, 431871, 431872)] ); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1690, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 431872)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 431872)]); } #[test] @@ -3762,43 +3794,34 @@ mod tests { assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); assert_eq!(bank.epoch_schedule.first_normal_epoch, 3); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432000)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222)); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127)); - assert_eq!( - bank.eager_rent_ranges_for_epochs(), - vec![(222, 223, 432000)] - ); + assert_eq!(bank.rent_collection_partitions(), vec![(222, 223, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 0)); - assert_eq!( - bank.eager_rent_ranges_for_epochs(), - vec![(223, 224, 432000)] - ); + assert_eq!(bank.rent_collection_partitions(), vec![(223, 224, 432000)]); bank = Arc::new(new_from_parent(&bank)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 1)); - assert_eq!( - bank.eager_rent_ranges_for_epochs(), - vec![(224, 225, 432000)] - ); + assert_eq!(bank.rent_collection_partitions(), vec![(224, 225, 432000)]); bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 432000 - 2)); bank = Arc::new(new_from_parent(&bank)); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(431998, 431999, 432000)] ); bank = Arc::new(new_from_parent(&bank)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 0, 432000)]); + assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432000)]); bank = Arc::new(new_from_parent(&bank)); - assert_eq!(bank.eager_rent_ranges_for_epochs(), vec![(0, 1, 432000)]); 
+ assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432000)]); bank = Arc::new(Bank::new_from_parent( &bank, @@ -3811,7 +3834,7 @@ mod tests { 864000 + 39, )); assert_eq!( - bank.eager_rent_ranges_for_epochs(), + bank.rent_collection_partitions(), vec![(431980, 431999, 432000), (0, 39, 432000)] ); } @@ -3939,7 +3962,7 @@ mod tests { } #[test] - fn test_rent_eager_collect_rent_by_range() { + fn test_rent_eager_collect_rent_in_partition() { solana_logger::setup(); let (genesis_config, _mint_keypair) = create_genesis_config(1); @@ -3992,7 +4015,7 @@ mod tests { vec![genesis_slot] ); - bank.collect_rent_by_range(0, 0, 1); // all range + bank.collect_rent_in_partition((0, 0, 1)); // all range // unrelated 1-lamport account exists assert_eq!( @@ -4074,8 +4097,8 @@ mod tests { assert_eq!(hash1_with_zero, hash1_without_zero); assert_ne!(hash1_with_zero, Hash::default()); - bank2_with_zero.collect_rent_by_range(0, 0, 1); // all range - bank2_without_zero.collect_rent_by_range(0, 0, 1); // all range + bank2_with_zero.collect_rent_in_partition((0, 0, 1)); // all + bank2_without_zero.collect_rent_in_partition((0, 0, 1)); // all bank2_with_zero.freeze(); let hash2_with_zero = bank2_with_zero.hash(); From 5505530666fe5792269db8665b1e3838e9c81a94 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sun, 26 Apr 2020 01:27:48 +0900 Subject: [PATCH 15/28] Use to_be_bytes --- runtime/src/bank.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b38d30926b8973..095c58e76ad71e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -26,7 +26,7 @@ use crate::{ transaction_utils::OrderedIterator, }; use bincode::{deserialize_from, serialize_into}; -use byteorder::{BigEndian, ByteOrder, LittleEndian}; +use byteorder::{ByteOrder, LittleEndian}; use itertools::Itertools; use log::*; use serde::{Deserialize, Serialize}; @@ -63,6 +63,7 @@ use std::{ cell::RefCell, collections::{HashMap, HashSet}, io::{BufReader, Cursor, Error as IOError, Read}, + mem, path::{Path, PathBuf}, rc::Rc, sync::atomic::{AtomicBool, AtomicU64, Ordering}, @@ -1720,7 +1721,9 @@ impl Bank { end_index: PartitionIndex, partition_count: PartitionsPerCycle, ) -> std::ops::RangeInclusive { - let partition_width = Slot::max_value() / partition_count; + type Prefix = u64; + const PREFIX_SIZE: usize = mem::size_of::(); + let partition_width = Prefix::max_value() / partition_count; let start_key_prefix = if start_index == 0 && end_index == 0 { 0 } else { @@ -1728,15 +1731,15 @@ impl Bank { }; let end_key_prefix = if end_index + 1 == partition_count { - Slot::max_value() + Prefix::max_value() } else { (end_index + 1) * partition_width - 1 }; let mut start_pubkey = [0x00u8; 32]; let mut end_pubkey = [0xffu8; 32]; - BigEndian::write_u64(&mut start_pubkey[..], start_key_prefix); - BigEndian::write_u64(&mut end_pubkey[..], end_key_prefix); + start_pubkey[0..PREFIX_SIZE].copy_from_slice(&start_key_prefix.to_be_bytes()); + end_pubkey[0..PREFIX_SIZE].copy_from_slice(&end_key_prefix.to_be_bytes()); trace!( "pubkey_range_by_partition: ({}-{})/{}: {:02x?}-{:02x?}", start_index, From f83416654002bc75f7a02fd51785dbc67a58f680 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sun, 26 Apr 2020 01:50:29 +0900 Subject: [PATCH 16/28] cleanup --- runtime/src/bank.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 095c58e76ad71e..f32efe5481c9d7 100644 --- a/runtime/src/bank.rs +++ 
b/runtime/src/bank.rs @@ -1694,8 +1694,8 @@ impl Bank { } } - fn collect_rent_in_partition(&self, (range_start, range_end, partition_count): Partition) { - let subrange = Self::pubkey_range_by_partition(range_start, range_end, partition_count); + fn collect_rent_in_partition(&self, partition: Partition) { + let subrange = Self::pubkey_range_by_partition(partition); let accounts = self .rc @@ -1717,9 +1717,7 @@ impl Bank { } fn pubkey_range_by_partition( - start_index: PartitionIndex, - end_index: PartitionIndex, - partition_count: PartitionsPerCycle, + (start_index, end_index, partition_count): Partition, ) -> std::ops::RangeInclusive { type Prefix = u64; const PREFIX_SIZE: usize = mem::size_of::(); @@ -3844,7 +3842,7 @@ mod tests { #[test] fn test_rent_eager_pubkey_range_minimal() { - let range = Bank::pubkey_range_by_partition(0, 0, 1); + let range = Bank::pubkey_range_by_partition((0, 0, 1)); assert_eq!( range, Pubkey::new_from_array([0x00; 32])..=Pubkey::new_from_array([0xff; 32]) @@ -3853,7 +3851,7 @@ mod tests { #[test] fn test_rent_eager_pubkey_range_dividable() { - let range = Bank::pubkey_range_by_partition(0, 0, 2); + let range = Bank::pubkey_range_by_partition((0, 0, 2)); assert_eq!( range, Pubkey::new_from_array([ @@ -3868,7 +3866,7 @@ mod tests { ]) ); - let range = Bank::pubkey_range_by_partition(0, 1, 2); + let range = Bank::pubkey_range_by_partition((0, 1, 2)); assert_eq!( range, Pubkey::new_from_array([ @@ -3888,7 +3886,7 @@ mod tests { fn test_rent_eager_pubkey_range_not_dividable() { solana_logger::setup(); - let range = Bank::pubkey_range_by_partition(0, 0, 3); + let range = Bank::pubkey_range_by_partition((0, 0, 3)); assert_eq!( range, Pubkey::new_from_array([ @@ -3903,7 +3901,7 @@ mod tests { ]) ); - let range = Bank::pubkey_range_by_partition(0, 1, 3); + let range = Bank::pubkey_range_by_partition((0, 1, 3)); assert_eq!( range, Pubkey::new_from_array([ @@ -3918,7 +3916,7 @@ mod tests { ]) ); - let range = Bank::pubkey_range_by_partition(1, 2, 3); + let range = Bank::pubkey_range_by_partition((1, 2, 3)); assert_eq!( range, Pubkey::new_from_array([ @@ -3937,7 +3935,7 @@ mod tests { #[test] fn test_rent_eager_pubkey_range_gap() { solana_logger::setup(); - let range = Bank::pubkey_range_by_partition(120, 1023, 12345); + let range = Bank::pubkey_range_by_partition((120, 1023, 12345)); assert_eq!( range, Pubkey::new_from_array([ From ec5f9f0cf2b7af1aedef5988d90a3debba721809 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sun, 26 Apr 2020 01:55:20 +0900 Subject: [PATCH 17/28] More tiny cleanup --- runtime/src/bank.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f32efe5481c9d7..c7f4ebaf4bc1c2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -64,6 +64,7 @@ use std::{ collections::{HashMap, HashSet}, io::{BufReader, Cursor, Error as IOError, Read}, mem, + ops::RangeInclusive, path::{Path, PathBuf}, rc::Rc, sync::atomic::{AtomicBool, AtomicU64, Ordering}, @@ -1718,7 +1719,7 @@ impl Bank { fn pubkey_range_by_partition( (start_index, end_index, partition_count): Partition, - ) -> std::ops::RangeInclusive { + ) -> RangeInclusive { type Prefix = u64; const PREFIX_SIZE: usize = mem::size_of::(); let partition_width = Prefix::max_value() / partition_count; From 26787aa87acecaf6fc52920bbea476b53fa498bf Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 12:06:53 +0900 Subject: [PATCH 18/28] Rebase cleanup --- runtime/src/accounts_db.rs | 7 +------ runtime/src/bank.rs | 4 ++-- 2 files 
changed, 3 insertions(+), 8 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index ea6d24340647b8..54387c95afbc50 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1122,12 +1122,7 @@ impl AccountsDB { collector } - pub fn range_scan_accounts( - &self, - ancestors: &Ancestors, - range: R, - scan_func: F, - ) -> A + pub fn range_scan_accounts(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A where F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (), A: Default, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c7f4ebaf4bc1c2..61c643635c3635 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2641,7 +2641,7 @@ mod tests { use super::*; use crate::{ accounts_db::{get_temp_accounts_paths, tests::copy_append_vecs}, - accounts_index::AncestorList, + accounts_index::Ancestors, genesis_utils::{ create_genesis_config_with_leader, GenesisConfigInfo, BOOTSTRAP_VALIDATOR_LAMPORTS, }, @@ -3953,7 +3953,7 @@ mod tests { } impl Bank { - fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &AncestorList) -> Vec { + fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec { let accounts_index = self.rc.accounts.accounts_db.accounts_index.read().unwrap(); let (accounts, _) = accounts_index.get(&pubkey, &ancestors).unwrap(); accounts From 2ff416e24060e818d1d3bc0e61416202df56f0b5 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 13:54:28 +0900 Subject: [PATCH 19/28] Set Bank::genesis_hash when resuming from snapshot --- ledger/src/snapshot_utils.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/ledger/src/snapshot_utils.rs b/ledger/src/snapshot_utils.rs index 9d58d8de7da411..46619652824ca5 100644 --- a/ledger/src/snapshot_utils.rs +++ b/ledger/src/snapshot_utils.rs @@ -647,6 +647,7 @@ where } }; bank.operating_mode = Some(genesis_config.operating_mode); + bank.genesis_hash = Some(genesis_config.hash()); info!("Rebuilding accounts..."); let rc = bank::BankRc::from_stream( account_paths, From 641a1f3219298320241ba6cfeb7f7b3b6d60d069 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 14:28:33 +0900 Subject: [PATCH 20/28] Reorder fns and clean ups --- runtime/src/bank.rs | 196 +++++++++++++++++++------------------------- 1 file changed, 86 insertions(+), 110 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 61c643635c3635..81ba8ed0ff9db8 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1695,6 +1695,46 @@ impl Bank { } } + fn collect_rent_eagerly(&self) { + if !self.enable_eager_rent_collection() { + return; + } + + let mut measure = Measure::start("collect_rent_eagerly-ms"); + for partition in self.rent_collection_partitions() { + self.collect_rent_in_partition(partition); + } + measure.stop(); + inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize); + } + + fn enable_eager_rent_collection(&self) -> bool { + if self.lazy_rent_collection.load(Ordering::Relaxed) { + return false; + } + + let not_yet_for_existing_clusters = (self.operating_mode() == OperatingMode::Stable + && self.genesis_hash.unwrap() == MAINNET_BETA_GENESIS_HASH + && self.epoch() < MAINNET_BETA_EAGER_RENT_COLLECTION_START_EPOCH) + || (self.operating_mode() == OperatingMode::Preview + && self.genesis_hash.unwrap() == TESTNET_GENESIS_HASH + && self.epoch() < TESTNET_EAGER_RENT_COLLECTION_START_EPOCH); + + !not_yet_for_existing_clusters + } + + fn rent_collection_partitions(&self) -> Vec { + if !self.use_fixed_collection_cycle() { + 
self.normal_cycle_partitions() + } else { + // This mode is mainly for benchmarking only + // we always iterate over the whole pubkey value range with + // slots as a collection cycle, regardless warm-up or alignment between collection + // cycles and epochs. + self.fixed_cycle_partitions() + } + } + fn collect_rent_in_partition(&self, partition: Partition) { let subrange = Self::pubkey_range_by_partition(partition); @@ -1752,7 +1792,9 @@ impl Bank { Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey) } - fn constant_cycle_partitions(&self, slot_count_in_two_day: SlotCount) -> Vec { + fn fixed_cycle_partitions(&self) -> Vec { + let slot_count_in_two_day = self.slot_count_in_two_day(); + let parent_cycle = self.parent_slot() / slot_count_in_two_day; let current_cycle = self.slot() / slot_count_in_two_day; let mut parent_cycle_index = self.parent_slot() % slot_count_in_two_day; @@ -1805,71 +1847,13 @@ impl Bank { partitions } - fn rent_collection_partitions(&self) -> Vec { - let slot_count_per_normal_epoch = - self.get_slots_in_epoch(self.epoch_schedule.first_normal_epoch); - let slot_count_in_two_day: SlotCount = - 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; - let is_in_constant_cycle = - self.use_constant_collection_cycle(slot_count_per_normal_epoch, slot_count_in_two_day); - - if !is_in_constant_cycle { - self.normal_cycle_partitions() - } else { - self.constant_cycle_partitions(slot_count_in_two_day) - } - } - - fn determine_collection_cycle_params( - &self, - current_epoch: Epoch, - slot_count_per_epoch: SlotCount, - ) -> RentCollectionCycleParams { - // Assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect rent - // eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally. - let slot_count_in_two_day: SlotCount = - 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot; - let first_normal_epoch = self.epoch_schedule.first_normal_epoch; - let slot_count_per_normal_epoch = self.get_slots_in_epoch(first_normal_epoch); - let is_in_longer_cycle = self.use_longer_collection_cycle( - current_epoch, - slot_count_per_normal_epoch, - slot_count_in_two_day, - ); - - if !is_in_longer_cycle { - ( - current_epoch, - slot_count_per_epoch, - false, - 0, - 1, - slot_count_per_epoch, - ) - } else { - let epoch_count_in_cycle = slot_count_in_two_day / slot_count_per_epoch; - let partition_count = slot_count_per_epoch * epoch_count_in_cycle; - - ( - current_epoch, - slot_count_per_epoch, - true, - first_normal_epoch, - epoch_count_in_cycle, - partition_count, - ) - } - } - fn partition_in_collection_cycle( &self, start_slot_index: SlotIndex, end_slot_index: SlotIndex, current_epoch: Epoch, ) -> Partition { - let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); - let cycle_params = - self.determine_collection_cycle_params(current_epoch, slot_count_per_epoch); + let cycle_params = self.determine_collection_cycle_params(current_epoch); let (_, _, is_in_longer_cycle, _, _, partition_count) = cycle_params; // use common code-path for both very-likely and very-unlikely for the sake of minimized @@ -1881,11 +1865,8 @@ impl Bank { Self::partition_index_in_collection_cycle(end_slot_index, cycle_params); // do special handling... 
- let is_across_epoch_boundary = Self::across_gapped_epoch_boundary_in_collection_cycle( - start_slot_index, - end_slot_index, - start_partition_index, - ); + let is_across_epoch_boundary = + start_slot_index == 0 && end_slot_index != 1 && start_partition_index > 0; if is_in_longer_cycle && is_across_epoch_boundary { start_partition_index -= 1; } @@ -1893,12 +1874,32 @@ impl Bank { (start_partition_index, end_partition_index, partition_count) } - fn across_gapped_epoch_boundary_in_collection_cycle( - start_slot_index: SlotIndex, - end_slot_index: SlotIndex, - start_partition_index: PartitionIndex, - ) -> bool { - start_slot_index == 0 && end_slot_index != 1 && start_partition_index > 0 + fn determine_collection_cycle_params(&self, current_epoch: Epoch) -> RentCollectionCycleParams { + let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); + + if !self.use_longer_collection_cycle(current_epoch) { + ( + current_epoch, + slot_count_per_epoch, + false, + 0, + 1, + slot_count_per_epoch, + ) + } else { + let first_normal_epoch = self.epoch_schedule.first_normal_epoch; + let epoch_count_in_cycle = self.slot_count_in_two_day() / slot_count_per_epoch; + let partition_count = slot_count_per_epoch * epoch_count_in_cycle; + + ( + current_epoch, + slot_count_per_epoch, + true, + first_normal_epoch, + epoch_count_in_cycle, + partition_count, + ) + } } fn partition_index_in_collection_cycle( @@ -1922,51 +1923,26 @@ impl Bank { // These logic is't strictly eager anymore and should only be used // for development/performance purpose. // Absolutely not under OperationMode::Stable!!!! - fn use_longer_collection_cycle( - &self, - current_epoch: Epoch, - slot_count_per_normal_epoch: SlotCount, - slot_count_in_two_day: SlotCount, - ) -> bool { + fn use_longer_collection_cycle(&self, current_epoch: Epoch) -> bool { let first_normal_epoch = self.epoch_schedule.first_normal_epoch; - current_epoch >= first_normal_epoch && slot_count_per_normal_epoch < slot_count_in_two_day - } - - fn use_constant_collection_cycle( - &self, - slot_count_per_normal_epoch: SlotCount, - slot_count_in_two_day: SlotCount, - ) -> bool { - self.operating_mode() != OperatingMode::Stable - && slot_count_per_normal_epoch < slot_count_in_two_day + let slot_count_per_normal_epoch = self.get_slots_in_epoch(first_normal_epoch); + current_epoch >= first_normal_epoch + && slot_count_per_normal_epoch < self.slot_count_in_two_day() } - fn collect_rent_eagerly(&self) { - if !self.enable_eager_rent_collection() { - return; - } + fn use_fixed_collection_cycle(&self) -> bool { + let first_normal_epoch = self.epoch_schedule.first_normal_epoch; + let slot_count_per_normal_epoch = self.get_slots_in_epoch(first_normal_epoch); - let mut measure = Measure::start("collect_rent_eagerly-ms"); - for partition in self.rent_collection_partitions() { - self.collect_rent_in_partition(partition); - } - measure.stop(); - inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize); + self.operating_mode() != OperatingMode::Stable + && slot_count_per_normal_epoch < self.slot_count_in_two_day() } - fn enable_eager_rent_collection(&self) -> bool { - if self.lazy_rent_collection.load(Ordering::Relaxed) { - return false; - } - - let not_yet_for_existing_clusters = (self.operating_mode() == OperatingMode::Stable - && self.genesis_hash.unwrap() == MAINNET_BETA_GENESIS_HASH - && self.epoch() < MAINNET_BETA_EAGER_RENT_COLLECTION_START_EPOCH) - || (self.operating_mode() == OperatingMode::Preview - && self.genesis_hash.unwrap() == TESTNET_GENESIS_HASH - 
&& self.epoch() < TESTNET_EAGER_RENT_COLLECTION_START_EPOCH); - - !not_yet_for_existing_clusters + // This value is specially chosen to align with slots per epoch in mainnet-beta and testnet + // Also, assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect + // rent eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally. + fn slot_count_in_two_day(&self) -> SlotCount { + 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot } fn operating_mode(&self) -> OperatingMode { From cb61b75bada58005976b0a31ed49f29aadabf11f Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 15:22:35 +0900 Subject: [PATCH 21/28] Better naming and commenting --- runtime/src/bank.rs | 111 ++++++++++++++++++++++++++++---------------- 1 file changed, 70 insertions(+), 41 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 81ba8ed0ff9db8..b167452d21bc69 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -561,6 +561,10 @@ impl Bank { self.epoch } + pub fn first_normal_epoch(&self) -> Epoch { + self.epoch_schedule.first_normal_epoch + } + pub fn freeze_lock(&self) -> RwLockReadGuard { self.hash.read().unwrap() } @@ -1725,18 +1729,26 @@ impl Bank { fn rent_collection_partitions(&self) -> Vec { if !self.use_fixed_collection_cycle() { - self.normal_cycle_partitions() + // This mode is for production/development/testing. + // In this mode, we iterate over the whole pubkey value range for each epochs + // including warm-up epochs. + // The only exception is the situation where normal epochs are relatively short + // (currently less than 2 day). In that case, we arrange a single collection + // cycle to be multiple of epochs so that a cycle could be greater than the 2 day. + self.variable_cycle_partitions() } else { - // This mode is mainly for benchmarking only - // we always iterate over the whole pubkey value range with - // slots as a collection cycle, regardless warm-up or alignment between collection - // cycles and epochs. + // This mode is mainly for benchmarking only. + // In this mode, we always iterate over the whole pubkey value range with + // slots as a collection cycle, regardless warm-up or + // alignment between collection cycles and epochs. + // Thus, we can simulate stable processing load of eager rent collection, + // strictly proportional to the number of pubkeys since genesis. 
self.fixed_cycle_partitions() } } fn collect_rent_in_partition(&self, partition: Partition) { - let subrange = Self::pubkey_range_by_partition(partition); + let subrange = Self::pubkey_range_from_partition(partition); let accounts = self .rc @@ -1757,7 +1769,7 @@ impl Bank { datapoint_info!("collect_rent_eagerly", ("accounts", account_count, i64)); } - fn pubkey_range_by_partition( + fn pubkey_range_from_partition( (start_index, end_index, partition_count): Partition, ) -> RangeInclusive { type Prefix = u64; @@ -1780,7 +1792,7 @@ impl Bank { start_pubkey[0..PREFIX_SIZE].copy_from_slice(&start_key_prefix.to_be_bytes()); end_pubkey[0..PREFIX_SIZE].copy_from_slice(&end_key_prefix.to_be_bytes()); trace!( - "pubkey_range_by_partition: ({}-{})/{}: {:02x?}-{:02x?}", + "pubkey_range_from_partition: ({}-{})/{}: {:02x?}-{:02x?}", start_index, end_index, partition_count, @@ -1802,9 +1814,10 @@ impl Bank { let mut partitions = vec![]; if parent_cycle < current_cycle { if current_cycle_index > 0 { + let parent_last_cycle_index = slot_count_in_two_day - 1; partitions.push(( parent_cycle_index, - slot_count_in_two_day - 1, + parent_last_cycle_index, slot_count_in_two_day, )); } @@ -1820,7 +1833,7 @@ impl Bank { partitions } - fn normal_cycle_partitions(&self) -> Vec { + fn variable_cycle_partitions(&self) -> Vec { let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot()); let (parent_epoch, mut parent_slot_index) = self.get_epoch_and_slot_index(self.parent_slot()); @@ -1828,10 +1841,10 @@ impl Bank { let mut partitions = vec![]; if parent_epoch < current_epoch { if current_slot_index > 0 { - let last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1; + let parent_last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1; partitions.push(self.partition_in_collection_cycle( parent_slot_index, - last_slot_index, + parent_last_slot_index, parent_epoch, )); } @@ -1854,7 +1867,7 @@ impl Bank { current_epoch: Epoch, ) -> Partition { let cycle_params = self.determine_collection_cycle_params(current_epoch); - let (_, _, is_in_longer_cycle, _, _, partition_count) = cycle_params; + let (_, _, is_in_multi_epoch_cycle, _, _, partition_count) = cycle_params; // use common code-path for both very-likely and very-unlikely for the sake of minimized // risk of any mis-calculation instead of neligilbe faster computation per slot for the @@ -1864,10 +1877,28 @@ impl Bank { let end_partition_index = Self::partition_index_in_collection_cycle(end_slot_index, cycle_params); - // do special handling... let is_across_epoch_boundary = start_slot_index == 0 && end_slot_index != 1 && start_partition_index > 0; - if is_in_longer_cycle && is_across_epoch_boundary { + if is_in_multi_epoch_cycle && is_across_epoch_boundary { + // When an epoch boundary is crossed, the caller gives us off-by-one indexes. + // Usually there should be no need for adjustment because cycles are aligned + // with epochs. But for multi-epoch cycles, adjust the start index if it + // happens in the middle of a cycle for both gapped and non-gapped cases: + // + // epoch & slot range| *slot idx. | raw partition idx.| adj. partition idx. + // ------------------+------------+-------------------+----------------------- + // 3 20..30 | [7..8] | 7.. 8 | 7.. 8 + // | [8..9] | 8.. 9 | 8.. 
9 + // 4 30..40 | [0..0] | <10>..10 | <9>..10 <= not gapped + // | [0..1] | 10..11 | 10..11 + // | [1..2] | 11..12 | 11..12 + // | [2..9 | 12..19 | 12..19 + // 5 40..50 | 0..4] | <20>..24 | <19>..24 <= gapped + // | [4..5] | 24..25 | 24..25 + // | [5..6] | 25..26 | 25..26 + // *: The range of parent_bank.slot() and current_bank.slot() is firstly + // split by the epoch boundaries and then the split ones are given to us. + // The oritinal ranges are denoted as [...] start_partition_index -= 1; } @@ -1877,7 +1908,7 @@ impl Bank { fn determine_collection_cycle_params(&self, current_epoch: Epoch) -> RentCollectionCycleParams { let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); - if !self.use_longer_collection_cycle(current_epoch) { + if !self.use_multi_epoch_collection_cycle(current_epoch) { ( current_epoch, slot_count_per_epoch, @@ -1887,7 +1918,6 @@ impl Bank { slot_count_per_epoch, ) } else { - let first_normal_epoch = self.epoch_schedule.first_normal_epoch; let epoch_count_in_cycle = self.slot_count_in_two_day() / slot_count_per_epoch; let partition_count = slot_count_per_epoch * epoch_count_in_cycle; @@ -1895,7 +1925,7 @@ impl Bank { current_epoch, slot_count_per_epoch, true, - first_normal_epoch, + self.first_normal_epoch(), epoch_count_in_cycle, partition_count, ) @@ -1920,22 +1950,17 @@ impl Bank { // Given short epochs, it's too costly to collect rent eagerly // within an epoch, so lower the frequency of it. - // These logic is't strictly eager anymore and should only be used + // These logic isn't strictly eager anymore and should only be used // for development/performance purpose. // Absolutely not under OperationMode::Stable!!!! - fn use_longer_collection_cycle(&self, current_epoch: Epoch) -> bool { - let first_normal_epoch = self.epoch_schedule.first_normal_epoch; - let slot_count_per_normal_epoch = self.get_slots_in_epoch(first_normal_epoch); - current_epoch >= first_normal_epoch - && slot_count_per_normal_epoch < self.slot_count_in_two_day() + fn use_multi_epoch_collection_cycle(&self, current_epoch: Epoch) -> bool { + current_epoch >= self.first_normal_epoch() + && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day() } fn use_fixed_collection_cycle(&self) -> bool { - let first_normal_epoch = self.epoch_schedule.first_normal_epoch; - let slot_count_per_normal_epoch = self.get_slots_in_epoch(first_normal_epoch); - self.operating_mode() != OperatingMode::Stable - && slot_count_per_normal_epoch < self.slot_count_in_two_day() + && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day() } // This value is specially chosen to align with slots per epoch in mainnet-beta and testnet @@ -1945,6 +1970,10 @@ impl Bank { 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot } + fn slot_count_per_normal_epoch(&self) -> SlotCount { + self.get_slots_in_epoch(self.first_normal_epoch()) + } + fn operating_mode(&self) -> OperatingMode { // unwrap is safe; self.operating_mode is ensured to be Some() always... // we only using Option here for ABI compatibility... 
@@ -3576,7 +3605,7 @@ mod tests { } #[test] - fn test_rent_eager_across_epoch_without_gap_under_longer_cycle() { + fn test_rent_eager_across_epoch_without_gap_under_multi_epoch_cycle() { let leader_pubkey = Pubkey::new_rand(); let leader_lamports = 3; let mut genesis_config = @@ -3646,7 +3675,7 @@ mod tests { } #[test] - fn test_rent_eager_across_epoch_with_gap_under_longer_cycle() { + fn test_rent_eager_across_epoch_with_gap_under_multi_epoch_cycle() { let leader_pubkey = Pubkey::new_rand(); let leader_lamports = 3; let mut genesis_config = @@ -3700,7 +3729,7 @@ mod tests { } #[test] - fn test_rent_eager_with_warmup_epochs_under_longer_cycle() { + fn test_rent_eager_with_warmup_epochs_under_multi_epoch_cycle() { let leader_pubkey = Pubkey::new_rand(); let leader_lamports = 3; let mut genesis_config = @@ -3715,7 +3744,7 @@ mod tests { let mut bank = Arc::new(Bank::new(&genesis_config)); assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432000); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); - assert_eq!(bank.epoch_schedule.first_normal_epoch, 3); + assert_eq!(bank.first_normal_epoch(), 3); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); @@ -3756,7 +3785,7 @@ mod tests { } #[test] - fn test_rent_eager_under_longer_cycle_for_developemnt() { + fn test_rent_eager_under_fixed_cycle_for_developemnt() { solana_logger::setup(); let leader_pubkey = Pubkey::new_rand(); let leader_lamports = 3; @@ -3770,7 +3799,7 @@ mod tests { let mut bank = Arc::new(Bank::new(&genesis_config)); assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32); - assert_eq!(bank.epoch_schedule.first_normal_epoch, 3); + assert_eq!(bank.first_normal_epoch(), 3); assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0)); assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432000)]); @@ -3819,7 +3848,7 @@ mod tests { #[test] fn test_rent_eager_pubkey_range_minimal() { - let range = Bank::pubkey_range_by_partition((0, 0, 1)); + let range = Bank::pubkey_range_from_partition((0, 0, 1)); assert_eq!( range, Pubkey::new_from_array([0x00; 32])..=Pubkey::new_from_array([0xff; 32]) @@ -3828,7 +3857,7 @@ mod tests { #[test] fn test_rent_eager_pubkey_range_dividable() { - let range = Bank::pubkey_range_by_partition((0, 0, 2)); + let range = Bank::pubkey_range_from_partition((0, 0, 2)); assert_eq!( range, Pubkey::new_from_array([ @@ -3843,7 +3872,7 @@ mod tests { ]) ); - let range = Bank::pubkey_range_by_partition((0, 1, 2)); + let range = Bank::pubkey_range_from_partition((0, 1, 2)); assert_eq!( range, Pubkey::new_from_array([ @@ -3863,7 +3892,7 @@ mod tests { fn test_rent_eager_pubkey_range_not_dividable() { solana_logger::setup(); - let range = Bank::pubkey_range_by_partition((0, 0, 3)); + let range = Bank::pubkey_range_from_partition((0, 0, 3)); assert_eq!( range, Pubkey::new_from_array([ @@ -3878,7 +3907,7 @@ mod tests { ]) ); - let range = Bank::pubkey_range_by_partition((0, 1, 3)); + let range = Bank::pubkey_range_from_partition((0, 1, 3)); assert_eq!( range, Pubkey::new_from_array([ @@ -3893,7 +3922,7 @@ mod tests { ]) ); - let range = Bank::pubkey_range_by_partition((1, 2, 3)); + let range = Bank::pubkey_range_from_partition((1, 2, 3)); assert_eq!( range, Pubkey::new_from_array([ @@ -3912,7 +3941,7 @@ mod tests { #[test] fn test_rent_eager_pubkey_range_gap() { solana_logger::setup(); - let range = Bank::pubkey_range_by_partition((120, 1023, 12345)); + let range = Bank::pubkey_range_from_partition((120, 1023, 12345)); assert_eq!( range, 
Pubkey::new_from_array([ From 667bd6635c13502852b06113504600b2edfb8ee8 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 17:01:20 +0900 Subject: [PATCH 22/28] Yet more naming clarifications --- runtime/src/bank.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b167452d21bc69..9ce3c4d6ff5f68 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1842,7 +1842,7 @@ impl Bank { if parent_epoch < current_epoch { if current_slot_index > 0 { let parent_last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1; - partitions.push(self.partition_in_collection_cycle( + partitions.push(self.partition_from_slot_indexes( parent_slot_index, parent_last_slot_index, parent_epoch, @@ -1851,7 +1851,7 @@ impl Bank { parent_slot_index = 0; } - partitions.push(self.partition_in_collection_cycle( + partitions.push(self.partition_from_slot_indexes( parent_slot_index, current_slot_index, current_epoch, @@ -1860,22 +1860,22 @@ impl Bank { partitions } - fn partition_in_collection_cycle( + fn partition_from_slot_indexes( &self, start_slot_index: SlotIndex, end_slot_index: SlotIndex, - current_epoch: Epoch, + epoch: Epoch, ) -> Partition { - let cycle_params = self.determine_collection_cycle_params(current_epoch); + let cycle_params = self.determine_collection_cycle_params(epoch); let (_, _, is_in_multi_epoch_cycle, _, _, partition_count) = cycle_params; // use common code-path for both very-likely and very-unlikely for the sake of minimized // risk of any mis-calculation instead of neligilbe faster computation per slot for the // likely case. let mut start_partition_index = - Self::partition_index_in_collection_cycle(start_slot_index, cycle_params); + Self::partition_index_from_slot_index(start_slot_index, cycle_params); let end_partition_index = - Self::partition_index_in_collection_cycle(end_slot_index, cycle_params); + Self::partition_index_from_slot_index(end_slot_index, cycle_params); let is_across_epoch_boundary = start_slot_index == 0 && end_slot_index != 1 && start_partition_index > 0; @@ -1905,12 +1905,12 @@ impl Bank { (start_partition_index, end_partition_index, partition_count) } - fn determine_collection_cycle_params(&self, current_epoch: Epoch) -> RentCollectionCycleParams { - let slot_count_per_epoch = self.get_slots_in_epoch(current_epoch); + fn determine_collection_cycle_params(&self, epoch: Epoch) -> RentCollectionCycleParams { + let slot_count_per_epoch = self.get_slots_in_epoch(epoch); - if !self.use_multi_epoch_collection_cycle(current_epoch) { + if !self.use_multi_epoch_collection_cycle(epoch) { ( - current_epoch, + epoch, slot_count_per_epoch, false, 0, @@ -1922,7 +1922,7 @@ impl Bank { let partition_count = slot_count_per_epoch * epoch_count_in_cycle; ( - current_epoch, + epoch, slot_count_per_epoch, true, self.first_normal_epoch(), @@ -1932,10 +1932,10 @@ impl Bank { } } - fn partition_index_in_collection_cycle( + fn partition_index_from_slot_index( slot_index_in_epoch: SlotIndex, ( - current_epoch, + epoch, slot_count_per_epoch, _, base_epoch, @@ -1943,7 +1943,7 @@ impl Bank { _, ): RentCollectionCycleParams, ) -> PartitionIndex { - let epoch_offset = current_epoch - base_epoch; + let epoch_offset = epoch - base_epoch; let epoch_index_in_cycle = epoch_offset % epoch_count_per_cycle; slot_index_in_epoch + epoch_index_in_cycle * slot_count_per_epoch } @@ -1953,8 +1953,8 @@ impl Bank { // These logic isn't strictly eager anymore and should only be used // for 
development/performance purpose. // Absolutely not under OperationMode::Stable!!!! - fn use_multi_epoch_collection_cycle(&self, current_epoch: Epoch) -> bool { - current_epoch >= self.first_normal_epoch() + fn use_multi_epoch_collection_cycle(&self, epoch: Epoch) -> bool { + epoch >= self.first_normal_epoch() && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day() } From 98ef1a22f0b832eef10b9e28c0ee61087193bf27 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 27 Apr 2020 19:08:34 +0900 Subject: [PATCH 23/28] Make prefix width strictly uniform for 2-base partition_count --- runtime/src/bank.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9ce3c4d6ff5f68..fa45fa58455372 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1774,7 +1774,18 @@ impl Bank { ) -> RangeInclusive { type Prefix = u64; const PREFIX_SIZE: usize = mem::size_of::(); - let partition_width = Prefix::max_value() / partition_count; + + let mut start_pubkey = [0x00u8; 32]; + let mut end_pubkey = [0xffu8; 32]; + + if partition_count == 1 { + assert_eq!(start_index, 0); + assert_eq!(end_index, 0); + return Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey); + } + + // not-overflowing way of `(Prefix::max_value() + 1) / partition_count` + let partition_width = (Prefix::max_value() - partition_count + 1) / partition_count + 1; let start_key_prefix = if start_index == 0 && end_index == 0 { 0 } else { @@ -1787,15 +1798,14 @@ impl Bank { (end_index + 1) * partition_width - 1 }; - let mut start_pubkey = [0x00u8; 32]; - let mut end_pubkey = [0xffu8; 32]; start_pubkey[0..PREFIX_SIZE].copy_from_slice(&start_key_prefix.to_be_bytes()); end_pubkey[0..PREFIX_SIZE].copy_from_slice(&end_key_prefix.to_be_bytes()); trace!( - "pubkey_range_from_partition: ({}-{})/{}: {:02x?}-{:02x?}", + "pubkey_range_from_partition: ({}-{})/{} [{}]: {:02x?}-{:02x?}", start_index, end_index, partition_count, + (end_key_prefix - start_key_prefix), start_pubkey, end_pubkey ); @@ -3866,7 +3876,7 @@ mod tests { 0x00, 0x00, 0x00, 0x00 ]) ..=Pubkey::new_from_array([ - 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff ]) @@ -3876,7 +3886,7 @@ mod tests { assert_eq!( range, Pubkey::new_from_array([ - 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ]) From 8c2395e62f4235de8cc0a5fb75fb1538b78ee794 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 7 May 2020 17:49:15 +0900 Subject: [PATCH 24/28] Fix typo... 
--- runtime/src/accounts.rs | 2 +- runtime/src/bank.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index f02b1706128578..2bc794c465107b 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -486,7 +486,7 @@ impl Accounts { ) } - pub fn load_to_collect_rent_eargerly>( + pub fn load_to_collect_rent_eagerly>( &self, ancestors: &Ancestors, range: R, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index fa45fa58455372..4f58ba568a9eac 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1753,7 +1753,7 @@ impl Bank { let accounts = self .rc .accounts - .load_to_collect_rent_eargerly(&self.ancestors, subrange); + .load_to_collect_rent_eagerly(&self.ancestors, subrange); let account_count = accounts.len(); // parallelize? From 9f19166fccdaca428385dee9289b94ad9fb674a0 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 7 May 2020 18:01:18 +0900 Subject: [PATCH 25/28] Revert cluster-dependent gate --- ledger/src/snapshot_utils.rs | 1 - runtime/src/bank.rs | 26 +------------------------- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/ledger/src/snapshot_utils.rs b/ledger/src/snapshot_utils.rs index 46619652824ca5..9d58d8de7da411 100644 --- a/ledger/src/snapshot_utils.rs +++ b/ledger/src/snapshot_utils.rs @@ -647,7 +647,6 @@ where } }; bank.operating_mode = Some(genesis_config.operating_mode); - bank.genesis_hash = Some(genesis_config.hash()); info!("Rebuilding accounts..."); let rc = bank::BankRc::from_stream( account_paths, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4f58ba568a9eac..ee6c1189d39bdf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -76,18 +76,6 @@ pub const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5; -pub const MAINNET_BETA_GENESIS_HASH: Hash = Hash::new_from_array([ - 69, 41, 105, 152, 166, 248, 226, 167, 132, 219, 93, 159, 149, 225, 143, 194, 63, 112, 68, 26, - 16, 57, 68, 104, 1, 8, 152, 121, 176, 140, 126, 240, -]); -const MAINNET_BETA_EAGER_RENT_COLLECTION_START_EPOCH: Epoch = 30; // TENTATIVE!!!! - -pub const TESTNET_GENESIS_HASH: Hash = Hash::new_from_array([ - 58, 19, 46, 206, 16, 48, 94, 193, 131, 7, 37, 80, 47, 162, 183, 231, 235, 129, 87, 233, 18, 61, - 76, 31, 101, 74, 113, 120, 113, 97, 220, 33, -]); -const TESTNET_EAGER_RENT_COLLECTION_START_EPOCH: Epoch = 40; // TENTATIVE!!!! 
- type BankStatusCache = StatusCache>; pub type BankSlotDelta = SlotDelta>; type TransactionAccountRefCells = Vec>>; @@ -393,9 +381,6 @@ pub struct Bank { #[serde(skip)] pub operating_mode: Option, - #[serde(skip)] - pub genesis_hash: Option, - #[serde(skip)] pub lazy_rent_collection: AtomicBool, } @@ -418,7 +403,6 @@ impl Bank { ) -> Self { let mut bank = Self::default(); bank.operating_mode = Some(genesis_config.operating_mode); - bank.genesis_hash = Some(genesis_config.hash()); bank.ancestors.insert(bank.slot(), 0); bank.rc.accounts = Arc::new(Accounts::new(paths)); @@ -513,7 +497,6 @@ impl Bank { rewards: None, skip_drop: AtomicBool::new(false), operating_mode: parent.operating_mode, - genesis_hash: parent.genesis_hash, lazy_rent_collection: AtomicBool::new( parent.lazy_rent_collection.load(Ordering::Relaxed), ), @@ -1717,14 +1700,7 @@ impl Bank { return false; } - let not_yet_for_existing_clusters = (self.operating_mode() == OperatingMode::Stable - && self.genesis_hash.unwrap() == MAINNET_BETA_GENESIS_HASH - && self.epoch() < MAINNET_BETA_EAGER_RENT_COLLECTION_START_EPOCH) - || (self.operating_mode() == OperatingMode::Preview - && self.genesis_hash.unwrap() == TESTNET_GENESIS_HASH - && self.epoch() < TESTNET_EAGER_RENT_COLLECTION_START_EPOCH); - - !not_yet_for_existing_clusters + true } fn rent_collection_partitions(&self) -> Vec { From 719b4c00c3bf43f33128fc2d23e2cb777f7485c5 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 7 May 2020 18:16:05 +0900 Subject: [PATCH 26/28] kick ci? From be7eb995af3d6272d2d26a63d1d4a9526e4c4276 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 7 May 2020 18:17:37 +0900 Subject: [PATCH 27/28] kick ci? From 1add8164712110e86eba4b5ba1e1fd48b8f61468 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 7 May 2020 20:03:25 +0900 Subject: [PATCH 28/28] kick ci?
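
Editor's sketch (not part of the patch series): the prefix arithmetic introduced in PATCH 23 can be read in isolation as below, assuming the series' `type Prefix = u64` convention in `pubkey_range_from_partition`. The function name `prefix_range_for_partition`, the clamp of the last partition's end to `u64::max_value()`, and the `main` driver are illustrative assumptions drawn from the hunks and tests above, not code from the series.

// Standalone sketch mirroring pubkey_range_from_partition after PATCH 23,
// working on the u64 prefix instead of full 32-byte pubkeys.
fn prefix_range_for_partition(
    start_index: u64,
    end_index: u64,
    partition_count: u64,
) -> (u64, u64) {
    if partition_count == 1 {
        // Single partition covers the whole prefix space (special case in the patch).
        assert_eq!((start_index, end_index), (0, 0));
        return (0, u64::max_value());
    }
    // Non-overflowing way of `(u64::max_value() + 1) / partition_count`.
    let partition_width = (u64::max_value() - partition_count + 1) / partition_count + 1;
    let start_key_prefix = if start_index == 0 && end_index == 0 {
        0
    } else {
        (start_index + 1) * partition_width
    };
    let end_key_prefix = if end_index + 1 == partition_count {
        // Assumed clamp for the last partition, consistent with the all-0xff
        // end pubkeys in the tests above.
        u64::max_value()
    } else {
        (end_index + 1) * partition_width - 1
    };
    (start_key_prefix, end_key_prefix)
}

fn main() {
    // With a power-of-two partition_count the width is exactly 2^63, so the two
    // halves meet at 0x7fff_ffff_ffff_ffff / 0x8000_0000_0000_0000, matching the
    // expectations updated in test_rent_eager_pubkey_range_dividable.
    assert_eq!(
        prefix_range_for_partition(0, 0, 2),
        (0, 0x7fff_ffff_ffff_ffff)
    );
    assert_eq!(
        prefix_range_for_partition(0, 1, 2),
        (0x8000_0000_0000_0000, u64::max_value())
    );
}

The rewritten width `(Prefix::max_value() - partition_count + 1) / partition_count + 1` evaluates to floor(2^64 / partition_count) without needing a 65-bit intermediate, so for 2-base partition counts every slice gets a strictly uniform width; that is why PATCH 23 could move the test boundaries from 0x7f..fe / 0x7f..ff,0x00.. to the clean 0x7f..ff / 0x80..00 split.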