Introduce eager rent collection #9527

Merged · 28 commits · merged May 13, 2020

Commits (28)

0b9482d
Switch AccountsIndex.account_maps from HashMap to BTreeMap
ryoqun Apr 27, 2020
e464209
Introduce eager rent collection
ryoqun Apr 16, 2020
06a3e3e
Start to add tests
ryoqun Apr 21, 2020
9650258
Avoid too short eager rent collection cycles
ryoqun Apr 21, 2020
43013f2
Add more tests
ryoqun Apr 23, 2020
8487ef5
Add more tests...
ryoqun Apr 23, 2020
a348b6f
Refactor!!!!!!
ryoqun Apr 23, 2020
c78d339
Refactoring follow up
ryoqun Apr 23, 2020
3f41af3
More tiny cleanups
ryoqun Apr 23, 2020
7d05968
Don't rewrite 0-lamport accounts to be deterministic
ryoqun Apr 24, 2020
4d93647
Refactor a bit
ryoqun Apr 24, 2020
aec99a1
Do hard fork, restore tests, and perf. mitigation
ryoqun Apr 24, 2020
691f8f0
Fix build...
ryoqun Apr 25, 2020
5ea7b38
Refactor and add switch over for testnet (TdS)
ryoqun Apr 25, 2020
5505530
Use to_be_bytes
ryoqun Apr 25, 2020
f834166
cleanup
ryoqun Apr 25, 2020
ec5f9f0
More tiny cleanup
ryoqun Apr 25, 2020
26787aa
Rebase cleanup
ryoqun Apr 27, 2020
2ff416e
Set Bank::genesis_hash when resuming from snapshot
ryoqun Apr 27, 2020
641a1f3
Reorder fns and clean ups
ryoqun Apr 27, 2020
cb61b75
Better naming and commenting
ryoqun Apr 27, 2020
667bd66
Yet more naming clarifications
ryoqun Apr 27, 2020
98ef1a2
Make prefix width strictly uniform for 2-base partition_count
ryoqun Apr 27, 2020
8c2395e
Fix typo...
ryoqun May 7, 2020
9f19166
Revert cluster-dependent gate
ryoqun May 7, 2020
719b4c0
kick ci?
ryoqun May 7, 2020
be7eb99
kick ci?
ryoqun May 7, 2020
1add816
kick ci?
ryoqun May 7, 2020
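
The commit trail sketches the overall approach: every slot in an epoch becomes responsible for one contiguous slice of the 256-bit pubkey space, so each account is visited for rent collection once per epoch. The snippet below is a simplified, hypothetical illustration of that partitioning idea, not the PR's actual code: the helper name `pubkey_range_for_partition` and the u64-prefix arithmetic are assumptions, but the reliance on big-endian bytes mirrors the "Use to_be_bytes" and "Make prefix width strictly uniform for 2-base partition_count" commits.

```rust
// Hypothetical sketch of partitioning the pubkey space into uniform ranges.
// A 32-byte pubkey sorts by its big-endian bytes, so slicing the leading 8 bytes
// (read as a u64) into equal intervals yields contiguous, evenly sized partitions.

type PubkeyBytes = [u8; 32];

/// Inclusive (start, end) keys for `partition_index` out of `partition_count`
/// equally sized slices of the key space. Boundary handling is simplified here.
fn pubkey_range_for_partition(partition_index: u64, partition_count: u64) -> (PubkeyBytes, PubkeyBytes) {
    assert!(partition_count > 0 && partition_index < partition_count);
    let width = u64::MAX / partition_count;
    let start_prefix = partition_index * width;
    let end_prefix = if partition_index == partition_count - 1 {
        u64::MAX
    } else {
        (partition_index + 1) * width - 1
    };

    let mut start = [0x00u8; 32];
    let mut end = [0xffu8; 32];
    // to_be_bytes keeps the u64 prefix ordering consistent with byte-wise pubkey ordering.
    start[..8].copy_from_slice(&start_prefix.to_be_bytes());
    end[..8].copy_from_slice(&end_prefix.to_be_bytes());
    (start, end)
}

fn main() {
    // With 4 partitions, partition 1 covers roughly the second quarter of the key space.
    let (start, end) = pubkey_range_for_partition(1, 4);
    println!("start prefix = {:02x?}", &start[..8]);
    println!("end prefix   = {:02x?}", &end[..8]);
}
```

Each bank then loads only the accounts whose pubkeys fall in its slice (see `load_to_collect_rent_eagerly` in the runtime/src/accounts.rs diff below) and collects rent from them.
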
2 changes: 2 additions & 0 deletions core/tests/bank_forks.rs
@@ -20,6 +20,7 @@ mod tests {
};
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
hash::hashv,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -95,6 +96,7 @@ mod tests {
&CompressionType::Bzip2,
),
CompressionType::Bzip2,
&GenesisConfig::default(),
)
.unwrap();

1 change: 1 addition & 0 deletions ledger/src/bank_forks_utils.rs
@@ -60,6 +60,7 @@ pub fn load(
&snapshot_config.snapshot_path,
&archive_filename,
compression,
genesis_config,
)
.expect("Load from snapshot failed");

6 changes: 5 additions & 1 deletion ledger/src/snapshot_utils.rs
@@ -15,7 +15,7 @@ use solana_runtime::{
MAX_SNAPSHOT_DATA_FILE_SIZE,
},
};
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
use solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey};
use std::{
cmp::Ordering,
fs::{self, File},
@@ -451,6 +451,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
snapshot_path: &PathBuf,
snapshot_tar: P,
compression: CompressionType,
genesis_config: &GenesisConfig,
) -> Result<Bank> {
// Untar the snapshot into a temp directory under `snapshot_config.snapshot_path()`
let unpack_dir = tempfile::tempdir_in(snapshot_path)?;
@@ -470,6 +471,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
frozen_account_pubkeys,
&unpacked_snapshots_dir,
unpacked_accounts_dir,
genesis_config,
)?;

if !bank.verify_snapshot_bank() {
@@ -615,6 +617,7 @@ fn rebuild_bank_from_snapshots<P>(
frozen_account_pubkeys: &[Pubkey],
unpacked_snapshots_dir: &PathBuf,
append_vecs_path: P,
genesis_config: &GenesisConfig,
) -> Result<Bank>
where
P: AsRef<Path>,
Expand Down Expand Up @@ -643,6 +646,7 @@ where
)));
}
};
bank.operating_mode = Some(genesis_config.operating_mode);
info!("Rebuilding accounts...");
let rc = bank::BankRc::from_stream(
account_paths,
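
The only behavioral change here is that the rebuilt bank now learns `operating_mode` from genesis: the field is not part of the serialized snapshot, presumably so that behavior which depends on the cluster's operating mode (see the "Refactor and add switch over for testnet (TdS)" commit) survives a restart from a snapshot. Below is a minimal standalone sketch of that hand-off; the types are hypothetical stand-ins, not solana_sdk's.

```rust
// Stand-in types only; the real OperatingMode, GenesisConfig, and Bank live in
// solana_sdk / solana_runtime. This isolates the one assignment added above.

#[derive(Clone, Copy, Debug)]
enum OperatingMode {
    Development,
    Preview, // e.g. a Tour de SOL-style testnet
}

struct GenesisConfig {
    operating_mode: OperatingMode,
}

struct Bank {
    operating_mode: Option<OperatingMode>,
}

fn rebuild_bank_from_snapshot(mut bank: Bank, genesis_config: &GenesisConfig) -> Bank {
    // Mirrors `bank.operating_mode = Some(genesis_config.operating_mode);` in the diff:
    // the deserialized bank starts with None and must be told which cluster it is on.
    bank.operating_mode = Some(genesis_config.operating_mode);
    bank
}

fn main() {
    let genesis = GenesisConfig { operating_mode: OperatingMode::Preview };
    let bank = rebuild_bank_from_snapshot(Bank { operating_mode: None }, &genesis);
    println!("restored operating_mode: {:?}", bank.operating_mode);
}
```
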
42 changes: 33 additions & 9 deletions runtime/src/accounts.rs
@@ -27,6 +27,7 @@ use solana_sdk::{
use std::{
collections::{HashMap, HashSet},
io::{BufReader, Error as IOError, Read},
ops::RangeBounds,
path::{Path, PathBuf},
sync::{Arc, Mutex, RwLock},
};
@@ -455,6 +456,21 @@ impl Accounts {
}
}

fn load_while_filtering<F: Fn(&Account) -> bool>(
collector: &mut Vec<(Pubkey, Account)>,
option: Option<(&Pubkey, Account, Slot)>,
filter: F,
) {
if let Some(data) = option
// Don't ever load zero-lamport accounts into the runtime, because
// the existence of zero-lamport accounts is never deterministic!!
.filter(|(_, account, _)| account.lamports > 0 && filter(account))
.map(|(pubkey, account, _slot)| (*pubkey, account))
{
collector.push(data)
}
}

pub fn load_by_program(
&self,
ancestors: &Ancestors,
@@ -463,15 +479,23 @@
self.accounts_db.scan_accounts(
ancestors,
|collector: &mut Vec<(Pubkey, Account)>, option| {
if let Some(data) = option
.filter(|(_, account, _)| {
(program_id.is_none() || Some(&account.owner) == program_id)
&& account.lamports != 0
})
.map(|(pubkey, account, _slot)| (*pubkey, account))
{
collector.push(data)
}
Self::load_while_filtering(collector, option, |account| {
program_id.is_none() || Some(&account.owner) == program_id
})
},
)
}

pub fn load_to_collect_rent_eagerly<R: RangeBounds<Pubkey>>(
&self,
ancestors: &Ancestors,
range: R,
) -> Vec<(Pubkey, Account)> {
self.accounts_db.range_scan_accounts(
ancestors,
range,
|collector: &mut Vec<(Pubkey, Account)>, option| {
Self::load_while_filtering(collector, option, |_| true)
},
)
}
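
`load_while_filtering` is the shared core of both loaders: `load_by_program` passes an owner predicate, while the new `load_to_collect_rent_eagerly` passes `|_| true` so every funded account in the requested pubkey range is returned. The following is a standalone illustration of the same `Option::filter`/`map` pattern using plain stand-in types, not solana's API:

```rust
// Plain-type illustration of the filtering pattern above: zero-lamport accounts
// are always skipped, then a caller-supplied predicate decides what to keep.

#[derive(Debug)]
struct Account {
    lamports: u64,
    owner: u8, // stand-in for a Pubkey owner
}

fn load_while_filtering<F: Fn(&Account) -> bool>(
    collector: &mut Vec<(u32, Account)>, // (pubkey, account) stand-ins
    option: Option<(u32, Account, u64)>, // (pubkey, account, slot) stand-ins
    filter: F,
) {
    if let Some(data) = option
        .filter(|(_, account, _)| account.lamports > 0 && filter(account))
        .map(|(pubkey, account, _slot)| (pubkey, account))
    {
        collector.push(data)
    }
}

fn main() {
    let mut collector = Vec::new();
    // Dropped: zero lamports, even though the owner matches.
    load_while_filtering(
        &mut collector,
        Some((1, Account { lamports: 0, owner: 7 }, 5)),
        |a| a.owner == 7,
    );
    // Kept: funded and matching the predicate.
    load_while_filtering(
        &mut collector,
        Some((2, Account { lamports: 42, owner: 7 }, 5)),
        |a| a.owner == 7,
    );
    assert_eq!(collector.len(), 1);
    println!("{:?}", collector);
}
```

In the PR itself, `load_to_collect_rent_eagerly` accepts any `R: RangeBounds<Pubkey>`, for example an inclusive range built from partition boundaries like those sketched after the commit list.
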
55 changes: 42 additions & 13 deletions runtime/src/accounts_db.rs
@@ -47,6 +47,7 @@ use std::{
collections::{HashMap, HashSet},
fmt,
io::{BufReader, Cursor, Error as IOError, ErrorKind, Read, Result as IOResult},
ops::RangeBounds,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
sync::{Arc, Mutex, RwLock},
@@ -173,6 +174,25 @@ impl<'a> Serialize for AccountStorageSerialize<'a> {

#[derive(Clone, Default, Debug)]
pub struct AccountStorage(pub HashMap<Slot, SlotStores>);

impl AccountStorage {
fn scan_accounts(&self, account_info: &AccountInfo, slot: Slot) -> Option<(Account, Slot)> {
self.0
.get(&slot)
.and_then(|storage_map| storage_map.get(&account_info.store_id))
.and_then(|store| {
Some(
store
.accounts
.get_account(account_info.offset)?
.0
.clone_account(),
)
})
.map(|account| (account, slot))
}
}

impl<'de> Deserialize<'de> for AccountStorage {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@@ -1095,6 +1115,28 @@ impl AccountsDB {
scan_func(
&mut collector,
storage
.0
.get(&slot)
.and_then(|storage_map| storage_map.get(&account_info.store_id))
.and_then(|store| {
Some(
store
.accounts
.get_account(account_info.offset)?
.0
.clone_account(),
)
})
.map(|account| (pubkey, account, slot)),
.scan_accounts(account_info, slot)
.map(|(account, slot)| (pubkey, account, slot)),
)
});
collector
}

pub fn range_scan_accounts<F, A, R>(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A
where
F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (),
A: Default,
R: RangeBounds<Pubkey>,
{
let mut collector = A::default();
let accounts_index = self.accounts_index.read().unwrap();
let storage = self.storage.read().unwrap();
accounts_index.range_scan_accounts(ancestors, range, |pubkey, (account_info, slot)| {
scan_func(
&mut collector,
storage
.scan_accounts(account_info, slot)
.map(|(account, slot)| (pubkey, account, slot)),
)
});
collector
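
Both scan paths now share `AccountStorage::scan_accounts` to resolve an index entry (store id and offset) into a cloned `Account`, and both drive the caller's closure through the same collector pattern: the scan owns a `Default`-constructed accumulator `A`, and the closure decides what, if anything, to accumulate. A minimal standalone sketch of that pattern with stand-in types, not the AccountsDB API:

```rust
// Generic "collector" scan: the driver owns A::default() and hands each item to
// the caller's closure, which may or may not accumulate it. This mirrors the
// scan_accounts / range_scan_accounts signatures above with plain types.

fn scan_with_collector<F, A, I>(items: I, scan_func: F) -> A
where
    F: Fn(&mut A, Option<(u32, u64)>), // stand-in for Option<(&Pubkey, Account, Slot)>
    A: Default,
    I: Iterator<Item = Option<(u32, u64)>>,
{
    let mut collector = A::default();
    for item in items {
        scan_func(&mut collector, item);
    }
    collector
}

fn main() {
    // `None` entries model index hits whose account could not be resolved in storage.
    let items = vec![Some((1, 10)), None, Some((2, 20))];
    let collected: Vec<(u32, u64)> = scan_with_collector(items.into_iter(), |collector, item| {
        if let Some(pair) = item {
            collector.push(pair);
        }
    });
    assert_eq!(collected, vec![(1, 10), (2, 20)]);
    println!("{:?}", collected);
}
```
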
30 changes: 24 additions & 6 deletions runtime/src/accounts_index.rs
@@ -1,7 +1,8 @@
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::sync::atomic::{AtomicU64, Ordering};
use std::{
collections::{HashMap, HashSet},
collections::{BTreeMap, HashMap, HashSet},
ops::RangeBounds,
sync::{RwLock, RwLockReadGuard},
};

@@ -14,26 +15,43 @@ type AccountMapEntry<T> = (AtomicU64, RwLock<SlotList<T>>);

#[derive(Debug, Default)]
pub struct AccountsIndex<T> {
pub account_maps: HashMap<Pubkey, AccountMapEntry<T>>,
pub account_maps: BTreeMap<Pubkey, AccountMapEntry<T>>,

pub roots: HashSet<Slot>,
pub uncleaned_roots: HashSet<Slot>,
}

impl<T: Clone> AccountsIndex<T> {
/// call func with every pubkey and index visible from a given set of ancestors
pub fn scan_accounts<F>(&self, ancestors: &Ancestors, mut func: F)
impl<'a, T: 'a + Clone> AccountsIndex<T> {
fn do_scan_accounts<F, I>(&self, ancestors: &Ancestors, mut func: F, iter: I)
where
F: FnMut(&Pubkey, (&T, Slot)) -> (),
I: Iterator<Item = (&'a Pubkey, &'a AccountMapEntry<T>)>,
{
for (pubkey, list) in self.account_maps.iter() {
for (pubkey, list) in iter {
let list_r = &list.1.read().unwrap();
if let Some(index) = self.latest_slot(ancestors, &list_r) {
func(pubkey, (&list_r[index].1, list_r[index].0));
}
}
}

/// call func with every pubkey and index visible from a given set of ancestors
pub fn scan_accounts<F>(&self, ancestors: &Ancestors, func: F)
where
F: FnMut(&Pubkey, (&T, Slot)) -> (),
{
self.do_scan_accounts(ancestors, func, self.account_maps.iter());
}

/// call func with every pubkey and index visible from a given set of ancestors with range
pub fn range_scan_accounts<F, R>(&self, ancestors: &Ancestors, range: R, func: F)
where
F: FnMut(&Pubkey, (&T, Slot)) -> (),
R: RangeBounds<Pubkey>,
{
self.do_scan_accounts(ancestors, func, self.account_maps.range(range));
}

fn get_rooted_entries(&self, slice: SlotSlice<T>) -> SlotList<T> {
slice
.iter()
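
Switching `account_maps` from `HashMap` to `BTreeMap` (the "Switch AccountsIndex.account_maps from HashMap to BTreeMap" commit) is what makes `range_scan_accounts` possible: a `BTreeMap` keeps keys ordered and exposes `range()`, so a partition's pubkey slice can be visited without touching the rest of the index, at the cost of O(log n) rather than O(1) point lookups. `do_scan_accounts` then lets the full scan (`iter()`) and the range scan (`range(range)`) share the same visiting logic. A tiny standalone demonstration, with short byte-array keys standing in for pubkeys:

```rust
use std::collections::BTreeMap;
use std::ops::RangeInclusive;

fn main() {
    // 4-byte keys stand in for 32-byte pubkeys; BTreeMap orders them lexicographically,
    // which is exactly the ordering a big-endian prefix partition relies on.
    let mut account_maps: BTreeMap<[u8; 4], u64> = BTreeMap::new();
    account_maps.insert([0x00, 0, 0, 1], 10);
    account_maps.insert([0x40, 0, 0, 2], 20);
    account_maps.insert([0x80, 0, 0, 3], 30);
    account_maps.insert([0xc0, 0, 0, 4], 40);

    // Only keys inside the inclusive range are visited; a HashMap would have to
    // scan every entry to answer the same query.
    let range: RangeInclusive<[u8; 4]> = [0x40, 0, 0, 0]..=[0x7f, 0xff, 0xff, 0xff];
    for (key, lamports) in account_maps.range(range) {
        println!("{:02x?} => {}", key, lamports); // prints only [40, 00, 00, 02] => 20
    }
}
```
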