(LedgerStore/FIFO) Refactor FIFO options and sanity check #23131

Merged · 1 commit · Feb 19, 2022

8 changes: 5 additions & 3 deletions core/tests/ledger_cleanup.rs
@@ -9,7 +9,7 @@ mod tests {
         solana_core::ledger_cleanup_service::LedgerCleanupService,
         solana_ledger::{
             blockstore::{make_many_slot_shreds, Blockstore},
-            blockstore_db::{BlockstoreOptions, ShredStorageType},
+            blockstore_db::{BlockstoreOptions, BlockstoreRocksFifoOptions, ShredStorageType},
             get_tmp_ledger_path,
         },
         solana_measure::measure::Measure,
@@ -309,8 +309,10 @@ mod tests {
             &ledger_path,
             if config.fifo_compaction {
                 BlockstoreOptions {
-                    shred_storage_type: ShredStorageType::RocksFifo,
-                    shred_data_cf_size: config.shred_data_cf_size,
+                    shred_storage_type: ShredStorageType::RocksFifo(BlockstoreRocksFifoOptions {
+                        shred_data_cf_size: config.shred_data_cf_size,
+                        ..BlockstoreRocksFifoOptions::default()
+                    }),
                     ..BlockstoreOptions::default()
                 }
             } else {
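The test above shows the new call-site shape: the FIFO-specific column-family sizes now travel inside the RocksFifo variant instead of as loose fields on BlockstoreOptions. As a minimal sketch, assuming only the names visible in this diff (the size value below is a placeholder, not from the PR):

// Sketch: open-options for FIFO shred storage, overriding one size and
// keeping the default for the other via struct-update syntax.
let options = BlockstoreOptions {
    shred_storage_type: ShredStorageType::RocksFifo(BlockstoreRocksFifoOptions {
        // Placeholder budget; must exceed FIFO_WRITE_BUFFER_SIZE, otherwise
        // the blockstore panics (see new_cf_descriptor_fifo below).
        shred_data_cf_size: 128 * 1024 * 1024 * 1024,
        ..BlockstoreRocksFifoOptions::default()
    }),
    ..BlockstoreOptions::default()
};
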
113 changes: 61 additions & 52 deletions ledger/src/blockstore_db.rs
@@ -309,6 +309,12 @@ impl Rocks {
         let oldest_slot = OldestSlot::default();

         // Get column family descriptors and names
+        let (cf_descriptor_shred_data, cf_descriptor_shred_code) =
+            new_cf_descriptor_pair_shreds::<ShredData, ShredCode>(
+                &options.shred_storage_type,
+                &access_type,
+                &oldest_slot,
+            );
         let cfs = vec![
             new_cf_descriptor::<SlotMeta>(&access_type, &oldest_slot),
             new_cf_descriptor::<DeadSlots>(&access_type, &oldest_slot),
@@ -318,18 +324,8 @@
             new_cf_descriptor::<BankHash>(&access_type, &oldest_slot),
             new_cf_descriptor::<Root>(&access_type, &oldest_slot),
             new_cf_descriptor::<Index>(&access_type, &oldest_slot),
-            new_cf_descriptor_shreds::<ShredData>(
-                &options.shred_storage_type,
-                &access_type,
-                &oldest_slot,
-                &options.shred_data_cf_size,
-            ),
-            new_cf_descriptor_shreds::<ShredCode>(
-                &options.shred_storage_type,
-                &access_type,
-                &oldest_slot,
-                &options.shred_code_cf_size,
-            ),
+            cf_descriptor_shred_data,
+            cf_descriptor_shred_code,
             new_cf_descriptor::<TransactionStatus>(&access_type, &oldest_slot),
             new_cf_descriptor::<AddressSignatures>(&access_type, &oldest_slot),
             new_cf_descriptor::<TransactionMemos>(&access_type, &oldest_slot),
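A note on the design choice here: routing both descriptors through a single new_cf_descriptor_pair_shreds call, rather than two independent per-column-family calls as before, means the data and coding shred column families can no longer silently diverge in storage type (the old code could fall back to level compaction for just one of them; see the removed warn! branch further down).
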
@@ -981,7 +977,7 @@ pub enum ShredStorageType {
     // (Experimental) Stores shreds under RocksDB's FIFO compaction which
     // allows ledger store to reclaim storage more efficiently with
     // lower I/O overhead.
-    RocksFifo,
+    RocksFifo(BlockstoreRocksFifoOptions),
 }

 pub struct BlockstoreOptions {
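Because the variant now carries its configuration as a payload, code that branches on the storage type can destructure the options directly, and the FIFO sizes are guaranteed present whenever the FIFO arm is taken. A hedged sketch using only names from this diff:

// Sketch: the match arms mirror how new_cf_descriptor_pair_shreds (below)
// consumes the enum; `fifo` binds the BlockstoreRocksFifoOptions payload.
match &options.shred_storage_type {
    ShredStorageType::RocksLevel => {
        // Level compaction: no extra knobs to read.
    }
    ShredStorageType::RocksFifo(fifo) => {
        assert!(fifo.shred_data_cf_size > FIFO_WRITE_BUFFER_SIZE);
        assert!(fifo.shred_code_cf_size > FIFO_WRITE_BUFFER_SIZE);
    }
}
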
@@ -993,37 +989,47 @@ pub struct BlockstoreOptions {
     pub enforce_ulimit_nofile: bool,
     // Determine how to store both data and coding shreds. Default: RocksLevel.
     pub shred_storage_type: ShredStorageType,
+}
+
+impl Default for BlockstoreOptions {
+    /// The default options are the values used by [`Blockstore::open`].
+    fn default() -> Self {
+        Self {
+            access_type: AccessType::PrimaryOnly,
+            recovery_mode: None,
+            enforce_ulimit_nofile: true,
+            shred_storage_type: ShredStorageType::RocksLevel,
+        }
+    }
+}
+
+pub struct BlockstoreRocksFifoOptions {
     // The maximum storage size for storing data shreds in column family
     // [`cf::DataShred`]. Typically, data shreds contribute around 25% of the
     // ledger store storage size if the RPC service is enabled, or 50% if RPC
     // service is not enabled.
     //
-    // Currently, this setting is only used when shred_storage_type is set to
-    // [`ShredStorageType::RocksFifo`].
+    // Note that this number must be greater than FIFO_WRITE_BUFFER_SIZE
+    // otherwise we won't be able to write any file. If not, the blockstore
+    // will panic.
     pub shred_data_cf_size: u64,
     // The maximum storage size for storing coding shreds in column family
     // [`cf::CodeShred`]. Typically, coding shreds contribute around 20% of the
     // ledger store storage size if the RPC service is enabled, or 40% if RPC
     // service is not enabled.
     //
-    // Currently, this setting is only used when shred_storage_type is set to
-    // [`ShredStorageType::RocksFifo`].
+    // Note that this number must be greater than FIFO_WRITE_BUFFER_SIZE
+    // otherwise we won't be able to write any file. If not, the blockstore
+    // will panic.
     pub shred_code_cf_size: u64,
 }

-impl Default for BlockstoreOptions {
-    /// The default options are the values used by [`Blockstore::open`].
+impl Default for BlockstoreRocksFifoOptions {
     fn default() -> Self {
         Self {
-            access_type: AccessType::PrimaryOnly,
-            recovery_mode: None,
-            enforce_ulimit_nofile: true,
-            shred_storage_type: ShredStorageType::RocksLevel,
-            // Maximum size of cf::DataShred. Used when `shred_storage_type`
-            // is set to ShredStorageType::RocksFifo.
+            // Maximum size of cf::ShredData.
            shred_data_cf_size: DEFAULT_FIFO_COMPACTION_DATA_CF_SIZE,
-            // Maximum size of cf::CodeShred. Used when `shred_storage_type`
-            // is set to ShredStorageType::RocksFifo.
+            // Maximum size of cf::ShredCode.
            shred_code_cf_size: DEFAULT_FIFO_COMPACTION_CODING_CF_SIZE,
         }
     }
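The doc comments above carry rough sizing guidance: with the RPC service enabled, data shreds take about 25% and coding shreds about 20% of total ledger-store size (about 50% and 40% without RPC). A hypothetical helper, not part of this PR, that turns a total ledger budget into FIFO options using those ratios:

// Hypothetical (not in the PR): derive per-CF budgets from a total
// ledger-store budget using the percentages quoted in the doc comments.
fn fifo_options_for_budget(total_ledger_bytes: u64, rpc_enabled: bool) -> BlockstoreRocksFifoOptions {
    let (data_pct, code_pct) = if rpc_enabled { (25, 20) } else { (50, 40) };
    BlockstoreRocksFifoOptions {
        shred_data_cf_size: total_ledger_bytes / 100 * data_pct,
        shred_code_cf_size: total_ledger_bytes / 100 * code_pct,
    }
}
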
@@ -1415,39 +1421,42 @@ fn get_cf_options<C: 'static + Column + ColumnName>(
     options
 }

-/// Constructs and returns a ColumnFamilyDescriptor based on the
-/// specified ShredStorageType.
-fn new_cf_descriptor_shreds<C: 'static + Column + ColumnName>(
-    storage_type: &ShredStorageType,
+/// Creates and returns the column family descriptors for both data shreds and
+/// coding shreds column families.
+///
+/// @return a pair of ColumnFamilyDescriptor where the first / second elements
+/// are associated to the first / second template class respectively.
+fn new_cf_descriptor_pair_shreds<
+    D: 'static + Column + ColumnName, // Column Family for Data Shred
+    C: 'static + Column + ColumnName, // Column Family for Coding Shred
+>(
+    shred_storage_type: &ShredStorageType,
     access_type: &AccessType,
     oldest_slot: &OldestSlot,
-    max_cf_size: &u64,
-) -> ColumnFamilyDescriptor {
-    match storage_type {
-        ShredStorageType::RocksLevel => new_cf_descriptor::<C>(access_type, oldest_slot),
-        ShredStorageType::RocksFifo => {
-            if *max_cf_size > FIFO_WRITE_BUFFER_SIZE {
-                new_cf_descriptor_fifo::<C>(max_cf_size)
-            } else {
-                warn!(
-                    "{} cf_size must be greater than {} when using ShredStorageType::RocksFifo.",
-                    C::NAME,
-                    FIFO_WRITE_BUFFER_SIZE
-                );
-                warn!(
-                    "Fall back to ShredStorageType::RocksLevel for cf::{}.",
-                    C::NAME
-                );
-                new_cf_descriptor::<C>(access_type, oldest_slot)
-            }
-        }
+) -> (ColumnFamilyDescriptor, ColumnFamilyDescriptor) {
+    match shred_storage_type {
+        ShredStorageType::RocksLevel => (
+            new_cf_descriptor::<D>(access_type, oldest_slot),
+            new_cf_descriptor::<C>(access_type, oldest_slot),
+        ),
+        ShredStorageType::RocksFifo(fifo_options) => (
+            new_cf_descriptor_fifo::<D>(&fifo_options.shred_data_cf_size),
+            new_cf_descriptor_fifo::<C>(&fifo_options.shred_code_cf_size),
+        ),
     }
 }

 fn new_cf_descriptor_fifo<C: 'static + Column + ColumnName>(
     max_cf_size: &u64,
 ) -> ColumnFamilyDescriptor {
-    ColumnFamilyDescriptor::new(C::NAME, get_cf_options_fifo::<C>(max_cf_size))
+    if *max_cf_size > FIFO_WRITE_BUFFER_SIZE {
+        ColumnFamilyDescriptor::new(C::NAME, get_cf_options_fifo::<C>(max_cf_size))
+    } else {
+        panic!(
+            "{} cf_size must be greater than write buffer size {} when using ShredStorageType::RocksFifo.",
+            C::NAME, FIFO_WRITE_BUFFER_SIZE
+        );
+    }
 }

 /// Returns the RocksDB Column Family Options which use FIFO Compaction.
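This last hunk is the "sanity check" from the PR title: where the old code silently fell back to RocksLevel when a FIFO size was too small (the removed warn! branch), new_cf_descriptor_fifo now panics. A hypothetical caller-side pre-flight check, not in the PR, that surfaces the misconfiguration as a recoverable error instead:

// Hypothetical (not in the PR): validate FIFO sizes up front so a bad
// config is reported as an Err rather than a panic during CF construction.
fn validate_fifo_options(fifo: &BlockstoreRocksFifoOptions) -> Result<(), String> {
    for (name, size) in [
        ("shred_data_cf_size", fifo.shred_data_cf_size),
        ("shred_code_cf_size", fifo.shred_code_cf_size),
    ] {
        if size <= FIFO_WRITE_BUFFER_SIZE {
            return Err(format!(
                "{} ({}) must exceed FIFO_WRITE_BUFFER_SIZE ({})",
                name, size, FIFO_WRITE_BUFFER_SIZE
            ));
        }
    }
    Ok(())
}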