Skip to content

Commit

Permalink
adds support for different variants of ShredCode and ShredData
Browse files Browse the repository at this point in the history
This commit introduces two new enum types:
    pub enum ShredCode {
        Legacy(legacy::ShredCode),
    }
    pub enum ShredData {
        Legacy(legacy::ShredData),
    }

Subsequent commits will extend these types by adding Merkle variants:
    pub enum ShredCode {
        Legacy(legacy::ShredCode),
        Merkle(merkle::ShredCode),
    }
    pub enum ShredData {
        Legacy(legacy::ShredData),
        Merkle(merkle::ShredData),
    }
  • Loading branch information
behzadnouri committed May 30, 2022
1 parent ba482b7 commit e1c4f09
Show file tree
Hide file tree
Showing 15 changed files with 539 additions and 310 deletions.
15 changes: 7 additions & 8 deletions core/benches/shredder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use {
solana_entry::entry::{create_ticks, Entry},
solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, ShredFlags,
Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
Shredder, LEGACY_SHRED_DATA_CAPACITY, MAX_DATA_SHREDS_PER_FEC_BLOCK,
},
solana_perf::test_tx,
solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
Expand Down Expand Up @@ -38,12 +38,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
}

fn make_shreds(num_shreds: usize) -> Vec<Shred> {
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(
&make_test_entry(txs_per_entry),
2 * num_shreds as u64,
Some(shred_size),
Some(LEGACY_SHRED_DATA_CAPACITY),
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
Expand Down Expand Up @@ -73,10 +72,10 @@ fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Keypair::new();
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = LEGACY_SHRED_DATA_CAPACITY;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
// ~1Mb
let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
let num_ticks = max_ticks_per_n_shreds(1, Some(LEGACY_SHRED_DATA_CAPACITY)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
Expand All @@ -87,7 +86,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
let kp = Keypair::new();
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = LEGACY_SHRED_DATA_CAPACITY;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(
Expand All @@ -106,7 +105,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Keypair::new();
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = LEGACY_SHRED_DATA_CAPACITY;
// ~10Mb
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
Expand All @@ -121,7 +120,7 @@ fn bench_deshredder(bencher: &mut Bencher) {

#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
let data = vec![0; LEGACY_SHRED_DATA_CAPACITY];

let shred = Shred::new_from_data(2, 1, 1, &data, ShredFlags::LAST_SHRED_IN_SLOT, 0, 0, 1);

Expand Down
4 changes: 2 additions & 2 deletions core/src/replay_stage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3180,7 +3180,7 @@ pub(crate) mod tests {
create_new_tmp_ledger,
genesis_utils::{create_genesis_config, create_genesis_config_with_leader},
get_tmp_ledger_path,
shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
shred::{Shred, ShredFlags, LEGACY_SHRED_DATA_CAPACITY},
},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
Expand Down Expand Up @@ -3779,7 +3779,7 @@ pub(crate) mod tests {
fn test_dead_fork_entry_deserialize_failure() {
// Insert entry that causes deserialization failure
let res = check_dead_fork(|_, bank| {
let gibberish = [0xa5u8; SIZE_OF_DATA_SHRED_PAYLOAD];
let gibberish = [0xa5u8; LEGACY_SHRED_DATA_CAPACITY];
let parent_offset = bank.slot() - bank.parent_slot();
let shred = Shred::new_from_data(
bank.slot(),
Expand Down
2 changes: 1 addition & 1 deletion core/src/sigverify_shreds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ impl ShredSigVerifier {
batches
.iter()
.flat_map(PacketBatch::iter)
.map(shred::layout::get_shred)
.filter_map(shred::layout::get_shred)
.filter_map(shred::layout::get_slot)
.collect()
}
Expand Down
6 changes: 3 additions & 3 deletions ledger/benches/sigverify_shreds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
extern crate test;
use {
solana_ledger::{
shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
shred::{Shred, ShredFlags, LEGACY_SHRED_DATA_CAPACITY},
sigverify_shreds::{sign_shreds_cpu, sign_shreds_gpu, sign_shreds_gpu_pinned_keypair},
},
solana_perf::{
Expand All @@ -29,7 +29,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
slot,
0xc0de,
0xdead,
&[5; SIZE_OF_DATA_SHRED_PAYLOAD],
&[5; LEGACY_SHRED_DATA_CAPACITY],
ShredFlags::LAST_SHRED_IN_SLOT,
1,
2,
Expand Down Expand Up @@ -60,7 +60,7 @@ fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) {
slot,
0xc0de,
0xdead,
&[5; SIZE_OF_DATA_SHRED_PAYLOAD],
&[5; LEGACY_SHRED_DATA_CAPACITY],
ShredFlags::LAST_SHRED_IN_SLOT,
1,
2,
Expand Down
7 changes: 5 additions & 2 deletions ledger/src/blockstore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,10 @@ use {
},
leader_schedule_cache::LeaderScheduleCache,
next_slots_iterator::NextSlotsIterator,
shred::{self, max_ticks_per_n_shreds, ErasureSetId, Shred, ShredId, ShredType, Shredder},
shred::{
self, max_ticks_per_n_shreds, ErasureSetId, Shred, ShredData, ShredId, ShredType,
Shredder,
},
slot_stats::{ShredSource, SlotsStats},
},
bincode::deserialize,
Expand Down Expand Up @@ -1648,7 +1651,7 @@ impl Blockstore {

pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
let shred = self.data_shred_cf.get_bytes((slot, index))?;
let shred = shred.map(Shred::resize_stored_shred).transpose();
let shred = shred.map(ShredData::resize_stored_shred).transpose();
shred.map_err(|err| {
let err = format!("Invalid stored shred: {}", err);
let err = Box::new(bincode::ErrorKind::Custom(err));
Expand Down
Loading

0 comments on commit e1c4f09

Please sign in to comment.