Skip to content

Commit

Permalink
embeds versioning into shred binary
Browse files Browse the repository at this point in the history
In preparation for
solana-labs#25237
which adds a new shred variant with merkle tree branches, this commit
embeds versioning into the shred binary by encoding a new ShredVariant type
at byte 65 of the payload, replacing the previous ShredType at this offset.

* 0b0101_1010 indicates a legacy coding shred, which is also equal to
  ShredType::Code for backward compatibility.
* 0b1010_0101 indicates a legacy data shred, which is also equal to
  ShredType::Data for backward compatibility.

Following commits will add merkle variants to this type.
  • Loading branch information
behzadnouri committed May 30, 2022
1 parent d4e7ebf commit 026d48b
Show file tree
Hide file tree
Showing 15 changed files with 651 additions and 335 deletions.
18 changes: 10 additions & 8 deletions core/benches/shredder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,16 @@ use {
solana_entry::entry::{create_ticks, Entry},
solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, ShredFlags,
Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
},
solana_perf::test_tx,
solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
test::Bencher,
};

// Equivalent to ledger::shred::legacy::ShredData::CAPACITY.
const LEGACY_SHRED_DATA_CAPACITY: usize = 1051;

// Copied these values here to avoid exposing shreds
// internals only for the sake of benchmarks.

Expand All @@ -38,12 +41,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
}

fn make_shreds(num_shreds: usize) -> Vec<Shred> {
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(
&make_test_entry(txs_per_entry),
2 * num_shreds as u64,
Some(shred_size),
Some(LEGACY_SHRED_DATA_CAPACITY),
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
Expand Down Expand Up @@ -73,10 +75,10 @@ fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Keypair::new();
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = LEGACY_SHRED_DATA_CAPACITY;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
// ~1Mb
let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
let num_ticks = max_ticks_per_n_shreds(1, Some(LEGACY_SHRED_DATA_CAPACITY)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
Expand All @@ -87,7 +89,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
let kp = Keypair::new();
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = LEGACY_SHRED_DATA_CAPACITY;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(
Expand All @@ -106,7 +108,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Keypair::new();
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = LEGACY_SHRED_DATA_CAPACITY;
// ~10Mb
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
Expand All @@ -121,7 +123,7 @@ fn bench_deshredder(bencher: &mut Bencher) {

#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
let data = vec![0; LEGACY_SHRED_DATA_CAPACITY];

let shred = Shred::new_from_data(2, 1, 1, &data, ShredFlags::LAST_SHRED_IN_SLOT, 0, 0, 1);

Expand Down
4 changes: 2 additions & 2 deletions core/src/replay_stage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3180,7 +3180,7 @@ pub(crate) mod tests {
create_new_tmp_ledger,
genesis_utils::{create_genesis_config, create_genesis_config_with_leader},
get_tmp_ledger_path,
shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
shred::{Shred, ShredFlags},
},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
Expand Down Expand Up @@ -3779,7 +3779,7 @@ pub(crate) mod tests {
fn test_dead_fork_entry_deserialize_failure() {
// Insert entry that causes deserialization failure
let res = check_dead_fork(|_, bank| {
let gibberish = [0xa5u8; SIZE_OF_DATA_SHRED_PAYLOAD];
let gibberish = [0xa5u8; /*legacy data-shred capacity:*/1051];
let parent_offset = bank.slot() - bank.parent_slot();
let shred = Shred::new_from_data(
bank.slot(),
Expand Down
2 changes: 1 addition & 1 deletion core/src/sigverify_shreds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ impl ShredSigVerifier {
batches
.iter()
.flat_map(PacketBatch::iter)
.map(shred::layout::get_shred)
.filter_map(shred::layout::get_shred)
.filter_map(shred::layout::get_slot)
.collect()
}
Expand Down
6 changes: 3 additions & 3 deletions ledger/benches/sigverify_shreds.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
extern crate test;
use {
solana_ledger::{
shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
shred::{Shred, ShredFlags},
sigverify_shreds::{sign_shreds_cpu, sign_shreds_gpu, sign_shreds_gpu_pinned_keypair},
},
solana_perf::{
Expand All @@ -29,7 +29,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
slot,
0xc0de,
0xdead,
&[5; SIZE_OF_DATA_SHRED_PAYLOAD],
&[5; /*legacy data-shred capacity:*/1051],
ShredFlags::LAST_SHRED_IN_SLOT,
1,
2,
Expand Down Expand Up @@ -60,7 +60,7 @@ fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) {
slot,
0xc0de,
0xdead,
&[5; SIZE_OF_DATA_SHRED_PAYLOAD],
&[5; /*legacy data-shred capacity:*/1051],
ShredFlags::LAST_SHRED_IN_SLOT,
1,
2,
Expand Down
7 changes: 5 additions & 2 deletions ledger/src/blockstore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,10 @@ use {
},
leader_schedule_cache::LeaderScheduleCache,
next_slots_iterator::NextSlotsIterator,
shred::{self, max_ticks_per_n_shreds, ErasureSetId, Shred, ShredId, ShredType, Shredder},
shred::{
self, max_ticks_per_n_shreds, ErasureSetId, Shred, ShredData, ShredId, ShredType,
Shredder,
},
slot_stats::{ShredSource, SlotsStats},
},
bincode::deserialize,
Expand Down Expand Up @@ -1648,7 +1651,7 @@ impl Blockstore {

pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
let shred = self.data_shred_cf.get_bytes((slot, index))?;
let shred = shred.map(Shred::resize_stored_shred).transpose();
let shred = shred.map(ShredData::resize_stored_shred).transpose();
shred.map_err(|err| {
let err = format!("Invalid stored shred: {}", err);
let err = Box::new(bincode::ErrorKind::Custom(err));
Expand Down
Loading

0 comments on commit 026d48b

Please sign in to comment.