Skip to content

Commit

Permalink
Add bench that more clearly shows difference for resize
Browse files Browse the repository at this point in the history
  • Loading branch information
Steven Czabaniuk committed May 20, 2021
1 parent 7293524 commit fcc137b
Show file tree
Hide file tree
Showing 2 changed files with 73 additions and 5 deletions.
33 changes: 28 additions & 5 deletions core/benches/shredder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,18 +114,41 @@ fn bench_deshredder(bencher: &mut Bencher) {
})
}

#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
/// Build a serialized data shred with its zero padding trimmed off,
/// emulating a "non-full" shred (e.g. one read back from the blockstore).
///
/// Returns a payload whose length — and, importantly, capacity — is exactly
/// 512 bytes, so a later `resize()` to full size must actually reallocate.
fn make_trimmed_serialized_shred() -> Vec<u8> {
    let data = vec![0; 512];
    let mut shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);
    // Trim off the zero padding that Shred::new_from_data would have added;
    // we want to emulate having a "non-full" shred.
    shred.data_header.size = 512;
    shred.payload.truncate(512);
    // truncate() shortens the Vec but leaves its capacity untouched; clone()
    // yields a buffer sized to its contents, which is what we want here.
    let trimmed_payload = shred.payload.clone();
    // Ensure the excess bytes (and spare capacity) were indeed dropped.
    // assert_eq! reports both values on failure, unlike assert!(a == b).
    assert_eq!(trimmed_payload.len(), 512);
    assert_eq!(trimmed_payload.capacity(), 512);

    trimmed_payload
}

let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);
#[bench]
/// Baseline: deserialize a trimmed ("non-full") shred without padding the
/// payload back out to full size.
fn bench_deserialize_shred_no_padding(bencher: &mut Bencher) {
    let trimmed_payload = make_trimmed_serialized_shred();

    bencher.iter(|| {
        // Clone each iteration: deserialization takes ownership of the
        // buffer, so the fixture must be replenished per run.
        let payload = trimmed_payload.clone();
        let _ = Shred::new_from_serialized_shred(payload).unwrap();
    })
}

#[bench]
/// Same as the no-padding bench, but via the variant that first pads the
/// payload out to full shred size, isolating the cost of the resize.
fn bench_deserialize_shred_padding(bencher: &mut Bencher) {
    let serialized = make_trimmed_serialized_shred();

    bencher.iter(|| {
        // Fresh exact-capacity clone per iteration so the pad-out path
        // always has to grow the buffer.
        let _ = Shred::new_from_serialized_shred_pad_out(serialized.clone()).unwrap();
    })
}

#[bench]
fn bench_shredder_coding(bencher: &mut Bencher) {
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
Expand Down
45 changes: 45 additions & 0 deletions ledger/src/shred.rs
Original file line number Diff line number Diff line change
Expand Up @@ -359,6 +359,51 @@ impl Shred {
Ok(shred)
}

/// Deserialize a shred from `payload`, first zero-padding the buffer out to
/// `SHRED_PAYLOAD_SIZE`.
///
/// Variant of `new_from_serialized_shred` that unconditionally pads the
/// payload before parsing headers.
/// NOTE(review): appears to exist primarily so benchmarks can measure the
/// cost of the `resize` — confirm against `core/benches/shredder.rs`.
///
/// # Errors
/// - `ShredError::InvalidShredType` if the common header names neither a
///   data nor a coding shred.
/// - `ShredError::InvalidParentOffset` if a data shred's parent offset is
///   greater than its slot.
/// - Any error propagated from deserializing a header.
pub fn new_from_serialized_shred_pad_out(mut payload: Vec<u8>) -> Result<Self> {
    // A shred can be deserialized in several cases; payload length will vary for these:
    // payload.len() <= SHRED_PAYLOAD_SIZE when payload is retrieved from the blockstore
    // payload.len() == SHRED_PAYLOAD_SIZE when a new shred is created
    // payload.len() == PACKET_DATA_SIZE when payload comes from a packet (window service)

    // Pad out the shred to the full payload size; resize() will reallocate
    // when the incoming buffer has no spare capacity.
    payload.resize(SHRED_PAYLOAD_SIZE, 0);

    // `start` is a cursor each deserialize_obj call advances past the header
    // it just consumed, so the order of these calls is significant.
    let mut start = 0;
    let common_header: ShredCommonHeader =
        Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;
    let slot = common_header.slot;

    let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
        let coding_header: CodingShredHeader =
            Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
        Self {
            common_header,
            // Coding shreds carry no data header; fill with the default.
            data_header: DataShredHeader::default(),
            coding_header,
            payload,
        }
    } else if common_header.shred_type == ShredType(DATA_SHRED) {
        let data_header: DataShredHeader =
            Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?;
        // A parent offset larger than the slot would place the parent
        // before slot 0 — reject it.
        if u64::from(data_header.parent_offset) > common_header.slot {
            return Err(ShredError::InvalidParentOffset {
                slot,
                parent_offset: data_header.parent_offset,
            });
        }
        Self {
            common_header,
            data_header,
            // Data shreds carry no coding header; fill with the default.
            coding_header: CodingShredHeader::default(),
            payload,
        }
    } else {
        return Err(ShredError::InvalidShredType);
    };

    Ok(shred)
}

pub fn new_empty_coding(
slot: Slot,
index: u32,
Expand Down

0 comments on commit fcc137b

Please sign in to comment.