From fcc137b5f06e77c1aa631ca7a934221e40c39975 Mon Sep 17 00:00:00 2001
From: Steven Czabaniuk
Date: Thu, 20 May 2021 11:01:56 -0500
Subject: [PATCH] Add bench that more clearly shows difference for resize

---
 core/benches/shredder.rs | 33 ++++++++++++++++++++++++-----
 ledger/src/shred.rs      | 45 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+), 5 deletions(-)

diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs
index 5a99f15dccc6ef..b8d751321c2f61 100644
--- a/core/benches/shredder.rs
+++ b/core/benches/shredder.rs
@@ -114,18 +114,41 @@ fn bench_deshredder(bencher: &mut Bencher) {
     })
 }
 
-#[bench]
-fn bench_deserialize_hdr(bencher: &mut Bencher) {
-    let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
+fn make_trimmed_serialized_shred() -> Vec<u8> {
+    let data = vec![0; 512];
+    let mut shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);
+    // Trim off the zero padding that Shred::new_from_data would have added;
+    // we want to emulate having a "non-full" shred
+    shred.data_header.size = 512;
+    shred.payload.truncate(512);
+    let trimmed_payload = shred.payload.clone();
+    // Ensure the bytes were indeed dropped
+    assert!(trimmed_payload.len() == 512);
+    assert!(trimmed_payload.capacity() == 512);
+
+    trimmed_payload
+}
 
-    let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);
+#[bench]
+fn bench_deserialize_shred_no_padding(bencher: &mut Bencher) {
+    let trimmed_payload = make_trimmed_serialized_shred();
 
     bencher.iter(|| {
-        let payload = shred.payload.clone();
+        let payload = trimmed_payload.clone();
         let _ = Shred::new_from_serialized_shred(payload).unwrap();
     })
 }
 
+#[bench]
+fn bench_deserialize_shred_padding(bencher: &mut Bencher) {
+    let trimmed_payload = make_trimmed_serialized_shred();
+
+    bencher.iter(|| {
+        let payload = trimmed_payload.clone();
+        let _ = Shred::new_from_serialized_shred_pad_out(payload).unwrap();
+    })
+}
+
 #[bench]
 fn bench_shredder_coding(bencher: &mut Bencher) {
     let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 5940d06480527e..071e48048e3dd4 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -359,6 +359,51 @@ impl Shred {
         Ok(shred)
     }
 
+    pub fn new_from_serialized_shred_pad_out(mut payload: Vec<u8>) -> Result<Self> {
+        // A shred can be deserialized in several cases; payload length will vary for these:
+        // payload.len() <= SHRED_PAYLOAD_SIZE when payload is retrieved from the blockstore
+        // payload.len() == SHRED_PAYLOAD_SIZE when a new shred is created
+        // payload.len() == PACKET_DATA_SIZE when payload comes from a packet (window service)
+
+        // Pad out the shred
+        payload.resize(SHRED_PAYLOAD_SIZE, 0);
+
+        let mut start = 0;
+        let common_header: ShredCommonHeader =
+            Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;
+        let slot = common_header.slot;
+
+        let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
+            let coding_header: CodingShredHeader =
+                Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
+            Self {
+                common_header,
+                data_header: DataShredHeader::default(),
+                coding_header,
+                payload,
+            }
+        } else if common_header.shred_type == ShredType(DATA_SHRED) {
+            let data_header: DataShredHeader =
+                Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?;
+            if u64::from(data_header.parent_offset) > common_header.slot {
+                return Err(ShredError::InvalidParentOffset {
+                    slot,
+                    parent_offset: data_header.parent_offset,
+                });
+            }
+            Self {
+                common_header,
+                data_header,
+                coding_header: CodingShredHeader::default(),
+                payload,
+            }
+        } else {
+            return Err(ShredError::InvalidShredType);
+        };
+
+        Ok(shred)
+    }
+
     pub fn new_empty_coding(
         slot: Slot,
         index: u32,
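
Note (not part of the patch): below is a minimal, self-contained sketch of the resize behavior that bench_deserialize_shred_padding isolates relative to bench_deserialize_shred_no_padding. The constant value and the pad_out helper are illustrative stand-ins, not the ledger crate's actual definitions; the point is only that Vec::resize has to reallocate when a trimmed payload's capacity is smaller than the full shred size, and that reallocation is the cost the new bench measures on top of plain deserialization.

    // Illustrative only; this value is a stand-in, not the ledger crate's constant.
    const SHRED_PAYLOAD_SIZE: usize = 1228;

    // Hypothetical helper mirroring the pad-out step in
    // new_from_serialized_shred_pad_out: grow the payload to full size,
    // filling the new tail bytes with zeros.
    fn pad_out(mut payload: Vec<u8>) -> Vec<u8> {
        payload.resize(SHRED_PAYLOAD_SIZE, 0);
        payload
    }

    fn main() {
        // A trimmed, "non-full" payload such as the bench constructs.
        let trimmed = vec![0u8; 512];
        assert_eq!(trimmed.capacity(), 512);

        // resize() reallocates here because capacity (512) < SHRED_PAYLOAD_SIZE.
        let padded = pad_out(trimmed);
        assert_eq!(padded.len(), SHRED_PAYLOAD_SIZE);
    }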