Condense repeated shred logic and move back into shred module
Steven Czabaniuk committed May 25, 2021
1 parent 59789b4 commit e93e602
Showing 3 changed files with 25 additions and 25 deletions.
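
In short: three call sites in core duplicated the same copy-and-deserialize sequence, and this commit moves that sequence into a single Shred::copy_from_packet helper in ledger/src/shred.rs. The before/after shape of a call site, assembled from the hunks below:

    // Before: each call site copied the payload into a fresh buffer by hand.
    let mut serialized_shred = vec![0; SHRED_PAYLOAD_SIZE];
    serialized_shred.copy_from_slice(&packet.data[..SHRED_PAYLOAD_SIZE]);
    let shred = Shred::new_from_serialized_shred(serialized_shred);

    // After: one helper owns both the copy and the deserialize.
    let shred = Shred::copy_from_packet(&packet);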

core/src/serve_repair.rs (15 changes: 3 additions & 12 deletions)
@@ -664,10 +664,7 @@ mod tests {
             .into_iter()
             .filter_map(|b| {
                 assert_eq!(repair_response::nonce(&b.data[..]).unwrap(), nonce);
-                use solana_ledger::shred::SHRED_PAYLOAD_SIZE;
-                let mut serialized_shred = vec![0; SHRED_PAYLOAD_SIZE];
-                serialized_shred.copy_from_slice(&b.data[..SHRED_PAYLOAD_SIZE]);
-                Shred::new_from_serialized_shred(serialized_shred).ok()
+                Shred::copy_from_packet(&b).ok()
             })
             .collect();
         assert!(!rv.is_empty());
@@ -752,10 +749,7 @@ mod tests {
             .into_iter()
             .filter_map(|b| {
                 assert_eq!(repair_response::nonce(&b.data[..]).unwrap(), nonce);
-                use solana_ledger::shred::SHRED_PAYLOAD_SIZE;
-                let mut serialized_shred = vec![0; SHRED_PAYLOAD_SIZE];
-                serialized_shred.copy_from_slice(&b.data[..SHRED_PAYLOAD_SIZE]);
-                Shred::new_from_serialized_shred(serialized_shred).ok()
+                Shred::copy_from_packet(&b).ok()
             })
             .collect();
         assert_eq!(rv[0].index(), 1);
@@ -1118,10 +1112,7 @@ mod tests {
 
     fn verify_responses<'a>(request: &RepairType, packets: impl Iterator<Item = &'a Packet>) {
         for packet in packets {
-            use solana_ledger::shred::SHRED_PAYLOAD_SIZE;
-            let mut shred_payload = vec![0; SHRED_PAYLOAD_SIZE];
-            shred_payload.copy_from_slice(&packet.data[..SHRED_PAYLOAD_SIZE]);
-            let shred = Shred::new_from_serialized_shred(shred_payload).unwrap();
+            let shred = Shred::copy_from_packet(&packet).unwrap();
             request.verify_response(&shred);
         }
     }

core/src/window_service.rs (14 changes: 5 additions & 9 deletions)
@@ -20,7 +20,7 @@ use rayon::ThreadPool;
 use solana_ledger::{
     blockstore::{self, Blockstore, BlockstoreInsertionMetrics, MAX_DATA_SHREDS_PER_SLOT},
     leader_schedule_cache::LeaderScheduleCache,
-    shred::{Nonce, Shred, SHRED_PAYLOAD_SIZE},
+    shred::{Nonce, Shred},
 };
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
 use solana_perf::packet::Packets;
@@ -247,15 +247,11 @@ where
         );
         None
     } else {
-        // shred fetch stage should be sending packets
-        // with sufficiently large buffers. Needed to ensure
-        // call to `new_from_serialized_shred` is safe.
+        // shred fetch stage should be sending packets with
+        // sufficiently large buffers. Needed to ensure call
+        // to `copy_from_packet` (which deserializes shred) is safe.
         assert_eq!(packet.data.len(), PACKET_DATA_SIZE);
-        let mut serialized_shred = packet.data.to_vec();
-        // Truncate shred down to SHRED_PAYLOAD_SIZE to remove the nonce so
-        // that the buffer is proper size for new_from_serialized_shred()
-        serialized_shred.truncate(SHRED_PAYLOAD_SIZE);
-        if let Ok(shred) = Shred::new_from_serialized_shred(serialized_shred) {
+        if let Ok(shred) = Shred::copy_from_packet(&packet) {
             let repair_info = {
                 if packet.meta.repair {
                     if let Some(nonce) = repair_response::nonce(&packet.data) {
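
Why the old truncation existed, per the removed comment: a repair response carries a trailing nonce after the shred payload, so packet.data is a full PACKET_DATA_SIZE buffer while the shred itself occupies only the first SHRED_PAYLOAD_SIZE bytes. Since copy_from_packet reads just that prefix, the call site no longer strips the nonce by hand. A sketch of what this implies (my reading of the removed comment, not code from this commit):

    // [ shred payload: SHRED_PAYLOAD_SIZE bytes | nonce ]  <- PACKET_DATA_SIZE total
    let shred = Shred::copy_from_packet(&packet);     // reads only the payload prefix
    let nonce = repair_response::nonce(&packet.data); // reads the trailing nonce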

ledger/src/shred.rs (21 changes: 17 additions & 4 deletions)
@@ -247,6 +247,13 @@ impl Shred {
         packet.meta.size = len;
     }
 
+    pub fn copy_from_packet(packet: &Packet) -> Result<Self> {
+        let mut serialized_shred = vec![0; SHRED_PAYLOAD_SIZE];
+        // TODO: assert packet.data.len() >= SHRED_PAYLOAD_SIZE / == PACKET_DATA_SIZE ?
+        serialized_shred.copy_from_slice(&packet.data[..SHRED_PAYLOAD_SIZE]);
+        Shred::new_from_serialized_shred(serialized_shred)
+    }
+
     pub fn new_from_data(
         slot: Slot,
         index: u32,
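
The TODO in copy_from_packet is left open by this commit. Note that the slice expression &packet.data[..SHRED_PAYLOAD_SIZE] already panics on an undersized buffer, so an assert would mainly make that invariant explicit. A hypothetical checked variant along the lines the TODO suggests (a sketch, not code from this commit):

    pub fn copy_from_packet(packet: &Packet) -> Result<Self> {
        // Make the implicit invariant explicit: the shred fetch stage is
        // expected to hand over full-size packet buffers.
        assert!(packet.data.len() >= SHRED_PAYLOAD_SIZE);
        let mut serialized_shred = vec![0; SHRED_PAYLOAD_SIZE];
        serialized_shred.copy_from_slice(&packet.data[..SHRED_PAYLOAD_SIZE]);
        Shred::new_from_serialized_shred(serialized_shred)
    }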
@@ -1924,10 +1931,7 @@ pub mod tests {
         let shred = Shred::new_from_data(10, 0, 1000, Some(&[1, 2, 3]), false, false, 0, 1, 0);
         let mut packet = Packet::default();
         shred.copy_to_packet(&mut packet);
-        // We are responsible for ensuring that we don't pass an oversized buffer so truncate here
-        let mut serialized_packet = packet.data.to_vec();
-        serialized_packet.truncate(SHRED_PAYLOAD_SIZE);
-        let shred_res = Shred::new_from_serialized_shred(serialized_packet);
+        let shred_res = Shred::copy_from_packet(&packet);
         assert_matches!(
             shred_res,
             Err(ShredError::InvalidParentOffset {
@@ -1991,4 +1995,13 @@
         assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
         assert_eq!(1, stats.bad_shred_type);
     }
+
+    #[test]
+    fn test_shred_copy_to_from_packet() {
+        let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
+        let mut packet = Packet::default();
+        shred.copy_to_packet(&mut packet);
+        let copied_shred = Shred::copy_from_packet(&packet).unwrap();
+        assert_eq!(shred, copied_shred);
+    }
 }
