diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index ff14c9d5500d66..c5dcd7236e0707 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -1420,6 +1420,8 @@ impl Blockstore {
         // We don't want only a subset of these changes going through.
         write_batch.put_bytes::<cf::ShredData>(
             (slot, index),
+            // Payload will be padded out to SHRED_PAYLOAD_SIZE,
+            // but we only need to store the bytes within data_header.size.
             &shred.payload[..shred.data_header.size as usize],
         )?;
         data_index.set_present(index, true);
@@ -1452,6 +1454,9 @@ impl Blockstore {
         use crate::shred::SHRED_PAYLOAD_SIZE;
         self.data_shred_cf.get_bytes((slot, index)).map(|data| {
             data.map(|mut d| {
+                // Only data_header.size bytes are stored in the blockstore, so
+                // pad the payload out to SHRED_PAYLOAD_SIZE so that
+                // erasure recovery works properly.
                 d.resize(cmp::max(d.len(), SHRED_PAYLOAD_SIZE), 0);
                 d
             })
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 3f8f41b39c0329..695105d4215aa1 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -319,11 +319,10 @@ impl Shred {
             Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;
 
         let slot = common_header.slot;
-        let expected_data_size = SHRED_PAYLOAD_SIZE;
         // Shreds should be padded out to SHRED_PAYLOAD_SIZE
         // so that erasure generation/recovery works correctly
         // But only the data_header.size is stored in blockstore.
-        payload.resize(expected_data_size, 0);
+        payload.resize(SHRED_PAYLOAD_SIZE, 0);
         let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
             let coding_header: CodingShredHeader =
                 Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
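For context (this note and sketch are not part of the diff): the hunks above rely on a round-trip invariant. A data shred's payload is all zeros past `data_header.size`, so the blockstore can persist only that prefix, and readers can zero-pad back to `SHRED_PAYLOAD_SIZE` before the shred reaches erasure generation/recovery. Below is a minimal, self-contained sketch of that invariant; `store`, `load`, and `stored_size` are hypothetical stand-ins for the blockstore write/read paths and `data_header.size`, and the constant value is illustrative, not authoritative.

```rust
// Minimal sketch (not Solana's API): truncate-on-write / pad-on-read.

// Illustrative stand-in for the real constant in ledger/src/shred.rs.
const SHRED_PAYLOAD_SIZE: usize = 1228;

/// Stand-in for the blockstore write path: persist only the meaningful
/// prefix (the role played by data_header.size in the diff).
fn store(payload: &[u8], stored_size: usize) -> Vec<u8> {
    payload[..stored_size].to_vec()
}

/// Stand-in for the read path: zero-pad back to the fixed shred size so
/// erasure generation/recovery sees full-length payloads again.
fn load(mut stored: Vec<u8>) -> Vec<u8> {
    stored.resize(stored.len().max(SHRED_PAYLOAD_SIZE), 0);
    stored
}

fn main() {
    // A payload with 100 meaningful bytes, zero-padded to full size,
    // as data shreds are before they reach the blockstore.
    let mut payload = vec![7u8; 100];
    payload.resize(SHRED_PAYLOAD_SIZE, 0);

    let on_disk = store(&payload, 100);
    assert_eq!(on_disk.len(), 100); // only the prefix is persisted

    let recovered = load(on_disk);
    assert_eq!(recovered, payload); // zero padding restores the original
}
```

The truncation is lossless only because the dropped suffix is guaranteed to be zero padding; that guarantee is what lets the read path reconstruct a byte-identical payload.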