Skip to content

Commit

Permalink
adjusts max coding shreds per slot (#27083)
Browse files Browse the repository at this point in the history
As a consequence of removing buffering when generating coding shreds (#25807),
more coding shreds are generated than data shreds, and so
MAX_CODE_SHREDS_PER_SLOT needs to be adjusted accordingly.

The respective value is tied to ERASURE_BATCH_SIZE.

(cherry picked from commit b3b57a0)
  • Loading branch information
behzadnouri authored and mergify[bot] committed Aug 16, 2022
1 parent a6765be commit afc2255
Show file tree
Hide file tree
Showing 4 changed files with 29 additions and 8 deletions.
2 changes: 2 additions & 0 deletions ledger/src/shred.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@
//! So, given a) - c), we must restrict data shred's payload length such that the entire coding
//! payload can fit into one coding shred / packet.
#[cfg(test)]
pub(crate) use shred_code::MAX_CODE_SHREDS_PER_SLOT;
pub(crate) use shred_data::ShredData;
pub use {
self::stats::{ProcessShredsStats, ShredFetchStats},
Expand Down
9 changes: 5 additions & 4 deletions ledger/src/shred/legacy.rs
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,7 @@ impl ShredCode {
mod test {
use {
super::*,
crate::shred::{ShredType, MAX_DATA_SHREDS_PER_SLOT},
crate::shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredType, MAX_DATA_SHREDS_PER_SLOT},
matches::assert_matches,
};

Expand Down Expand Up @@ -433,10 +433,10 @@ mod test {
}
{
let mut shred = shred.clone();
shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32;
shred.common_header.index = MAX_CODE_SHREDS_PER_SLOT as u32;
assert_matches!(
shred.sanitize(),
Err(Error::InvalidShredIndex(ShredType::Code, 32768))
Err(Error::InvalidShredIndex(ShredType::Code, 557_056))
);
}
// pos >= num_coding is invalid.
Expand All @@ -454,7 +454,7 @@ mod test {
{
let mut shred = shred.clone();
shred.common_header.fec_set_index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2;
shred.coding_header.num_data_shreds = 2;
shred.coding_header.num_data_shreds = 3;
shred.coding_header.num_coding_shreds = 4;
shred.coding_header.position = 1;
shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2;
Expand All @@ -463,6 +463,7 @@ mod test {
Err(Error::InvalidErasureShardIndex { .. })
);

shred.coding_header.num_data_shreds = 2;
shred.coding_header.num_coding_shreds = 2000;
assert_matches!(shred.sanitize(), Err(Error::InvalidNumCodingShreds(2000)));

Expand Down
4 changes: 3 additions & 1 deletion ledger/src/shred/shred_code.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,9 @@ use {
static_assertions::const_assert_eq,
};

pub(super) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT;
// Maximum number of coding shreds that may appear in a single slot.
//
// Since buffering was removed when generating coding shreds (#25807), a slot
// can carry more coding shreds than data shreds. The factor of 17 bounds the
// worst-case coding-to-data ratio implied by ERASURE_BATCH_SIZE (see the
// test_max_shreds_per_slot invariant test in shredder.rs); if
// ERASURE_BATCH_SIZE changes, this multiplier must be revisited.
// See ERASURE_BATCH_SIZE.
const_assert_eq!(MAX_CODE_SHREDS_PER_SLOT, 32_768 * 17);
pub(crate) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT * 17;

const_assert_eq!(ShredCode::SIZE_OF_PAYLOAD, 1228);

Expand Down
22 changes: 19 additions & 3 deletions ledger/src/shredder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -378,9 +378,12 @@ fn get_fec_set_offsets(
mod tests {
use {
super::*,
crate::shred::{
self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred,
ShredType,
crate::{
blockstore::MAX_DATA_SHREDS_PER_SLOT,
shred::{
self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred,
ShredType, MAX_CODE_SHREDS_PER_SLOT,
},
},
bincode::serialized_size,
matches::assert_matches,
Expand Down Expand Up @@ -1105,4 +1108,17 @@ mod tests {
));
}
}

#[test]
fn test_max_shreds_per_slot() {
for num_data_shreds in 0..128 {
let num_coding_shreds = get_erasure_batch_size(num_data_shreds)
.checked_sub(num_data_shreds)
.unwrap();
assert!(
MAX_DATA_SHREDS_PER_SLOT * num_coding_shreds
<= MAX_CODE_SHREDS_PER_SLOT * num_data_shreds
);
}
}
}

0 comments on commit afc2255

Please sign in to comment.