v1.1: Add nonce to shreds repairs, add shred data size to header (#10076)

* Add nonce to shreds/repairs

* Add data shred size to header

* Align nonce unlock with epoch 47
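
In short: once the nonce feature unlocks (slots at or past UNLOCK_NONCE_SLOT, chosen to land on the epoch 47 boundary), a repair response carries the raw data shred followed by a trailing 4-byte nonce that the requester can match against its outstanding request; responses for earlier slots must not carry one (see repair_response_packet below). A minimal standalone sketch of the assumed wire layout — not the crate's API, and it assumes Nonce is a u32 under bincode's default little-endian encoding:

fn append_nonce(shred_payload: &[u8], nonce: u32) -> Vec<u8> {
    // Assumed repair-response layout: [shred payload][nonce: u32, 4 bytes LE]
    let mut buf = shred_payload.to_vec();
    buf.extend_from_slice(&nonce.to_le_bytes()); // same bytes bincode's default config writes
    buf
}

fn main() {
    let packet = append_nonce(&[1, 2, 3, 4], 42);
    assert_eq!(&packet[packet.len() - 4..], &42u32.to_le_bytes());
}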

Co-authored-by: Carl <[email protected]>
carllin authored May 17, 2020
1 parent 7bc915c commit 997f317
Showing 15 changed files with 838 additions and 270 deletions.
5 changes: 2 additions & 3 deletions archiver-lib/src/archiver.rs
@@ -13,8 +13,7 @@ use solana_core::{
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
gossip_service::GossipService,
- repair_service,
- repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
+ repair_service::{self, RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@@ -830,7 +829,7 @@ impl Archiver {
.into_iter()
.filter_map(|repair_request| {
serve_repair
- .map_repair_request(&repair_request, &mut repair_stats)
+ .map_repair_request(&repair_request, &mut repair_stats, Some(0))
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})
4 changes: 2 additions & 2 deletions core/benches/cluster_info.rs
@@ -6,7 +6,7 @@ use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
- use solana_ledger::shred::Shred;
+ use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::sync::RwLock;
@@ -26,7 +26,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();

const NUM_SHREDS: usize = 32;
- let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
+ let shreds = vec![Shred::new_empty_data_shred(NONCE_SHRED_PAYLOAD_SIZE); NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
21 changes: 13 additions & 8 deletions core/benches/shredder.rs
@@ -5,7 +5,7 @@ extern crate test;
use solana_ledger::entry::{create_ticks, Entry};
use solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
-     SIZE_OF_DATA_SHRED_PAYLOAD,
+     SIZE_OF_NONCE_DATA_SHRED_PAYLOAD,
};
use solana_perf::test_tx;
use solana_sdk::hash::Hash;
@@ -29,10 +29,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Entry>
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
- let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+ let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
// ~1Mb
- let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
+ let num_ticks =
+     max_ticks_per_n_shreds(1, Some(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
@@ -43,10 +44,14 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
- let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+ let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
let txs_per_entry = 128;
- let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64);
+ let num_entries = max_entries_per_n_shred(
+     &make_test_entry(txs_per_entry),
+     num_shreds as u64,
+     Some(shred_size),
+ );
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
// 1Mb
bencher.iter(|| {
@@ -58,10 +63,10 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
- let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+ let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
// ~10Mb
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
- let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
+ let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
@@ -73,7 +78,7 @@

#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
- let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
+ let data = vec![0; SIZE_OF_NONCE_DATA_SHRED_PAYLOAD];

let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);

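The shredder bench changes above thread an optional per-shred payload size through max_ticks_per_n_shreds and max_entries_per_n_shred; call sites that pass None (as in broadcast_stage.rs below) presumably keep the legacy pre-nonce payload size. A hedged, standalone sketch of the arithmetic the new parameter expresses — the constants here are illustrative assumptions, not solana_ledger's values:

const SIZE_OF_NONCE: usize = 4; // trailing u32 nonce
const LEGACY_PAYLOAD: usize = 1228; // assumed legacy data-shred payload size
const NONCE_PAYLOAD: usize = LEGACY_PAYLOAD - SIZE_OF_NONCE; // 4 bytes ceded to the nonce

// Items (ticks or entries) that fit in `n` shreds scale with the usable payload.
fn max_items_per_n_shreds(n: u64, item_size: usize, payload_size: Option<usize>) -> u64 {
    let payload = payload_size.unwrap_or(LEGACY_PAYLOAD); // None => legacy size
    (n * payload as u64) / item_size as u64
}

fn main() {
    // Slightly fewer serialized items fit once each shred reserves nonce space.
    assert!(max_items_per_n_shreds(2, 48, Some(NONCE_PAYLOAD)) <= max_items_per_n_shreds(2, 48, None));
}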
2 changes: 1 addition & 1 deletion core/src/broadcast_stage.rs
@@ -463,7 +463,7 @@ pub mod test {
Vec<TransmitShreds>,
Vec<TransmitShreds>,
) {
- let num_entries = max_ticks_per_n_shreds(num);
+ let num_entries = max_ticks_per_n_shreds(num, None);
let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
let keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)
8 changes: 6 additions & 2 deletions core/src/broadcast_stage/standard_broadcast_run.rs
@@ -430,7 +430,7 @@ mod test {
));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
- genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
+ genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config));
(
blockstore,
@@ -539,7 +539,11 @@ mod test {
// Interrupting the slot should cause the unfinished_slot and stats to reset
let num_shreds = 1;
assert!(num_shreds < num_shreds_per_slot);
- let ticks1 = create_ticks(max_ticks_per_n_shreds(num_shreds), 0, genesis_config.hash());
+ let ticks1 = create_ticks(
+     max_ticks_per_n_shreds(num_shreds, None),
+     0,
+     genesis_config.hash(),
+ );
let receive_results = ReceiveResults {
entries: ticks1.clone(),
time_elapsed: Duration::new(2, 0),
1 change: 1 addition & 0 deletions core/src/lib.rs
@@ -34,6 +34,7 @@ pub mod non_circulating_supply;
pub mod poh_recorder;
pub mod poh_service;
pub mod progress_map;
+ pub mod repair_response;
pub mod repair_service;
pub mod replay_stage;
mod result;
129 changes: 129 additions & 0 deletions core/src/repair_response.rs
@@ -0,0 +1,129 @@
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred, SIZE_OF_NONCE},
};
use solana_perf::packet::limited_deserialize;
use solana_sdk::{clock::Slot, packet::Packet};
use std::{io, net::SocketAddr};

pub fn repair_response_packet(
blockstore: &Blockstore,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Option<Packet> {
if Shred::is_nonce_unlocked(slot) && nonce.is_none()
|| !Shred::is_nonce_unlocked(slot) && nonce.is_some()
{
return None;
}
let shred = blockstore
.get_data_shred(slot, shred_index)
.expect("Blockstore could not get data shred");
shred.map(|shred| repair_response_packet_from_shred(slot, shred, dest, nonce))
}

pub fn repair_response_packet_from_shred(
slot: Slot,
shred: Vec<u8>,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Packet {
let size_of_nonce = {
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
SIZE_OF_NONCE
} else {
assert!(nonce.is_none());
0
}
};
let mut packet = Packet::default();
packet.meta.size = shred.len() + size_of_nonce;
packet.meta.set_addr(dest);
packet.data[..shred.len()].copy_from_slice(&shred);
let mut wr = io::Cursor::new(&mut packet.data[shred.len()..]);
if let Some(nonce) = nonce {
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
}
packet
}

pub fn nonce(buf: &[u8]) -> Option<Nonce> {
if buf.len() < SIZE_OF_NONCE {
None
} else {
limited_deserialize(&buf[buf.len() - SIZE_OF_NONCE..]).ok()
}
}

#[cfg(test)]
mod test {
use super::*;
use solana_ledger::{
shred::{Shred, Shredder, UNLOCK_NONCE_SLOT},
sigverify_shreds::verify_shred_cpu,
};
use solana_sdk::signature::{Keypair, Signer};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr},
};

fn run_test_sigverify_shred_cpu_repair(slot: Slot) {
solana_logger::setup();
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
assert_eq!(shred.slot(), slot);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
trace!("signature {}", shred.common_header.signature);
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(9)
} else {
None
};
let mut packet = repair_response_packet_from_shred(
slot,
shred.payload,
&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
nonce,
);
packet.meta.repair = true;

let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(1));

let wrong_keypair = Keypair::new();
let leader_slots = [(slot, wrong_keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(0));

let leader_slots = HashMap::new();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, None);
}

#[test]
fn test_sigverify_shred_cpu_repair() {
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT);
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT + 1);
}
}
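
A hedged round-trip sketch of what these helpers implement, standalone rather than calling the crate: append the nonce after the shred bytes as repair_response_packet_from_shred does, then read it back from the buffer tail as nonce() does (assuming Nonce is a u32 under bincode's default little-endian encoding):

use std::convert::TryInto;

const SIZE_OF_NONCE: usize = 4;

// Mirror of nonce(): take the last four bytes as a little-endian u32.
fn read_trailing_nonce(buf: &[u8]) -> Option<u32> {
    if buf.len() < SIZE_OF_NONCE {
        return None;
    }
    let tail: [u8; SIZE_OF_NONCE] = buf[buf.len() - SIZE_OF_NONCE..].try_into().ok()?;
    Some(u32::from_le_bytes(tail))
}

fn main() {
    let mut packet = vec![0u8; 100]; // stand-in for a data shred payload
    packet.extend_from_slice(&9u32.to_le_bytes()); // trailing nonce
    assert_eq!(read_trailing_nonce(&packet), Some(9));
}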
47 changes: 19 additions & 28 deletions core/src/repair_service.rs
@@ -14,7 +14,6 @@ use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
collections::HashMap,
iter::Iterator,
-     net::SocketAddr,
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
@@ -92,7 +91,7 @@ impl RepairService {
&blockstore,
&exit,
&repair_socket,
- &cluster_info,
+ cluster_info,
repair_strategy,
&cluster_slots,
)
@@ -106,14 +105,14 @@
blockstore: &Blockstore,
exit: &AtomicBool,
repair_socket: &UdpSocket,
- cluster_info: &Arc<ClusterInfo>,
+ cluster_info: Arc<ClusterInfo>,
repair_strategy: RepairStrategy,
cluster_slots: &Arc<ClusterSlots>,
) {
let serve_repair = ServeRepair::new(cluster_info.clone());
let id = cluster_info.id();
if let RepairStrategy::RepairAll { .. } = repair_strategy {
- Self::initialize_lowest_slot(id, blockstore, cluster_info);
+ Self::initialize_lowest_slot(id, blockstore, &cluster_info);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
@@ -122,7 +121,7 @@
..
} = repair_strategy
{
- Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver);
+ Self::initialize_epoch_slots(blockstore, &cluster_info, completed_slots_receiver);
}
loop {
if exit.load(Ordering::Relaxed) {
@@ -149,35 +148,27 @@
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
- cluster_slots.update(new_root, cluster_info, bank_forks);
+ cluster_slots.update(new_root, &cluster_info, bank_forks);
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
}
};

if let Ok(repairs) = repairs {
let mut cache = HashMap::new();
- let reqs: Vec<((SocketAddr, Vec<u8>), RepairType)> = repairs
-     .into_iter()
-     .filter_map(|repair_request| {
-         serve_repair
-             .repair_request(
-                 &cluster_slots,
-                 &repair_request,
-                 &mut cache,
-                 &mut repair_stats,
-             )
-             .map(|result| (result, repair_request))
-             .ok()
-     })
-     .collect();
-
- for ((to, req), _) in reqs {
-     repair_socket.send_to(&req, to).unwrap_or_else(|e| {
-         info!("{} repair req send_to({}) error {:?}", id, to, e);
-         0
-     });
- }
+ repairs.into_iter().for_each(|repair_request| {
+     if let Ok((to, req)) = serve_repair.repair_request(
+         &cluster_slots,
+         repair_request,
+         &mut cache,
+         &mut repair_stats,
+     ) {
+         repair_socket.send_to(&req, to).unwrap_or_else(|e| {
+             info!("{} repair req send_to({}) error {:?}", id, to, e);
+             0
+         });
+     }
+ });
}
if last_stats.elapsed().as_secs() > 1 {
let repair_total = repair_stats.shred.count
@@ -504,7 +495,7 @@ mod test {
let blockstore = Blockstore::open(&blockstore_path).unwrap();

let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
- let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
+ let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;

let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {
1 change: 1 addition & 0 deletions core/src/replay_stage.rs
@@ -2363,6 +2363,7 @@ pub(crate) mod tests {
ShredCommonHeader::default(),
data_header,
CodingShredHeader::default(),
+     PACKET_DATA_SIZE,
);
bincode::serialize_into(
&mut shred.payload[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER..],
Expand Down
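The replay_stage test change above reflects the "shred data size to header" bullet: shreds are now built with an explicit payload size (PACKET_DATA_SIZE here) rather than a single hard-coded buffer length. A standalone sketch under stated assumptions — Shred here is a hypothetical stand-in, and 1232 is assumed for solana_sdk's PACKET_DATA_SIZE:

const PACKET_DATA_SIZE: usize = 1232; // assumption: solana_sdk's value at the time

// Hypothetical stand-in: the payload size becomes a constructor argument
// recorded with the shred rather than a global constant.
struct Shred {
    payload: Vec<u8>,
}

impl Shred {
    fn new_empty(payload_size: usize) -> Self {
        Shred { payload: vec![0u8; payload_size] }
    }
}

fn main() {
    let shred = Shred::new_empty(PACKET_DATA_SIZE);
    assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
}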
(The diff for the remaining 6 changed files did not load.)
