
Commit b1ac800
cleanup
carllin committed Dec 19, 2018
1 parent c0f51c4 commit b1ac800
Showing 3 changed files with 1 addition and 37 deletions.
2 changes: 1 addition & 1 deletion fullnode/src/main.rs
@@ -119,7 +119,7 @@ fn main() {
     let mut leader_scheduler = LeaderScheduler::default();

     // Remove this line to enable leader rotation
-    leader_scheduler.use_only_bootstrap_leader = true;
+    leader_scheduler.use_only_bootstrap_leader = use_only_bootstrap_leader;

     let rpc_port = if let Some(port) = matches.value_of("rpc") {
         let port_number = port.to_string().parse().expect("integer");
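The change replaces the hard-coded true with a use_only_bootstrap_leader variable, so leader rotation can be toggled by configuration instead of by editing the source. The diff does not show where the variable is defined; a minimal sketch of how it might be derived from the clap matches already in scope, assuming a purely hypothetical enable-leader-rotation flag:

    // Hypothetical wiring: the flag name and its polarity are assumptions,
    // not shown anywhere in this diff.
    let use_only_bootstrap_leader = !matches.is_present("enable-leader-rotation");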
8 changes: 0 additions & 8 deletions src/broadcast_service.rs
@@ -125,7 +125,6 @@ fn broadcast(
     }
     for b in &blobs {
         {
-            println!("blob size: {}", b.read().unwrap().data_size()?);
             let ix = b.read().unwrap().index().expect("blob index");
             let pos = (ix % window_size) as usize;
             trace!("{} caching {} at {}", id, ix, pos);
@@ -134,16 +133,9 @@
         }
     }

-    let write_start = Instant::now();
     db_ledger
         .write_consecutive_blobs(&blobs)
         .expect("Unrecoverable failure to write to database");
-    let duration = duration_as_ms(&write_start.elapsed()) as usize;
-    println!(
-        "Writing {} blobs in broadcast, elapsed: {}",
-        blobs.len(),
-        duration
-    );
 }

 // Fill in the coding blob data from the window data blobs
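The deleted lines were ad-hoc stdout timing around write_consecutive_blobs. If that measurement is still wanted, a sketch of the same pattern routed through the trace! macro this file already uses, rather than println!; it assumes the file's existing imports (Instant, duration_as_ms, the log macros) remain in place:

    let write_start = Instant::now();
    db_ledger
        .write_consecutive_blobs(&blobs)
        .expect("Unrecoverable failure to write to database");
    // Unlike println!, trace! is filtered by log level, so production runs
    // pay almost nothing when tracing is disabled.
    trace!(
        "wrote {} blobs in {} ms",
        blobs.len(),
        duration_as_ms(&write_start.elapsed())
    );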
28 changes: 0 additions & 28 deletions src/db_ledger.rs
@@ -351,21 +351,12 @@ impl DbLedger {
            return Ok(vec![]);
        }

-        let new_blobs_len = new_blobs.len();
-        let sort_start = Instant::now();
         new_blobs.sort_unstable_by(|b1, b2| {
             b1.borrow()
                 .index()
                 .unwrap()
                 .cmp(&b2.borrow().index().unwrap())
         });
-        let duration = duration_as_ms(&sort_start.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!(
-                "Sort {} blobs in db_ledger, elapsed: {}",
-                new_blobs_len, duration
-            );
-        }

         let meta_key = MetaCf::key(DEFAULT_SLOT_HEIGHT);

@@ -400,7 +391,6 @@

         let mut consumed_queue = vec![];

-        let loop_start = Instant::now();
         if meta.consumed == lowest_index {
             // Find the next consecutive block of blobs.
             // TODO: account for consecutive blocks that
@@ -459,38 +449,20 @@
            }
        }

-        let duration = duration_as_ms(&loop_start.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!("Loop blobs in db_ledger, elapsed: {}", duration);
-        }
-        let put_cf = Instant::now();
         // Commit Step: Atomic write both the metadata and the data
         let mut batch = WriteBatch::default();
         if should_write_meta {
             batch.put_cf(self.meta_cf.handle(&self.db), &meta_key, &serialize(&meta)?)?;
         }

-        let duration = duration_as_ms(&put_cf.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!("Put_Cf blobs in db_ledger, elapsed: {}", duration);
-        }
-
         for blob in new_blobs {
             let blob = blob.borrow();
             let key = DataCf::key(blob.slot()?, blob.index()?);
             let serialized_blob_datas = &blob.data[..BLOB_HEADER_SIZE + blob.size()?];
             batch.put_cf(self.data_cf.handle(&self.db), &key, serialized_blob_datas)?;
         }

-        let db_start = Instant::now();
         self.db.write(batch)?;
-        let duration = duration_as_ms(&db_start.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!(
-                "Writing {} blobs in db_ledger, elapsed: {}",
-                new_blobs_len, duration
-            );
-        }
         Ok(consumed_queue)
     }

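The commit step that survives the cleanup stages the metadata put and every data-blob put in one WriteBatch and applies it with a single db.write call, so either all of the puts land or none do. A standalone sketch of that pattern against recent versions of the rust-rocksdb crate, with hypothetical column-family and key names; DbLedger's own column-family wrappers are not reproduced here:

    use rocksdb::{WriteBatch, DB};

    fn commit_meta_and_blobs(
        db: &DB,
        meta: &[u8],
        blobs: &[(Vec<u8>, Vec<u8>)],
    ) -> Result<(), rocksdb::Error> {
        // "meta" and "data" are assumed column-family names for this sketch.
        let meta_cf = db.cf_handle("meta").expect("meta column family");
        let data_cf = db.cf_handle("data").expect("data column family");
        // Stage all puts in one batch so readers never observe metadata
        // without its blobs, or blobs without updated metadata.
        let mut batch = WriteBatch::default();
        batch.put_cf(meta_cf, b"meta_key", meta);
        for (key, value) in blobs {
            batch.put_cf(data_cf, key, value);
        }
        // One atomic write commits the entire batch.
        db.write(batch)
    }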
