From a802ae3fdc83aabe7e56062f3679487e2177107c Mon Sep 17 00:00:00 2001
From: Carl
Date: Tue, 18 Dec 2018 21:16:45 -0800
Subject: [PATCH] cleanup

---
 fullnode/src/main.rs     |  2 +-
 src/broadcast_service.rs |  8 --------
 src/db_ledger.rs         | 30 ------------------------------
 3 files changed, 1 insertion(+), 39 deletions(-)

diff --git a/fullnode/src/main.rs b/fullnode/src/main.rs
index f0dd20cfd4a5bb..4bb3d3514b9643 100644
--- a/fullnode/src/main.rs
+++ b/fullnode/src/main.rs
@@ -119,7 +119,7 @@ fn main() {
 
     let mut leader_scheduler = LeaderScheduler::default();
     // Remove this line to enable leader rotation
-    leader_scheduler.use_only_bootstrap_leader = true;
+    leader_scheduler.use_only_bootstrap_leader = use_only_bootstrap_leader;
 
     let rpc_port = if let Some(port) = matches.value_of("rpc") {
         let port_number = port.to_string().parse().expect("integer");
diff --git a/src/broadcast_service.rs b/src/broadcast_service.rs
index d01bbf4237a9f5..1fe27e0984cb53 100644
--- a/src/broadcast_service.rs
+++ b/src/broadcast_service.rs
@@ -125,7 +125,6 @@ fn broadcast(
         }
         for b in &blobs {
             {
-                println!("blob size: {}", b.read().unwrap().data_size()?);
                 let ix = b.read().unwrap().index().expect("blob index");
                 let pos = (ix % window_size) as usize;
                 trace!("{} caching {} at {}", id, ix, pos);
@@ -134,16 +133,9 @@
             }
         }
 
-        let write_start = Instant::now();
         db_ledger
             .write_consecutive_blobs(&blobs)
             .expect("Unrecoverable failure to write to database");
-        let duration = duration_as_ms(&write_start.elapsed()) as usize;
-        println!(
-            "Writing {} blobs in broadcast, elapsed: {}",
-            blobs.len(),
-            duration
-        );
     }
 
     // Fill in the coding blob data from the window data blobs
diff --git a/src/db_ledger.rs b/src/db_ledger.rs
index 4de556445e723d..a2f8066b13d5d4 100644
--- a/src/db_ledger.rs
+++ b/src/db_ledger.rs
@@ -11,12 +11,10 @@ use rocksdb::{ColumnFamily, ColumnFamilyDescriptor, DBRawIterator, Options, Writ
 use serde::de::DeserializeOwned;
 use serde::Serialize;
 use solana_sdk::signature::{Keypair, KeypairUtil};
-use solana_sdk::timing::duration_as_ms;
 use std::borrow::Borrow;
 use std::cmp::max;
 use std::io;
 use std::path::Path;
-use std::time::Instant;
 
 pub const DB_LEDGER_DIRECTORY: &str = "rocksdb";
 // A good value for this is the number of cores on the machine
@@ -351,21 +349,12 @@ impl DbLedger {
             return Ok(vec![]);
         }
 
-        let new_blobs_len = new_blobs.len();
-        let sort_start = Instant::now();
         new_blobs.sort_unstable_by(|b1, b2| {
             b1.borrow()
                 .index()
                 .unwrap()
                 .cmp(&b2.borrow().index().unwrap())
         });
-        let duration = duration_as_ms(&sort_start.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!(
-                "Sort {} blobs in db_ledger, elapsed: {}",
-                new_blobs_len, duration
-            );
-        }
 
         let meta_key = MetaCf::key(DEFAULT_SLOT_HEIGHT);
 
@@ -400,7 +389,6 @@
 
         let mut consumed_queue = vec![];
 
-        let loop_start = Instant::now();
         if meta.consumed == lowest_index {
             // Find the next consecutive block of blobs.
             // TODO: account for consecutive blocks that
@@ -459,22 +447,12 @@
             }
         }
 
-        let duration = duration_as_ms(&loop_start.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!("Loop blobs in db_ledger, elapsed: {}", duration);
-        }
-        let put_cf = Instant::now();
         // Commit Step: Atomic write both the metadata and the data
         let mut batch = WriteBatch::default();
         if should_write_meta {
             batch.put_cf(self.meta_cf.handle(&self.db), &meta_key, &serialize(&meta)?)?;
         }
 
-        let duration = duration_as_ms(&put_cf.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!("Put_Cf blobs in db_ledger, elapsed: {}", duration);
-        }
-
         for blob in new_blobs {
             let blob = blob.borrow();
             let key = DataCf::key(blob.slot()?, blob.index()?);
@@ -482,15 +460,7 @@
             batch.put_cf(self.data_cf.handle(&self.db), &key, serialized_blob_datas)?;
         }
 
-        let db_start = Instant::now();
         self.db.write(batch)?;
-        let duration = duration_as_ms(&db_start.elapsed()) as usize;
-        if new_blobs_len > 100 {
-            println!(
-                "Writing {} blobs in db_ledger, elapsed: {}",
-                new_blobs_len, duration
-            );
-        }
 
         Ok(consumed_queue)
     }
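Note: the one functional change in this patch swaps the hardcoded
`leader_scheduler.use_only_bootstrap_leader = true` for a variable defined
outside the context lines shown. A minimal sketch of how such a variable
could be wired up (assuming clap 2, consistent with the
`matches.value_of("rpc")` call visible in the hunk, and a hypothetical
`--no-leader-rotation` flag that is not part of this commit):

    use clap::{App, Arg};

    fn main() {
        let matches = App::new("fullnode")
            .arg(
                Arg::with_name("no-leader-rotation")
                    .long("no-leader-rotation")
                    .help("Disable leader rotation; use only the bootstrap leader"),
            )
            .get_matches();

        // Flag present => keep the bootstrap-leader-only behavior.
        let use_only_bootstrap_leader = matches.is_present("no-leader-rotation");
        println!("use_only_bootstrap_leader: {}", use_only_bootstrap_leader);
    }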
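The commit step retained in db_ledger.rs stages the metadata and every blob
write in one rocksdb `WriteBatch` and commits them with a single
`self.db.write(batch)?`, so the metadata and data land atomically. A
standalone sketch of the same pattern (assuming a recent rust-rocksdb crate
and the default column family, rather than DbLedger's meta/data handles):

    use rocksdb::{WriteBatch, DB};

    fn main() -> Result<(), rocksdb::Error> {
        let db = DB::open_default("/tmp/writebatch-demo")?;

        // Stage both writes in the batch; nothing is visible yet.
        let mut batch = WriteBatch::default();
        batch.put(b"meta", b"consumed=42");
        batch.put(b"data/42", b"blob bytes");

        // Commit in a single atomic write, mirroring the patch's commit step.
        db.write(batch)?;
        Ok(())
    }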