Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Automated mentoring by clippy #73

Merged
merged 7 commits
Mar 22, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 8 additions & 14 deletions src/accountant.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use plan::{Plan, Witness};
use transaction::Transaction;
use signature::{KeyPair, PublicKey, Signature};
use mint::Mint;
use historian::{reserve_signature, Historian};
use historian::Historian;
use recorder::Signal;
use std::sync::mpsc::SendError;
use std::collections::{HashMap, HashSet};
Expand All @@ -30,13 +30,7 @@ pub type Result<T> = result::Result<T, AccountingError>;
/// Commit funds to the 'to' party.
fn complete_transaction(balances: &mut HashMap<PublicKey, i64>, plan: &Plan) {
if let Plan::Pay(ref payment) = *plan {
if balances.contains_key(&payment.to) {
if let Some(x) = balances.get_mut(&payment.to) {
*x += payment.tokens;
}
} else {
balances.insert(payment.to, payment.tokens);
}
*balances.entry(payment.to).or_insert(0) += payment.tokens;
}
}

Expand Down Expand Up @@ -122,7 +116,7 @@ impl Accountant {
tr: &Transaction,
allow_deposits: bool,
) -> Result<()> {
if !reserve_signature(&mut self.historian.signatures, &tr.sig) {
if !self.historian.reserve_signature(&tr.sig) {
return Err(AccountingError::InvalidTransferSignature);
}

Expand All @@ -133,7 +127,7 @@ impl Accountant {
}

let mut plan = tr.plan.clone();
plan.apply_witness(Witness::Timestamp(self.last_time));
plan.apply_witness(&Witness::Timestamp(self.last_time));

if plan.is_complete() {
complete_transaction(&mut self.balances, &plan);
Expand All @@ -146,7 +140,7 @@ impl Accountant {

fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.entry(tx_sig) {
e.get_mut().apply_witness(Witness::Signature(from));
e.get_mut().apply_witness(&Witness::Signature(from));
if e.get().is_complete() {
complete_transaction(&mut self.balances, e.get());
e.remove_entry();
Expand Down Expand Up @@ -174,9 +168,9 @@ impl Accountant {
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
for (key, plan) in &mut self.pending {
plan.apply_witness(Witness::Timestamp(self.last_time));
plan.apply_witness(&Witness::Timestamp(self.last_time));
if plan.is_complete() {
complete_transaction(&mut self.balances, &plan);
complete_transaction(&mut self.balances, plan);
completed.push(key.clone());
}
}
Expand Down Expand Up @@ -222,7 +216,7 @@ impl Accountant {
}

pub fn get_balance(self: &Self, pubkey: &PublicKey) -> Option<i64> {
self.balances.get(pubkey).map(|x| *x)
self.balances.get(pubkey).cloned()
}
}

Expand Down
27 changes: 13 additions & 14 deletions src/accountant_skel.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ use result::Result;
use streamer;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::thread::{spawn, JoinHandle};
use std::default::Default;
Expand All @@ -20,6 +21,7 @@ pub struct AccountantSkel {
pub ledger: Vec<Entry>,
}

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug)]
pub enum Request {
Transaction(Transaction),
Expand Down Expand Up @@ -91,13 +93,13 @@ impl AccountantSkel {
&mut self,
r_reader: &streamer::Receiver,
s_sender: &streamer::Sender,
recycler: streamer::Recycler,
recycler: &streamer::Recycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let msgs = r_reader.recv_timeout(timer)?;
let msgs_ = msgs.clone();
let msgs__ = msgs.clone();
let rsps = streamer::allocate(recycler.clone());
let rsps = streamer::allocate(recycler);
let rsps_ = rsps.clone();
let l = msgs__.read().unwrap().packets.len();
rsps.write()
Expand All @@ -107,11 +109,11 @@ impl AccountantSkel {
{
let mut num = 0;
let mut ursps = rsps.write().unwrap();
for packet in msgs.read().unwrap().packets.iter() {
for packet in &msgs.read().unwrap().packets {
let sz = packet.size;
let req = deserialize(&packet.data[0..sz])?;
if let Some(resp) = self.process_request(req) {
let rsp = ursps.packets.get_mut(num).unwrap();
let rsp = &mut ursps.packets[num];
let v = serialize(&resp)?;
let len = v.len();
rsp.data[0..len].copy_from_slice(&v);
Expand All @@ -131,7 +133,7 @@ impl AccountantSkel {
pub fn serve(
obj: Arc<Mutex<AccountantSkel>>,
addr: &str,
exit: Arc<Mutex<bool>>,
exit: Arc<AtomicBool>,
) -> Result<[Arc<JoinHandle<()>>; 3]> {
let read = UdpSocket::bind(addr)?;
// make sure we are on the same interface
Expand All @@ -147,17 +149,14 @@ impl AccountantSkel {
let t_sender = streamer::sender(write, exit.clone(), recycler.clone(), r_sender);

let t_server = spawn(move || {
match Arc::try_unwrap(obj) {
Ok(me) => loop {
let e = me.lock()
.unwrap()
.process(&r_reader, &s_sender, recycler.clone());
if e.is_err() && *exit.lock().unwrap() {
if let Ok(me) = Arc::try_unwrap(obj) {
loop {
let e = me.lock().unwrap().process(&r_reader, &s_sender, &recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
},
_ => (),
};
}
}
});
Ok([Arc::new(t_receiver), Arc::new(t_sender), Arc::new(t_server)])
}
Expand Down
5 changes: 3 additions & 2 deletions src/accountant_stub.rs
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,7 @@ mod tests {
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};

#[test]
fn test_accountant_stub() {
Expand All @@ -135,7 +136,7 @@ mod tests {
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice, Some(30));
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(Mutex::new(false));
let exit = Arc::new(AtomicBool::new(false));
let acc = Arc::new(Mutex::new(AccountantSkel::new(acc)));
let threads = AccountantSkel::serve(acc, addr, exit.clone()).unwrap();
sleep(Duration::from_millis(30));
Expand All @@ -147,7 +148,7 @@ mod tests {
.unwrap();
acc.wait_on_signature(&sig, &last_id).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap().unwrap(), 500);
*exit.lock().unwrap() = true;
exit.store(true, Ordering::Relaxed);
for t in threads.iter() {
match Arc::try_unwrap((*t).clone()) {
Ok(j) => j.join().expect("join"),
Expand Down
6 changes: 3 additions & 3 deletions src/bin/client-demo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ fn main() {
})
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
Expand All @@ -48,7 +48,7 @@ fn main() {
assert!(tr.verify());
}
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsvps = txs as f64 / ns as f64;
let nspsv = ns as f64 / txs as f64;
println!(
Expand All @@ -68,7 +68,7 @@ fn main() {
acc.wait_on_signature(&sig, &last_id).unwrap();

let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("Done. {} tps!", tps);
let val = acc.get_balance(&mint_pubkey).unwrap().unwrap();
Expand Down
2 changes: 1 addition & 1 deletion src/bin/genesis-demo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use silk::hash::Hash;
use std::io::stdin;

fn transfer(from: &KeyPair, (to, tokens): (PublicKey, i64), last_id: Hash) -> Event {
Event::Transaction(Transaction::new(&from, to, tokens, last_id))
Event::Transaction(Transaction::new(from, to, tokens, last_id))
}

fn main() {
Expand Down
3 changes: 2 additions & 1 deletion src/bin/testnode.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use silk::accountant_skel::AccountantSkel;
use silk::accountant::Accountant;
use std::io::{self, BufRead};
use std::sync::{Arc, Mutex};
use std::sync::atomic::AtomicBool;

fn main() {
let addr = "127.0.0.1:8000";
Expand All @@ -14,7 +15,7 @@ fn main() {
.lines()
.map(|line| serde_json::from_str(&line.unwrap()).unwrap());
let acc = Accountant::new_from_entries(entries, Some(1000));
let exit = Arc::new(Mutex::new(false));
let exit = Arc::new(AtomicBool::new(false));
let skel = Arc::new(Mutex::new(AccountantSkel::new(acc)));
eprintln!("Listening on {}", addr);
let _threads = AccountantSkel::serve(skel, addr, exit.clone()).unwrap();
Expand Down
14 changes: 7 additions & 7 deletions src/entry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ pub struct Entry {
}

impl Entry {
/// Creates a Entry from the number of hashes 'num_hashes' since the previous event
/// and that resulting 'id'.
/// Creates an Entry from the number of hashes `num_hashes` since the previous event
/// and that resulting `id`.
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
Entry {
num_hashes,
Expand All @@ -19,7 +19,7 @@ impl Entry {
}
}

/// Verifies self.id is the result of hashing a 'start_hash' 'self.num_hashes' times.
/// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
/// If the event is not a Tick, then hash that as well.
pub fn verify(&self, start_hash: &Hash) -> bool {
for event in &self.events {
Expand All @@ -31,7 +31,7 @@ impl Entry {
}
}

/// Creates the hash 'num_hashes' after start_hash. If the event contains
/// Creates the hash `num_hashes` after `start_hash`. If the event contains
/// signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
Expand All @@ -56,7 +56,7 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
id
}

/// Creates the next Entry 'num_hashes' after 'start_hash'.
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Entry {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events);
Expand All @@ -67,15 +67,15 @@ pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> E
}
}

/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn create_entry_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Entry {
let entry = create_entry(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}

/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
Entry {
num_hashes,
Expand Down
3 changes: 1 addition & 2 deletions src/event.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,7 @@ impl Event {
pub fn get_signature(&self) -> Option<Signature> {
match *self {
Event::Transaction(ref tr) => Some(tr.sig),
Event::Signature { .. } => None,
Event::Timestamp { .. } => None,
Event::Signature { .. } | Event::Timestamp { .. } => None,
}
}

Expand Down
23 changes: 12 additions & 11 deletions src/historian.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@ impl Historian {
}
}

pub fn reserve_signature(&mut self, sig: &Signature) -> bool {
if self.signatures.contains(sig) {
return false;
}
self.signatures.insert(*sig);
true
}

/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_recorder(
Expand All @@ -55,14 +63,6 @@ impl Historian {
}
}

pub fn reserve_signature(sigs: &mut HashSet<Signature>, sig: &Signature) -> bool {
if sigs.contains(sig) {
return false;
}
sigs.insert(*sig);
true
}

#[cfg(test)]
mod tests {
use super::*;
Expand Down Expand Up @@ -112,10 +112,11 @@ mod tests {

#[test]
fn test_duplicate_event_signature() {
let mut sigs = HashSet::new();
let zero = Hash::default();
let mut hist = Historian::new(&zero, None);
let sig = Signature::default();
assert!(reserve_signature(&mut sigs, &sig));
assert!(!reserve_signature(&mut sigs, &sig));
assert!(hist.reserve_signature(&sig));
assert!(!hist.reserve_signature(&sig));
}

#[test]
Expand Down
14 changes: 7 additions & 7 deletions src/ledger.rs
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
//! The `ledger` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.

/// Each entry contains three pieces of data. The 'num_hashes' field is the number
/// of hashes performed since the previous entry. The 'id' field is the result
/// of hashing 'id' from the previous entry 'num_hashes' times. The 'event'
/// field points to an Event that took place shortly after 'id' was generated.
/// Each entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `event`
/// field points to an Event that took place shortly after `id` was generated.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
Expand All @@ -24,7 +24,7 @@ pub fn verify_slice(entries: &[Entry], start_hash: &Hash) -> bool {
event_pairs.all(|(x0, x1)| x1.verify(&x0.id))
}

/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
/// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
pub fn next_ticks(start_hash: &Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
let mut id = *start_hash;
let mut ticks = vec![];
Expand Down
Loading