Remove transaction processing from RPU and request processing from TVU
garious committed May 15, 2018
1 parent 6d4defd commit f7083e0
Showing 8 changed files with 46 additions and 253 deletions.
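In broad strokes, this commit takes transaction intake out of the request path (the RPU) and request handling out of the transaction path (the TVU): Request::Transaction is deleted, and a transaction now rides the wire wrapped in Event::Transaction. Below is a minimal sketch of that framing change, adapted from the test_layout test that moves into src/ecdsa.rs in the first file of this diff. It assumes the crate-local ecdsa, event, and transaction modules and is illustration only, not a new API.

    // Sketch (crate-internal): transactions are now framed as events, not requests.
    use bincode::serialize;
    use ecdsa;
    use event::Event;
    use transaction::{memfind, test_tx};

    fn transaction_packet_layout() {
        let tr = test_tx();
        let tx = serialize(&tr).unwrap();
        // Before this commit the packet wrapped Request::Transaction(tr);
        // now it wraps Event::Transaction(tr), and sig-verify still finds the
        // transaction bytes at the same fixed offset.
        let packet = serialize(&Event::Transaction(tr)).unwrap();
        assert_eq!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
    }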
15 changes: 12 additions & 3 deletions src/ecdsa.rs
@@ -136,14 +136,23 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
mod tests {
use bincode::serialize;
use ecdsa;
use event::Event;
use packet::{Packet, Packets, SharedPackets};
use request::Request;
use std::sync::RwLock;
use transaction::Transaction;
use transaction::test_tx;
use transaction::{memfind, test_tx};

#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Event::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}

fn make_packet_from_transaction(tr: Transaction) -> Packet {
let tx = serialize(&Request::Transaction(tr)).unwrap();
let tx = serialize(&Event::Transaction(tr)).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx.len();
packet.data[..packet.meta.size].copy_from_slice(&tx);
21 changes: 2 additions & 19 deletions src/request.rs
@@ -5,12 +5,10 @@ use hash::Hash;
use packet;
use packet::SharedPackets;
use signature::PublicKey;
use transaction::Transaction;

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Transaction(Transaction),
GetBalance { key: PublicKey },
GetLastId,
GetTransactionCount,
@@ -19,10 +17,7 @@ pub enum Request {
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
true
}
}

@@ -54,24 +49,12 @@ pub fn to_request_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec

#[cfg(test)]
mod tests {
use bincode::serialize;
use ecdsa;
use packet::{PacketRecycler, NUM_PACKETS};
use request::{to_request_packets, Request};
use transaction::{memfind, test_tx};

#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Request::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}

#[test]
fn test_to_packets() {
let tr = Request::Transaction(test_tx());
let tr = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_request_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
69 changes: 14 additions & 55 deletions src/request_processor.rs
@@ -6,14 +6,12 @@ use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use request::{Request, Response};
use result::Result;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use std::time::Duration;
use std::sync::mpsc::Receiver;
use std::time::Instant;
use streamer;
use timing;
@@ -53,7 +51,6 @@ impl RequestProcessor {
info!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
Request::Transaction(_) => unreachable!(),
}
}

@@ -91,24 +88,6 @@ impl RequestProcessor {
}

/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Event>, Vec<(Request, SocketAddr)>) {
let mut events = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
events.push(Event::Transaction(tr));
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(events, reqs)
}

fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
@@ -139,49 +118,29 @@

pub fn process_request_packets(
&self,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
signal_sender: &Sender<Signal>,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let recv_start = Instant::now();
let mms = verified_receiver.recv_timeout(timer)?;
let mut reqs_len = 0;
let mms_len = mms.len();
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;

info!(
"@{:?} process start stalled for: {:?}ms batches: {}",
"@{:?} request_stage: processing: {}",
timing::timestamp(),
timing::duration_as_ms(&recv_start.elapsed()),
mms.len(),
batch_len
);

let mut reqs_len = 0;
let proc_start = Instant::now();
for (msgs, vers) in mms {
let reqs = Self::deserialize_requests(&msgs.read().unwrap());
reqs_len += reqs.len();
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| {
let v = x.0.verify();
v
})
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();

debug!("partitioning");
let (events, reqs) = Self::partition_requests(req_vers);
debug!("events: {} reqs: {}", events.len(), reqs.len());

debug!("process_events");
let results = self.bank.process_verified_events(events);
let events = results.into_iter().filter_map(|x| x.ok()).collect();
signal_sender.send(Signal::Events(events))?;
debug!("done process_events");

debug!("process_requests");
let rsps = self.process_requests(reqs);
debug!("done process_requests");

let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
@@ -196,7 +155,7 @@ impl RequestProcessor {
info!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
mms_len,
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
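Because the hunks above interleave removed and added lines, here is the new process_request_packets body reassembled from just the added and context lines, with indentation reconstructed. The elided tail of the function (blob sending, packet recycling, and the closing timing log) is summarized in comments; nothing in this sketch should be read as more authoritative than the diff itself.

    pub fn process_request_packets(
        &self,
        packet_receiver: &Receiver<SharedPackets>,
        blob_sender: &streamer::BlobSender,
        packet_recycler: &packet::PacketRecycler,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<()> {
        // No verified_receiver and no signal_sender: this stage no longer sees
        // sigverify results and no longer forwards events to a banking thread.
        let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;

        info!(
            "@{:?} request_stage: processing: {}",
            timing::timestamp(),
            batch_len
        );

        let mut reqs_len = 0;
        let proc_start = Instant::now();
        for msgs in batch {
            // Deserialize (Request, SocketAddr) pairs, dropping malformed packets.
            let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
                .into_iter()
                .filter_map(|x| x)
                .collect();
            reqs_len += reqs.len();

            // Only read-only queries remain: GetBalance, GetLastId, GetTransactionCount.
            let rsps = self.process_requests(reqs);

            let blobs = Self::serialize_responses(rsps, blob_recycler)?;
            if !blobs.is_empty() {
                // (elided in the diff above: presumably blob_sender.send(blobs)?)
            }
            // (elided in the diff above: packet recycling via packet_recycler)
        }
        // (elided in the diff above: the "done process batches" throughput log,
        //  which now reports batch_len instead of the old mms_len)
        Ok(())
    }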
145 changes: 2 additions & 143 deletions src/request_stage.rs
@@ -2,7 +2,6 @@
use packet;
use packet::SharedPackets;
use recorder::Signal;
use request_processor::RequestProcessor;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -12,7 +11,6 @@ use streamer;

pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
pub signal_receiver: Receiver<Signal>,
pub blob_receiver: streamer::BlobReceiver,
pub request_processor: Arc<RequestProcessor>,
}
@@ -21,18 +19,16 @@ impl RequestStage {
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
packet_receiver: Receiver<SharedPackets>,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> Self {
let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone();
let (signal_sender, signal_receiver) = channel();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || loop {
let e = request_processor_.process_request_packets(
&verified_receiver,
&signal_sender,
&packet_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
@@ -45,145 +41,8 @@
});
RequestStage {
thread_hdl,
signal_receiver,
blob_receiver,
request_processor,
}
}
}

// TODO: When banking is pulled out of RequestStage, add this test back in.

//use bank::Bank;
//use entry::Entry;
//use event::Event;
//use hash::Hash;
//use record_stage::RecordStage;
//use recorder::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//
//#[cfg(test)]
//mod tests {
// use bank::Bank;
// use event::Event;
// use event_processor::EventProcessor;
// use mint::Mint;
// use signature::{KeyPair, KeyPairUtil};
// use transaction::Transaction;
//
// #[test]
// // TODO: Move this test banking_stage. Calling process_events() directly
// // defeats the purpose of this test.
// fn test_banking_sequential_consistency() {
// // In this attack we'll demonstrate that a verifier can interpret the ledger
// // differently if either the server doesn't signal the ledger to add an
// // Entry OR if the verifier tries to parallelize across multiple Entries.
// let mint = Mint::new(2);
// let bank = Bank::new(&mint);
// let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
//
// // Process a batch that includes a transaction that receives two tokens.
// let alice = KeyPair::new();
// let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
// let events = vec![Event::Transaction(tr)];
// let entry0 = event_processor.process_events(events).unwrap();
//
// // Process a second batch that spends one of those tokens.
// let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
// let events = vec![Event::Transaction(tr)];
// let entry1 = event_processor.process_events(events).unwrap();
//
// // Collect the ledger and feed it to a new bank.
// let entries = vec![entry0, entry1];
//
// // Assert the user holds one token, not two. If the server only output one
// // entry, then the second transaction will be rejected, because it drives
// // the account balance below zero before the credit is added.
// let bank = Bank::new(&mint);
// for entry in entries {
// assert!(
// bank
// .process_verified_events(entry.events)
// .into_iter()
// .all(|x| x.is_ok())
// );
// }
// assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
// }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
// extern crate test;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use event_processor::*;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// #[bench]
// fn process_events_bench(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = KeyPair::new();
// let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_verified_transaction(&tr).unwrap();
//
// let rando1 = KeyPair::new();
// let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_verified_transaction(&tr).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let events: Vec<_> = transactions
// .into_iter()
// .map(|tr| Event::Transaction(tr))
// .collect();
//
// let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(event_processor.process_events(events).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(event_processor.historian_input);
// let entries: Vec<Entry> = event_processor.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].events.len(), txs as usize);
//
// println!("{} tps", tps);
// }
//}
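For orientation, this is roughly how a caller would wire up the slimmed-down RequestStage after this commit. The constructor signature is taken from the diff above; the RequestProcessor::new(bank) call and the BlobRecycler default are assumptions made for the sake of the sketch, since neither appears in this commit.

    // Hypothetical wiring sketch; RequestProcessor::new(...) is assumed, not shown here.
    let exit = Arc::new(AtomicBool::new(false));
    let (packet_sender, packet_receiver) = channel(); // Receiver<SharedPackets>;
                                                      // packet_sender would be fed by the
                                                      // network threads (not shown)
    let request_stage = RequestStage::new(
        RequestProcessor::new(bank),          // assumed constructor over the bank
        exit.clone(),
        packet_receiver,                      // raw packets; no sigverify results needed
        packet::PacketRecycler::default(),    // default() as used in the request.rs test above
        packet::BlobRecycler::default(),      // assumed to mirror PacketRecycler
    );
    // Query responses come back on request_stage.blob_receiver. There is no
    // signal_receiver field any more, because banking has moved out of this stage.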