diff --git a/Cargo.toml b/Cargo.toml index 17100f557c200c..fe717c9ade8a0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,6 +57,7 @@ unstable = [] ipv6 = [] cuda = [] erasure = [] +parex = [] [dependencies] rayon = "1.0.0" @@ -91,6 +92,9 @@ reqwest = "0.8.6" influx_db_client = "0.3.4" dirs = "1.0.2" +[profile.release] +debug = true + [dev-dependencies] criterion = "0.2" diff --git a/benches/bank.rs b/benches/bank.rs index 1f5edf96a83114..66e1756ca7397a 100644 --- a/benches/bank.rs +++ b/benches/bank.rs @@ -6,7 +6,6 @@ extern crate solana; use bincode::serialize; use criterion::{Bencher, Criterion}; -use rayon::prelude::*; use solana::bank::*; use solana::hash::hash; use solana::mint::Mint; @@ -18,12 +17,20 @@ fn bench_process_transaction(bencher: &mut Bencher) { let bank = Bank::new(&mint); // Create transactions between unrelated parties. + // page_table requires sequential debits from the same account + let mut version: u64 = 0; let transactions: Vec<_> = (0..4096) - .into_par_iter() + .into_iter() .map(|i| { // Seed the 'from' account. let rando0 = KeyPair::new(); - let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id()); + let tx = Transaction::new( + &mint.keypair(), + rando0.pubkey(), + 10_000, + mint.last_id(), + i as u64, + ); assert!(bank.process_transaction(&tx).is_ok()); // Seed the 'to' account and a cell for its signature. @@ -31,7 +38,7 @@ fn bench_process_transaction(bencher: &mut Bencher) { bank.register_entry_id(&last_id); let rando1 = KeyPair::new(); - let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id); + let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id, version); assert!(bank.process_transaction(&tx).is_ok()); // Finally, return the transaction to the benchmark. @@ -41,9 +48,10 @@ fn bench_process_transaction(bencher: &mut Bencher) { bencher.iter_with_setup( || { - // Since benchmarker runs this multiple times, we need to clear the signatures. 
- bank.clear_signatures(); - transactions.clone() + let mut txs = transactions.clone(); + version += 1; + txs.iter_mut().for_each(|tx| tx.call.data.version = version); + txs }, |transactions| { let results = bank.process_transactions(transactions); diff --git a/benches/banking_stage.rs b/benches/banking_stage.rs index 1b7e5e87c995ef..13ac89880b0c0a 100644 --- a/benches/banking_stage.rs +++ b/benches/banking_stage.rs @@ -5,11 +5,10 @@ extern crate rayon; extern crate solana; use criterion::{Bencher, Criterion}; -use rayon::prelude::*; use solana::bank::Bank; use solana::banking_stage::BankingStage; use solana::mint::Mint; -use solana::packet::{to_packets_chunked, PacketRecycler}; +use solana::packet::{to_packets_chunked, PacketRecycler, NUM_PACKETS}; use solana::record_stage::Signal; use solana::signature::{KeyPair, KeyPairUtil}; use solana::transaction::Transaction; @@ -93,7 +92,7 @@ fn check_txs(batches: usize, receiver: &Receiver, ref_tx_count: usize) { } fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { - let tx = 10_000_usize; + let tx = NUM_PACKETS; let mint_total = 1_000_000_000_000; let mint = Mint::new(mint_total); let num_dst_accounts = 8 * 1024; @@ -111,6 +110,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { dstkeys[i % num_dst_accounts], i as i64, mint.last_id(), + 0, ) }) .collect(); @@ -119,22 +119,32 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { let (signal_sender, signal_receiver) = channel(); let packet_recycler = PacketRecycler::default(); - let setup_transactions: Vec<_> = (0..num_src_accounts) - .map(|i| { - Transaction::new( - &mint.keypair(), - srckeys[i].pubkey(), - mint_total / num_src_accounts as i64, - mint.last_id(), - ) - }) - .collect(); - - bencher.iter(move || { - let bank = Arc::new(Bank::new(&mint)); + let bank = Arc::new(Bank::new(&mint)); + (0..num_src_accounts).for_each(|i| { + let t = Transaction::new( + &mint.keypair(), + srckeys[i].pubkey(), + mint_total / num_src_accounts as 
i64, + mint.last_id(), + i as u64, + ); + let tx = bank.process_transaction(&t); + if tx.is_err() { + println!("{:?}", tx); + } + assert!(tx.is_ok()); + }); - let verified_setup: Vec<_> = - to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx) + let mut version: u64 = 0; + bencher.iter_with_setup( + || { + let mut txs = transactions.clone(); + txs.iter_mut().for_each(|tx| tx.call.data.version = version); + version += 1; + txs + }, + |transactions| { + let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions, 192) .into_iter() .map(|x| { let len = (*x).read().unwrap().packets.len(); @@ -142,80 +152,71 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { }) .collect(); - let verified_setup_len = verified_setup.len(); - verified_sender.send(verified_setup).unwrap(); - BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler) - .unwrap(); - - check_txs(verified_setup_len, &signal_receiver, num_src_accounts); - - let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192) - .into_iter() - .map(|x| { - let len = (*x).read().unwrap().packets.len(); - (x, iter::repeat(1).take(len).collect()) - }) - .collect(); - - let verified_len = verified.len(); - verified_sender.send(verified).unwrap(); - BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler) - .unwrap(); - - check_txs(verified_len, &signal_receiver, tx); - }); + let verified_len = verified.len(); + verified_sender.send(verified).unwrap(); + BankingStage::process_packets( + &bank, + &verified_receiver, + &signal_sender, + &packet_recycler, + ).unwrap(); + + check_txs(verified_len, &signal_receiver, tx); + }, + ); } -fn bench_banking_stage_single_from(bencher: &mut Bencher) { - let tx = 10_000_usize; - let mint = Mint::new(1_000_000_000_000); - let mut pubkeys = Vec::new(); - let num_keys = 8; - for _ in 0..num_keys { - pubkeys.push(KeyPair::new().pubkey()); - } - - let 
transactions: Vec<_> = (0..tx) - .into_par_iter() - .map(|i| { - Transaction::new( - &mint.keypair(), - pubkeys[i % num_keys], - i as i64, - mint.last_id(), - ) - }) - .collect(); - - let (verified_sender, verified_receiver) = channel(); - let (signal_sender, signal_receiver) = channel(); - let packet_recycler = PacketRecycler::default(); - - bencher.iter(move || { - let bank = Arc::new(Bank::new(&mint)); - let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx) - .into_iter() - .map(|x| { - let len = (*x).read().unwrap().packets.len(); - (x, iter::repeat(1).take(len).collect()) - }) - .collect(); - let verified_len = verified.len(); - verified_sender.send(verified).unwrap(); - BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler) - .unwrap(); - - check_txs(verified_len, &signal_receiver, tx); - }); -} +//fn bench_banking_stage_single_from(bencher: &mut Bencher) { +// let tx = 10_000_usize; +// let mint = Mint::new(1_000_000_000_000); +// let mut pubkeys = Vec::new(); +// let num_keys = 8; +// for _ in 0..num_keys { +// pubkeys.push(KeyPair::new().pubkey()); +// } +// +// let transactions: Vec<_> = (0..tx) +// .into_par_iter() +// .map(|i| { +// Transaction::new( +// &mint.keypair(), +// pubkeys[i % num_keys], +// i as i64, +// mint.last_id(), +// i as u64, +// ) +// }) +// .collect(); +// +// let (verified_sender, verified_receiver) = channel(); +// let (signal_sender, signal_receiver) = channel(); +// let packet_recycler = PacketRecycler::default(); +// +// bencher.iter(move || { +// let bank = Arc::new(Bank::new(&mint)); +// let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx) +// .into_iter() +// .map(|x| { +// let len = (*x).read().unwrap().packets.len(); +// (x, iter::repeat(1).take(len).collect()) +// }) +// .collect(); +// let verified_len = verified.len(); +// verified_sender.send(verified).unwrap(); +// BankingStage::process_packets(&bank, 
&verified_receiver, &signal_sender, &packet_recycler) +// .unwrap(); +// +// check_txs(verified_len, &signal_receiver, tx); +// }); +//} fn bench(criterion: &mut Criterion) { criterion.bench_function("bench_banking_stage_multi_accounts", |bencher| { bench_banking_stage_multi_accounts(bencher); }); - criterion.bench_function("bench_process_stage_single_from", |bencher| { - bench_banking_stage_single_from(bencher); - }); + //criterion.bench_function("bench_process_stage_single_from", |bencher| { + // bench_banking_stage_single_from(bencher); + //}); } criterion_group!( diff --git a/benches/ledger.rs b/benches/ledger.rs index 8172c0319d1cad..ec7cf1ebc4e370 100644 --- a/benches/ledger.rs +++ b/benches/ledger.rs @@ -14,7 +14,7 @@ fn bench_block_to_blobs_to_block(bencher: &mut Bencher) { let zero = Hash::default(); let one = hash(&zero.as_ref()); let keypair = KeyPair::new(); - let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one); + let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one, 0); let transactions = vec![tx0; 10]; let entries = next_entries(&zero, 1, transactions); diff --git a/rfcs/rfc-001-smart-contracts-engine.md b/rfcs/rfc-001-smart-contracts-engine.md index b0ff842d24af61..d3f7625d639eab 100644 --- a/rfcs/rfc-001-smart-contracts-engine.md +++ b/rfcs/rfc-001-smart-contracts-engine.md @@ -4,7 +4,7 @@ The goal of this RFC is to define a set of constraints for APIs and runtime such ## Version -version 0.1 +version 0.2 ## Toolchain Stack @@ -37,154 +37,175 @@ version 0.1 In Figure 1 an untrusted client, creates a program in the front-end language of her choice, (like C/C++/Rust/Lua), and compiles it with LLVM to a position independent shared object ELF, targeting BPF bytecode. Solana will safely load and execute the ELF. -## Bytecode - -Our bytecode is based on Berkley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have: - -1. Deterministic amount of time to execute the code -2. 
Bytecode that is portable between machine instruction sets -3. Verified memory accesses -4. Fast to load the object, verify the bytecode and JIT to local machine instruction set - -For 1, that means that loops are unrolled, and for any jumps back we can guard them with a check against the number of instruction that have been executed at this point. If the limit is reached, the program yields its execution. This involves saving the stack and current instruction index. - -For 2, the BPF bytecode already easily maps to x86–64, arm64 and other instruction sets.  - -For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic load and stores can do a runtime check against available memory, these will be slow and should be avoided. - -For 4, Fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported. - -### Address Checks - -The interface to the module takes a `&mut Vec>` in rust, or a `int sz, void* data[sz], int szs[sz]` in `C`. Given the module's bytecode, for each method, we need to analyze the bounds on load and stores into each buffer the module uses. This check needs to be done `on chain`, and after those bounds are computed we can verify that the user supplied array of buffers will not cause a memory fault. For load and stores that we cannot analyze, we can replace with a `safe_load` and `safe_store` instruction that will check the table for access. - -## Loader -The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points. - -Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point. 
- -A client will create a transaction to create a new loader instance: - -`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)` - -A client will then do a bunch of transactions to load its elf into the loader instance they created: - -`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)` - -At this point the client can create a new instance of the module with its own instance address: - -`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)` - -Once the instance has been created, the client may need to upload more user data to solana to configure this instance: - -`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)` - -Now clients can `start` the instance: - -`Instance_Start(Instance PubKey, proof of key ownership)` - ## Runtime -Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change. - -### State and Entry Point - -State is addressed by an account which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. The output of keys is a subset of the input. Three basic kinds of state exist: +The goal with the runtime is to have a general purpose execution environment that is highly parallelizeable and doesn't require dynamic resource management. The goal is to execute as many contracts as possible in parallel, and have them pass or fail without a destructive state change. 
-* Instance State -* Participant State -* Caller State -There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract. Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transactions caller has allocated. +### State +State is addressed by an account which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. -### Call - +### Call Structure ``` -void call( - const struct instance_data *data, - const uint8_t kind[], //instance|participant|caller|read|write - const uint8_t *keys[], - uint8_t *data[], - int num, - uint8_t dirty[], //dirty memory bits - uint8_t *userdata, //current transaction data -); +/// Call definition +/// Signed portion +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct CallData { + /// number of keys to load, aka the to key + /// keys[0] is the caller's key + pub keys: Vec, + + /// the set of PublicKeys that are required to have a proof + pub required_proofs: Vec, + + /// PoH data + /// last id PoH observed by the sender + pub last_count: u64, + /// last PoH hash observed by the sender + pub last_hash: Hash, + + /// Program + /// the address of the program we want to call + pub contract: ContractId, + /// OS scheduling fee + pub fee: i64, + /// struct version to prevent duplicate spends + /// Calls with a version <= Page.version are rejected + pub version: u64, + /// method to call in the contract + pub method: u8, + /// usedata in bytes + pub user_data: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct Call { + /// Signatures and Keys + /// (signature, key index) + /// This vector contains a tuple of signatures, and the key index the signature is for + /// proofs[0] is always 
key[0] + pub proofs: Vec, + pub data: CallData, +} ``` -To call this operation, the transaction that is destined to the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client has to first call a function on the contract with the designed address that will own the state. - -At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights. - -* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)` - -Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership: - -* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)` +At it's core, this is just a set of PublicKeys and Signatures with a bit of metadata. The contract PublicKey routes this transaction into that contracts entry point. `version` is used for dropping retransmitted requests. Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated. -#### Caller State - -Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context. - -#### Instance State - -Instance `state` is memory that belongs to this contract instance. We may also need module-wide `state` as well. - -#### Participant State +### Execution -Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller. 
+Calls batched and processed in a pipeline -### Reduce +``` ++-----------+ +-------------+ +--------------+ +--------------------+ +| sigverify |--->| lock memory |--->| validate fee |--->| allocate new pages |---> ++-----------+ +-------------+ +--------------+ +--------------------+ + + +------------+ +---------+ +--------------+ +-=------------+ +--->| load pages |--->| execute |--->|unlock memory |--->| commit pages | + +------------+ +---------+ +--------------+ +--------------+ -Some operations on the contract will require iteration over all the keys. To make this parallelizable the iteration is broken up into reduce calls which are combined. +``` +At the `execute` stage, the loaded pages have no data dependencies, so all the contracts can be executed in parallel. +## Memory Management ``` -void reduce_m( - const struct instance_data *data, - const uint8_t *keys[], - const uint8_t *data[], - int num, - uint8_t *reduce_data, -); - -void reduce_r( - const struct instance_data *data, - const uint8_t *reduce_data[], - int num, - uint8_t *reduce_data, -); +pub struct Page { + /// key that indexes this page + /// proove ownership of this key to spend from this Page + owner: PublicKey, + /// contract that owns this page + /// contract can write to the data that is pointed to by `pointer` + contract: PublicKey, + /// balance that belongs to owner + balance: u64, + /// version of the structure, public for testing + version: u64, + /// hash of the page data + memhash: Hash, + /// The following could be in a separate structure + memory: Vec, +} ``` -### Execution +The guarantee that solana enforces: + 1. The contract code is the only code that will modify the contents of `memory` + 2. Total balances on all the pages is equal before and after exectuion of a call + 3. Balances of each of the pages not owned by the contract must be equal to or greater after the call than before the call. -Transactions are batched and processed in parallel at each stage. 
+## Entry Point
+Execution of the contract involves mapping the contract's public key to an entry point which takes a pointer to the transaction, and an array of loaded pages.
+```
+// Find the method
+match (tx.contract, tx.method) {
+    // system interface
+    // everyone has the same reallocate
+    (_, 0) => system_0_realloc(&tx, &mut call_pages),
+    (_, 1) => system_1_assign(&tx, &mut call_pages),
+    // contract methods
+    (DEFAULT_CONTRACT, 128) => default_contract_128_move_funds(&tx, &mut call_pages),
+    (contract, method) => //...
```
-+-----------+    +--------------+   +-----------+    +---------------+
-| sigverify |-+->| debit commit |---+->| execution |-+->| memory commit |
-+-----------+ |  +--------------+ | +-----------+ |  +---------------+
-              |                   |               |
-              | +---------------+ |               |  +--------------+
-              |->| memory verify |->+              +->| debit undo   |
-                +---------------+  |                  +--------------+
-                                   |
-                                   | +---------------+
-                                   +->| credit commit |
-                                     +---------------+
-
+The first 127 methods are reserved for the system interface, which implements allocation and assignment of memory. The rest, including the contract for moving funds are implemented by the contract itself.
+## System Interface
```
-The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check if the callers key has some state allocated with the contract, then the memory is loaded and executed. After execution stage, the dirty pages are written back by the contract. Because know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution, only transfers may be reversed, fees are commited to solana. 
+/// SYSTEM interface, same for every contract, methods 0 to 127
+/// method 0
+/// reallocate
+/// spend the funds from the call to the first recipient
+pub fn system_0_realloc(call: &Call, pages: &mut Vec) {
+    if call.contract == DEFAULT_CONTRACT {
+        let size: u64 = deserialize(&call.user_data).unwrap();
+        pages[0].memory.resize(size as usize, 0u8);
+    }
+}
+/// method 1
+/// assign
+/// assign the page to a contract
+pub fn system_1_assign(call: &Call, pages: &mut Vec) {
+    let contract = deserialize(&call.user_data).unwrap();
+    if call.contract == DEFAULT_CONTRACT {
+        pages[0].contract = contract;
+        //zero out the memory in pages[0].memory
+        //Contracts need to own the state of that data otherwise a user could fabricate the state and
+        //manipulate the contract
+        pages[0].memory.clear();
+    }
+}
+```
+The first method resizes the memory that is associated with the caller's page. The second system call assigns the page to the contract. Both methods check if the current contract is 0, otherwise the method does nothing and the caller has spent their fees.

-### GPU execution
+This ensures that when memory is assigned to the contract the initial state of all the bytes is 0, and the contract itself is the only thing that can modify that state.

-A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.
+## Simplest contract
+```
+/// DEFAULT_CONTRACT interface
+/// All contracts start with 128
+/// method 128
+/// move_funds
+/// spend the funds from the call to the first recipient
+pub fn default_contract_128_move_funds(call: &Call, pages: &mut Vec) {
+    let amount: u64 = deserialize(&call.user_data).unwrap();
+    if pages[0].balance >= amount {
+        pages[0].balance -= amount;
+        pages[1].balance += amount;
+    }
+}
+```
+
+This simply moves the amount from page[0], which is the caller's page, to page[1], which is the recipient's page. 
## Notes 1. There is no dynamic memory allocation. -2. Persistant Memory is allocated to a Key with ownership +2. Persistent Memory is allocated to a Key with ownership 3. Contracts can `call` to update key owned state -4. Contracts can `reduce` over the memory to aggregate state -5. `call` is just a *syscall* that does a cryptographic check of memory owndershp +4. `call` is just a *syscall* that does a cryptographic check of memory ownership +5. Kernel guarantees that when memory is assigned to the contract its state is 0 +6. Kernel guarantees that contract is the only thing that can modify memory that its assigned to +7. Kernel guarantees that the contract can only spend tokens that are in pages that are assigned to it +8. Kernel guarantees the balances belonging to pages are balanced before and after the call diff --git a/src/bank.rs b/src/bank.rs index 01a55eaa3e79cb..d317b4fcb2c3c5 100644 --- a/src/bank.rs +++ b/src/bank.rs @@ -5,20 +5,20 @@ extern crate libc; +use bincode::{deserialize, serialize}; use chrono::prelude::*; -use counter::Counter; use entry::Entry; use hash::Hash; use itertools::Itertools; use ledger::Block; use mint::Mint; +use page_table::{self, Call, Page, PageTable}; use payment_plan::{Payment, PaymentPlan, Witness}; use signature::{KeyPair, PublicKey, Signature}; use std::collections::hash_map::Entry::Occupied; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{HashMap, VecDeque}; use std::result; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::RwLock; +use std::sync::{Arc, RwLock}; use std::time::Instant; use streamer::WINDOW_SIZE; use timing::{duration_as_us, timestamp}; @@ -34,22 +34,16 @@ pub const MAX_ENTRY_IDS: usize = 1024 * 16; pub const VERIFY_BLOCK_SIZE: usize = 16; +pub const BANK_PROCESS_TRANSACTION_METHOD: u8 = 129; + /// Reasons a transaction might be rejected. 
#[derive(Debug, PartialEq, Eq)] pub enum BankError { - /// Attempt to debit from `PublicKey`, but no found no record of a prior credit. - AccountNotFound(PublicKey), - /// The requested debit from `PublicKey` has the potential to draw the balance /// below zero. This can occur when a debit and credit are processed in parallel. /// The bank may reject the debit or push it to a future entry. InsufficientFunds(PublicKey), - /// The bank has seen `Signature` before. This can occur under normal operation - /// when a UDP packet is duplicated, as a user error from a client not updating - /// its `last_id`, or as a double-spend attack. - DuplicateSignature(Signature), - /// The bank has not seen the given `last_id` or the transaction is too old and /// the `last_id` has been discarded. LastIdNotFound(Hash), @@ -60,51 +54,55 @@ pub enum BankError { /// Proof of History verification failed. LedgerVerificationFailed, + + /// Account is concurrently processing + AccountInUse, } pub type Result = result::Result; /// The state of all accounts and contracts after processing its entries. pub struct Bank { - /// A map of account public keys to the balance in that account. - balances: RwLock>, - - /// A map of smart contract transaction signatures to what remains of its payment - /// plan. Each transaction that targets the plan should cause it to be reduced. - /// Once it cannot be reduced, final payments are made and it is discarded. - pending: RwLock>, - - /// A FIFO queue of `last_id` items, where each item is a set of signatures - /// that have been processed using that `last_id`. Rejected `last_id` - /// values are so old that the `last_id` has been pulled out of the queue. 
+ /// The page table + page_table: Arc, + ctx: RwLock, + //TODO(anatoly): this functionality should be handled by the ledger + //Ledger context in a call is + //(PoH Count, PoH Hash) + //Ledger should implement a O(1) `lookup(PoH Count) -> PoH Hash` + //which shoudl filter invalid Calls last_ids: RwLock>, - - /// Mapping of hashes to signature sets along with timestamp. The bank uses this data to - /// reject transactions with signatures its seen before - last_ids_sigs: RwLock, u64)>>, - - /// The number of transactions the bank has processed without error since the - /// start of the ledger. - transaction_count: AtomicUsize, + /// Mapping of hashes to timestamp. The bank uses this data to + /// reject transactions with old last_id hashes + last_ids_set: RwLock>, } impl Default for Bank { fn default() -> Self { Bank { - balances: RwLock::new(HashMap::new()), - pending: RwLock::new(HashMap::new()), + page_table: Arc::new(PageTable::default()), + ctx: RwLock::new(page_table::Context::default()), last_ids: RwLock::new(VecDeque::new()), - last_ids_sigs: RwLock::new(HashMap::new()), - transaction_count: AtomicUsize::new(0), + last_ids_set: RwLock::new(HashMap::new()), } } } impl Bank { + /// Create an Bank using a deposit. + pub fn new_with_page_table(page_table: Arc) -> Self { + Bank { + page_table, + ctx: RwLock::new(page_table::Context::default()), + last_ids: RwLock::new(VecDeque::new()), + last_ids_set: RwLock::new(HashMap::new()), + } + } /// Create an Bank using a deposit. pub fn new_from_deposit(deposit: &Payment) -> Self { - let bank = Self::default(); - bank.apply_payment(deposit, &mut bank.balances.write().unwrap()); + let pt = Arc::new(PageTable::default()); + let bank = Self::new_with_page_table(pt); + bank.force_deposit(deposit.to, deposit.tokens); bank } @@ -119,9 +117,10 @@ impl Bank { bank } - /// Commit funds to the `payment.to` party. 
- fn apply_payment(&self, payment: &Payment, balances: &mut HashMap) { - *balances.entry(payment.to).or_insert(0) += payment.tokens; + /// Get the underlying PageTable + /// this can be used to create multiple banks that process transactions in parallel + pub fn page_table(&self) -> &Arc { + &self.page_table } /// Return the last entry ID registered. @@ -134,62 +133,17 @@ impl Bank { *last_item } - /// Store the given signature. The bank will reject any transaction with the same signature. - fn reserve_signature(signatures: &mut HashSet, sig: &Signature) -> Result<()> { - if let Some(sig) = signatures.get(sig) { - return Err(BankError::DuplicateSignature(*sig)); - } - signatures.insert(*sig); - Ok(()) - } - - /// Forget the given `signature` because its transaction was rejected. - fn forget_signature(signatures: &mut HashSet, signature: &Signature) { - signatures.remove(signature); - } - - /// Forget the given `signature` with `last_id` because the transaction was rejected. - fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) { - if let Some(entry) = self - .last_ids_sigs - .write() - .expect("'last_ids' read lock in forget_signature_with_last_id") - .get_mut(last_id) - { - Self::forget_signature(&mut entry.0, signature); - } - } - - /// Forget all signatures. Useful for benchmarking. 
- pub fn clear_signatures(&self) { - for (_, sigs) in self.last_ids_sigs.write().unwrap().iter_mut() { - sigs.0.clear(); - } - } - - fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> { - if let Some(entry) = self - .last_ids_sigs - .write() - .expect("'last_ids' read lock in reserve_signature_with_last_id") - .get_mut(last_id) - { - return Self::reserve_signature(&mut entry.0, signature); - } - Err(BankError::LastIdNotFound(*last_id)) - } - /// Look through the last_ids and find all the valid ids /// This is batched to avoid holding the lock for a significant amount of time /// /// Return a vec of tuple of (valid index, timestamp) /// index is into the passed ids slice to avoid copying hashes pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> { - let last_ids = self.last_ids_sigs.read().unwrap(); + let last_ids = self.last_ids_set.read().unwrap(); let mut ret = Vec::new(); for (i, id) in ids.iter().enumerate() { if let Some(entry) = last_ids.get(id) { - ret.push((i, entry.1)); + ret.push((i, *entry)); } } ret @@ -204,83 +158,90 @@ impl Bank { .last_ids .write() .expect("'last_ids' write lock in register_entry_id"); - let mut last_ids_sigs = self - .last_ids_sigs - .write() - .expect("last_ids_sigs write lock"); + let mut last_ids_set = self.last_ids_set.write().expect("last_ids_set write lock"); if last_ids.len() >= MAX_ENTRY_IDS { let id = last_ids.pop_front().unwrap(); - last_ids_sigs.remove(&id); + last_ids_set.remove(&id); } - last_ids_sigs.insert(*last_id, (HashSet::new(), timestamp())); + last_ids_set.insert(*last_id, timestamp()); last_ids.push_back(*last_id); } /// Deduct tokens from the 'from' address the account has sufficient /// funds and isn't a duplicate. 
- fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap) -> Result<()> { - let mut purge = false; - { - let option = bals.get_mut(&tx.from); - if option.is_none() { - if let Instruction::NewVote(_) = &tx.instruction { - inc_new_counter!("bank-appy_debits-vote_account_not_found", 1); - } else { - inc_new_counter!("bank-appy_debits-generic_account_not_found", 1); - } - return Err(BankError::AccountNotFound(tx.from)); + fn apply_debits(tx: &Call, instruction: &Instruction, bals: &mut [Page]) -> Result<()> { + // check the spendable amount of tokens in the contract + let spendable = Self::default_contract_get_balance(&bals[0]); + if let Instruction::NewContract(contract) = &instruction { + if contract.tokens < 0 { + return Err(BankError::NegativeTokens); + } + if spendable < contract.tokens { + return Err(BankError::InsufficientFunds(tx.data.keys[0])); + } else { + bals[0].balance -= contract.tokens; } - let bal = option.unwrap(); - - self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?; - - if let Instruction::NewContract(contract) = &tx.instruction { - if contract.tokens < 0 { - return Err(BankError::NegativeTokens); - } - - if *bal < contract.tokens { - self.forget_signature_with_last_id(&tx.sig, &tx.last_id); - return Err(BankError::InsufficientFunds(tx.from)); - } else if *bal == contract.tokens { - purge = true; - } else { - *bal -= contract.tokens; - } - }; - } - - if purge { - bals.remove(&tx.from); } - Ok(()) } + fn force_deposit(&self, to: PublicKey, tokens: i64) { + self.page_table + .force_deposit(to, page_table::DEFAULT_CONTRACT, tokens); + } + /// Deposit Payment to the Page balance + fn apply_payment(payment: &Payment, page: &mut Page) { + page.balance += payment.tokens; + } /// Apply only a transaction's credits. - /// Note: It is safe to apply credits from multiple transactions in parallel. 
- fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap) { - match &tx.instruction { + fn apply_credits(tx: &Call, instruction: &Instruction, balances: &mut [Page]) { + match &instruction { Instruction::NewContract(contract) => { let plan = contract.plan.clone(); if let Some(payment) = plan.final_payment() { - self.apply_payment(&payment, balances); + Self::apply_payment(&payment, &mut balances[1]); } else { - let mut pending = self - .pending - .write() - .expect("'pending' write lock in apply_credits"); - pending.insert(tx.sig, plan); + let mut pending: HashMap = + deserialize(&balances[1].memory).unwrap_or_default(); + pending.insert(tx.proofs[0], plan); + //TODO(anatoly): this wont work in the future, the user will have to alloate first + //and call serealize_into + balances[1].memory = serialize(&pending).expect("serealize pending hashmap"); + //The sol is stored in the contract, but the contract shouldn't allow it to + //be spendable + balances[1].balance += contract.tokens; } } Instruction::ApplyTimestamp(dt) => { - let _ = self.apply_timestamp(tx.from, *dt); + if let Ok(mut pending) = deserialize(&balances[1].memory) { + //TODO(anatoly): capture error into the contract's data + let _ = Self::apply_timestamp(&balances[0], &mut pending, *dt); + //TODO(anatoly): this wont work in the future, the user will have to alloate first + //and call serealize_into + balances[1].memory = if pending.is_empty() { + //free up the memory when the contract is done + vec![] + } else { + serialize(&pending).expect("serealize pending hashmap") + }; + } } Instruction::ApplySignature(tx_sig) => { - let _ = self.apply_signature(tx.from, *tx_sig); + if let Ok(mut pending) = deserialize(&balances[1].memory) { + //TODO(anatoly): capture error into the contract's data + let _ = Self::apply_signature(balances, &mut pending, *tx_sig); + //TODO(anatoly): this wont work in the future, the user will have to alloate first + //and call serealize_into + balances[1].memory = if 
pending.is_empty() { + //free up the memory when the contract is done + vec![] + } else { + serialize(&pending).expect("serealize pending hashmap") + }; + } } Instruction::NewVote(_vote) => { - trace!("GOT VOTE! last_id={:?}", &tx.last_id.as_ref()[..8]); + trace!("GOT VOTE! last_id={:?}", &tx.data.last_hash.as_ref()[..8]); // TODO: record the vote in the stake table... } } @@ -288,45 +249,78 @@ impl Bank { /// Process a Transaction. If it contains a payment plan that requires a witness /// to progress, the payment plan will be stored in the bank. - pub fn process_transaction(&self, tx: &Transaction) -> Result<()> { - let bals = &mut self.balances.write().unwrap(); - self.apply_debits(tx, bals)?; - self.apply_credits(tx, bals); - self.transaction_count.fetch_add(1, Ordering::Relaxed); - Ok(()) + pub fn default_contract_129_process_transaction(tx: &Call, pages: &mut [Page]) { + let instruction = deserialize(&tx.data.user_data).expect("instruction deserialize"); + if Self::apply_debits(tx, &instruction, pages).is_ok() { + Self::apply_credits(tx, &instruction, pages); + } } + /// Only used for testing + pub fn process_transaction(&self, txs: &Transaction) -> Result { + let rv = self.process_transactions(vec![txs.clone()]); + match rv[0] { + Err(BankError::AccountInUse) => Err(BankError::AccountInUse), + Err(BankError::InsufficientFunds(key)) => Err(BankError::InsufficientFunds(key)), + Err(BankError::LastIdNotFound(id)) => Err(BankError::LastIdNotFound(id)), + + Ok(ref t) => Ok(t.clone()), + _ => { + assert!(false, "unexpected return value from process_transactions"); + Err(BankError::AccountInUse) + } + } + } /// Process a batch of transactions. 
#[must_use] - pub fn process_transactions(&self, txs: Vec) -> Vec> { - let bals = &mut self.balances.write().unwrap(); + pub fn process_calls(&self, txs: Vec) -> Vec> { debug!("processing Transactions {}", txs.len()); + let mut ctx = self.ctx.write().unwrap(); + let txs_len = txs.len(); let now = Instant::now(); - let results: Vec<_> = txs - .into_iter() - .map(|tx| self.apply_debits(&tx, bals).map(|_| tx)) - .collect(); // Calling collect() here forces all debits to complete before moving on. + { + let last_ids_set = self.last_ids_set.read().unwrap(); + for (i, t) in txs.iter().enumerate() { + ctx.valid_ledger[i] = last_ids_set.get(&t.data.last_hash).is_some(); + } + } + self.page_table.acquire_validate_find(&txs, &mut ctx); + self.page_table.allocate_keys_with_ctx(&txs, &mut ctx); + self.page_table.load_pages_with_ctx(&txs, &mut ctx); - let debits = now.elapsed(); + let loaded = now.elapsed(); let now = Instant::now(); - - let res: Vec<_> = results - .into_iter() - .map(|result| { - result.map(|tx| { - self.apply_credits(&tx, bals); - tx - }) - }) - .collect(); + PageTable::execute_with_ctx(&txs, &mut ctx); + let executed = now.elapsed(); + let now = Instant::now(); + self.page_table.commit_release_with_ctx(&txs, &ctx); debug!( - "debits: {} us credits: {:?} us tx: {}", - duration_as_us(&debits), + "loaded: {} us executed: {:?} us commit: {:?} us tx: {}", + duration_as_us(&loaded), + duration_as_us(&executed), duration_as_us(&now.elapsed()), txs_len ); + let res: Vec<_> = txs + .into_iter() + .enumerate() + .map(|(i, t)| { + if !ctx.valid_ledger[i] { + trace!("LastIdNotFound"); + Err(BankError::LastIdNotFound(t.data.last_hash)) + } else if !ctx.lock[i] { + trace!("AccountInUse"); + Err(BankError::AccountInUse) + } else if !ctx.checked[i] || !ctx.commit[i] { + trace!("InsufficientFunds"); + Err(BankError::InsufficientFunds(t.data.keys[0])) + } else { + Ok(t) + } + }) + .collect(); let mut tx_count = 0; let mut err_count = 0; @@ -343,10 +337,14 @@ impl Bank { if 
err_count > 0 { info!("{} errors of {} txs", err_count, err_count + tx_count); } - self.transaction_count - .fetch_add(tx_count, Ordering::Relaxed); res } + pub fn process_transactions(&self, txs: Vec) -> Vec> { + self.process_calls(txs.into_iter().map(|t| t.call).collect()) + .into_iter() + .map(|v| v.map(|call| Transaction { call })) + .collect() + } fn process_entry(&self, entry: Entry) -> Result<()> { if !entry.transactions.is_empty() { @@ -438,13 +436,14 @@ impl Bank { .expect("invalid ledger: need at least 2 entries"); { let tx = &entry1.transactions[0]; - let deposit = if let Instruction::NewContract(contract) = &tx.instruction { + let instruction = tx.instruction(); + let deposit = if let Instruction::NewContract(contract) = instruction { contract.plan.final_payment() } else { None }.expect("invalid ledger, needs to start with a contract"); - self.apply_payment(&deposit, &mut self.balances.write().unwrap()); + self.force_deposit(deposit.to, deposit.tokens); } self.register_entry_id(&entry0.id); self.register_entry_id(&entry1.id); @@ -462,42 +461,41 @@ impl Bank { Ok((entry_count, tail)) } - /// Process a Witness Signature. Any payment plans waiting on this signature /// will progress one step. 
- fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> { - if let Occupied(mut e) = self - .pending - .write() - .expect("write() in apply_signature") - .entry(tx_sig) - { - e.get_mut().apply_witness(&Witness::Signature, &from); + fn apply_signature( + balances: &mut [Page], + pending: &mut HashMap, + tx_sig: Signature, + ) -> Result<()> { + if let Occupied(mut e) = pending.entry(tx_sig) { + e.get_mut() + .apply_witness(&Witness::Signature, &balances[0].owner); if let Some(payment) = e.get().final_payment() { - self.apply_payment(&payment, &mut self.balances.write().unwrap()); + //return the tokens back to the source + balances[0].balance += payment.tokens; + balances[1].balance -= payment.tokens; e.remove_entry(); } }; - Ok(()) } /// Process a Witness Timestamp. Any payment plans waiting on this timestamp /// will progress one step. - fn apply_timestamp(&self, from: PublicKey, dt: DateTime) -> Result<()> { + fn apply_timestamp( + page: &Page, + pending: &mut HashMap, + dt: DateTime, + ) -> Result<()> { // Check to see if any timelocked transactions can be completed. let mut completed = vec![]; // Hold 'pending' write lock until the end of this function. Otherwise another thread can // double-spend if it enters before the modified plan is removed from 'pending'. 
- let mut pending = self - .pending - .write() - .expect("'pending' write lock in apply_timestamp"); for (key, plan) in pending.iter_mut() { - plan.apply_witness(&Witness::Timestamp(dt), &from); - if let Some(payment) = plan.final_payment() { - self.apply_payment(&payment, &mut self.balances.write().unwrap()); + plan.apply_witness(&Witness::Timestamp(dt), &page.owner); + if let Some(_payment) = plan.final_payment() { completed.push(key.clone()); } } @@ -509,6 +507,31 @@ impl Bank { Ok(()) } + pub fn transfer_timestamp( + &self, + keypair: &KeyPair, + to: PublicKey, + dt: DateTime, + last_id: Hash, + version: u64, + ) -> Result { + let tx = Transaction::new_timestamp(keypair, to, dt, last_id, version); + let sig = tx.call.proofs[0]; + self.process_transaction(&tx).map(|_| sig) + } + pub fn transfer_signature( + &self, + keypair: &KeyPair, + to: PublicKey, + sig: Signature, + last_id: Hash, + version: u64, + ) -> Result { + let tx = Transaction::new_signature(keypair, to, sig, last_id, version); + let sig = tx.call.proofs[0]; + self.process_transaction(&tx).map(|_| sig) + } + /// Create, sign, and process a Transaction from `keypair` to `to` of /// `n` tokens where `last_id` is the last Entry ID observed by the client. 
pub fn transfer( @@ -517,9 +540,10 @@ impl Bank { keypair: &KeyPair, to: PublicKey, last_id: Hash, + version: u64, ) -> Result { - let tx = Transaction::new(keypair, to, n, last_id); - let sig = tx.sig; + let tx = Transaction::new(keypair, to, n, last_id, version); + let sig = tx.call.proofs[0]; self.process_transaction(&tx).map(|_| sig) } @@ -533,46 +557,48 @@ impl Bank { to: PublicKey, dt: DateTime, last_id: Hash, + version: u64, ) -> Result { - let tx = Transaction::new_on_date(keypair, to, dt, n, last_id); - let sig = tx.sig; + let tx = Transaction::new_on_date(keypair, to, dt, n, last_id, version); + let sig = tx.call.proofs[0]; self.process_transaction(&tx).map(|_| sig) } + //calculate how much is left in the contract + pub fn default_contract_get_balance(page: &Page) -> i64 { + let contract: Option> = deserialize(&page.memory).ok(); + let spendable = if let Some(pending) = contract { + pending + .values() + .flat_map(|plan| plan.final_payment().map(|p| p.tokens)) + .sum() + } else { + page.balance + }; + assert!(spendable <= page.balance); + spendable + } pub fn get_balance(&self, pubkey: &PublicKey) -> i64 { - let bals = self - .balances - .read() - .expect("'balances' read lock in get_balance"); - bals.get(pubkey).cloned().unwrap_or(0) + self.page_table.get_balance(pubkey).unwrap_or(0) } - pub fn transaction_count(&self) -> usize { - self.transaction_count.load(Ordering::Relaxed) + pub fn get_version(&self, pubkey: &PublicKey) -> (u64, Signature) { + self.page_table.get_version(pubkey).unwrap_or_default() } - pub fn has_signature(&self, signature: &Signature) -> bool { - let last_ids_sigs = self - .last_ids_sigs - .read() - .expect("'last_ids_sigs' read lock"); - for (_hash, signatures) in last_ids_sigs.iter() { - if signatures.0.contains(signature) { - return true; - } - } - false + pub fn transaction_count(&self) -> usize { + self.page_table.transaction_count() } } #[cfg(test)] mod tests { use super::*; - use bincode::serialize; use entry::next_entry; 
use entry::Entry; use entry_writer::{self, EntryWriter}; use hash::hash; + use logger; use signature::KeyPairUtil; use std::io::{BufReader, Cursor, Seek, SeekFrom}; @@ -583,11 +609,11 @@ mod tests { let bank = Bank::new(&mint); assert_eq!(bank.last_id(), mint.last_id()); - bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id()) + bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id(), 0) .unwrap(); assert_eq!(bank.get_balance(&pubkey), 1_000); - bank.transfer(500, &mint.keypair(), pubkey, mint.last_id()) + bank.transfer(500, &mint.keypair(), pubkey, mint.last_id(), 1) .unwrap(); assert_eq!(bank.get_balance(&pubkey), 1_500); assert_eq!(bank.transaction_count(), 2); @@ -598,38 +624,45 @@ mod tests { let mint = Mint::new(1); let pubkey = KeyPair::new().pubkey(); let bank = Bank::new(&mint); - assert_eq!( - bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()), - Err(BankError::NegativeTokens) + //NOTE(anatoly): page table will allow any transaction that can pay the fee to move forward + //but will not accept an overdraft, or creation of tokens + assert_matches!( + bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id(), 0), + Ok(_) ); - assert_eq!(bank.transaction_count(), 0); + assert_eq!(bank.transaction_count(), 1); + assert_eq!(bank.get_balance(&mint.pubkey()), 1); + assert_eq!(bank.get_balance(&pubkey), 0); } #[test] - fn test_account_not_found() { + fn test_account_with_no_funds() { let mint = Mint::new(1); let bank = Bank::new(&mint); let keypair = KeyPair::new(); assert_eq!( - bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()), - Err(BankError::AccountNotFound(keypair.pubkey())) + bank.transfer(1, &keypair, mint.pubkey(), mint.last_id(), 0), + //TODO(anatoly): page table treats this as a validate error + Err(BankError::InsufficientFunds(keypair.pubkey())) ); assert_eq!(bank.transaction_count(), 0); } #[test] - fn test_insufficient_funds() { + fn test_overdraft() { let mint = Mint::new(11_000); let bank = Bank::new(&mint); let pubkey = 
KeyPair::new().pubkey(); - bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id()) + bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id(), 0) .unwrap(); assert_eq!(bank.transaction_count(), 1); - assert_eq!( - bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()), - Err(BankError::InsufficientFunds(mint.pubkey())) + //NOTE(anatoly): page table will allow any transaction that can pay the fee to move forward + //but will not accept an overdraft + assert_matches!( + bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id(), 1), + Ok(_) ); - assert_eq!(bank.transaction_count(), 1); + assert_eq!(bank.transaction_count(), 2); let mint_pubkey = mint.keypair().pubkey(); assert_eq!(bank.get_balance(&mint_pubkey), 10_000); @@ -641,18 +674,19 @@ mod tests { let mint = Mint::new(10_000); let bank = Bank::new(&mint); let pubkey = KeyPair::new().pubkey(); - bank.transfer(500, &mint.keypair(), pubkey, mint.last_id()) + bank.transfer(500, &mint.keypair(), pubkey, mint.last_id(), 0) .unwrap(); assert_eq!(bank.get_balance(&pubkey), 500); } #[test] fn test_transfer_on_date() { + logger::setup(); let mint = Mint::new(1); let bank = Bank::new(&mint); let pubkey = KeyPair::new().pubkey(); let dt = Utc::now(); - bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id()) + bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id(), 0) .unwrap(); // Mint's balance will be zero because all funds are locked up. @@ -667,14 +701,15 @@ mod tests { // Now, acknowledge the time in the condition occurred and // that pubkey's funds are now available. - bank.apply_timestamp(mint.pubkey(), dt).unwrap(); + bank.transfer_timestamp(&mint.keypair(), pubkey, dt, mint.last_id(), 1) + .unwrap(); assert_eq!(bank.get_balance(&pubkey), 1); - // tx count is still 1, because we chose not to count timestamp transactions - // tx count. 
- assert_eq!(bank.transaction_count(), 1); + // PageTable counts all transactions + assert_eq!(bank.transaction_count(), 2); - bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction. + bank.transfer_timestamp(&mint.keypair(), pubkey, dt, mint.last_id(), 2) + .unwrap(); // <-- Attack! Attempt to process completed transaction. assert_ne!(bank.get_balance(&pubkey), 2); } @@ -685,7 +720,7 @@ mod tests { let pubkey = KeyPair::new().pubkey(); let dt = Utc::now(); let sig = bank - .transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id()) + .transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id(), 0) .unwrap(); // Assert the debit counts as a transaction. @@ -699,68 +734,37 @@ mod tests { assert_eq!(bank.get_balance(&pubkey), 0); // Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them. - bank.apply_signature(mint.pubkey(), sig).unwrap(); + bank.transfer_signature(&mint.keypair(), pubkey, sig, mint.last_id(), 1) + .unwrap(); assert_eq!(bank.get_balance(&mint.pubkey()), 1); assert_eq!(bank.get_balance(&pubkey), 0); - // Assert cancel doesn't cause count to go backward. - assert_eq!(bank.transaction_count(), 1); + // Assert each transaction is registered + assert_eq!(bank.transaction_count(), 2); - bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction. + bank.transfer_signature(&mint.keypair(), pubkey, sig, mint.last_id(), 2) + .unwrap(); // <-- Attack! Attempt to cancel completed transaction. 
assert_ne!(bank.get_balance(&mint.pubkey()), 2); } - #[test] - fn test_duplicate_transaction_signature() { - let mint = Mint::new(1); - let bank = Bank::new(&mint); - let sig = Signature::default(); - assert!( - bank.reserve_signature_with_last_id(&sig, &mint.last_id()) - .is_ok() - ); - assert_eq!( - bank.reserve_signature_with_last_id(&sig, &mint.last_id()), - Err(BankError::DuplicateSignature(sig)) - ); - } - - #[test] - fn test_forget_signature() { - let mint = Mint::new(1); - let bank = Bank::new(&mint); - let sig = Signature::default(); - bank.reserve_signature_with_last_id(&sig, &mint.last_id()) - .unwrap(); - bank.forget_signature_with_last_id(&sig, &mint.last_id()); - assert!( - bank.reserve_signature_with_last_id(&sig, &mint.last_id()) - .is_ok() - ); - } - - #[test] - fn test_has_signature() { - let mint = Mint::new(1); - let bank = Bank::new(&mint); - let sig = Signature::default(); - bank.reserve_signature_with_last_id(&sig, &mint.last_id()) - .expect("reserve signature"); - assert!(bank.has_signature(&sig)); - } - #[test] fn test_reject_old_last_id() { let mint = Mint::new(1); let bank = Bank::new(&mint); - let sig = Signature::default(); for i in 0..MAX_ENTRY_IDS { let last_id = hash(&serialize(&i).unwrap()); // Unique hash bank.register_entry_id(&last_id); } + let tx0 = Transaction::new( + &mint.keypair(), + mint.keypair().pubkey(), + 1, + mint.last_id(), + 0, + ); // Assert we're no longer able to use the oldest entry ID. 
assert_eq!( - bank.reserve_signature_with_last_id(&sig, &mint.last_id()), + bank.process_transaction(&tx0), Err(BankError::LastIdNotFound(mint.last_id())) ); } @@ -788,11 +792,13 @@ mod tests { let mint = Mint::new(2); let bank = Bank::new(&mint); let keypair = KeyPair::new(); - let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id()); - let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id()); + let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id(), 0); + let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id(), 0); let txs = vec![tx0, tx1]; let results = bank.process_transactions(txs); - assert!(results[1].is_err()); + assert!(results[0].is_ok()); + //TODO(anatoly): tx1 is rejected because it overlaps in memory with tx0 + assert_eq!(results[1], Err(BankError::AccountInUse)); // Assert bad transactions aren't counted. assert_eq!(bank.transaction_count(), 1); @@ -804,7 +810,7 @@ mod tests { let bank = Bank::new(&mint); let keypair = KeyPair::new(); let entry = next_entry(&mint.last_id(), 1, vec![]); - let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id); + let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id, 0); // First, ensure the TX is rejected because of the unregistered last ID assert_eq!( @@ -830,9 +836,9 @@ mod tests { let mut entries = Vec::with_capacity(length); let mut hash = mint.last_id(); let mut cur_hashes = 0; - for _ in 0..length { + for v in 0..length { let keypair = KeyPair::new(); - let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, hash); + let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, hash, v as u64); let entry = Entry::new_mut(&mut hash, &mut cur_hashes, vec![tx], false); entries.push(entry); } diff --git a/src/bin/bench-tps.rs b/src/bin/bench-tps.rs index 354946b5a4c350..248f7d1d4cc71f 100644 --- a/src/bin/bench-tps.rs +++ b/src/bin/bench-tps.rs @@ -121,12 +121,12 @@ fn generate_txs( let 
transactions: Vec<_> = if !reclaim { keypairs .par_iter() - .map(|keypair| Transaction::new(&id, keypair.pubkey(), 1, *last_id)) + .map(|keypair| Transaction::new_noplan(&id, keypair.pubkey(), 1, 0, *last_id, 0)) .collect() } else { keypairs .par_iter() - .map(|keypair| Transaction::new(keypair, id.pubkey(), 1, *last_id)) + .map(|keypair| Transaction::new_noplan(&keypair, id.pubkey(), 1, 0, *last_id, 0)) .collect() }; diff --git a/src/bin/wallet.rs b/src/bin/wallet.rs index d0417e5b9f6c23..c3c10d75b88170 100644 --- a/src/bin/wallet.rs +++ b/src/bin/wallet.rs @@ -27,7 +27,7 @@ enum WalletCommand { Balance, AirDrop(i64), Pay(i64, PublicKey), - Confirm(Signature), + Confirm(PublicKey, u64, Signature), } #[derive(Debug, Clone)] @@ -126,13 +126,29 @@ fn parse_args() -> Result> { ) .subcommand( SubCommand::with_name("confirm") - .about("Confirm your payment by signature") + .about("Confirm your payment by public key and version") + .arg( + Arg::with_name("pubkey") + // .index(1) + .value_name("PUBKEY") + .required(true) + .help("The account pubkey to confirm"), + ) .arg( Arg::with_name("signature") - .index(1) + // .index(1) .value_name("SIGNATURE") .required(true) - .help("The transaction signature to confirm"), + .help("The signature of the transaction to confirm"), + ) + .arg( + Arg::with_name("version") + // .index(2) + .long("version") + .value_name("NUMBER") + .takes_value(true) + .required(true) + .help("the account version to confirm"), ), ) .subcommand(SubCommand::with_name("balance").about("Get your balance")) @@ -190,16 +206,23 @@ fn parse_args() -> Result> { Ok(WalletCommand::Pay(tokens, to)) } ("confirm", Some(confirm_matches)) => { - let sig_vec = bs58::decode(confirm_matches.value_of("signature").unwrap()) + let pubkey_vec = bs58::decode(confirm_matches.value_of("pubkey").unwrap()) + .into_vec() + .expect("base58-encoded public key"); + let signature_vec = bs58::decode(confirm_matches.value_of("signature").unwrap()) .into_vec() .expect("base58-encoded 
signature"); + let version = confirm_matches.value_of("version").unwrap().parse()?; - if sig_vec.len() == std::mem::size_of::() { - let sig = Signature::new(&sig_vec); - Ok(WalletCommand::Confirm(sig)) + if pubkey_vec.len() == std::mem::size_of::() + && signature_vec.len() == std::mem::size_of::() + { + let pubkey = PublicKey::new(&pubkey_vec); + let signature = Signature::new(&signature_vec); + Ok(WalletCommand::Confirm(pubkey, version, signature)) } else { display_actions(); - Err(WalletError::BadParameter("Invalid signature".to_string())) + Err(WalletError::BadParameter("Invalid public key".to_string())) } } ("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance), @@ -274,13 +297,20 @@ fn process_command( } // If client has positive balance, spend tokens in {balance} number of transactions WalletCommand::Pay(tokens, to) => { - let last_id = client.get_last_id(); - let sig = client.transfer(tokens, &config.id, to, &last_id)?; - println!("{}", sig); + let sig = client + .retry_transfer(&config.id, &to, tokens, 30) + .expect("retry transfer"); + println!("{:?}", sig); + println!( + "{} {} {}", + bs58::encode(config.id.pubkey()).into_string(), + sig.0, + bs58::encode(sig.1).into_string(), + ); } // Confirm the last client transaction by signature - WalletCommand::Confirm(sig) => { - if client.check_signature(&sig) { + WalletCommand::Confirm(pubkey, version, signature) => { + if client.get_version(&pubkey) == (version + 1, signature) { println!("Confirmed"); } else { println!("Not found"); @@ -297,7 +327,7 @@ fn display_actions() { println!(" balance Get your account balance"); println!(" airdrop Request a batch of tokens"); println!(" pay Send tokens to a public key"); - println!(" confirm Confirm your last payment by signature"); + println!(" confirm Confirm your last payment by public key and version"); println!(); } diff --git a/src/crdt.rs b/src/crdt.rs index cbd6257055280f..5edfaf5b17dd3c 100644 --- a/src/crdt.rs +++ b/src/crdt.rs @@ -1767,12 +1767,13 
@@ mod tests { let rv = crdt.gossip_request().unwrap(); assert_eq!(rv.0, nxt.contact_info.ncp); - let nxt2 = NodeInfo::new_leader(&"127.0.0.2:1234".parse().unwrap()); + let mut nxt2 = NodeInfo::new_leader(&"127.0.0.2:1234".parse().unwrap()); assert_ne!(me.id, nxt2.id); assert_ne!(nxt.id, nxt2.id); crdt.insert(&nxt2); while now == crdt.alive[&nxt2.id] { - sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS)); + sleep(Duration::from_millis(100)); + nxt2.version += 1; crdt.insert(&nxt2); } let len = crdt.table.len() as u64; diff --git a/src/drone.rs b/src/drone.rs index c0ec1acf194b73..8a6e5ed88f8296 100644 --- a/src/drone.rs +++ b/src/drone.rs @@ -6,14 +6,12 @@ use influx_db_client as influxdb; use metrics; -use signature::Signature; -use signature::{KeyPair, PublicKey}; +use signature::{KeyPair, PublicKey, Signature}; use std::io; use std::io::{Error, ErrorKind}; use std::net::{IpAddr, SocketAddr, UdpSocket}; use std::time::Duration; use thin_client::ThinClient; -use transaction::Transaction; pub const TIME_SLICE: u64 = 60; pub const REQUEST_CAP: u64 = 1_000_000; @@ -96,54 +94,54 @@ impl Drone { } pub fn send_airdrop(&mut self, req: DroneRequest) -> Result { - let request_amount: u64; let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut client = ThinClient::new( self.requests_addr, requests_socket, self.transactions_addr, transactions_socket, ); - let last_id = client.get_last_id(); - - let tx = match req { + match req { DroneRequest::GetAirdrop { airdrop_request_amount, client_public_key, } => { info!( - "Requesting airdrop of {} to {:?}", + "Requesting airdrop of {} to {}", airdrop_request_amount, client_public_key ); - request_amount = airdrop_request_amount; - Transaction::new( - &self.mint_keypair, - client_public_key, - airdrop_request_amount as i64, - last_id, - ) + let request_amount = airdrop_request_amount; + if self.check_request_limit(request_amount) { + self.request_current 
+= request_amount; + metrics::submit( + influxdb::Point::new("drone") + .add_tag("op", influxdb::Value::String("airdrop".to_string())) + .add_field( + "request_amount", + influxdb::Value::Integer(request_amount as i64), + ) + .add_field( + "request_current", + influxdb::Value::Integer(self.request_current as i64), + ) + .to_owned(), + ); + let rv = client.retry_transfer( + &self.mint_keypair, + &client_public_key, + request_amount as i64, + 10, + ); + if let Some(reply) = rv { + Ok(reply.1) + } else { + Err(Error::new(ErrorKind::Other, "transaction failed")) + } + } else { + Err(Error::new(ErrorKind::Other, "token limit reached")) + } } - }; - if self.check_request_limit(request_amount) { - self.request_current += request_amount; - metrics::submit( - influxdb::Point::new("drone") - .add_tag("op", influxdb::Value::String("airdrop".to_string())) - .add_field( - "request_amount", - influxdb::Value::Integer(request_amount as i64), - ) - .add_field( - "request_current", - influxdb::Value::Integer(self.request_current as i64), - ) - .to_owned(), - ); - client.transfer_signed(&tx) - } else { - Err(Error::new(ErrorKind::Other, "token limit reached")) } } } @@ -316,15 +314,13 @@ mod tests { airdrop_request_amount: 50, client_public_key: bob_pubkey, }; - let bob_sig = drone.send_airdrop(bob_req).unwrap(); - assert!(client.poll_for_signature(&bob_sig).is_ok()); + let _ = drone.send_airdrop(bob_req).expect("airdrop succeeded"); let carlos_req = DroneRequest::GetAirdrop { airdrop_request_amount: 5_000_000, client_public_key: carlos_pubkey, }; - let carlos_sig = drone.send_airdrop(carlos_req).unwrap(); - assert!(client.poll_for_signature(&carlos_sig).is_ok()); + let _ = drone.send_airdrop(carlos_req).expect("airdrop2 succeeded"); let bob_balance = client.get_balance(&bob_pubkey); info!("Small request balance: {:?}", bob_balance); diff --git a/src/entry.rs b/src/entry.rs index 4a9393a3b6c678..5836924aa867d7 100644 --- a/src/entry.rs +++ b/src/entry.rs @@ -137,9 +137,12 @@ impl 
Entry { } } +//TODO(anatoly): call contains multiple signatures now fn add_transaction_data(hash_data: &mut Vec, tx: &Transaction) { hash_data.push(0u8); - hash_data.extend_from_slice(&tx.sig.as_ref()); + for sig in tx.sigs() { + hash_data.extend_from_slice(sig.as_ref()); + } } /// Creates the hash `num_hashes` after `start_hash`. If the transaction contains @@ -204,8 +207,9 @@ mod tests { // First, verify entries let keypair = KeyPair::new(); - let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero); - let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero); + let pubkey = KeyPair::new().pubkey(); + let tx0 = Transaction::new(&keypair, pubkey, 0, zero, 0); + let tx1 = Transaction::new(&keypair, pubkey, 1, zero, 1); let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false); assert!(e0.verify(&zero)); @@ -221,8 +225,9 @@ mod tests { // First, verify entries let keypair = KeyPair::new(); - let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero); - let tx1 = Transaction::new_signature(&keypair, Default::default(), zero); + let tx0 = Transaction::new_timestamp(&keypair, keypair.pubkey(), Utc::now(), zero, 0); + let tx1 = + Transaction::new_signature(&keypair, keypair.pubkey(), Default::default(), zero, 1); let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false); assert!(e0.verify(&zero)); @@ -244,7 +249,7 @@ mod tests { assert_eq!(tick.id, zero); let keypair = KeyPair::new(); - let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero); + let tx0 = Transaction::new_timestamp(&keypair, keypair.pubkey(), Utc::now(), zero, 0); let entry0 = next_entry(&zero, 1, vec![tx0.clone()]); assert_eq!(entry0.num_hashes, 1); assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0])); @@ -255,7 +260,7 @@ mod tests { fn test_next_entry_panic() { let zero = Hash::default(); let keypair = KeyPair::new(); - let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero); + let tx = Transaction::new(&keypair, keypair.pubkey(), 0, 
zero, 0); next_entry(&zero, 0, vec![tx]); } } diff --git a/src/entry_writer.rs b/src/entry_writer.rs index 4e5e1857f8f58c..52f6609dad7484 100644 --- a/src/entry_writer.rs +++ b/src/entry_writer.rs @@ -103,7 +103,7 @@ mod tests { use super::*; use ledger; use mint::Mint; - use packet::BLOB_DATA_SIZE; + use packet::{BLOB_DATA_SIZE, PACKET_DATA_SIZE}; use signature::{KeyPair, KeyPairUtil}; use std::io::Cursor; use transaction::Transaction; @@ -116,10 +116,10 @@ mod tests { let writer = io::sink(); let mut entry_writer = EntryWriter::new(&bank, writer); let keypair = KeyPair::new(); - let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id()); + let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id(), 0); // NOTE: if Entry grows to larger than a transaction, the code below falls over - let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size + let threshold = (BLOB_DATA_SIZE / PACKET_DATA_SIZE) - 1; // PACKET_DATA_SIZE is transaction size // Verify large entries are split up and the first sets has_more. 
let txs = vec![tx.clone(); threshold * 2]; diff --git a/src/ledger.rs b/src/ledger.rs index 9a66f57628bc41..e7a3968c6e351c 100644 --- a/src/ledger.rs +++ b/src/ledger.rs @@ -347,8 +347,9 @@ mod tests { }, one, 1, + 0, ); - let tx1 = Transaction::new_timestamp(&keypair, Utc::now(), one); + let tx1 = Transaction::new_timestamp(&keypair, keypair.pubkey(), Utc::now(), one, 0); // // TODO: this magic number and the mix of transaction types // is designed to fill up a Blob more or less exactly, @@ -367,7 +368,6 @@ mod tests { #[test] fn test_entries_to_blobs() { let entries = make_test_entries(); - let blob_recycler = BlobRecycler::default(); let mut blob_q = VecDeque::new(); entries.to_blobs(&blob_recycler, &mut blob_q); @@ -398,8 +398,9 @@ mod tests { }, next_id, 2, + 0, ); - let tx_large = Transaction::new(&keypair, keypair.pubkey(), 1, next_id); + let tx_large = Transaction::new(&keypair, keypair.pubkey(), 1, next_id, 0); let tx_small_size = serialized_size(&tx_small).unwrap(); let tx_large_size = serialized_size(&tx_large).unwrap(); @@ -408,7 +409,6 @@ mod tests { // NOTE: if Entry grows to larger than a transaction, the code below falls over let threshold = (BLOB_DATA_SIZE / PACKET_DATA_SIZE) - 1; - // verify no split let transactions = vec![tx_small.clone(); threshold]; let entries0 = next_entries(&id, 0, transactions.clone()); diff --git a/src/lib.rs b/src/lib.rs index f81ce846c44cbc..58c245e4532aae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -31,6 +31,7 @@ pub mod mint; pub mod nat; pub mod ncp; pub mod packet; +pub mod page_table; pub mod payment_plan; pub mod record_stage; pub mod recorder; diff --git a/src/mint.rs b/src/mint.rs index 4937b69555d2e7..2c6fa74caf82e8 100644 --- a/src/mint.rs +++ b/src/mint.rs @@ -52,7 +52,7 @@ impl Mint { pub fn create_transactions(&self) -> Vec { let keypair = self.keypair(); - let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed()); + let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, 
self.seed(), 0); vec![tx] } @@ -74,9 +74,9 @@ mod tests { fn test_create_transactions() { let mut transactions = Mint::new(100).create_transactions().into_iter(); let tx = transactions.next().unwrap(); - if let Instruction::NewContract(contract) = tx.instruction { + if let Instruction::NewContract(contract) = tx.instruction() { if let Plan::Budget(Budget::Pay(payment)) = contract.plan { - assert_eq!(tx.from, payment.to); + assert_eq!(*tx.from(), payment.to); } } assert_eq!(transactions.next(), None); diff --git a/src/packet.rs b/src/packet.rs index 6687693d481551..8193a1032a676b 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -22,7 +22,7 @@ pub type BlobRecycler = Recycler; pub const NUM_PACKETS: usize = 1024 * 8; pub const BLOB_SIZE: usize = 64 * 1024; pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE; -pub const PACKET_DATA_SIZE: usize = 256; +pub const PACKET_DATA_SIZE: usize = 512; pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE; #[derive(Clone, Default, Debug)] @@ -455,6 +455,7 @@ impl Blob { mod tests { use packet::{ to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, Recycler, NUM_PACKETS, + PACKET_DATA_SIZE, }; use request::Request; use std::collections::VecDeque; @@ -523,12 +524,12 @@ mod tests { p.write().unwrap().packets.resize(10, Packet::default()); for m in p.write().unwrap().packets.iter_mut() { m.meta.set_addr(&addr); - m.meta.size = 256; + m.meta.size = PACKET_DATA_SIZE; } p.read().unwrap().send_to(&sender).unwrap(); p.write().unwrap().recv_from(&reader).unwrap(); for m in p.write().unwrap().packets.iter_mut() { - assert_eq!(m.meta.size, 256); + assert_eq!(m.meta.size, PACKET_DATA_SIZE); assert_eq!(m.meta.addr(), saddr); } diff --git a/src/page_table.rs b/src/page_table.rs new file mode 100644 index 00000000000000..f56f6b56815553 --- /dev/null +++ b/src/page_table.rs @@ -0,0 +1,1849 @@ +/// The `page_table` module implements a fast way to process requests to smart contracts including +/// simple 
transactions. Benchmarks show about 125,791 ns/iter of 256 Tx's which is near 2m TPS in +/// single threaded mode. +/// +/// This can be safely pipelined. The memory lock ensures that all pages +/// traveling through the system are non overlapping, and using the WRITE lock durring allocation +/// ensures that they are present when the READ lock is held. To safely execute the contracts in +/// parallel the READ lock must be held while loading the pages, and executing the contract. +/// +/// +/// Main differences from the `Bank` +/// 1. `last_count` and `last_hash` are PoH identifiers, these are processed outside of this pipeline +/// 2. `Call.version` is used to prevent duplicate spends, each PublicKey has 2**64 number of calls +/// it can make. +/// 3. `Page` entry is similar to an `Account`, but it also has a `contract` that owns it. That +/// tag allows the contract to Write to the memory owned by the page. Contracts can spend money +use bank::{Bank, BANK_PROCESS_TRANSACTION_METHOD}; +use bincode::{deserialize, serialize}; +use hash::{hash, Hash}; +use packet::NUM_PACKETS; +use rand::{thread_rng, Rng, RngCore}; +use signature::{KeyPair, KeyPairUtil, PublicKey, Signature}; +use std::collections::{BTreeMap, HashSet}; +use std::hash::{BuildHasher, Hasher}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{Mutex, RwLock}; + +//run with 'cargo +nightly bench --features=unstable,parex page_table::bench' +#[cfg(feature = "parex")] +use rayon::prelude::*; + +//these types vs just u64 had a 40% impact on perf without FastHasher +//type Hash = [u64; 4]; +//type PublicKey = [u64; 4]; +//type Signature = [u64; 8]; +type ContractId = [u64; 4]; + +pub const DEFAULT_CONTRACT: ContractId = [0u64; 4]; + +/// SYSTEM interface, same for very contract, methods 0 to 127 +/// method 0 +/// reallocate +/// spend the funds from the call to the first recepient +pub fn system_0_realloc(call: &Call, pages: &mut [Page]) { + if call.data.contract == DEFAULT_CONTRACT { + let 
size: u64 = deserialize(&call.data.user_data).unwrap(); + pages[0].memory.resize(size as usize, 0u8); + } +} +/// method 1 +/// assign +/// assign the page to a contract +pub fn system_1_assign(call: &Call, pages: &mut [Page]) { + let contract: ContractId = deserialize(&call.data.user_data).unwrap(); + if call.data.contract == DEFAULT_CONTRACT { + pages[0].contract = contract; + //zero out the memory in pages[0].memory + //Contracts need to own the state of that data otherwise a use could fabricate the state and + //manipulate the contract + pages[0].memory.clear(); + } +} +/// DEFAULT_CONTRACT interface +/// All contracts start with 128 +/// method 128 +/// move_funds +/// spend the funds from the call to the first recepient +pub const DEFAULT_CONTRACT_MOVE_FUNDS: u8 = 128; +pub fn default_contract_128_move_funds(call: &Call, pages: &mut [Page]) { + let amount: i64 = deserialize(&call.data.user_data).unwrap(); + pages[0].balance -= amount; + pages[1].balance += amount; +} + +#[cfg(test)] +pub fn default_contract_254_create_new_other(call: &Call, pages: &mut [Page]) { + let amount: i64 = deserialize(&call.data.user_data).unwrap(); + pages[1].balance += amount; +} + +#[cfg(test)] +pub fn default_contract_253_create_new_mine(call: &Call, pages: &mut [Page]) { + let amount: i64 = deserialize(&call.data.user_data).unwrap(); + pages[0].balance += amount; +} + +//157,173 ns/iter vs 125,791 ns/iter with using this hasher, 110,000 ns with u64 as the key +struct FastHasher { + //generates some seeds to pluck bytes out of the keys + //since these are random every time we create a new hashset, there is no way to + //predict what these will be for the node or the network + rand32: [usize; 8], + rand8: [usize; 8], + data64: [u8; 64], + len: usize, +} + +impl Hasher for FastHasher { + /// pluck the bytes out of the data and glue it into a u64 + fn finish(&self) -> u64 { + let seed = if self.len == 32 { + self.rand32 + } else if self.len == 8 { + self.rand8 + } else { + 
assert!(false); + [0xfefe_fefe_fefe_fefe; 8] + }; + assert_eq!(seed.len(), 8); + (self.data64[seed[0]] as u64) + | (u64::from(self.data64[seed[1]]) << (8)) + | (u64::from(self.data64[seed[2]]) << (2 * 8)) + | (u64::from(self.data64[seed[3]]) << (3 * 8)) + | (u64::from(self.data64[seed[4]]) << (4 * 8)) + | (u64::from(self.data64[seed[5]]) << (5 * 8)) + | (u64::from(self.data64[seed[6]]) << (6 * 8)) + | (u64::from(self.data64[seed[7]]) << (7 * 8)) + } + fn write(&mut self, bytes: &[u8]) { + assert!(bytes.len() < 64); + self.data64[..bytes.len()].copy_from_slice(bytes); + self.len = bytes.len(); + } +} + +impl Default for FastHasher { + fn default() -> FastHasher { + fn gen_rand(size: usize, rand: &mut [usize]) { + let mut seed: Vec<_> = (0..size).collect(); + assert!(seed.len() >= 8); + thread_rng().shuffle(&mut seed); + rand[0..8].copy_from_slice(&seed[0..8]); + } + + let mut rand32 = [0usize; 8]; + gen_rand(32, &mut rand32); + let mut rand8 = [0usize; 8]; + gen_rand(8, &mut rand8); + + FastHasher { + rand32, + rand8, + data64: [0; 64], + len: 0, + } + } +} + +impl BuildHasher for FastHasher { + type Hasher = FastHasher; + fn build_hasher(&self) -> Self::Hasher { + FastHasher { + rand32: self.rand32, + rand8: self.rand8, + data64: [0; 64], + len: 0, + } + } +} + +/// Generic Page for the PageTable +#[derive(Clone)] +pub struct Page { + /// key that indexes this page + pub owner: PublicKey, + /// contract that owns this page + /// Only the contract can write to the `memory` vector + pub contract: ContractId, + /// balance that belongs to owner + pub balance: i64, + /// Version used for reply attacks + /// The `CallData::version` field must match the key[0]'s Page::version field + /// Once its processed, the version number is incremented. 
+ pub version: u64, + /// signature of the last transaction that was called with this page as the primary key + pub signature: Signature, + /// hash of the page data + pub memhash: Hash, + /// The following could be in a separate structure + pub memory: Vec, +} + +impl Page { + pub fn new(owner: PublicKey, contract: ContractId, balance: i64) -> Page { + Page { + owner, + contract, + balance, + version: 0, + signature: Signature::default(), + memhash: Hash::default(), + memory: vec![], + } + } +} +impl Default for Page { + fn default() -> Page { + Page { + owner: PublicKey::default(), + contract: DEFAULT_CONTRACT, + balance: 0, + version: 0, + signature: Signature::default(), + memhash: Hash::default(), + memory: vec![], + } + } +} +/// Call definition +/// Signed portion +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct CallData { + // TODO(anatoly): allow for read only pages + /// number of keys to load, aka the to key + /// keys[0] is the caller's key + pub keys: Vec, + + /// the set of PublicKeys that are required to have a proof + pub required_proofs: Vec, + + /// PoH data + /// last id PoH observed by the sender + pub last_count: u64, + /// last PoH hash observed by the sender + pub last_hash: Hash, + + /// Program + /// the address of the program we want to call + pub contract: ContractId, + /// OS scheduling fee + pub fee: i64, + /// struct version to prevent duplicate spends + /// Calls with a version <= Page.version are rejected + pub version: u64, + /// method to call in the contract + pub method: u8, + /// usedata in bytes + pub user_data: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct Call { + /// Signatures and Keys + /// (signature, key index) + /// This vector contains a tuple of signatures, and the key index the signature is for + /// proofs[0] is always key[0] + pub proofs: Vec, + pub data: CallData, +} + +/// simple transaction over a Call +/// TODO: The pipeline will need to pass 
the `destination` public keys in a side buffer to generalize +/// this +impl Call { + /// Create a new Call object + pub fn new( + keys: Vec, + required_proofs: Vec, + last_count: u64, + last_hash: Hash, + contract: ContractId, + version: u64, + fee: i64, + method: u8, + user_data: Vec, + ) -> Self { + Call { + proofs: vec![], + data: CallData { + keys, + required_proofs, + last_count, + last_hash, + contract, + fee, + version, + method, + user_data, + }, + } + } + pub fn new_tx( + from: PublicKey, + last_count: u64, + last_hash: Hash, + amount: i64, + fee: i64, + version: u64, + to: PublicKey, + ) -> Self { + let user_data = serialize(&amount).unwrap(); + Self::new( + vec![from, to], + vec![0], + last_count, + last_hash, + DEFAULT_CONTRACT, + version, + fee, + DEFAULT_CONTRACT_MOVE_FUNDS, + user_data, + ) + } + + /// Get the transaction data to sign. + pub fn get_sign_data(&self) -> Vec { + serialize(&self.data).expect("serialize CallData") + } + + pub fn append_signature(&mut self, keypair: &KeyPair) { + let sign_data = self.get_sign_data(); + let signature = Signature::new(keypair.sign(&sign_data).as_ref()); + if self.proofs.len() >= self.data.keys.len() + || Some(&keypair.pubkey()) != self.data.keys.get(self.proofs.len()) + { + warn!("signature is for an invalid pubkey"); + } + self.proofs.push(signature); + } + /// Verify only the transaction signature. 
+ pub fn verify_sig(&self) -> bool { + warn!("slow transaction signature verification called"); + let sign_data = self.get_sign_data(); + self.proofs + .iter() + .zip(&self.data.required_proofs) + .map(|(sig, index)| sig.verify(&self.data.keys[*index as usize].as_ref(), &sign_data)) + .all(|y| y) + } + + fn rand_key() -> PublicKey { + let mut r = thread_rng(); + let mut data = [0u8; 32]; + r.fill_bytes(&mut data); + PublicKey::new(&data) + } + fn rand_sig() -> Signature { + let mut r = thread_rng(); + let mut data = [0u8; 64]; + r.fill_bytes(&mut data); + Signature::new(&data) + } + fn rand_hash() -> Hash { + let mut r = thread_rng(); + let mut data = [0u8; 32]; + r.fill_bytes(&mut data); + hash(&data) + } + pub fn random_tx() -> Call { + //sanity check + assert_ne!(Self::rand_key(), Self::rand_key()); + let mut tx = Self::new_tx( + Self::rand_key(), + 0, + Self::rand_hash(), + 1, + 1, + 0, + Self::rand_key(), + ); + tx.proofs.push(Self::rand_sig()); + tx + } +} + +pub struct AllocatedPages { + max: usize, + allocated: BTreeMap, + free: Vec, +} +impl Default for AllocatedPages { + fn default() -> Self { + AllocatedPages { + max: 0, + allocated: BTreeMap::new(), + free: vec![], + } + } +} +impl AllocatedPages { + pub fn lookup(&self, key: &PublicKey) -> Option { + self.allocated.get(key).cloned().map(|x| x as usize) + } + + pub fn free(&mut self, key: &PublicKey) { + let page = self.lookup(key).unwrap(); + self.free.push(page); + self.allocated.remove(key); + } + pub fn allocate(&mut self, key: PublicKey) -> usize { + let page = if let Some(p) = self.free.pop() { + p + } else { + let p = self.max; + self.max += 1; + p + }; + let old = self.allocated.insert(key, page); + assert!(old.is_none()); + page + } +} + +pub struct PageTable { + /// a map from page public keys, to index into the page_table + allocated_pages: RwLock, + // TODO(anatoly): allow for read only pages + // read only pages would need a ref count per page, so a HashMap is probably a better structure + 
/// locked pages that are currently processed + mem_locks: Mutex>, + /// entries of Pages + page_table: RwLock>, + transaction_count: AtomicUsize, +} + +pub const N: usize = NUM_PACKETS; +pub const K: usize = 16; +pub struct Context { + pub valid_ledger: Vec, + pub lock: Vec, + needs_alloc: Vec, + pub checked: Vec, + pages: Vec>>, + loaded_page_table: Vec>, + pub commit: Vec, +} +impl Default for Context { + fn default() -> Self { + let valid_ledger = vec![false; N]; + let lock = vec![false; N]; + let needs_alloc = vec![false; N]; + let checked = vec![false; N]; + let pages = vec![vec![None; K]; N]; + let loaded_page_table: Vec> = vec![vec![Page::default(); K]; N]; + let commit = vec![false; N]; + Context { + valid_ledger, + lock, + needs_alloc, + checked, + pages, + loaded_page_table, + commit, + } + } +} + +impl Default for PageTable { + fn default() -> Self { + PageTable { + allocated_pages: RwLock::new(AllocatedPages::default()), + mem_locks: Mutex::new(HashSet::with_hasher(FastHasher::default())), + page_table: RwLock::new(vec![]), + transaction_count: AtomicUsize::new(0), + } + } +} + +impl PageTable { + // for each call in the packet acquire the memory lock for the pages it references if possible + pub fn acquire_memory_lock( + &self, + packet: &[Call], + valid_ledger: &[bool], + acquired_memory: &mut [bool], + ) { + //holds mem_locks mutex + let mut mem_locks = self.mem_locks.lock().unwrap(); + for (i, tx) in packet.iter().enumerate() { + acquired_memory[i] = false; + if !valid_ledger[i] { + continue; + } + let collision: u64 = tx + .data + .keys + .iter() + .map({ |k| mem_locks.contains(k) as u64 }) + .sum(); + trace!("contains count: {}", collision); + acquired_memory[i] = collision == 0; + if collision == 0 { + for k in &tx.data.keys { + mem_locks.insert(*k); + } + } + } + } + pub fn acquire_validate_find(&self, transactions: &[Call], ctx: &mut Context) { + self.acquire_memory_lock(&transactions, &ctx.valid_ledger, &mut ctx.lock); + 
self.validate_call(&transactions, &ctx.lock, &mut ctx.checked); + self.find_new_keys( + &transactions, + &ctx.checked, + &mut ctx.needs_alloc, + &mut ctx.pages, + ); + } + pub fn allocate_keys_with_ctx(&self, transactions: &[Call], ctx: &mut Context) { + self.allocate_keys( + &transactions, + &ctx.checked, + &ctx.needs_alloc, + &mut ctx.pages, + ); + } + pub fn execute_with_ctx(transactions: &[Call], ctx: &mut Context) { + PageTable::execute( + &transactions, + &ctx.checked, + &mut ctx.loaded_page_table, + &mut ctx.commit, + ); + } + + /// validate that we can process the fee and its not a dup call + pub fn validate_call( + &self, + packet: &[Call], + acquired_memory: &Vec, + checked: &mut Vec, + ) { + let allocated_pages = self.allocated_pages.read().unwrap(); + let page_table = self.page_table.read().unwrap(); + for (i, tx) in packet.iter().enumerate() { + checked[i] = false; + if !acquired_memory[i] { + continue; + } + let caller = &tx.data.keys[0]; + if let Some(memix) = allocated_pages.lookup(caller) { + if let Some(page) = page_table.get(memix) { + assert_eq!(page.owner, *caller); + // skip calls that are to old + // this check prevents retransmitted transactions from being processed twice + // page_table[keys[0]].version + if page.version != tx.data.version { + trace!( + "version is not correct {} {}", + page.version, + tx.data.version + ); + continue; + } + // caller page must belong to the contract + // TODO(anatoly): we could relax this, and have the contract itself + // do this check for pages it cares about + if page.contract != tx.data.contract { + trace!( + "contract is not correct {:x} {:x}", + page.contract[0], + tx.data.contract[0] + ); + continue; + } + // caller page must cover the tx.data.fee + if page.balance >= tx.data.fee { + checked[i] = true; + } + } + } + } + } + pub fn find_new_keys( + &self, + packet: &[Call], + checked: &Vec, + needs_alloc: &mut Vec, + pages: &mut Vec>>, + ) { + //holds READ lock to the page table + let allocated_pages 
= self.allocated_pages.read().unwrap(); + for (i, tx) in packet.iter().enumerate() { + if !checked[i] { + continue; + } + needs_alloc[i] = false; + for (j, k) in tx.data.keys.iter().enumerate() { + let lookup = allocated_pages.lookup(k); + needs_alloc[i] = needs_alloc[i] || lookup.is_none(); + pages[i][j] = lookup; + } + } + } + pub fn force_deposit_to( + page_table: &mut Vec, + allocated_pages: &mut AllocatedPages, + owner: PublicKey, + contract: ContractId, + amount: i64, + ) { + let page = Page::new(owner, contract, amount); + let ix = allocated_pages.allocate(owner) as usize; + if page_table.len() <= ix { + page_table.resize(ix + 1, page); + } else { + page_table[ix] = page; + } + } + + pub fn force_deposit(&self, owner: PublicKey, contract: ContractId, amount: i64) { + let mut allocated_pages = self.allocated_pages.write().unwrap(); + let mut page_table = self.page_table.write().unwrap(); + Self::force_deposit_to( + &mut page_table, + &mut allocated_pages, + owner, + contract, + amount, + ); + } + + pub fn transaction_count(&self) -> usize { + self.transaction_count.load(Ordering::Relaxed) + } + + /// used for testing + pub fn bump_versions(&self, packet: &[Call]) { + let allocated_pages = self.allocated_pages.write().unwrap(); + let mut page_table = self.page_table.write().unwrap(); + packet.iter().for_each(|tx| { + allocated_pages.lookup(&tx.data.keys[0]).map(|ix| { + page_table.get_mut(ix).map(|page| { + page.version += 1; + }); + }); + }); + } + + /// used for testing + pub fn force_allocate(&self, packet: &Vec, owner: bool, amount: i64) { + let mut allocated_pages = self.allocated_pages.write().unwrap(); + let mut page_table = self.page_table.write().unwrap(); + for tx in packet.iter() { + let key = if owner { + tx.data.keys[0] + } else { + tx.data.keys[1] + }; + Self::force_deposit_to( + &mut page_table, + &mut allocated_pages, + key, + tx.data.contract, + amount, + ); + } + } + /// used for testing + pub fn sanity_check_pages( + &self, + txs: &Vec, + 
checked: &Vec, + pages: &Vec>>, + ) { + let page_table = self.page_table.read().unwrap(); + //while we hold the write lock, this is where we can create another anonymouns page + //with copy and write, and send this table to the vote signer + for (i, tx) in txs.iter().enumerate() { + if checked[i] { + assert!(pages[i][0].is_some()); + } + for (p, k) in pages[i].iter().zip(&tx.data.keys) { + if p.is_some() { + assert_eq!(*k, page_table[p.unwrap()].owner); + } + } + } + } + pub fn allocate_keys( + &self, + packet: &[Call], + checked: &[bool], + needs_alloc: &[bool], + pages: &mut Vec>>, + ) { + let mut allocated_pages = self.allocated_pages.write().unwrap(); + let mut page_table = self.page_table.write().unwrap(); + for (i, tx) in packet.iter().enumerate() { + if !checked[i] { + continue; + } + if !needs_alloc[i] { + continue; + } + for (j, key) in tx.data.keys.iter().enumerate() { + if pages[i][j].is_some() { + continue; + } + let page = Page { + owner: *key, + contract: tx.data.contract, + version: 0, + balance: 0, + signature: Signature::default(), + memhash: Hash::default(), + memory: vec![], + }; + let ix = allocated_pages.allocate(*key) as usize; + //recheck since we are getting a new lock + if page_table.len() <= ix { + trace!("reallocating page table {}", ix); + //safe to do while the WRITE lock is held + page_table.resize(ix + 1, Page::default()); + } + page_table[ix] = page; + pages[i][j] = Some(ix); + } + } + } + + pub fn load_pages( + &self, + packet: &Vec, + checked: &Vec, + pages: &Vec>>, + loaded_page_table: &mut Vec>, + ) { + let page_table = self.page_table.read().unwrap(); + for (i, tx) in packet.into_iter().enumerate() { + if !checked[i] { + continue; + } + for (j, (_, oix)) in tx.data.keys.iter().zip(pages[i].iter()).enumerate() { + let ix = oix.expect("checked pages should be loadable"); + loaded_page_table[i][j] = page_table[ix].clone(); + } + } + } + pub fn load_pages_with_ctx(&self, packet: &Vec, ctx: &mut Context) { + self.load_pages(packet, 
&ctx.checked, &ctx.pages, &mut ctx.loaded_page_table); + } + + /// calculate the balances in the loaded pages + /// at the end of the contract the balance must be the same + /// and contract can only spend tokens from pages assigned to it + fn validate_balances(tx: &Call, pre_pages: &Vec, post_pages: &Vec) -> bool { + // contract can spend any of the tokens it owns + for ((pre, post), _tx) in pre_pages + .iter() + .zip(post_pages.iter()) + .zip(tx.data.keys.iter()) + { + if pre.contract != tx.data.contract && pre.balance <= post.balance { + return false; + } + } + // contract can't spend any of the tokens it doesn't own + let pre_sum: i64 = pre_pages + .iter() + .zip(tx.data.keys.iter()) + .map(|(pre, _)| pre.balance) + .sum(); + let post_sum: i64 = post_pages + .iter() + .zip(tx.data.keys.iter()) + .map(|(pre, _)| pre.balance) + .sum(); + pre_sum == post_sum + } + + /// parallel execution of contracts should be possible here since all the alls have no + /// dependencies + pub fn execute( + // Pass the _allocated_pages argument to make sure the lock is held for this call + packet: &[Call], + checked: &[bool], + loaded_page_table: &mut Vec>, + commit: &mut [bool], + ) { + #[cfg(not(feature = "parex"))] + let iter = packet + .into_iter() + .zip(loaded_page_table) + .zip(checked) + .zip(commit); + #[cfg(feature = "parex")] + let iter = packet + .into_par_iter() + .zip(loaded_page_table) + .zip(checked) + .zip(commit); + + iter.for_each(|(((tx, loaded_pages), checked), commit)| { + *commit = false; + if !checked { + return; + } + // fee is paid + *commit = true; + loaded_pages[0].balance -= tx.data.fee; + //update the version + //TODO(anatoly): TODO wrap around test + loaded_pages[0].version += 1; + loaded_pages[0].signature = tx.proofs[0]; + + let mut call_pages: Vec = tx + .data + .keys + .iter() + .zip(loaded_pages.iter()) + .map(|(_, x)| x.clone()) + .collect(); + + // Find the method + match (tx.data.contract, tx.data.method) { + // system interface + // everyone 
has the same reallocate + (_, 0) => system_0_realloc(&tx, &mut call_pages), + (_, 1) => system_1_assign(&tx, &mut call_pages), + // contract methods + (DEFAULT_CONTRACT, DEFAULT_CONTRACT_MOVE_FUNDS) => { + default_contract_128_move_funds(&tx, &mut call_pages) + } + (DEFAULT_CONTRACT, BANK_PROCESS_TRANSACTION_METHOD) => { + Bank::default_contract_129_process_transaction(&tx, &mut call_pages) + } + #[cfg(test)] + (DEFAULT_CONTRACT, 254) => { + default_contract_254_create_new_other(&tx, &mut call_pages) + } + #[cfg(test)] + (DEFAULT_CONTRACT, 253) => { + default_contract_253_create_new_mine(&tx, &mut call_pages) + } + (contract, method) => { + warn!("unknown contract and method {:?} {:x}", contract, method) + } + }; + + // TODO(anatoly): Verify Memory + // Pages owned by the contract are Read/Write, + // pages not owned by the contract are + // Read only. Code should verify memory integrity or + // verify contract bytecode. + + // verify tokens + if !Self::validate_balances(&tx, &loaded_pages, &call_pages) { + return; + } + // write pages back to memory + for (pre, post) in loaded_pages.iter_mut().zip(call_pages.into_iter()) { + *pre = post; + } + }); + } + + /// parallel execution of contracts + /// first we load the pages, then we pass all the pages to `execute` function which can + /// safely call them all in parallel + pub fn commit( + &self, + packet: &[Call], + commits: &[bool], + pages: &Vec>>, + loaded_page_table: &Vec>, + ) { + let mut page_table = self.page_table.write().unwrap(); + let mut count = 0; + for (i, tx) in packet.into_iter().enumerate() { + if !commits[i] { + continue; + } + for (j, (_, oix)) in tx.data.keys.iter().zip(pages[i].iter()).enumerate() { + let ix = oix.expect("checked pages should be loadable"); + page_table[ix] = loaded_page_table[i][j].clone(); + } + count += 1; + } + self.transaction_count.fetch_add(count, Ordering::Relaxed); + } + pub fn commit_release_with_ctx(&self, packet: &[Call], ctx: &Context) { + self.commit(packet, 
&ctx.commit, &ctx.pages, &ctx.loaded_page_table); + //TODO(anatoly): generate blobs here + self.release_memory_lock(packet, &ctx.lock); + } + + pub fn get_balance(&self, key: &PublicKey) -> Option { + let ap = self.allocated_pages.read().unwrap(); + let pt = self.page_table.read().unwrap(); + ap.lookup(key).map(|dx| { + let page = &pt[dx]; + match page.contract { + DEFAULT_CONTRACT => Bank::default_contract_get_balance(page), + contract => { + warn!("get_balance: unknown contract {:?}", contract); + 0 + } + } + }) + } + pub fn get_version(&self, key: &PublicKey) -> Option<(u64, Signature)> { + let ap = self.allocated_pages.read().unwrap(); + let pt = self.page_table.read().unwrap(); + ap.lookup(key).map(|dx| (pt[dx].version, pt[dx].signature)) + } + pub fn release_memory_lock(&self, packet: &[Call], lock: &Vec) { + //holds mem_locks mutex + let mut mem_locks = self.mem_locks.lock().unwrap(); + for (i, call) in packet.iter().enumerate() { + if !lock[i] { + continue; + } + for key in &call.data.keys { + mem_locks.remove(key); + } + } + } +} + +#[cfg(test)] +mod test { + use bincode::deserialize; + use logger; + use packet::Recycler; + use page_table::{Call, Context, Page, PageTable, K, N}; + use rayon::prelude::*; + use signature::Signature; + use std::collections::VecDeque; + use std::sync::mpsc::channel; + use std::sync::Arc; + use std::thread::spawn; + use std::time::Instant; + + #[test] + fn mem_lock() { + logger::setup(); + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut lock2 = vec![false; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + for x in &lock { + assert!(*x); + } + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock2); + for x in &lock2 { + assert!(!*x); + } + pt.release_memory_lock(&transactions, &lock); + + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock2); + for x 
in &lock2 { + assert!(*x); + } + pt.release_memory_lock(&transactions, &lock2); + } + #[test] + fn mem_lock_invalid_ledger() { + logger::setup(); + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + let invalid_ledger = vec![false; N]; + let mut lock = vec![false; N]; + pt.acquire_memory_lock(&transactions, &invalid_ledger, &mut lock); + for x in &lock { + assert!(!*x); + } + } + #[test] + fn validate_call_miss() { + let pt = PageTable::default(); + let fill_table: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&fill_table, true, 1_000_000); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut checked = vec![false; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + for x in &checked { + assert!(!x); + } + } + #[test] + fn validate_call_hit() { + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut checked = vec![false; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + for x in &checked { + assert!(x); + } + } + #[test] + fn validate_call_low_version() { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + pt.bump_versions(&transactions); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut checked = vec![false; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + for tx in &mut transactions { + tx.data.version = 0; + } + pt.validate_call(&transactions, &lock, &mut checked); + for x in 
&checked { + assert!(!x); + } + } + #[test] + fn find_new_keys_needs_alloc() { + logger::setup(); + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut needs_alloc = vec![false; N]; + let mut checked = vec![false; N]; + let mut pages = vec![vec![None; K]; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.find_new_keys(&transactions, &checked, &mut needs_alloc, &mut pages); + for x in &needs_alloc { + assert!(x); + } + } + #[test] + fn find_new_keys_no_alloc() { + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + pt.force_allocate(&transactions, false, 1_000_000); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut needs_alloc = vec![false; N]; + let mut checked = vec![false; N]; + let mut pages = vec![vec![None; K]; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.find_new_keys(&transactions, &checked, &mut needs_alloc, &mut pages); + for x in &needs_alloc { + assert!(!x); + } + } + #[test] + fn allocate_new_keys_some_new_allocs() { + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut needs_alloc = vec![false; N]; + let mut checked = vec![false; N]; + let mut pages = vec![vec![None; K]; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.find_new_keys(&transactions, &checked, &mut needs_alloc, &mut pages); + 
pt.allocate_keys(&transactions, &checked, &needs_alloc, &mut pages); + for (i, tx) in transactions.iter().enumerate() { + for (j, _) in tx.data.keys.iter().enumerate() { + assert!(pages[i][j].is_some()); + } + } + } + #[test] + fn allocate_new_keys_no_new_allocs() { + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + pt.force_allocate(&transactions, false, 1_000_000); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut needs_alloc = vec![false; N]; + let mut checked = vec![false; N]; + let mut pages = vec![vec![None; K]; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.find_new_keys(&transactions, &checked, &mut needs_alloc, &mut pages); + for a in &needs_alloc { + assert!(!a); + } + for (i, tx) in transactions.iter().enumerate() { + for (j, _) in tx.data.keys.iter().enumerate() { + assert!(pages[i][j].is_some()); + } + } + pt.allocate_keys(&transactions, &checked, &needs_alloc, &mut pages); + for (i, tx) in transactions.iter().enumerate() { + for (j, _) in tx.data.keys.iter().enumerate() { + assert!(pages[i][j].is_some()); + } + } + } + #[test] + pub fn load_and_execute() { + logger::setup(); + let pt = PageTable::default(); + let transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 10); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut needs_alloc = vec![false; N]; + let mut checked = vec![false; N]; + let mut pages = vec![vec![None; K]; N]; + let mut loaded_page_table: Vec> = vec![vec![Page::default(); K]; N]; + let mut commit = vec![false; N]; + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.find_new_keys(&transactions, &checked, &mut needs_alloc, &mut pages); + 
pt.sanity_check_pages(&transactions, &checked, &pages); + pt.allocate_keys(&transactions, &checked, &needs_alloc, &mut pages); + pt.sanity_check_pages(&transactions, &checked, &pages); + pt.load_pages( + &transactions, + &mut checked, + &mut pages, + &mut loaded_page_table, + ); + for c in &checked { + assert!(*c); + } + PageTable::execute(&transactions, &checked, &mut loaded_page_table, &mut commit); + pt.commit(&transactions, &commit, &pages, &loaded_page_table); + pt.release_memory_lock(&transactions, &lock); + for (i, x) in transactions.iter().enumerate() { + assert!(checked[i]); + let amount: i64 = deserialize(&x.data.user_data).unwrap(); + assert_eq!(pt.get_balance(&x.data.keys[1]), Some(amount)); + assert_eq!( + pt.get_version(&x.data.keys[1]), + Some((0, Signature::default())) + ); + assert_eq!( + pt.get_version(&x.data.keys[0]), + Some((x.data.version + 1, x.proofs[0])) + ); + assert_eq!( + pt.get_balance(&x.data.keys[0]), + Some(10 - (amount + x.data.fee)) + ); + } + } + /// catch indexing bugs that depend on context == packets in batch + #[test] + fn load_and_execute_variable_batches() { + let pt = PageTable::default(); + let mut ctx = Context::default(); + let mut count = 0; + for n in &[10, 9, 11, 8] { + let mut txs: Vec<_> = (0..*n).map(|_r| Call::random_tx()).collect(); + let start_bal = 1_000_000; + pt.force_allocate(&txs, true, start_bal); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + pt.acquire_validate_find(&txs, &mut ctx); + pt.allocate_keys_with_ctx(&txs, &mut ctx); + pt.load_pages_with_ctx(&txs, &mut ctx); + PageTable::execute_with_ctx(&txs, &mut ctx); + pt.commit_release_with_ctx(&txs, &ctx); + for t in &txs { + assert_eq!(pt.get_balance(&t.data.keys[1]), Some(1)); + let amount: i64 = deserialize(&t.data.user_data).unwrap(); + assert_eq!( + pt.get_balance(&t.data.keys[0]), + Some(start_bal - (amount + t.data.fee)) + ); + } + count += n; + assert_eq!(pt.transaction_count(), count); + } + } + #[test] + fn 
load_and_execute_double_spends() { + let pt = PageTable::default(); + let mut txs: Vec<_> = (0..2).map(|_r| Call::random_tx()).collect(); + let start_bal = 1_000_000; + pt.force_allocate(&txs, true, start_bal); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + txs[0].data.method = 254; + txs[1].data.method = 253; + txs[0].data.fee = 3; + txs[1].data.fee = 4; + pt.acquire_validate_find(&txs, &mut ctx); + pt.allocate_keys_with_ctx(&txs, &mut ctx); + pt.load_pages_with_ctx(&txs, &mut ctx); + PageTable::execute_with_ctx(&txs, &mut ctx); + pt.commit_release_with_ctx(&txs, &ctx); + assert_eq!(pt.get_balance(&txs[0].data.keys[1]), Some(0)); + assert_eq!( + pt.get_balance(&txs[0].data.keys[0]), + Some(start_bal - txs[0].data.fee) + ); + assert_eq!(pt.get_balance(&txs[1].data.keys[1]), Some(0)); + assert_eq!( + pt.get_balance(&txs[1].data.keys[0]), + Some(start_bal - txs[1].data.fee) + ); + //assert that a failed TX that spent fees increased the version + assert_eq!( + pt.get_version(&txs[0].data.keys[0]), + Some((txs[0].data.version + 1, txs[0].proofs[0])) + ); + assert_eq!( + pt.get_version(&txs[1].data.keys[0]), + Some((txs[1].data.version + 1, txs[1].proofs[0])) + ); + } + //TODO test assignment + //TODO test spends of unasigned funds + //TODO test realloc + type ContextRecycler = Recycler; + fn load_and_execute_pipeline_bench() { + logger::setup(); + let context_recycler = ContextRecycler::default(); + let pt = PageTable::default(); + let count = 10000; + let mut ttx: Vec> = (0..count) + .map(|_| (0..N).map(|_r| Call::random_tx()).collect()) + .collect(); + for tx in &ttx { + pt.force_allocate(tx, true, 1_000_000); + } + let (send_transactions, recv_transactions) = channel(); + let (send_execute, recv_execute) = channel(); + let (send_commit, recv_commit) = channel(); + let (send_answer, recv_answer) = channel(); + let spt = Arc::new(pt); + + let _reader = { + let lpt = spt.clone(); + let recycler = 
context_recycler.clone(); + spawn(move || { + for transactions in recv_transactions.iter() { + let transactions: Vec = transactions; + let octx = recycler.allocate(); + { + let mut ctx = octx.write().unwrap(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + lpt.acquire_validate_find(&transactions, &mut ctx); + lpt.allocate_keys_with_ctx(&transactions, &mut ctx); + lpt.load_pages_with_ctx(&transactions, &mut ctx); + } + send_execute.send((transactions, octx)).unwrap(); + } + }) + }; + let _executor = { + spawn(move || { + for (transactions, octx) in recv_execute.iter() { + { + let mut ctx = octx.write().unwrap(); + PageTable::execute_with_ctx(&transactions, &mut ctx); + } + send_commit.send((transactions, octx)).unwrap(); + } + }) + }; + let _commiter = { + let lpt = spt.clone(); + let recycler = context_recycler.clone(); + spawn(move || { + for (transactions, octx) in recv_commit.iter() { + { + let ctx = octx.read().unwrap(); + lpt.commit_release_with_ctx(&transactions, &ctx); + send_answer.send(()).unwrap(); + } + recycler.recycle(octx); + } + }) + }; + //warmup + let tt = ttx.pop().unwrap(); + send_transactions.send(tt).unwrap(); + recv_answer.recv().unwrap(); + + let start = Instant::now(); + for _ in 1..count { + let tt = ttx.pop().unwrap(); + send_transactions.send(tt).unwrap(); + } + for _ in 1..count { + recv_answer.recv().unwrap(); + } + let done = start.elapsed(); + let ns = done.as_secs() as usize * 1_000_000_000 + done.subsec_nanos() as usize; + let total = count * N; + println!( + "PIPELINE: done {:?} {}ns/packet {}ns/t {} tp/s", + done, + ns / (count - 1), + ns / total, + (1_000_000_000 * total) / ns + ); + } + fn load_and_execute_par_pipeline_bench() { + logger::setup(); + let context_recycler = ContextRecycler::default(); + let pt = PageTable::default(); + let count = 10000; + let mut ttx: Vec> = (0..count) + .map(|_| (0..N).map(|_r| Call::random_tx()).collect()) + .collect(); + for tx in &ttx { + pt.force_allocate(tx, true, 
1_000_000); + } + let (send_transactions, recv_transactions) = channel(); + let (send_execute, recv_execute) = channel(); + let (send_commit, recv_commit) = channel(); + let (send_answer, recv_answer) = channel(); + let spt = Arc::new(pt); + + let _reader = { + let lpt = spt.clone(); + let recycler = context_recycler.clone(); + spawn(move || { + for transactions in recv_transactions.iter() { + let transactions: Vec = transactions; + let octx = recycler.allocate(); + { + let mut ctx = octx.write().unwrap(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + lpt.acquire_validate_find(&transactions, &mut ctx); + lpt.allocate_keys_with_ctx(&transactions, &mut ctx); + lpt.load_pages_with_ctx(&transactions, &mut ctx); + } + send_execute.send((transactions, octx)).unwrap(); + } + }) + }; + let _executor = { + spawn(move || { + while let Ok(tx) = recv_execute.recv() { + let mut events = VecDeque::new(); + events.push_back(tx); + while let Ok(more) = recv_execute.try_recv() { + events.push_back(more); + } + events.par_iter().for_each(|(transactions, octx)| { + let mut ctx = octx.write().unwrap(); + PageTable::execute_with_ctx(&transactions, &mut ctx); + }); + send_commit.send(events).unwrap(); + } + }) + }; + let _commiter = { + let lpt = spt.clone(); + let recycler = context_recycler.clone(); + spawn(move || { + for mut events in recv_commit.iter() { + for (transactions, octx) in &events { + let ctx = octx.read().unwrap(); + lpt.commit_release_with_ctx(transactions, &ctx); + } + send_answer.send(events.len()).unwrap(); + while let Some((_, octx)) = events.pop_front() { + recycler.recycle(octx); + } + } + }) + }; + let _sender = { + spawn(move || { + while let Some(tt) = ttx.pop() { + let _ = send_transactions.send(tt); + } + }) + }; + //warmup + recv_answer.recv().unwrap(); + let start = Instant::now(); + let mut total = 1; + while total < count { + total += recv_answer.recv().unwrap(); + } + let done = start.elapsed(); + let ns = done.as_secs() as usize * 
1_000_000_000 + done.subsec_nanos() as usize; + let total = count * N; + println!( + "PAR_PIPELINE: done {:?} {}ns/packet {}ns/t {} tp/s", + done, + ns / (count - 1), + ns / total, + (1_000_000_000 * total) / ns + ); + } + pub fn load_and_execute_mt_bench(max_threads: usize) { + let pt = PageTable::default(); + let count = 10000; + let mut ttx: Vec> = (0..count) + .map(|_| (0..N).map(|_r| Call::random_tx()).collect()) + .collect(); + for tx in &ttx { + pt.force_allocate(tx, true, 1_000_000); + } + let (send_answer, recv_answer) = channel(); + let spt = Arc::new(pt); + let threads: Vec<_> = (0..max_threads) + .map(|_| { + let (send, recv) = channel(); + let response = send_answer.clone(); + let lpt = spt.clone(); + let t = spawn(move || { + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + for transactions in recv.iter() { + let transactions: Vec = transactions; + lpt.acquire_validate_find(&transactions, &mut ctx); + lpt.allocate_keys_with_ctx(&transactions, &mut ctx); + lpt.load_pages_with_ctx(&transactions, &mut ctx); + PageTable::execute_with_ctx(&transactions, &mut ctx); + lpt.commit_release_with_ctx(&transactions, &ctx); + response.send(()).unwrap(); + } + }); + (t, send) + }) + .collect(); + let _sender = { + spawn(move || { + for thread in 0..count { + let tt = ttx.pop().unwrap(); + threads[thread % max_threads].1.send(tt).unwrap(); + } + }) + }; + //warmup + for _ in 0..max_threads { + recv_answer.recv().unwrap(); + } + + let start = Instant::now(); + for _thread in max_threads..count { + recv_answer.recv().unwrap(); + } + let done = start.elapsed(); + let ns = done.as_secs() as usize * 1_000_000_000 + done.subsec_nanos() as usize; + let total = (count - max_threads) * N; + println!( + "MT-{}: done {:?} {}ns/packet {}ns/t {} tp/s", + max_threads, + done, + ns / (count - max_threads), + ns / total, + (1_000_000_000 * total) / ns + ); + } + #[test] + #[ignore] + fn load_and_execute_benches() { + 
println!("load_and_execute_mt_bench(1)"); + load_and_execute_mt_bench(1); + println!("load_and_execute_mt_bench(2)"); + load_and_execute_mt_bench(2); + println!("load_and_execute_mt_bench(3)"); + load_and_execute_mt_bench(3); + println!("load_and_execute_mt_bench(4)"); + load_and_execute_mt_bench(4); + println!("load_and_execute_mt_bench(8)"); + load_and_execute_mt_bench(8); + println!("load_and_execute_mt_bench(16)"); + load_and_execute_mt_bench(16); + println!("load_and_execute_mt_bench(32)"); + load_and_execute_mt_bench(32); + println!("load_and_execute_pipeline_bench"); + load_and_execute_pipeline_bench(); + println!("load_and_execute_par_pipeline_bench"); + load_and_execute_par_pipeline_bench(); + } +} + +#[cfg(all(feature = "unstable", test))] +mod bench { + extern crate test; + use self::test::Bencher; + use packet::Recycler; + use page_table::{self, Call, Context, PageTable, N}; + use rand::{thread_rng, RngCore}; + use std::sync::mpsc::channel; + use std::sync::Arc; + use std::thread::spawn; + + #[bench] + fn update_version_baseline(bencher: &mut Bencher) { + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + }); + } + #[bench] + fn mem_lock(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.release_memory_lock(&transactions, &lock); + }); + } + #[bench] + fn mem_lock_invalid(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + let valid_ledger = vec![false; N]; + let mut lock = vec![false; N]; + bencher.iter(move || { + for tx in &mut transactions { + 
tx.data.version += 1; + } + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.release_memory_lock(&transactions, &lock); + }); + } + #[bench] + fn validate_call_miss(bencher: &mut Bencher) { + let pt = PageTable::default(); + let fill_table: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&fill_table, true, 1_000_000); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut checked = vec![false; N]; + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.release_memory_lock(&transactions, &lock); + }); + } + #[bench] + fn validate_call_hit(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let valid_ledger = vec![true; N]; + let mut lock = vec![false; N]; + let mut checked = vec![false; N]; + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_memory_lock(&transactions, &valid_ledger, &mut lock); + pt.validate_call(&transactions, &lock, &mut checked); + pt.release_memory_lock(&transactions, &lock); + }); + } + #[bench] + fn find_new_keys_needs_alloc(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_validate_find(&transactions, &mut ctx); + pt.release_memory_lock(&transactions, &ctx.lock); + }); + } + #[bench] + fn find_new_keys_no_alloc(bencher: &mut 
Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + pt.force_allocate(&transactions, false, 1_000_000); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_validate_find(&transactions, &mut ctx); + pt.release_memory_lock(&transactions, &ctx.lock); + }); + } + #[bench] + fn allocate_new_keys_some_new_allocs(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + + pt.acquire_validate_find(&transactions, &mut ctx); + pt.allocate_keys_with_ctx(&transactions, &mut ctx); + pt.release_memory_lock(&transactions, &ctx.lock); + }); + } + #[bench] + fn allocate_new_keys_no_new_allocs(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + pt.force_allocate(&transactions, false, 1_000_000); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_validate_find(&transactions, &mut ctx); + pt.allocate_keys_with_ctx(&transactions, &mut ctx); + pt.release_memory_lock(&transactions, &ctx.lock); + }); + } + #[bench] + fn load_pages(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + 
let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_validate_find(&transactions, &mut ctx); + pt.allocate_keys_with_ctx(&transactions, &mut ctx); + pt.load_pages_with_ctx(&transactions, &mut ctx); + pt.release_memory_lock(&transactions, &ctx.lock); + }); + } + #[bench] + fn load_and_execute(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut transactions: Vec<_> = (0..N).map(|_r| Call::random_tx()).collect(); + pt.force_allocate(&transactions, true, 1_000_000); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + for tx in &mut transactions { + tx.data.version += 1; + } + pt.acquire_validate_find(&transactions, &mut ctx); + pt.allocate_keys_with_ctx(&transactions, &mut ctx); + pt.load_pages_with_ctx(&transactions, &mut ctx); + PageTable::execute_with_ctx(&transactions, &mut ctx); + pt.commit_release_with_ctx(&transactions, &ctx); + }); + } + + #[bench] + fn load_and_execute_large_table(bencher: &mut Bencher) { + let pt = PageTable::default(); + let mut ttx: Vec> = (0..N) + .map(|_| (0..N).map(|_r| Call::random_tx()).collect()) + .collect(); + for transactions in &ttx { + pt.force_allocate(transactions, true, 1_000_000); + } + page_table::test::load_and_execute(); + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + bencher.iter(move || { + let transactions = &mut ttx[thread_rng().next_u64() as usize % N]; + for tx in transactions.iter_mut() { + tx.data.version += 1; + } + pt.acquire_validate_find(&transactions, &mut ctx); + pt.allocate_keys_with_ctx(&transactions, &mut ctx); + pt.load_pages_with_ctx(&transactions, &mut ctx); + PageTable::execute_with_ctx(&transactions, &mut ctx); + pt.commit_release_with_ctx(&transactions, &ctx); + }); + } + #[bench] + fn 
load_and_execute_mt3_experimental(bencher: &mut Bencher) { + const T: usize = 3; + let pt = PageTable::default(); + let count = 10000; + let mut ttx: Vec> = (0..count) + .map(|_| (0..N).map(|_r| Call::random_tx()).collect()) + .collect(); + for tx in &ttx { + pt.force_allocate(tx, true, 1_000_000); + } + let (send_answer, recv_answer) = channel(); + let spt = Arc::new(pt); + let threads: Vec<_> = (0..T) + .map(|_| { + let (send, recv) = channel(); + let response = send_answer.clone(); + let lpt = spt.clone(); + let t = spawn(move || { + let mut ctx = Context::default(); + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + for transactions in recv.iter() { + let transactions: Vec = transactions; + lpt.acquire_validate_find(&transactions, &mut ctx); + lpt.allocate_keys_with_ctx(&transactions, &mut ctx); + lpt.load_pages_with_ctx(&transactions, &mut ctx); + PageTable::execute_with_ctx(&transactions, &mut ctx); + lpt.commit_release_with_ctx(&transactions, &ctx); + if response.send(()).is_err() { + return; + } + } + }); + (t, send) + }) + .collect(); + let _sender = { + spawn(move || { + for thread in 0..count { + let tt = ttx.pop().unwrap(); + if threads[thread % T].1.send(tt).is_err() { + return; + } + } + }) + }; + + //warmup + for _ in 0..T { + recv_answer.recv().unwrap(); + } + bencher.iter(move || { + recv_answer.recv().unwrap(); + }); + } + type ContextRecycler = Recycler; + #[bench] + fn load_and_execute_pipeline_experimental(bencher: &mut Bencher) { + let context_recycler = ContextRecycler::default(); + let pt = PageTable::default(); + let count = 10000; + let mut ttx: Vec> = (0..count) + .map(|_| (0..N).map(|_r| Call::random_tx()).collect()) + .collect(); + for tx in &ttx { + pt.force_allocate(tx, true, 1_000_000); + } + let (send_transactions, recv_transactions) = channel(); + let (send_execute, recv_execute) = channel(); + let (send_commit, recv_commit) = channel(); + let (send_answer, recv_answer) = channel(); + let spt = Arc::new(pt); + + 
let _reader = { + let lpt = spt.clone(); + let recycler = context_recycler.clone(); + spawn(move || { + for transactions in recv_transactions.iter() { + let transactions: Vec = transactions; + let octx = recycler.allocate(); + { + let mut ctx = octx.write().unwrap(); + + for is_valid in &mut ctx.valid_ledger { + *is_valid = true; + } + lpt.acquire_validate_find(&transactions, &mut ctx); + lpt.allocate_keys_with_ctx(&transactions, &mut ctx); + lpt.load_pages_with_ctx(&transactions, &mut ctx); + } + if send_execute.send((transactions, octx)).is_err() { + return; + } + } + }) + }; + let _executor = { + spawn(move || { + for (transactions, octx) in recv_execute.iter() { + let transactions: Vec = transactions; + { + let mut ctx = octx.write().unwrap(); + PageTable::execute_with_ctx(&transactions, &mut ctx); + } + if send_commit.send((transactions, octx)).is_err() { + return; + } + } + }) + }; + let _commiter = { + let lpt = spt.clone(); + let recycler = context_recycler.clone(); + spawn(move || { + for (transactions, octx) in recv_commit.iter() { + let transactions: Vec = transactions; + { + let ctx = octx.read().unwrap(); + lpt.commit_release_with_ctx(&transactions, &ctx); + if send_answer.send(()).is_err() { + return; + } + } + recycler.recycle(octx); + } + }) + }; + let _sender = { + spawn(move || { + while let Some(tt) = ttx.pop() { + let tt: Vec = tt; + if send_transactions.send(tt).is_err() { + return; + } + } + }) + }; + //warmup + recv_answer.recv().unwrap(); + bencher.iter(move || { + recv_answer.recv().unwrap(); + }); + } +} diff --git a/src/record_stage.rs b/src/record_stage.rs index a2aaa069908c1a..9562292c4527cb 100644 --- a/src/record_stage.rs +++ b/src/record_stage.rs @@ -187,8 +187,8 @@ mod tests { let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero); let alice_keypair = KeyPair::new(); let bob_pubkey = KeyPair::new().pubkey(); - let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero); - let tx1 = 
Transaction::new(&alice_keypair, bob_pubkey, 2, zero); + let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero, 0); + let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero, 1); tx_sender .send(Signal::Transactions(vec![tx0, tx1])) .unwrap(); diff --git a/src/request.rs b/src/request.rs index 96c84b80ad67e6..696df15f307eb7 100644 --- a/src/request.rs +++ b/src/request.rs @@ -9,7 +9,7 @@ pub enum Request { GetBalance { key: PublicKey }, GetLastId, GetTransactionCount, - GetSignature { signature: Signature }, + GetPubKeyVersion { pubkey: PublicKey }, } impl Request { @@ -24,5 +24,5 @@ pub enum Response { Balance { key: PublicKey, val: i64 }, LastId { id: Hash }, TransactionCount { transaction_count: u64 }, - SignatureStatus { signature_status: bool }, + PubKeyVersion { version: u64, signature: Signature }, } diff --git a/src/request_processor.rs b/src/request_processor.rs index f55bdf37ecb68f..5a8f3c6e7ef59f 100644 --- a/src/request_processor.rs +++ b/src/request_processor.rs @@ -40,10 +40,10 @@ impl RequestProcessor { info!("Response::TransactionCount {:?}", rsp); Some(rsp) } - Request::GetSignature { signature } => { - let signature_status = self.bank.has_signature(&signature); - let rsp = (Response::SignatureStatus { signature_status }, rsp_addr); - info!("Response::Signature {:?}", rsp); + Request::GetPubKeyVersion { pubkey } => { + let (version, signature) = self.bank.get_version(&pubkey); + let rsp = (Response::PubKeyVersion { version, signature }, rsp_addr); + info!("Response::PubKeyVersion {:?}", rsp); Some(rsp) } } diff --git a/src/thin_client.rs b/src/thin_client.rs index d09980dea418dc..6bd2cf9c52ee30 100644 --- a/src/thin_client.rs +++ b/src/thin_client.rs @@ -6,7 +6,7 @@ use bincode::{deserialize, serialize}; use hash::Hash; use request::{Request, Response}; -use signature::{KeyPair, PublicKey, Signature}; +use signature::{KeyPair, KeyPairUtil, PublicKey, Signature}; use std::collections::HashMap; use std::io; use std::net::{SocketAddr, 
UdpSocket}; @@ -28,7 +28,8 @@ pub struct ThinClient { last_id: Option, transaction_count: u64, balances: HashMap, - signature_status: bool, + pubkey_version: u64, + pubkey_signature: Signature, } impl ThinClient { @@ -49,7 +50,8 @@ impl ThinClient { last_id: None, transaction_count: 0, balances: HashMap::new(), - signature_status: false, + pubkey_version: 0, + pubkey_signature: Signature::default(), } } @@ -75,12 +77,13 @@ impl ThinClient { trace!("Response transaction count {:?}", transaction_count); self.transaction_count = transaction_count; } - Response::SignatureStatus { signature_status } => { - self.signature_status = signature_status; - if signature_status { - trace!("Response found signature"); - } else { + Response::PubKeyVersion { version, signature } => { + if self.pubkey_version == version { trace!("Response signature not found"); + } else { + self.pubkey_version = version; + self.pubkey_signature = signature; + trace!("Response found signature"); } } } @@ -92,7 +95,7 @@ impl ThinClient { let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed"); self.transactions_socket .send_to(&data, &self.transactions_addr)?; - Ok(tx.sig) + Ok(*tx.sig()) } /// Creates, signs, and processes a Transaction. Useful for writing unit-tests. @@ -102,9 +105,10 @@ impl ThinClient { keypair: &KeyPair, to: PublicKey, last_id: &Hash, + version: u64, ) -> io::Result { let now = Instant::now(); - let tx = Transaction::new(keypair, to, n, *last_id); + let tx = Transaction::new(keypair, to, n, *last_id, version); let result = self.transfer_signed(&tx); metrics::submit( influxdb::Point::new("thinclient") @@ -226,25 +230,12 @@ impl ThinClient { } } - /// Poll the server to confirm a transaction. - pub fn poll_for_signature(&mut self, sig: &Signature) -> io::Result<()> { - let now = Instant::now(); - while !self.check_signature(sig) { - if now.elapsed().as_secs() > 1 { - // TODO: Return a better error. 
- return Err(io::Error::new(io::ErrorKind::Other, "signature not found")); - } - sleep(Duration::from_millis(100)); - } - Ok(()) - } - /// Check a signature in the bank. This method blocks /// until the server sends a response. - pub fn check_signature(&mut self, sig: &Signature) -> bool { + pub fn get_version(&mut self, pubkey: &PublicKey) -> (u64, Signature) { trace!("check_signature"); - let req = Request::GetSignature { signature: *sig }; - let data = serialize(&req).expect("serialize GetSignature in pub fn check_signature"); + let req = Request::GetPubKeyVersion { pubkey: *pubkey }; + let data = serialize(&req).expect("serialize GetPubKeyVersion in pub fn get_version"); let now = Instant::now(); let mut done = false; while !done { @@ -253,7 +244,7 @@ impl ThinClient { .expect("buffer error in pub fn get_last_id"); if let Ok(resp) = self.recv_response() { - if let Response::SignatureStatus { .. } = resp { + if let Response::PubKeyVersion { .. } = resp { done = true; } self.process_response(&resp); @@ -268,7 +259,40 @@ impl ThinClient { ) .to_owned(), ); - self.signature_status + (self.pubkey_version, self.pubkey_signature) + } + /// Retry the transfer until it succeeds + /// To retry correctly + /// 1. client gets the current version of the sender + /// 2. client generates a transfer transaction for that version + /// 3. client check the updated version of the sender + /// 4. If the updated version didn't change, retry the transfer at current version + /// 5. If the updated version changed but signature doesn't match, retry the transfer at new + /// version + /// 6. 
If the updated version changed and signature matches, return + /// * returns the destination key's balance + pub fn retry_transfer( + &mut self, + alice: &KeyPair, + bob_pubkey: &PublicKey, + amount: i64, + retries: usize, + ) -> Option<(u64, Signature)> { + let last_id = self.get_last_id(); + let mut version = self.get_version(&alice.pubkey()); + for _ in 0..retries { + let sig = self + .transfer(amount, &alice, *bob_pubkey, &last_id, version.0) + .unwrap(); + let next_version = self.get_version(&alice.pubkey()); + if next_version.1 == sig { + return Some(next_version); + } else if version.0 != next_version.0 { + version = next_version; + } + sleep(Duration::from_millis(100)); + } + None } } @@ -317,7 +341,6 @@ mod tests { sink(), false, ); - sleep(Duration::from_millis(900)); let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -329,19 +352,23 @@ mod tests { transactions_socket, ); let last_id = client.get_last_id(); - let sig = client - .transfer(500, &alice.keypair(), bob_pubkey, &last_id) - .unwrap(); - client.poll_for_signature(&sig).unwrap(); - let balance = client.get_balance(&bob_pubkey); + let version = client.get_version(&alice.keypair().pubkey()); + for _ in 0..30 { + let sig = client + .transfer(500, &alice.keypair(), bob_pubkey, &last_id, version.0) + .unwrap(); + if sig == client.get_version(&alice.keypair().pubkey()).1 { + break; + } + sleep(Duration::from_millis(300)); + } + let balance = client.poll_get_balance(&bob_pubkey); assert_eq!(balance.unwrap(), 500); exit.store(true, Ordering::Relaxed); server.join().unwrap(); } - // sleep(Duration::from_millis(300)); is unstable #[test] - #[ignore] fn test_bad_sig() { logger::setup(); let leader_keypair = KeyPair::new(); @@ -363,8 +390,6 @@ mod tests { sink(), false, ); - //TODO: remove this sleep, or add a retry so CI is stable - sleep(Duration::from_millis(300)); let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); 
requests_socket @@ -378,21 +403,33 @@ mod tests { transactions_socket, ); let last_id = client.get_last_id(); - - let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id); - - let _sig = client.transfer_signed(&tx).unwrap(); + let version = client.get_version(&alice.keypair().pubkey()); + let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id, version.0); + for _ in 0..30 { + let _ = client.transfer_signed(&tx).unwrap(); + if *tx.sig() == client.get_version(&alice.keypair().pubkey()).1 { + break; + } + sleep(Duration::from_millis(300)); + } let last_id = client.get_last_id(); + let version = client.get_version(&alice.keypair().pubkey()); - let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id); - if let Instruction::NewContract(contract) = &mut tr2.instruction { + let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id, version.0); + if let Instruction::NewContract(contract) = tr2.instruction() { + let mut contract = contract.clone(); contract.tokens = 502; contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey)); + tr2.call.data.user_data = serialize(&Instruction::NewContract(contract)).unwrap(); + } + for _ in 0..30 { + let _sig = client.transfer_signed(&tr2).unwrap(); + if version.0 != client.get_version(&alice.keypair().pubkey()).0 { + break; + } + sleep(Duration::from_millis(300)); } - let sig = client.transfer_signed(&tr2).unwrap(); - client.poll_for_signature(&sig).unwrap(); - let balance = client.get_balance(&bob_pubkey); assert_eq!(balance.unwrap(), 500); exit.store(true, Ordering::Relaxed); @@ -400,7 +437,7 @@ mod tests { } #[test] - fn test_client_check_signature() { + fn test_client_retry_transfer() { logger::setup(); let leader_keypair = KeyPair::new(); let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey()); @@ -433,13 +470,8 @@ mod tests { leader_data.contact_info.tpu, transactions_socket, ); - let last_id = client.get_last_id(); - let sig = client - 
.transfer(500, &alice.keypair(), bob_pubkey, &last_id) - .unwrap(); - sleep(Duration::from_millis(100)); - - assert!(client.check_signature(&sig)); + let result = client.retry_transfer(&alice.keypair(), &bob_pubkey, 500, 30); + assert!(result.is_some()); exit.store(true, Ordering::Relaxed); server.join().unwrap(); diff --git a/src/transaction.rs b/src/transaction.rs index e9ecfcaaa92f86..60166f32c2603a 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -1,15 +1,18 @@ //! The `transaction` module provides functionality for creating log transactions. -use bincode::serialize; +use bank::BANK_PROCESS_TRANSACTION_METHOD; +use bincode::{deserialize, serialize}; use budget::{Budget, Condition}; use chrono::prelude::*; use hash::Hash; +use itertools::Itertools; +use page_table::{self, Call}; use payment_plan::{Payment, PaymentPlan, Witness}; use signature::{KeyPair, KeyPairUtil, PublicKey, Signature}; -pub const SIGNED_DATA_OFFSET: usize = 112; -pub const SIG_OFFSET: usize = 8; -pub const PUB_KEY_OFFSET: usize = 80; +pub const SIGNED_DATA_OFFSET: usize = 80; +pub const SIG_OFFSET: usize = 16; +pub const PUB_KEY_OFFSET: usize = 96; /// The type of payment plan. Each item must implement the PaymentPlan trait. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] @@ -78,40 +81,70 @@ pub enum Instruction { /// An instruction signed by a client with `PublicKey`. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct Transaction { - /// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`. - pub sig: Signature, - - /// The `PublicKey` of the entity that signed the transaction data. - pub from: PublicKey, - - /// The action the server should take. - pub instruction: Instruction, - - /// The ID of a recent ledger entry. - pub last_id: Hash, - - /// The number of tokens paid for processing and storage of this transaction. 
- pub fee: i64, + pub call: Call, +} +pub enum TransactionKeys<'a> { + KeyPair(&'a KeyPair), + PubKey(&'a PublicKey), } - impl Transaction { - /// Create a signed transaction from the given `Instruction`. + /// Compile and sign transaction into a Call fn new_from_instruction( - from_keypair: &KeyPair, + keypairs: &[TransactionKeys], instruction: Instruction, last_id: Hash, fee: i64, + version: u64, ) -> Self { - let from = from_keypair.pubkey(); - let mut tx = Transaction { - sig: Signature::default(), - instruction, + assert_eq!( + keypairs.is_empty(), + false, + "expect at least 1 transaction key" + ); + let pubkeys: Vec = keypairs + .into_iter() + .map(|tk| match tk { + TransactionKeys::KeyPair(keypair) => keypair.pubkey(), + TransactionKeys::PubKey(pubkey) => **pubkey, + }) + .collect(); + let user_data = serialize(&instruction).expect("serialize instruction"); + let required_proofs: Vec = keypairs + .iter() + .enumerate() + .filter_map(|(i, tk)| match tk { + TransactionKeys::KeyPair(_) => Some(i as u8), + _ => None, + }) + .collect(); + let mut call = Call::new( + pubkeys, + required_proofs, + 0, //TODO(anatoly): PoH count last_id, - from, + page_table::DEFAULT_CONTRACT, + version, fee, - }; - tx.sign(from_keypair); - tx + BANK_PROCESS_TRANSACTION_METHOD, + user_data, + ); + keypairs.iter().foreach(|tk| match tk { + TransactionKeys::KeyPair(keypair) => call.append_signature(&keypair), + _ => (), + }); + Transaction { call } + } + pub fn new_noplan( + from_keypair: &KeyPair, + to: PublicKey, + tokens: i64, + fee: i64, + last_id: Hash, + version: u64, + ) -> Self { + let mut call = Call::new_tx(from_keypair.pubkey(), 0, last_id, tokens, fee, version, to); + call.append_signature(from_keypair); + Transaction { call } } /// Create and sign a new Transaction. Used for unit-testing. 
@@ -121,6 +154,7 @@ impl Transaction { tokens: i64, fee: i64, last_id: Hash, + version: u64, ) -> Self { let payment = Payment { tokens: tokens - fee, @@ -129,28 +163,65 @@ impl Transaction { let budget = Budget::Pay(payment); let plan = Plan::Budget(budget); let instruction = Instruction::NewContract(Contract { plan, tokens }); - Self::new_from_instruction(from_keypair, instruction, last_id, fee) + let keys = [ + TransactionKeys::KeyPair(from_keypair.clone()), + TransactionKeys::PubKey(&to), + ]; + Self::new_from_instruction(&keys, instruction, last_id, fee, version) } /// Create and sign a new Transaction. Used for unit-testing. - pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self { - Self::new_taxed(from_keypair, to, tokens, 0, last_id) + pub fn new( + from_keypair: &KeyPair, + to: PublicKey, + tokens: i64, + last_id: Hash, + version: u64, + ) -> Self { + Self::new_taxed(from_keypair, to, tokens, 0, last_id, version) } /// Create and sign a new Witness Timestamp. Used for unit-testing. - pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime, last_id: Hash) -> Self { + pub fn new_timestamp( + from_keypair: &KeyPair, + to: PublicKey, + dt: DateTime, + last_id: Hash, + version: u64, + ) -> Self { let instruction = Instruction::ApplyTimestamp(dt); - Self::new_from_instruction(from_keypair, instruction, last_id, 0) + let keys = [ + TransactionKeys::KeyPair(from_keypair), + TransactionKeys::PubKey(&to), + ]; + Self::new_from_instruction(&keys, instruction, last_id, 0, version) } /// Create and sign a new Witness Signature. Used for unit-testing. 
- pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self { + pub fn new_signature( + from_keypair: &KeyPair, + to: PublicKey, + tx_sig: Signature, + last_id: Hash, + version: u64, + ) -> Self { let instruction = Instruction::ApplySignature(tx_sig); - Self::new_from_instruction(from_keypair, instruction, last_id, 0) + let keys = [ + TransactionKeys::KeyPair(from_keypair), + TransactionKeys::PubKey(&to), + ]; + Self::new_from_instruction(&keys, instruction, last_id, 0, version) } - pub fn new_vote(from_keypair: &KeyPair, vote: Vote, last_id: Hash, fee: i64) -> Self { - Transaction::new_from_instruction(&from_keypair, Instruction::NewVote(vote), last_id, fee) + pub fn new_vote( + from_keypair: &KeyPair, + vote: Vote, + last_id: Hash, + fee: i64, + version: u64, + ) -> Self { + let keys = [TransactionKeys::KeyPair(from_keypair)]; + Transaction::new_from_instruction(&keys, Instruction::NewVote(vote), last_id, fee, version) } /// Create and sign a postdated Transaction. Used for unit-testing. @@ -160,6 +231,7 @@ impl Transaction { dt: DateTime, tokens: i64, last_id: Hash, + version: u64, ) -> Self { let from = from_keypair.pubkey(); let budget = Budget::Or( @@ -168,39 +240,53 @@ impl Transaction { ); let plan = Plan::Budget(budget); let instruction = Instruction::NewContract(Contract { plan, tokens }); - Self::new_from_instruction(from_keypair, instruction, last_id, 0) + let keys = [ + TransactionKeys::KeyPair(from_keypair), + TransactionKeys::PubKey(&to), + ]; + Self::new_from_instruction(&keys, instruction, last_id, 0, version) } - /// Get the transaction data to sign. 
- fn get_sign_data(&self) -> Vec { - let mut data = serialize(&(&self.instruction)).expect("serialize Contract"); - let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id"); - data.extend_from_slice(&last_id_data); - - let fee_data = serialize(&(&self.fee)).expect("serialize last_id"); - data.extend_from_slice(&fee_data); - - data + pub fn sig(&self) -> &Signature { + &self.call.proofs[0] + } + pub fn sigs(&self) -> &[Signature] { + &self.call.proofs + } + pub fn from(&self) -> &PublicKey { + &self.call.data.keys[0] + } + pub fn last_id(&self) -> &Hash { + &self.call.data.last_hash + } + pub fn fee(&self) -> i64 { + self.call.data.fee + } + pub fn instruction(&self) -> Instruction { + deserialize(&self.call.data.user_data).unwrap() } - /// Sign this transaction. pub fn sign(&mut self, keypair: &KeyPair) { - let sign_data = self.get_sign_data(); - self.sig = Signature::new(keypair.sign(&sign_data).as_ref()); + self.call.append_signature(keypair); + } + /// Sign this transaction. + pub fn get_sign_data(&self) -> Vec { + self.call.get_sign_data() } /// Verify only the transaction signature. pub fn verify_sig(&self) -> bool { warn!("transaction signature verification called"); - self.sig.verify(&self.from.as_ref(), &self.get_sign_data()) + self.call.verify_sig() } /// Verify only the payment plan. 
pub fn verify_plan(&self) -> bool { - if let Instruction::NewContract(contract) = &self.instruction { - self.fee >= 0 - && self.fee <= contract.tokens - && contract.plan.verify(contract.tokens - self.fee) + let instruction = self.instruction(); + if let Instruction::NewContract(contract) = instruction { + self.fee() >= 0 + && self.call.data.fee <= contract.tokens + && contract.plan.verify(contract.tokens - self.fee()) } else { true } @@ -211,7 +297,7 @@ pub fn test_tx() -> Transaction { let keypair1 = KeyPair::new(); let pubkey1 = keypair1.pubkey(); let zero = Hash::default(); - Transaction::new(&keypair1, pubkey1, 42, zero) + Transaction::new(&keypair1, pubkey1, 42, zero, 0) } #[cfg(test)] @@ -235,7 +321,7 @@ mod tests { fn test_claim() { let keypair = KeyPair::new(); let zero = Hash::default(); - let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero); + let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero, 0); assert!(tx0.verify_plan()); } @@ -245,7 +331,7 @@ mod tests { let keypair0 = KeyPair::new(); let keypair1 = KeyPair::new(); let pubkey1 = keypair1.pubkey(); - let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero); + let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero, 0); assert!(tx0.verify_plan()); } @@ -254,43 +340,52 @@ mod tests { let zero = Hash::default(); let keypair0 = KeyPair::new(); let pubkey1 = KeyPair::new().pubkey(); - assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan()); - assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan()); - assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan()); + assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero, 0).verify_plan()); + assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero, 0).verify_plan()); + assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero, 0).verify_plan()); } #[test] fn test_serialize_claim() { - let budget = Budget::Pay(Payment { - tokens: 0, - to: 
Default::default(), - }); + let keypair = KeyPair::new(); + let to = Default::default(); + let budget = Budget::Pay(Payment { tokens: 0, to: to }); let plan = Plan::Budget(budget); let instruction = Instruction::NewContract(Contract { plan, tokens: 0 }); - let claim0 = Transaction { - instruction, - from: Default::default(), - last_id: Default::default(), - sig: Default::default(), - fee: 0, - }; + let keys = [ + TransactionKeys::KeyPair(&keypair), + TransactionKeys::PubKey(&to), + ]; + let claim0 = + Transaction::new_from_instruction(&keys, instruction, Default::default(), 0, 0); let buf = serialize(&claim0).unwrap(); let claim1: Transaction = deserialize(&buf).unwrap(); assert_eq!(claim1, claim0); } + #[test] + fn test_size() { + let keypair = KeyPair::new(); + let to = Default::default(); + let claim0 = Transaction::new_noplan(&keypair, to, 0, 0, Default::default(), 0); + let buf = serialize(&claim0).unwrap(); + assert_eq!(buf.len(), 290); + } + #[test] fn test_token_attack() { let zero = Hash::default(); let keypair = KeyPair::new(); let pubkey = keypair.pubkey(); - let mut tx = Transaction::new(&keypair, pubkey, 42, zero); - if let Instruction::NewContract(contract) = &mut tx.instruction { + let mut tx = Transaction::new(&keypair, pubkey, 42, zero, 0); + let mut instruction = tx.instruction(); + if let Instruction::NewContract(contract) = &mut instruction { contract.tokens = 1_000_000; // <-- attack, part 1! if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan { payment.tokens = contract.tokens; // <-- attack, part 2! 
} } + tx.call.data.user_data = serialize(&instruction).expect("serialize instruction"); assert!(tx.verify_plan()); assert!(!tx.verify_sig()); } @@ -302,12 +397,14 @@ mod tests { let thief_keypair = KeyPair::new(); let pubkey1 = keypair1.pubkey(); let zero = Hash::default(); - let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero); - if let Instruction::NewContract(contract) = &mut tx.instruction { + let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero, 0); + let mut instruction = tx.instruction(); + if let Instruction::NewContract(contract) = &mut instruction { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan { payment.to = thief_keypair.pubkey(); // <-- attack! } } + tx.call.data.user_data = serialize(&instruction).expect("serialize instruction"); assert!(tx.verify_plan()); assert!(!tx.verify_sig()); } @@ -317,8 +414,8 @@ mod tests { let sign_data = tx.get_sign_data(); let tx_bytes = serialize(&tx).unwrap(); assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET)); - assert_matches!(memfind(&tx_bytes, &tx.sig.as_ref()), Some(SIG_OFFSET)); - assert_matches!(memfind(&tx_bytes, &tx.from.as_ref()), Some(PUB_KEY_OFFSET)); + assert_matches!(memfind(&tx_bytes, tx.sig().as_ref()), Some(SIG_OFFSET)); + assert_matches!(memfind(&tx_bytes, tx.from().as_ref()), Some(PUB_KEY_OFFSET)); } #[test] @@ -326,16 +423,18 @@ mod tests { let keypair0 = KeyPair::new(); let keypair1 = KeyPair::new(); let zero = Hash::default(); - let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero); - if let Instruction::NewContract(contract) = &mut tx.instruction { + let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero, 0); + let mut instruction = tx.instruction(); + if let Instruction::NewContract(contract) = &mut instruction { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan { payment.tokens = 2; // <-- attack! 
} } + tx.call.data.user_data = serialize(&instruction).expect("serialize instruction"); assert!(!tx.verify_plan()); // Also, ensure all branchs of the plan spend all tokens - if let Instruction::NewContract(contract) = &mut tx.instruction { + if let Instruction::NewContract(contract) = &mut instruction { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan { payment.tokens = 0; // <-- whoops! } diff --git a/src/tvu.rs b/src/tvu.rs index eb421f432eddef..5f3ab5a7eadaa3 100644 --- a/src/tvu.rs +++ b/src/tvu.rs @@ -253,11 +253,15 @@ pub mod tests { bank.register_entry_id(&cur_hash); cur_hash = hash(&cur_hash.as_ref()); + //version should increase with every message + let version = i; + trace!("version {}", version); let tx0 = Transaction::new( &mint.keypair(), bob_keypair.pubkey(), transfer_amount, cur_hash, + version, ); bank.register_entry_id(&cur_hash); cur_hash = hash(&cur_hash.as_ref()); diff --git a/src/vote_stage.rs b/src/vote_stage.rs index b7bd24b53ad9bb..cec07837575ded 100644 --- a/src/vote_stage.rs +++ b/src/vote_stage.rs @@ -10,7 +10,7 @@ use metrics; use packet::{BlobRecycler, SharedBlob}; use result::Result; use service::Service; -use signature::KeyPair; +use signature::{KeyPair, KeyPairUtil}; use std::collections::VecDeque; use std::result; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; @@ -33,6 +33,7 @@ enum VoteError { } pub fn create_vote_tx_and_blob( + bank: &Arc, last_id: &Hash, keypair: &KeyPair, crdt: &Arc>, @@ -45,7 +46,8 @@ pub fn create_vote_tx_and_blob( debug!("voting on {:?}", &last_id.as_ref()[..8]); wcrdt.new_vote(*last_id) }?; - let tx = Transaction::new_vote(&keypair, vote, *last_id, 0); + let version = bank.get_version(&keypair.pubkey()); + let tx = Transaction::new_vote(&keypair, vote, *last_id, 0, version.0); { let mut blob = shared_blob.write().unwrap(); let bytes = serialize(&tx)?; @@ -111,7 +113,7 @@ pub fn send_leader_vote( get_last_id_to_vote_on(debug_id, &ids, bank, now, last_vote) { if let Ok((tx, 
shared_blob)) = - create_vote_tx_and_blob(&last_id, keypair, crdt, blob_recycler) + create_vote_tx_and_blob(&bank, &last_id, keypair, crdt, blob_recycler) { bank.process_transaction(&tx)?; vote_blob_sender.send(VecDeque::from(vec![shared_blob]))?; @@ -141,7 +143,9 @@ fn send_validator_vote( vote_blob_sender: &BlobSender, ) -> Result<()> { let last_id = bank.last_id(); - if let Ok((_, shared_blob)) = create_vote_tx_and_blob(&last_id, keypair, crdt, blob_recycler) { + if let Ok((_, shared_blob)) = + create_vote_tx_and_blob(&bank, &last_id, keypair, crdt, blob_recycler) + { inc_new_counter!("replicate-vote_sent", 1); vote_blob_sender.send(VecDeque::from(vec![shared_blob]))?; @@ -272,7 +276,7 @@ pub mod tests { // give the leader some tokens let give_leader_tokens_tx = - Transaction::new(&mint.keypair(), leader_pubkey.clone(), 100, entry.id); + Transaction::new(&mint.keypair(), leader_pubkey.clone(), 100, entry.id, 0); bank.process_transaction(&give_leader_tokens_tx).unwrap(); leader_crdt.set_leader(leader_pubkey); diff --git a/src/voting.rs b/src/voting.rs index a16b46def78ab0..54841720a27334 100644 --- a/src/voting.rs +++ b/src/voting.rs @@ -11,8 +11,8 @@ pub fn entries_to_votes(entries: &[Entry]) -> Vec<(PublicKey, Vote, Hash)> { } pub fn transaction_to_vote(tx: &Transaction) -> Option<(PublicKey, Vote, Hash)> { - match tx.instruction { - Instruction::NewVote(ref vote) => Some((tx.from, vote.clone(), tx.last_id)), + match tx.instruction() { + Instruction::NewVote(ref vote) => Some((*tx.from(), vote.clone(), *tx.last_id())), _ => None, } } diff --git a/tests/multinode.rs b/tests/multinode.rs index b66f5faa35b66c..9d3f05f2296d79 100755 --- a/tests/multinode.rs +++ b/tests/multinode.rs @@ -107,7 +107,7 @@ fn test_multi_node_validator_catchup_from_zero() { // Send leader some tokens to vote let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &leader_pubkey, None).unwrap(); + retry_send_tx_and_get_balance(&leader_data, &alice, 
&leader_pubkey).unwrap(); info!("leader balance {}", leader_balance); let mut nodes = vec![server]; @@ -127,8 +127,7 @@ fn test_multi_node_validator_catchup_from_zero() { //contains the leader addr as well assert_eq!(servers.len(), N + 1); //verify leader can do transfer - let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 500); //verify validator has the same balance let mut success = 0usize; @@ -160,7 +159,7 @@ fn test_multi_node_validator_catchup_from_zero() { let servers = converge(&leader_data, N + 2); let mut leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap(); + retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); info!("leader balance {}", leader_balance); loop { let mut client = mk_client(&leader_data); @@ -215,7 +214,7 @@ fn test_multi_node_basic() { // Send leader some tokens to vote let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &leader_pubkey, None).unwrap(); + retry_send_tx_and_get_balance(&leader_data, &alice, &leader_pubkey).unwrap(); info!("leader balance {}", leader_balance); let mut nodes = vec![server]; @@ -235,8 +234,7 @@ fn test_multi_node_basic() { //contains the leader addr as well assert_eq!(servers.len(), N + 1); //verify leader can do transfer - let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 500); //verify validator has the same balance let mut success = 0usize; @@ -272,11 +270,9 @@ fn test_boot_validator_from_file() { leader_keypair, None, ); - let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap(); + let leader_balance = 
retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 500); - let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(1000)).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 1000); let keypair = KeyPair::new(); @@ -325,8 +321,7 @@ fn test_leader_restart_validator_start_from_old_ledger() { let (leader_data, leader_fullnode) = create_leader(&ledger_path); // lengthen the ledger - let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 500); // create a "stale" ledger by copying current ledger @@ -341,8 +336,7 @@ fn test_leader_restart_validator_start_from_old_ledger() { let (leader_data, leader_fullnode) = create_leader(&ledger_path); // lengthen the ledger - let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(1000)).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 1000); // restart the leader @@ -368,8 +362,7 @@ fn test_leader_restart_validator_start_from_old_ledger() { let mut client = mk_client(&validator_data); for _ in 0..10 { let leader_balance = - send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(expected)) - .unwrap(); + retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, expected); let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance)); if getbal == Some(leader_balance) { @@ -412,7 +405,6 @@ fn test_multi_node_dynamic_network() { let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey()); let bob_pubkey = KeyPair::new().pubkey(); let (alice, ledger_path) = genesis(10_000_000); - let 
alice_arc = Arc::new(RwLock::new(alice)); let leader_data = leader.data.clone(); let server = FullNode::new_without_sigverify( leader, @@ -423,57 +415,29 @@ fn test_multi_node_dynamic_network() { ); // Send leader some tokens to vote - let leader_balance = send_tx_and_retry_get_balance( - &leader_data, - &alice_arc.read().unwrap(), - &leader_pubkey, - None, - ).unwrap(); + let leader_balance = + retry_send_tx_and_get_balance(&leader_data, &alice, &leader_pubkey).unwrap(); info!("leader balance {}", leader_balance); info!("{:x} LEADER", leader_data.debug_id()); - let leader_balance = retry_send_tx_and_retry_get_balance( - &leader_data, - &alice_arc.read().unwrap(), - &bob_pubkey, - Some(500), - ).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 500); - let leader_balance = retry_send_tx_and_retry_get_balance( - &leader_data, - &alice_arc.read().unwrap(), - &bob_pubkey, - Some(1000), - ).unwrap(); + let leader_balance = retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); assert_eq!(leader_balance, 1000); - let t1: Vec<_> = (0..num_nodes) + info!("Waiting for keypairs to be created"); + let keypairs: Vec<_> = (0..num_nodes) .into_iter() .map(|n| { - let leader_data = leader_data.clone(); - let alice_clone = alice_arc.clone(); - Builder::new() - .name("keypair-thread".to_string()) - .spawn(move || { - info!("Spawned thread {}", n); - let keypair = KeyPair::new(); - //send some tokens to the new validator - let bal = retry_send_tx_and_retry_get_balance( - &leader_data, - &alice_clone.read().unwrap(), - &keypair.pubkey(), - Some(500), - ); - assert_eq!(bal, Some(500)); - info!("sent balance to[{}/{}] {}", n, num_nodes, keypair.pubkey()); - keypair - }) - .unwrap() + info!("Spawned thread {}", n); + let keypair = KeyPair::new(); + //send some tokens to the new validator + let bal = retry_send_tx_and_get_balance(&leader_data, &alice, &keypair.pubkey()); + 
assert_eq!(bal, Some(500)); + info!("sent balance to[{}/{}] {}", n, num_nodes, keypair.pubkey()); + keypair }) .collect(); - - info!("Waiting for keypairs to be created"); - let keypairs: Vec<_> = t1.into_iter().map(|t| t.join().unwrap()).collect(); info!("keypairs created"); let t2: Vec<_> = keypairs @@ -509,12 +473,8 @@ fn test_multi_node_dynamic_network() { for i in 0..num_nodes { //verify leader can do transfer let expected = ((i + 3) * 500) as i64; - let leader_balance = retry_send_tx_and_retry_get_balance( - &leader_data, - &alice_arc.read().unwrap(), - &bob_pubkey, - Some(expected), - ).unwrap(); + let leader_balance = + retry_send_tx_and_get_balance(&leader_data, &alice, &bob_pubkey).unwrap(); if leader_balance != expected { info!( "leader dropped transaction {} {:?} {:?}", @@ -639,48 +599,12 @@ fn retry_get_balance( None } -fn send_tx_and_retry_get_balance( - leader: &NodeInfo, +fn retry_send_tx_and_get_balance( + node: &NodeInfo, alice: &Mint, bob_pubkey: &PublicKey, - expected: Option, ) -> Option { - let mut client = mk_client(leader); - trace!("getting leader last_id"); - let last_id = client.get_last_id(); - info!("executing leader transfer"); - let _sig = client - .transfer(500, &alice.keypair(), *bob_pubkey, &last_id) - .unwrap(); - retry_get_balance(&mut client, bob_pubkey, expected) -} - -fn retry_send_tx_and_retry_get_balance( - leader: &NodeInfo, - alice: &Mint, - bob_pubkey: &PublicKey, - expected: Option, -) -> Option { - let mut client = mk_client(leader); - trace!("getting leader last_id"); - let last_id = client.get_last_id(); - info!("executing leader transfer"); - const LAST: usize = 30; - for run in 0..(LAST + 1) { - let _sig = client - .transfer(500, &alice.keypair(), *bob_pubkey, &last_id) - .unwrap(); - let out = client.poll_get_balance(bob_pubkey); - if expected.is_none() || run == LAST { - return out.ok().clone(); - } - trace!("retry_get_balance[{}] {:?} {:?}", run, out, expected); - if let (Some(e), Ok(o)) = (expected, out) { - if 
o == e { - return Some(o); - } - } - sleep(Duration::from_millis(20)); - } - None + let mut client = mk_client(node); + let _ = client.retry_transfer(&alice.keypair(), bob_pubkey, 500, 30); + client.poll_get_balance(bob_pubkey).ok() }