From b973b7a622b4ba2910c9fd37daf48927b4dad223 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Thu, 9 Dec 2021 11:50:26 -0500 Subject: [PATCH 1/5] Checking of Sprout anchors in non-finalized state (#3123) * Do prelim checking of Sprout anchors in non-finalized state Does not check intra-transaction interstitial states yet * Populate sprout anchors to allow other state tests to pass * Preliminary interstitial sprout note commitment tree anchor checks implementation * Make sure only prior anchors are checked in the same transaction * Add tests * Refactor a comment * Refactor rustdoc Co-authored-by: Deirdre Connolly * Use the first `JoinSplit`s from mainnet * Print debug messages * Use correct blocks for the tests Co-authored-by: Marek Co-authored-by: Conrado Gouvea --- zebra-chain/src/sapling/tests/test_vectors.rs | 2 +- zebra-chain/src/sprout/joinsplit.rs | 1 - zebra-chain/src/transaction.rs | 102 ++++++++------- zebra-state/src/service/check/anchors.rs | 69 +++++++--- .../src/service/check/tests/anchors.rs | 120 +++++++++++++++++- .../src/service/check/tests/nullifier.rs | 1 + zebra-state/src/service/finalized_state.rs | 18 +-- .../src/service/non_finalized_state/chain.rs | 12 +- .../src/vectors/block-main-0-000-395.txt | 1 + zebra-test/src/vectors/block.rs | 4 + 10 files changed, 252 insertions(+), 78 deletions(-) create mode 100644 zebra-test/src/vectors/block-main-0-000-395.txt diff --git a/zebra-chain/src/sapling/tests/test_vectors.rs b/zebra-chain/src/sapling/tests/test_vectors.rs index b726cc560d8..c9188b441ab 100644 --- a/zebra-chain/src/sapling/tests/test_vectors.rs +++ b/zebra-chain/src/sapling/tests/test_vectors.rs @@ -1,4 +1,4 @@ -// From https://github.com/zcash/librustzcash/blob/master/zcash_primitives/src/merkle_tree.rs#L512 +// From https://github.com/zcash/librustzcash/blob/master/zcash_primitives/src/merkle_tree.rs#L585 pub const HEX_EMPTY_ROOTS: [&str; 33] = [ "0100000000000000000000000000000000000000000000000000000000000000", 
"817de36ab2d57feb077634bca77819c8e0bd298c04f6fed0e6a83cc1356ca155", diff --git a/zebra-chain/src/sprout/joinsplit.rs b/zebra-chain/src/sprout/joinsplit.rs index 5b29028fbc7..7ba81212d59 100644 --- a/zebra-chain/src/sprout/joinsplit.rs +++ b/zebra-chain/src/sprout/joinsplit.rs @@ -24,7 +24,6 @@ pub struct JoinSplit { pub vpub_old: Amount, /// A value that the JoinSplit transfer inserts into the transparent value /// pool. - /// pub vpub_new: Amount, /// A root of the Sprout note commitment tree at some block height in the /// past, or the root produced by a previous JoinSplit transfer in this diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index dcf433e687a..0d13376aa83 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -46,9 +46,9 @@ use std::{collections::HashMap, fmt, iter}; /// /// A transaction is an encoded data structure that facilitates the transfer of /// value between two public key addresses on the Zcash ecosystem. Everything is -/// designed to ensure that transactions can created, propagated on the network, -/// validated, and finally added to the global ledger of transactions (the -/// blockchain). +/// designed to ensure that transactions can be created, propagated on the +/// network, validated, and finally added to the global ledger of transactions +/// (the blockchain). /// /// Zcash has a number of different transaction formats. They are represented /// internally by different enum variants. Because we checkpoint on Canopy @@ -612,6 +612,61 @@ impl Transaction { } } + /// Return if the transaction has any Sprout JoinSplit data. + pub fn has_sprout_joinsplit_data(&self) -> bool { + match self { + // No JoinSplits + Transaction::V1 { .. } | Transaction::V5 { .. } => false, + + // JoinSplits-on-BCTV14 + Transaction::V2 { joinsplit_data, .. } | Transaction::V3 { joinsplit_data, .. } => { + joinsplit_data.is_some() + } + + // JoinSplits-on-Groth16 + Transaction::V4 { joinsplit_data, .. 
} => joinsplit_data.is_some(), + } + } + + /// Returns the Sprout note commitments in this transaction. + pub fn sprout_note_commitments( + &self, + ) -> Box + '_> { + match self { + // Return [`NoteCommitment`]s with [`Bctv14Proof`]s. + Transaction::V2 { + joinsplit_data: Some(joinsplit_data), + .. + } + | Transaction::V3 { + joinsplit_data: Some(joinsplit_data), + .. + } => Box::new(joinsplit_data.note_commitments()), + + // Return [`NoteCommitment`]s with [`Groth16Proof`]s. + Transaction::V4 { + joinsplit_data: Some(joinsplit_data), + .. + } => Box::new(joinsplit_data.note_commitments()), + + // Return an empty iterator. + Transaction::V2 { + joinsplit_data: None, + .. + } + | Transaction::V3 { + joinsplit_data: None, + .. + } + | Transaction::V4 { + joinsplit_data: None, + .. + } + | Transaction::V1 { .. } + | Transaction::V5 { .. } => Box::new(std::iter::empty()), + } + } + // sapling /// Access the deduplicated [`sapling::tree::Root`]s in this transaction, @@ -741,45 +796,6 @@ impl Transaction { } } - /// Returns the Sprout note commitments in this transaction. - pub fn sprout_note_commitments( - &self, - ) -> Box + '_> { - match self { - // Return [`NoteCommitment`]s with [`Bctv14Proof`]s. - Transaction::V2 { - joinsplit_data: Some(joinsplit_data), - .. - } - | Transaction::V3 { - joinsplit_data: Some(joinsplit_data), - .. - } => Box::new(joinsplit_data.note_commitments()), - - // Return [`NoteCommitment`]s with [`Groth16Proof`]s. - Transaction::V4 { - joinsplit_data: Some(joinsplit_data), - .. - } => Box::new(joinsplit_data.note_commitments()), - - // Return an empty iterator. - Transaction::V2 { - joinsplit_data: None, - .. - } - | Transaction::V3 { - joinsplit_data: None, - .. - } - | Transaction::V4 { - joinsplit_data: None, - .. - } - | Transaction::V1 { .. } - | Transaction::V5 { .. } => Box::new(std::iter::empty()), - } - } - /// Returns the Sapling note commitments in this transaction, regardless of version. 
pub fn sapling_note_commitments(&self) -> Box + '_> { // This function returns a boxed iterator because the different @@ -1036,7 +1052,7 @@ impl Transaction { .joinsplits_mut() .map(|joinsplit| &mut joinsplit.vpub_old), ), - // JoinSplits with Groth Proofs + // JoinSplits with Groth16 Proofs Transaction::V4 { joinsplit_data: Some(joinsplit_data), .. diff --git a/zebra-state/src/service/check/anchors.rs b/zebra-state/src/service/check/anchors.rs index 2297c7db656..4d9d293f86f 100644 --- a/zebra-state/src/service/check/anchors.rs +++ b/zebra-state/src/service/check/anchors.rs @@ -1,14 +1,19 @@ //! Checks for whether cited anchors are previously-computed note commitment //! tree roots. +use std::collections::HashSet; + +use zebra_chain::sprout; + use crate::{ service::{finalized_state::FinalizedState, non_finalized_state::Chain}, PreparedBlock, ValidateContextError, }; -/// Check that all the Sprout, Sapling, and Orchard anchors specified by +/// Check that the Sprout, Sapling, and Orchard anchors specified by /// transactions in this block have been computed previously within the context -/// of its parent chain. +/// of its parent chain. We do not check any anchors in checkpointed blocks, which avoids +/// JoinSplits /// /// Sprout anchors may refer to some earlier block's final treestate (like /// Sapling and Orchard do exclusively) _or_ to the interstisial output @@ -40,28 +45,52 @@ pub(crate) fn anchors_refer_to_earlier_treestates( prepared: &PreparedBlock, ) -> Result<(), ValidateContextError> { for transaction in prepared.block.transactions.iter() { - // Sprout JoinSplits, with interstitial treestates to check as well + // Sprout JoinSplits, with interstitial treestates to check as well. // // The FIRST JOINSPLIT in a transaction MUST refer to the output treestate // of a previous block. 
+ if transaction.has_sprout_joinsplit_data() { + // > The anchor of each JoinSplit description in a transaction MUST refer to + // > either some earlier block’s final Sprout treestate, or to the interstitial + // > output treestate of any prior JoinSplit description in the same transaction. + // + // https://zips.z.cash/protocol/protocol.pdf#joinsplit + let mut interstitial_roots: HashSet = HashSet::new(); + + let mut interstitial_note_commitment_tree = parent_chain.sprout_note_commitment_tree(); + + for joinsplit in transaction.sprout_groth16_joinsplits() { + // Check all anchor sets, including the one for interstitial anchors. + // + // Note that [`interstitial_roots`] is always empty in the first + // iteration of the loop. This is because: + // + // > "The anchor of each JoinSplit description in a transaction + // > MUST refer to [...] to the interstitial output treestate of + // > any **prior** JoinSplit description in the same transaction." + if !parent_chain.sprout_anchors.contains(&joinsplit.anchor) + && !finalized_state.contains_sprout_anchor(&joinsplit.anchor) + && (!interstitial_roots.contains(&joinsplit.anchor)) + { + return Err(ValidateContextError::UnknownSproutAnchor { + anchor: joinsplit.anchor, + }); + } - // if let Some(sprout_shielded_data) = transaction.joinsplit_data { - // for joinsplit in transaction.sprout_groth16_joinsplits() { - // if !parent_chain.sprout_anchors.contains(joinsplit.anchor) - // && !finalized_state.contains_sprout_anchor(&joinsplit.anchor) - // { - // if !(joinsplit == &sprout_shielded_data.first) { - // // TODO: check interstitial treestates of the earlier JoinSplits - // // in this transaction against this anchor - // unimplemented!() - // } else { - // return Err(ValidateContextError::UnknownSproutAnchor { - // anchor: joinsplit.anchor, - // }); - // } - // } - // } - // } + tracing::debug!(?joinsplit.anchor, "validated sprout anchor"); + + // Add new anchors to the interstitial note commitment tree. 
+ for cm in joinsplit.commitments { + interstitial_note_commitment_tree + .append(cm) + .expect("note commitment should be appendable to the tree"); + } + + interstitial_roots.insert(interstitial_note_commitment_tree.root()); + + tracing::debug!(?joinsplit.anchor, "observed sprout anchor"); + } + } // Sapling Spends // diff --git a/zebra-state/src/service/check/tests/anchors.rs b/zebra-state/src/service/check/tests/anchors.rs index 0fa264feba9..f0ee46751dd 100644 --- a/zebra-state/src/service/check/tests/anchors.rs +++ b/zebra-state/src/service/check/tests/anchors.rs @@ -1,17 +1,133 @@ +//! Tests for whether cited anchors are checked properly. + use std::{convert::TryInto, ops::Deref, sync::Arc}; use zebra_chain::{ + amount::Amount, block::{Block, Height}, + primitives::Groth16Proof, serialization::ZcashDeserializeInto, - transaction::{LockTime, Transaction}, + sprout::JoinSplit, + transaction::{JoinSplitData, LockTime, Transaction}, }; use crate::{ arbitrary::Prepare, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, + PreparedBlock, }; -// sapling +// Sprout + +/// Check that, when primed with the first blocks that contain Sprout anchors, a +/// Sprout Spend's referenced anchor is validated. +#[test] +fn check_sprout_anchors() { + zebra_test::init(); + + let (mut state, _genesis) = new_state_with_mainnet_genesis(); + + // Bootstrap a block at height == 1. + let block_1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES + .zcash_deserialize_into::() + .expect("block should deserialize"); + + // Bootstrap a block just before the first Sprout anchors. + let block_395 = zebra_test::vectors::BLOCK_MAINNET_395_BYTES + .zcash_deserialize_into::() + .expect("block should deserialize"); + + // Add initial transactions to [`block_1`]. + let block_1 = prepare_sprout_block(block_1, block_395); + + // Validate and commit [`block_1`]. This will add an anchor referencing the + // empty note commitment tree to the state. 
+ assert!(state.validate_and_commit(block_1).is_ok()); + + // Bootstrap a block at height == 2 that references the Sprout note commitment tree state + // from [`block_1`]. + let block_2 = zebra_test::vectors::BLOCK_MAINNET_2_BYTES + .zcash_deserialize_into::() + .expect("block should deserialize"); + + // Exercise Sprout anchor checking with the first shielded transactions with + // anchors. + let block_396 = zebra_test::vectors::BLOCK_MAINNET_396_BYTES + .zcash_deserialize_into::() + .expect("block should deserialize"); + + // Add the transactions with the first anchors to [`block_2`]. + let block_2 = prepare_sprout_block(block_2, block_396); + + // Validate and commit [`block_2`]. This will also check the anchors. + assert_eq!(state.validate_and_commit(block_2), Ok(())); +} + +fn prepare_sprout_block(mut block_to_prepare: Block, reference_block: Block) -> PreparedBlock { + // Convert the coinbase transaction to a version that the non-finalized state will accept. + block_to_prepare.transactions[0] = + transaction_v4_from_coinbase(&block_to_prepare.transactions[0]).into(); + + reference_block + .transactions + .into_iter() + .filter(|tx| tx.has_sprout_joinsplit_data()) + .for_each(|tx| { + let joinsplit_data = match tx.deref() { + Transaction::V2 { joinsplit_data, .. } => joinsplit_data.clone(), + _ => unreachable!("These are known v2 transactions"), + }; + + // Change [`joinsplit_data`] so that the transaction passes the + // semantic validation. Namely, set the value balance to zero, and + // use a dummy Groth16 proof instead of a BCTV14 one. 
+ let joinsplit_data = joinsplit_data.map(|s| { + let mut new_joinsplits: Vec> = Vec::new(); + + for old_joinsplit in s.joinsplits() { + new_joinsplits.push(JoinSplit { + vpub_old: Amount::zero(), + vpub_new: Amount::zero(), + anchor: old_joinsplit.anchor, + nullifiers: old_joinsplit.nullifiers, + commitments: old_joinsplit.commitments, + ephemeral_key: old_joinsplit.ephemeral_key, + random_seed: old_joinsplit.random_seed, + vmacs: old_joinsplit.vmacs.clone(), + zkproof: Groth16Proof::from([0; 192]), + enc_ciphertexts: old_joinsplit.enc_ciphertexts, + }) + } + + match new_joinsplits.split_first() { + None => unreachable!("the new joinsplits are never empty"), + + Some((first, rest)) => JoinSplitData { + first: first.clone(), + rest: rest.to_vec(), + pub_key: s.pub_key, + sig: s.sig, + }, + } + }); + + // Add the new adjusted transaction to [`block_to_prepare`]. + block_to_prepare + .transactions + .push(Arc::new(Transaction::V4 { + inputs: Vec::new(), + outputs: Vec::new(), + lock_time: LockTime::min_lock_time(), + expiry_height: Height(0), + joinsplit_data, + sapling_shielded_data: None, + })) + }); + + Arc::new(block_to_prepare).prepare() +} + +// Sapling /// Check that, when primed with the first Sapling blocks, a Sapling Spend's referenced anchor is /// validated. diff --git a/zebra-state/src/service/check/tests/nullifier.rs b/zebra-state/src/service/check/tests/nullifier.rs index 2217f0b78c2..f9a58005ce8 100644 --- a/zebra-state/src/service/check/tests/nullifier.rs +++ b/zebra-state/src/service/check/tests/nullifier.rs @@ -324,6 +324,7 @@ proptest! 
{ // Allows anchor checks to pass state.disk.populate_with_anchors(&block1); + state.disk.populate_with_anchors(&block2); let mut previous_mem = state.mem.clone(); diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 7d5b9f18f28..81e00e5eba1 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -631,11 +631,11 @@ impl FinalizedState { self.db.zs_contains(orchard_nullifiers, &orchard_nullifier) } - // /// Returns `true` if the finalized state contains `sprout_anchor`. - // pub fn contains_sprout_anchor(&self, sprout_anchor: &sprout::tree::Root) -> bool { - // let sprout_anchors = self.db.cf_handle("sprout_anchors").unwrap(); - // self.db.zs_contains(sprout_anchors, &sprout_anchor) - // } + /// Returns `true` if the finalized state contains `sprout_anchor`. + pub fn contains_sprout_anchor(&self, sprout_anchor: &sprout::tree::Root) -> bool { + let sprout_anchors = self.db.cf_handle("sprout_anchors").unwrap(); + self.db.zs_contains(sprout_anchors, &sprout_anchor) + } /// Returns `true` if the finalized state contains `sapling_anchor`. 
pub fn contains_sapling_anchor(&self, sapling_anchor: &sapling::tree::Root) -> bool { @@ -790,15 +790,15 @@ impl FinalizedState { pub fn populate_with_anchors(&self, block: &Block) { let mut batch = rocksdb::WriteBatch::default(); - // let sprout_anchors = self.db.cf_handle("sprout_anchors").unwrap(); + let sprout_anchors = self.db.cf_handle("sprout_anchors").unwrap(); let sapling_anchors = self.db.cf_handle("sapling_anchors").unwrap(); let orchard_anchors = self.db.cf_handle("orchard_anchors").unwrap(); for transaction in block.transactions.iter() { // Sprout - // for joinsplit in transaction.sprout_groth16_joinsplits() { - // batch.zs_insert(sprout_anchors, joinsplit.anchor, ()); - // } + for joinsplit in transaction.sprout_groth16_joinsplits() { + batch.zs_insert(sprout_anchors, joinsplit.anchor, ()); + } // Sapling for anchor in transaction.sapling_anchors() { diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 5180a07bde5..a775eaeb4d1 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -60,9 +60,9 @@ pub struct Chain { pub(crate) history_tree: HistoryTree, /// The Sprout anchors created by `blocks`. - pub(super) sprout_anchors: HashMultiSet, + pub(crate) sprout_anchors: HashMultiSet, /// The Sprout anchors created by each block in `blocks`. - pub(super) sprout_anchors_by_height: BTreeMap, + pub(crate) sprout_anchors_by_height: BTreeMap, /// The Sapling anchors created by `blocks`. pub(crate) sapling_anchors: HashMultiSet, /// The Sapling anchors created by each block in `blocks`. @@ -394,6 +394,14 @@ impl Chain { chain_value_pools: self.chain_value_pools, } } + + /// Returns a clone of the Sprout note commitment tree for this chain. + /// + /// Useful when calculating interstitial note commitment trees for each JoinSplit in a Sprout + /// shielded transaction. 
+ pub fn sprout_note_commitment_tree(&self) -> sprout::tree::NoteCommitmentTree { + self.sprout_note_commitment_tree.clone() + } } /// The revert position being performed on a chain. diff --git a/zebra-test/src/vectors/block-main-0-000-395.txt b/zebra-test/src/vectors/block-main-0-000-395.txt new file mode 100644 index 00000000000..0b13c18767d --- /dev/null +++ b/zebra-test/src/vectors/block-main-0-000-395.txt @@ -0,0 +1 @@ +04000000b356199270b1cd6e96759c2a1a224c5c65364a4072d8b1e56f348b4a690100008120191929df6a6dbb5d7f61788155ac0b3f92a431efb2c2f6783fd99bebf39c0000000000000000000000000000000000000000000000000000000000000000518c1358c97d011eaa9da5470d00000000000000000000000000000000000000000000000000000ffd400500cb784fb5929563200bd38f5ad8b0751d052abcef16c3bbcef9af69318fe831e4123aa9e0cd401fce810547653ba354c39d219705ed30ef25b68d7a773c131d82ceb7f09e97a7ca03058ca4b60df665b35e70a1017c8120475b2fdaed0b62dceb31149cd4263a2fc402f07bd29f50a7c5ee6633aa9270894a05883eddc5043679a86fef6b75dad2e066e9c1d5684de00789de1258be8884490bd3eccb621c503bccb9489a8c509e0351cca30ff42f3de4b9e0f910ab5a28f4006def44066683af258c12b3e587a0a137a6f1fd106d3aa43f14495b943cce73ca91a712b6afb9fac934d750015953e1cff110dd395d786d673ec0be906397c23f459007c2f4d6d91d074d482023bebba58b70ffd0757c570b188fcd8f20b433fd2b652c6b34cb3e0221fae43b38e81e55f2dbac4f2f1676780a71a87a7dca7f9fb44a3a3ab500d719bcf9e568004fec8bbe02c2d5553501997c8bb05dd259979d53d60a70910136128c4b2b1fc62e571c702de5b93b2515a56664326ce1959b8d0db5fc554b5299c1635d963d27537e965fc9bb070b1ce54f4ad087e9c31cec127f8e4e0a1a0c4d7377c61b61bd11429ba4e984ee6321385ec09d97ae785fb24a10c355393e1555f3be1b119af65b0b9a243ceed71b7d22492734ba3da7e272f09e31dc2d7785ed86cf1ef3e974dd0d5ac4ed41756e935b4ed61bb09841f601fe54e2826a780b5e1b22d81dbadc5cd79e6d368308722d90c511d0c56075d105054f547ce7729517db1b7655c74e1b6723ded3745498f53e95e9553ef6346ceda50c316a0529b279b753cdd2aca22a48f9dbb10a2141de61c29c07b847243de6d777d153237340ea2dabe7e840ae2c297d9f731629a98568e6ebfc3e1f25d8acac94abff4b82ada46b4
77c083db3408f0a1925f3531fd2e81361883452a810a7963ee9901fb452010e4c86f8091cb5705d92563fc7ea85a1ea7f158637b647143a35b001f75ac724de453845cbec1666c90a1ecdde5e8c0f82dad4d2e3ca325dd1400ab955495c7aed30799e61bfb8f456f5f94ad58a441ddcc1fb05b0441c2526b5b96827e7706ee917fa98479d6635255a1ce2fbcdcf02fdeb02f1b64ba9e530e7f987a9138ff73b10179d357c32c35558e34f27eae53fb2453cc5adb7b1eb044be9f457b24975d73fd2fc7ed16c03f60b949685c31c36ae135a8fa1f354e87f1776cb08dffc10d943ac8de556f3915ed4c1226783d973a81420c3999d68eb21c181750e713a94ee2b745794cc44c81314349d1cafcd24052ee9f0ece388efff25220ce9360357e8159bd10722b9dcde35bdd55933de0245cb42769ed9806520a8a5da91b253c310437b0f5e1e7c5c31a7ce20578b6a249fdce1b1d57f60358bc81f6bc462a639a079de30d47627c246ce41fa7f29e4021cc96fb19cb78b96a1b5234036532e1341f7054c402747c5af5d6209a9c8e578d7cf486e00c71c60a10eed2889e51791b91da5d3e6d65dff12775797e8542f0a4e3b2b277d3147bbe7b07355200ebf8c56bc7d03a15d4043b75943c673625aa2e071dd4e5b312d4408f4aca22f0e2347c205e2ed9a6036de1a301475930f945f57b28a2e6b996bd40d6fe0c9897dbcbdcaf215da48e5cb9a5e258845c2bdc5ceac9175d157f17e04e6690e10967313d0c3f303517218d4fcacb620651628f7ce39c73b93da15340661b13e2e0fd65b79ce09d69b7a874fa01c878342577137bf7dfe93b920951ce05de1c3dab420edef15fb99e21bfec12f38018309a354a5e105d99aa1f7024eb7c390caf99dddef4a1407a157ad8e22629e5e51815d3a02156a871aa8470a073dd930de01a7854725b965af5a1b3581fb756644642a8fb05b6dab521b561035d8f466b7019c59670101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff04028b0100ffffffff02705c2d01000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac1c574b000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a8700000000 diff --git a/zebra-test/src/vectors/block.rs b/zebra-test/src/vectors/block.rs index deb60379eea..1bbcc31cff9 100644 --- a/zebra-test/src/vectors/block.rs +++ b/zebra-test/src/vectors/block.rs @@ -329,6 +329,10 @@ lazy_static! 
{ pub static ref BLOCK_MAINNET_202_BYTES: Vec = >::from_hex(include_str!("block-main-0-000-202.txt").trim()) .expect("Block bytes are in valid hex representation"); + // zcash-cli getblock 395 0 > block-main-0-000-395.txt + pub static ref BLOCK_MAINNET_395_BYTES: Vec = + >::from_hex(include_str!("block-main-0-000-395.txt").trim()) + .expect("Block bytes are in valid hex representation"); // zcash-cli getblock 396 0 > block-main-0-000-396.txt pub static ref BLOCK_MAINNET_396_BYTES: Vec = >::from_hex(include_str!("block-main-0-000-396.txt").trim()) From 7dd2ac267c4fcb836774f0cd91de253249bf3ac1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Dec 2021 20:11:27 +0000 Subject: [PATCH 2/5] Bump serde from 1.0.130 to 1.0.131 (#3178) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.130 to 1.0.131. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.130...v1.0.131) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: teor --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 058523f5223..2354178700c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3151,9 +3151,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "b4ad69dfbd3e45369132cc64e6748c2d65cdfb001a2b1c232d128b4ad60561c1" dependencies = [ "serde_derive", ] @@ -3180,9 +3180,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "b710a83c4e0dff6a3d511946b95274ad9ca9e5d3ae497b63fda866ac955358d2" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", From 37808eaadbabc75793599b6bfad413c6c3093c72 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 10 Dec 2021 10:19:52 +1000 Subject: [PATCH 3/5] Security: When there are no new peers, stop crawler using CPU and writing logs (#3177) * Stop useless crawler attempts when there are no peers and no crawl responses * Disable GitHub bug report URLs when the disk is full * Add help text for the `zebrad start` tracing filter option --- zebra-network/src/peer_set/candidate_set.rs | 61 +++++++++++++++------ zebra-network/src/peer_set/initialize.rs | 21 ++++++- zebra-network/src/peer_set/limit.rs | 4 +- zebrad/src/application.rs | 4 +- zebrad/src/commands/start.rs | 4 +- 5 files changed, 68 insertions(+), 26 deletions(-) diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index 4bfeb49b4be..ab1b2790200 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ 
b/zebra-network/src/peer_set/candidate_set.rs @@ -7,7 +7,9 @@ use tower::{Service, ServiceExt}; use zebra_chain::serialization::DateTime32; -use crate::{constants, types::MetaAddr, AddressBook, BoxError, Request, Response}; +use crate::{ + constants, peer_set::set::MorePeers, types::MetaAddr, AddressBook, BoxError, Request, Response, +}; #[cfg(test)] mod tests; @@ -140,7 +142,7 @@ where /// Update the peer set from the network, using the default fanout limit. /// /// See [`update_initial`][Self::update_initial] for details. - pub async fn update(&mut self) -> Result<(), BoxError> { + pub async fn update(&mut self) -> Result, BoxError> { self.update_timeout(None).await } @@ -151,6 +153,9 @@ where /// - Process all completed peer responses, adding new peers in the /// [`NeverAttemptedGossiped`] state. /// + /// Returns `Some(MorePeers)` if the crawl was successful and the crawler + /// should ask for more peers. Returns `None` if there are no new peers. + /// /// ## Correctness /// /// Pass the initial peer set size as `fanout_limit` during initialization, @@ -177,7 +182,10 @@ where /// [`NeverAttemptedGossiped`]: crate::PeerAddrState::NeverAttemptedGossiped /// [`Failed`]: crate::PeerAddrState::Failed /// [`AttemptPending`]: crate::PeerAddrState::AttemptPending - pub async fn update_initial(&mut self, fanout_limit: usize) -> Result<(), BoxError> { + pub async fn update_initial( + &mut self, + fanout_limit: usize, + ) -> Result, BoxError> { self.update_timeout(Some(fanout_limit)).await } @@ -185,7 +193,12 @@ where /// `fanout_limit`, and imposing a timeout on the entire fanout. /// /// See [`update_initial`][Self::update_initial] for details. - async fn update_timeout(&mut self, fanout_limit: Option) -> Result<(), BoxError> { + async fn update_timeout( + &mut self, + fanout_limit: Option, + ) -> Result, BoxError> { + let mut more_peers = None; + // SECURITY // // Rate limit sending `GetAddr` messages to peers. 
@@ -203,7 +216,7 @@ where ) .await { - fanout_result?; + more_peers = fanout_result?; } else { // update must only return an error for permanent failures info!("timeout waiting for peer service readiness or peer responses"); @@ -212,37 +225,48 @@ self.min_next_crawl = Instant::now() + constants::MIN_PEER_GET_ADDR_INTERVAL; } - Ok(()) + Ok(more_peers) } /// Update the peer set from the network, limiting the fanout to /// `fanout_limit`. /// - /// See [`update_initial`][Self::update_initial] for details. + /// Opportunistically crawl the network on every update call to ensure + /// we're actively fetching peers. Continue independently of whether we + /// actually receive any peers, but always ask the network for more. + /// + /// Because requests are load-balanced across existing peers, we can make + /// multiple requests concurrently, which will be randomly assigned to + /// existing peers, but we don't make too many because update may be + /// called while the peer set is already loaded. + /// + /// See [`update_initial`][Self::update_initial] for more details. /// /// # Correctness /// /// This function does not have a timeout. /// Use [`update_timeout`][Self::update_timeout] instead. - async fn update_fanout(&mut self, fanout_limit: Option<usize>) -> Result<(), BoxError> { - // Opportunistically crawl the network on every update call to ensure - // we're actively fetching peers. Continue independently of whether we - // actually receive any peers, but always ask the network for more. - // - // Because requests are load-balanced across existing peers, we can make - // multiple requests concurrently, which will be randomly assigned to - // existing peers, but we don't make too many because update may be - // called while the peer set is already loaded.
- let mut responses = FuturesUnordered::new(); + async fn update_fanout( + &mut self, + fanout_limit: Option<usize>, + ) -> Result<Option<MorePeers>, BoxError> { let fanout_limit = fanout_limit .map(|fanout_limit| min(fanout_limit, constants::GET_ADDR_FANOUT)) .unwrap_or(constants::GET_ADDR_FANOUT); debug!(?fanout_limit, "sending GetPeers requests"); + + let mut responses = FuturesUnordered::new(); + let mut more_peers = None; + + // Launch requests + // // TODO: launch each fanout in its own task (might require tokio 1.6) for _ in 0..fanout_limit { let peer_service = self.peer_service.ready().await?; responses.push(peer_service.call(Request::Peers)); } + + // Process responses while let Some(rsp) = responses.next().await { match rsp { Ok(Response::Peers(addrs)) => { @@ -253,6 +277,7 @@ where ); let addrs = validate_addrs(addrs, DateTime32::now()); self.send_addrs(addrs); + more_peers = Some(MorePeers); } Err(e) => { // since we do a fanout, and new updates are triggered by @@ -263,7 +288,7 @@ where } } - Ok(()) + Ok(more_peers) } /// Add new `addrs` to the address book. diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 575e25f2ee6..3b402463133 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -698,9 +698,22 @@ where // // TODO: refactor candidates into a buffered service, so we can // spawn independent tasks to avoid deadlocks - candidates.update().await?; - // Try to connect to a new peer. - let _ = demand_tx.try_send(MorePeers); + let more_peers = candidates.update().await?; + + // If we got more peers, try to connect to a new peer. + // + // # Security + // + // Update attempts are rate-limited by the candidate set. + // + // We only try peers if there was actually an update. + // So if all peers have had a recent attempt, + // and there was recent update with no peers, + // the channel will drain. + // This prevents useless update attempt loops.
+ if let Some(more_peers) = more_peers { + let _ = demand_tx.try_send(more_peers); + } } TimerCrawl { tick } => { debug!( @@ -726,6 +739,8 @@ where // The demand signal that was taken out of the queue // to attempt to connect to the failed candidate never // turned into a connection, so add it back: + // + // Security: handshake failures are rate-limited by peer attempt timeouts. let _ = demand_tx.try_send(MorePeers); } } diff --git a/zebra-network/src/peer_set/limit.rs b/zebra-network/src/peer_set/limit.rs index 6c2f1849f21..9979ba3d157 100644 --- a/zebra-network/src/peer_set/limit.rs +++ b/zebra-network/src/peer_set/limit.rs @@ -39,7 +39,7 @@ impl fmt::Debug for ActiveConnectionCounter { impl ActiveConnectionCounter { /// Create and return a new active connection counter. pub fn new_counter() -> Self { - // TODO: This channel will be bounded by the connection limit (#1850, #1851, #2902). + // The number of items in this channel is bounded by the connection limit. let (close_notification_tx, close_notification_rx) = mpsc::unbounded_channel(); Self { @@ -73,7 +73,7 @@ impl ActiveConnectionCounter { ); } - debug!( + trace!( open_connections = ?self.count, ?previous_connections, "updated active connection count" diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 250a4a7f848..d098513a77d 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -299,7 +299,9 @@ impl Application for ZebradApp { } let error_str = error.to_string(); - !error_str.contains("timed out") && !error_str.contains("duplicate hash") + !error_str.contains("timed out") + && !error_str.contains("duplicate hash") + && !error_str.contains("No space left on device") } }); diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 4c7aa8741d9..eaa6f830033 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -72,8 +72,8 @@ use crate::{ /// `start` subcommand #[derive(Command, Debug, Options)] pub struct StartCmd { - /// 
Filter strings - #[options(free)] + /// Filter strings which override the config file and defaults + #[options(free, help = "tracing filters which override the zebrad.toml config")] filters: Vec<String>, } From f7505359611692d9ac26534ac414e9cc0d02ee9c Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 9 Dec 2021 22:18:43 -0300 Subject: [PATCH 4/5] Spawn initial handshakes in separated task (#3189) * spawn connector * expand comment Co-authored-by: teor * fix error handling Co-authored-by: teor --- zebra-network/src/peer_set/initialize.rs | 28 +++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 3b402463133..b514d61f01f 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -246,7 +246,9 @@ async fn add_initial_peers( ) -> Result where S: Service - + Clone, + + Clone + + Send + + 'static, S::Future: Send + 'static, { let initial_peers = limit_initial_peers(&config, address_book_updater).await; @@ -285,20 +287,26 @@ where connection_tracker, }; - let outbound_connector = outbound_connector.clone(); - async move { - // Rate-limit the connection, sleeping for an interval according - // to its index in the list. + // Construct a connector future but do not drive it yet ... + let outbound_connector_future = outbound_connector + .clone() + .oneshot(req) + .map_err(move |e| (addr, e)); + + // ... instead, spawn a new task to handle this connector + tokio::spawn(async move { + let task = outbound_connector_future.await; + // Only spawn one outbound connector per `MIN_PEER_CONNECTION_INTERVAL`, + // sleeping for an interval according to its index in the list.
sleep(constants::MIN_PEER_CONNECTION_INTERVAL.saturating_mul(i as u32)).await; - outbound_connector - .oneshot(req) - .map_err(move |e| (addr, e)) - .await - } + task + }) }) .collect(); while let Some(handshake_result) = handshakes.next().await { + let handshake_result = + handshake_result.expect("unexpected panic in initial peer handshake"); match handshake_result { Ok(ref change) => { handshake_success_total += 1; From 62c78ad9398f6783de14322a0df9c1270825fc1c Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 10 Dec 2021 23:35:35 +1000 Subject: [PATCH 5/5] Update the State RFC to match the current database format (#3139) * Update the State RFC to match the current database format * Formatting and name fixes * Remove redundant generic parameter * Remove redundant generics * Fix history tree types Co-authored-by: Conrado Gouvea * Fix spacing Co-authored-by: Conrado Gouvea --- book/src/dev/rfcs/0005-state-updates.md | 94 ++++++++++++------------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/book/src/dev/rfcs/0005-state-updates.md b/book/src/dev/rfcs/0005-state-updates.md index e634d7b2263..a3129f1bdd6 100644 --- a/book/src/dev/rfcs/0005-state-updates.md +++ b/book/src/dev/rfcs/0005-state-updates.md @@ -600,29 +600,46 @@ order on byte strings is the numeric ordering). 
We use the following rocksdb column families: -| Column Family | Keys | Values | Updates | -|-----------------------|-----------------------|--------------------------------------|---------| -| `hash_by_height` | `BE32(height)` | `block::Hash` | Never | -| `height_by_hash` | `block::Hash` | `BE32(height)` | Never | -| `block_by_height` | `BE32(height)` | `Block` | Never | -| `tx_by_hash` | `transaction::Hash` | `(BE32(height) \|\| BE32(tx_index))` | Never | -| `utxo_by_outpoint` | `OutPoint` | `transparent::Output` | Delete | -| `sprout_nullifiers` | `sprout::Nullifier` | `()` | Never | -| `sapling_nullifiers` | `sapling::Nullifier` | `()` | Never | -| `orchard_nullifiers` | `orchard::Nullifier` | `()` | Never | -| `sprout_anchors` | `sprout::tree::Root` | `()` | Never | -| `sprout_incremental` | `BE32(height)` *?* | `sprout::tree::NoteCommitmentTree` | Delete | -| `sapling_anchors` | `sapling::tree::Root` | `()` | Never | -| `sapling_incremental` | `BE32(height)` *?* | `sapling::tree::NoteCommitmentTree` | Delete | -| `orchard_anchors` | `orchard::tree::Root` | `()` | Never | -| `orchard_incremental` | `BE32(height)` *?* | `orchard::tree::NoteCommitmentTree` | Delete | -| `history_incremental` | `BE32(height)` | `zcash_history::Entry` | Delete | -| `tip_chain_value_pool`| `BE32(height)` | `ValueBalance` | Delete | +| Column Family | Keys | Values | Updates | +|--------------------------------|------------------------|--------------------------------------|---------| +| `hash_by_height` | `block::Height` | `block::Hash` | Never | +| `height_by_hash` | `block::Hash` | `block::Height` | Never | +| `block_by_height` | `block::Height` | `Block` | Never | +| `tx_by_hash` | `transaction::Hash` | `TransactionLocation` | Never | +| `utxo_by_outpoint` | `OutPoint` | `transparent::Utxo` | Delete | +| `sprout_nullifiers` | `sprout::Nullifier` | `()` | Never | +| `sprout_anchors` | `sprout::tree::Root` | `()` | Never | +| `sprout_note_commitment_tree` | `block::Height` | 
`sprout::tree::NoteCommitmentTree` | Delete | +| `sapling_nullifiers` | `sapling::Nullifier` | `()` | Never | +| `sapling_anchors` | `sapling::tree::Root` | `()` | Never | +| `sapling_note_commitment_tree` | `block::Height` | `sapling::tree::NoteCommitmentTree` | Delete | +| `orchard_nullifiers` | `orchard::Nullifier` | `()` | Never | +| `orchard_anchors` | `orchard::tree::Root` | `()` | Never | +| `orchard_note_commitment_tree` | `block::Height` | `orchard::tree::NoteCommitmentTree` | Delete | +| `history_tree` | `block::Height` | `NonEmptyHistoryTree` | Delete | +| `tip_chain_value_pool` | `()` | `ValueBalance` | Update | Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`. Other structures are encoded using `IntoDisk`/`FromDisk`. -**Note:** We do not store the cumulative work for the finalized chain, because the finalized work is equal for all non-finalized chains. So the additional non-finalized work can be used to calculate the relative chain order, and choose the best chain. +Block and Transaction Data: +- `Height`: 32 bits, big-endian, unsigned +- `TransactionIndex`: 32 bits, big-endian, unsigned +- `TransactionLocation`: `Height \|\| TransactionIndex` +- `TransparentOutputIndex`: 32 bits, big-endian, unsigned +- `OutPoint`: `transaction::Hash \|\| TransparentOutputIndex` +- `IsFromCoinbase` : 8 bits, boolean, zero or one +- `Utxo`: `Height \|\| IsFromCoinbase \|\| Output` + +We use big-endian encoding for keys, to allow database index prefix searches. + +Amounts: +- `Amount`: 64 bits, little-endian, signed +- `ValueBalance`: `[Amount; 4]` + +Derived Formats: +- `*::NoteCommitmentTree`: `bincode` using `serde` +- `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation ### Implementing consensus rules using rocksdb [rocksdb-consensus-rules]: #rocksdb-consensus-rules @@ -673,7 +690,7 @@ So they should not be used for consensus-critical checks. 
the fact that we commit blocks in order means we're writing only to the end of the rocksdb column family, which may help save space. -- Transaction references are stored as a `(height, index)` pair referencing the +- `TransactionLocation`s are stored as a `(height, index)` pair referencing the height of the transaction's parent block and the transaction's index in that block. This would more traditionally be a `(hash, index)` pair, but because we store blocks by height, storing the height saves one level of indirection. @@ -689,7 +706,11 @@ So they should not be used for consensus-critical checks. But we map those peak indexes to heights, to make testing and debugging easier. - The value pools are only stored for the finalized tip. - We index it by height to make testing and debugging easier. + +- We do not store the cumulative work for the finalized chain, + because the finalized work is equal for all non-finalized chains. + So the additional non-finalized work can be used to calculate the relative chain order, + and choose the best chain. ## Committing finalized blocks @@ -714,40 +735,17 @@ zebra-state service's responsibility) to commit finalized blocks in order. The genesis block does not have a parent block. For genesis blocks, check that `block`'s parent hash is `null` (all zeroes) and its height is `0`. -2. Insert: - - `(hash, height)` into `height_by_hash`; - - `(height, hash)` into `hash_by_height`; - - `(height, block)` into `block_by_height`. +2. Insert the block and transaction data into the relevant column families. 3. If the block is a genesis block, skip any transaction updates. (Due to a [bug in zcashd](https://github.com/ZcashFoundation/zebra/issues/559), genesis block anchors and transactions are ignored during validation.) -4. Update the `sprout_anchors` and `sapling_anchors` trees with the Sprout and - Sapling anchors. - -5. Iterate over the enumerated transactions in the block. For each transaction: - - 1. 
Insert `(transaction_hash, BE32(block_height) || BE32(tx_index))` to - `tx_by_hash`; - - 2. For each `TransparentInput::PrevOut { outpoint, .. }` in the - transaction's `inputs()`, remove `outpoint` from `utxo_by_output`. - - 3. For each `output` in the transaction's `outputs()`, construct the - `outpoint` that identifies it, and insert `(outpoint, output)` into - `utxo_by_output`. - - 4. For each [`JoinSplit`] description in the transaction, - insert `(nullifiers[0],())` and `(nullifiers[1],())` into - `sprout_nullifiers`. - - 5. For each [`Spend`] description in the transaction, insert - `(nullifier,())` into `sapling_nullifiers`. +4. Update the block anchors, history tree, and chain value pools. - 6. For each [`Action`] description in the transaction, insert - `(nullifier,())` into `orchard_nullifiers`. +5. Iterate over the enumerated transactions in the block. For each transaction, + update the relevant column families. **Note**: The Sprout and Sapling anchors are the roots of the Sprout and Sapling note commitment trees that have already been calculated for the last