diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs
index bbb51ec1780..667547512bc 100644
--- a/zebra-state/src/service/finalized_state.rs
+++ b/zebra-state/src/service/finalized_state.rs
@@ -329,13 +329,7 @@ impl FinalizedState {
         #[cfg(feature = "elasticsearch")]
         let finalized_block = finalized.verified.block.clone();
 
-        let result = self.db.write_block(
-            finalized.verified,
-            finalized.treestate.history_tree,
-            finalized.treestate.note_commitment_trees,
-            self.network,
-            source,
-        );
+        let result = self.db.write_block(finalized, self.network, source);
 
         if result.is_ok() {
             // Save blocks to elasticsearch if the feature is enabled.
diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs
index aad9f2272bd..cd786907c37 100644
--- a/zebra-state/src/service/finalized_state/zebra_db/block.rs
+++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs
@@ -19,9 +19,7 @@ use itertools::Itertools;
 use zebra_chain::{
     amount::NonNegative,
     block::{self, Block, Height},
-    history_tree::HistoryTree,
     orchard,
-    parallel::tree::NoteCommitmentTrees,
     parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH},
     sapling,
     serialization::TrustedPreallocate,
@@ -31,6 +29,7 @@ use zebra_chain::{
 };
 
 use crate::{
+    request::ContextuallyVerifiedBlockWithTrees,
     service::finalized_state::{
         disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
         disk_format::{
@@ -281,15 +280,12 @@ impl ZebraDb {
     /// - Propagates any errors from updating history and note commitment trees
     pub(in super::super) fn write_block(
         &mut self,
-        finalized: SemanticallyVerifiedBlock,
-        history_tree: Arc<HistoryTree>,
-        note_commitment_trees: NoteCommitmentTrees,
+        finalized: ContextuallyVerifiedBlockWithTrees,
         network: Network,
         source: &str,
     ) -> Result<block::Hash, BoxError> {
-        let finalized_hash = finalized.hash;
-
         let tx_hash_indexes: HashMap<transaction::Hash, usize> = finalized
+            .verified
             .transaction_hashes
             .iter()
             .enumerate()
@@ -302,11 +298,12 @@ impl ZebraDb {
         // simplify the spent_utxos location lookup code,
         // and remove the extra new_outputs_by_out_loc argument
         let new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo> = finalized
+            .verified
             .new_outputs
             .iter()
             .map(|(outpoint, ordered_utxo)| {
                 (
-                    lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes),
+                    lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes),
                     ordered_utxo.utxo.clone(),
                 )
             })
@@ -315,6 +312,7 @@ impl ZebraDb {
         // Get a list of the spent UTXOs, before we delete any from the database
         let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> =
             finalized
+                .verified
                 .block
                 .transactions
                 .iter()
@@ -326,12 +324,13 @@ impl ZebraDb {
                         // Some utxos are spent in the same block, so they will be in
                         // `tx_hash_indexes` and `new_outputs`
                         self.output_location(&outpoint).unwrap_or_else(|| {
-                            lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes)
+                            lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes)
                         }),
                         self.utxo(&outpoint)
                             .map(|ordered_utxo| ordered_utxo.utxo)
                             .or_else(|| {
                                 finalized
+                                    .verified
                                     .new_outputs
                                     .get(&outpoint)
                                     .map(|ordered_utxo| ordered_utxo.utxo.clone())
@@ -356,6 +355,7 @@ impl ZebraDb {
             .values()
             .chain(
                 finalized
+                    .verified
                     .new_outputs
                     .values()
                     .map(|ordered_utxo| &ordered_utxo.utxo),
@@ -376,13 +376,11 @@ impl ZebraDb {
         // In case of errors, propagate and do not write the batch.
         batch.prepare_block_batch(
             &self.db,
-            finalized,
+            &finalized,
             new_outputs_by_out_loc,
             spent_utxos_by_outpoint,
             spent_utxos_by_out_loc,
             address_balances,
-            history_tree,
-            note_commitment_trees,
             self.finalized_value_pool(),
         )?;
 
@@ -390,7 +388,7 @@ impl ZebraDb {
 
         tracing::trace!(?source, "committed block from");
 
-        Ok(finalized_hash)
+        Ok(finalized.verified.hash)
     }
 }
 
@@ -429,25 +427,16 @@ impl DiskWriteBatch {
     pub fn prepare_block_batch(
         &mut self,
         db: &DiskDb,
-        finalized: SemanticallyVerifiedBlock,
+        finalized: &ContextuallyVerifiedBlockWithTrees,
         new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
         spent_utxos_by_outpoint: HashMap<transparent::OutPoint, transparent::Utxo>,
         spent_utxos_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
         address_balances: HashMap<transparent::Address, AddressBalanceLocation>,
-        history_tree: Arc<HistoryTree>,
-        note_commitment_trees: NoteCommitmentTrees,
         value_pool: ValueBalance<NonNegative>,
     ) -> Result<(), BoxError> {
-        let SemanticallyVerifiedBlock {
-            block,
-            hash,
-            height,
-            ..
-        } = &finalized;
-
         // Commit block and transaction data.
         // (Transaction indexes, note commitments, and UTXOs are committed later.)
-        self.prepare_block_header_and_transaction_data_batch(db, &finalized)?;
+        self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?;
 
         // # Consensus
         //
@@ -458,28 +447,37 @@ impl DiskWriteBatch {
         //
         // By returning early, Zebra commits the genesis block and transaction data,
         // but it ignores the genesis UTXO and value pool updates.
-        if self.prepare_genesis_batch(db, &finalized) {
+        if self.prepare_genesis_batch(db, &finalized.verified) {
             return Ok(());
         }
 
         // Commit transaction indexes
         self.prepare_transparent_transaction_batch(
             db,
-            &finalized,
+            &finalized.verified,
             &new_outputs_by_out_loc,
             &spent_utxos_by_outpoint,
             &spent_utxos_by_out_loc,
             address_balances,
         )?;
-        self.prepare_shielded_transaction_batch(db, &finalized)?;
+        self.prepare_shielded_transaction_batch(db, &finalized.verified)?;
 
-        self.prepare_note_commitment_batch(db, &finalized, note_commitment_trees, history_tree)?;
+        self.prepare_note_commitment_batch(db, finalized)?;
 
         // Commit UTXOs and value pools
-        self.prepare_chain_value_pools_batch(db, &finalized, spent_utxos_by_outpoint, value_pool)?;
+        self.prepare_chain_value_pools_batch(
+            db,
+            &finalized.verified,
+            spent_utxos_by_outpoint,
+            value_pool,
+        )?;
 
         // The block has passed contextual validation, so update the metrics
-        block_precommit_metrics(block, *hash, *height);
+        block_precommit_metrics(
+            &finalized.verified.block,
+            finalized.verified.hash,
+            finalized.verified.height,
+        );
 
         Ok(())
     }
diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs
index 590f609d824..3fa088818ef 100644
--- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs
+++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs
@@ -21,6 +21,7 @@ use zebra_chain::{
 };
 
 use crate::{
+    request::ContextuallyVerifiedBlockWithTrees,
     service::finalized_state::{
         disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
         zebra_db::ZebraDb,
@@ -69,15 +70,14 @@ impl DiskWriteBatch {
     pub fn prepare_history_batch(
         &mut self,
         db: &DiskDb,
-        finalized: &SemanticallyVerifiedBlock,
-        history_tree: Arc<HistoryTree>,
+        finalized: &ContextuallyVerifiedBlockWithTrees,
     ) -> Result<(), BoxError> {
         let history_tree_cf = db.cf_handle("history_tree").unwrap();
 
-        let SemanticallyVerifiedBlock { height, .. } = finalized;
+        let height = finalized.verified.height;
 
         // Update the tree in state
-        let current_tip_height = *height - 1;
+        let current_tip_height = height - 1;
         if let Some(h) = current_tip_height {
             self.zs_delete(&history_tree_cf, h);
         }
@@ -87,7 +87,7 @@ impl DiskWriteBatch {
         // Otherwise, the ReadStateService could access a height
         // that was just deleted by a concurrent StateService write.
         // This requires a database version update.
-        if let Some(history_tree) = history_tree.as_ref().as_ref() {
+        if let Some(history_tree) = finalized.treestate.history_tree.as_ref().as_ref() {
             self.zs_insert(&history_tree_cf, height, history_tree);
         }
 
diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs
index ac306bdfe1b..32c93f62480 100644
--- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs
+++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs
@@ -15,11 +15,12 @@
 use std::sync::Arc;
 
 use zebra_chain::{
-    block::Height, history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees,
-    sapling, sprout, transaction::Transaction,
+    block::Height, orchard, parallel::tree::NoteCommitmentTrees, sapling, sprout,
+    transaction::Transaction,
 };
 
 use crate::{
+    request::ContextuallyVerifiedBlockWithTrees,
     service::finalized_state::{
         disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
         zebra_db::ZebraDb,
@@ -264,9 +265,7 @@ impl DiskWriteBatch {
     pub fn prepare_note_commitment_batch(
        &mut self,
        db: &DiskDb,
-        finalized: &SemanticallyVerifiedBlock,
-        note_commitment_trees: NoteCommitmentTrees,
-        history_tree: Arc<HistoryTree>,
+        finalized: &ContextuallyVerifiedBlockWithTrees,
     ) -> Result<(), BoxError> {
         let sprout_anchors = db.cf_handle("sprout_anchors").unwrap();
         let sapling_anchors = db.cf_handle("sapling_anchors").unwrap();
@@ -276,7 +275,8 @@ impl DiskWriteBatch {
         let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap();
         let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap();
 
-        let SemanticallyVerifiedBlock { height, .. } = finalized;
+        let height = finalized.verified.height;
+        let note_commitment_trees = finalized.treestate.note_commitment_trees.clone();
 
         // Use the cached values that were previously calculated in parallel.
         let sprout_root = note_commitment_trees.sprout.root();
@@ -290,7 +290,7 @@ impl DiskWriteBatch {
         self.zs_insert(&orchard_anchors, orchard_root, ());
 
         // Delete the previously stored Sprout note commitment tree.
-        let current_tip_height = *height - 1;
+        let current_tip_height = height - 1;
         if let Some(h) = current_tip_height {
             self.zs_delete(&sprout_note_commitment_tree_cf, h);
         }
@@ -317,7 +317,7 @@ impl DiskWriteBatch {
             note_commitment_trees.orchard,
         );
 
-        self.prepare_history_batch(db, finalized, history_tree)
+        self.prepare_history_batch(db, finalized)
     }
 
     /// Prepare a database batch containing the initial note commitment trees,
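
For reviewers, here is a minimal sketch of the shape of the argument type this diff threads through the finalized-state write path. It is inferred only from the field accesses above (`finalized.verified`, `finalized.treestate.history_tree`, `finalized.treestate.note_commitment_trees`) and from the parameters the diff removes; the real definitions live in `zebra-state`'s `request` module, so the exact field types, paths, and doc comments below are assumptions, not the crate's actual code:

```rust
// Sketch only: field names follow the accesses in this diff; the concrete types
// are assumed from the `write_block` parameters that the diff removes.
use std::sync::Arc;

use zebra_chain::{history_tree::HistoryTree, parallel::tree::NoteCommitmentTrees};

use crate::SemanticallyVerifiedBlock; // assumed re-export of the existing type

/// A semantically verified block bundled with the tree state produced by
/// contextual validation, so callers pass one value instead of a block plus
/// two trees.
pub struct ContextuallyVerifiedBlockWithTrees {
    /// The verified block (block, hash, height, transaction hashes, new outputs).
    pub verified: SemanticallyVerifiedBlock,
    /// The trees that include this block's commitments.
    pub treestate: Treestate,
}

/// The tree state for `verified`, replacing the `history_tree` and
/// `note_commitment_trees` parameters removed from `write_block`,
/// `prepare_note_commitment_batch`, and `prepare_history_batch`.
pub struct Treestate {
    pub note_commitment_trees: NoteCommitmentTrees,
    pub history_tree: Arc<HistoryTree>,
}
```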