Use ContextuallyVerifiedBlockWithTrees
This commit passes `ContextuallyVerifiedBlockWithTrees` instead of
separate `finalized`, `history_tree`, and `note_commitment_trees`
arguments when storing blocks in the finalized state.
upbqdn committed Jun 21, 2023
1 parent 584ad32 commit dac739f
Showing 4 changed files with 42 additions and 50 deletions.
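
For orientation, here is a sketch of the wrapper type this commit threads through the write path, reconstructed from the field accesses visible in the diff below (`finalized.verified`, `finalized.treestate.history_tree`, and `finalized.treestate.note_commitment_trees`). The exact definition lives in zebra-state's `request` module and may differ in detail:

```rust
use std::sync::Arc;

use zebra_chain::{history_tree::HistoryTree, parallel::tree::NoteCommitmentTrees};

use crate::SemanticallyVerifiedBlock; // the existing zebra-state type

// Sketch only: field names are inferred from the accesses in this diff.
pub struct ContextuallyVerifiedBlockWithTrees {
    /// The output of semantic verification: the block plus its precomputed
    /// `hash`, `height`, `transaction_hashes`, and `new_outputs`.
    pub verified: SemanticallyVerifiedBlock,
    /// The chain state after appending this block.
    pub treestate: Treestate,
}

// Assumed shape of the treestate, matching the two fields the diff reads.
pub struct Treestate {
    pub history_tree: Arc<HistoryTree>,
    pub note_commitment_trees: NoteCommitmentTrees,
}
```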
8 changes: 1 addition & 7 deletions zebra-state/src/service/finalized_state.rs
@@ -329,13 +329,7 @@ impl FinalizedState {
         #[cfg(feature = "elasticsearch")]
         let finalized_block = finalized.verified.block.clone();

-        let result = self.db.write_block(
-            finalized.verified,
-            finalized.treestate.history_tree,
-            finalized.treestate.note_commitment_trees,
-            self.network,
-            source,
-        );
+        let result = self.db.write_block(finalized, self.network, source);

         if result.is_ok() {
             // Save blocks to elasticsearch if the feature is enabled.
58 changes: 28 additions & 30 deletions zebra-state/src/service/finalized_state/zebra_db/block.rs
@@ -19,9 +19,7 @@ use itertools::Itertools;
 use zebra_chain::{
     amount::NonNegative,
     block::{self, Block, Height},
-    history_tree::HistoryTree,
     orchard,
-    parallel::tree::NoteCommitmentTrees,
     parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH},
     sapling,
     serialization::TrustedPreallocate,
@@ -31,6 +29,7 @@ use zebra_chain::{
 };

 use crate::{
+    request::ContextuallyVerifiedBlockWithTrees,
     service::finalized_state::{
         disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
         disk_format::{
@@ -281,15 +280,12 @@ impl ZebraDb {
     /// - Propagates any errors from updating history and note commitment trees
     pub(in super::super) fn write_block(
         &mut self,
-        finalized: SemanticallyVerifiedBlock,
-        history_tree: Arc<HistoryTree>,
-        note_commitment_trees: NoteCommitmentTrees,
+        finalized: ContextuallyVerifiedBlockWithTrees,
         network: Network,
         source: &str,
     ) -> Result<block::Hash, BoxError> {
-        let finalized_hash = finalized.hash;
-
         let tx_hash_indexes: HashMap<transaction::Hash, usize> = finalized
+            .verified
             .transaction_hashes
             .iter()
             .enumerate()
@@ -302,11 +298,12 @@
         // simplify the spent_utxos location lookup code,
         // and remove the extra new_outputs_by_out_loc argument
         let new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo> = finalized
+            .verified
             .new_outputs
             .iter()
             .map(|(outpoint, ordered_utxo)| {
                 (
-                    lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes),
+                    lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes),
                     ordered_utxo.utxo.clone(),
                 )
             })
@@ -315,6 +312,7 @@
         // Get a list of the spent UTXOs, before we delete any from the database
         let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> =
             finalized
+                .verified
                 .block
                 .transactions
                 .iter()
@@ -326,12 +324,13 @@
                     // Some utxos are spent in the same block, so they will be in
                     // `tx_hash_indexes` and `new_outputs`
                     self.output_location(&outpoint).unwrap_or_else(|| {
-                        lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes)
+                        lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes)
                     }),
                     self.utxo(&outpoint)
                         .map(|ordered_utxo| ordered_utxo.utxo)
                         .or_else(|| {
                             finalized
+                                .verified
                                 .new_outputs
                                 .get(&outpoint)
                                 .map(|ordered_utxo| ordered_utxo.utxo.clone())
@@ -356,6 +355,7 @@
             .values()
             .chain(
                 finalized
+                    .verified
                     .new_outputs
                     .values()
                     .map(|ordered_utxo| &ordered_utxo.utxo),
@@ -376,21 +376,19 @@
         // In case of errors, propagate and do not write the batch.
         batch.prepare_block_batch(
             &self.db,
-            finalized,
+            &finalized,
             new_outputs_by_out_loc,
             spent_utxos_by_outpoint,
             spent_utxos_by_out_loc,
             address_balances,
-            history_tree,
-            note_commitment_trees,
             self.finalized_value_pool(),
         )?;

         self.db.write(batch)?;

         tracing::trace!(?source, "committed block from");

-        Ok(finalized_hash)
+        Ok(finalized.verified.hash)
     }
 }

@@ -429,25 +427,16 @@ impl DiskWriteBatch {
     pub fn prepare_block_batch(
         &mut self,
         db: &DiskDb,
-        finalized: SemanticallyVerifiedBlock,
+        finalized: &ContextuallyVerifiedBlockWithTrees,
         new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
         spent_utxos_by_outpoint: HashMap<transparent::OutPoint, transparent::Utxo>,
         spent_utxos_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
         address_balances: HashMap<transparent::Address, AddressBalanceLocation>,
-        history_tree: Arc<HistoryTree>,
-        note_commitment_trees: NoteCommitmentTrees,
         value_pool: ValueBalance<NonNegative>,
     ) -> Result<(), BoxError> {
-        let SemanticallyVerifiedBlock {
-            block,
-            hash,
-            height,
-            ..
-        } = &finalized;
-
         // Commit block and transaction data.
         // (Transaction indexes, note commitments, and UTXOs are committed later.)
-        self.prepare_block_header_and_transaction_data_batch(db, &finalized)?;
+        self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?;

         // # Consensus
         //
@@ -458,28 +447,37 @@
         //
         // By returning early, Zebra commits the genesis block and transaction data,
         // but it ignores the genesis UTXO and value pool updates.
-        if self.prepare_genesis_batch(db, &finalized) {
+        if self.prepare_genesis_batch(db, &finalized.verified) {
             return Ok(());
         }

         // Commit transaction indexes
         self.prepare_transparent_transaction_batch(
             db,
-            &finalized,
+            &finalized.verified,
             &new_outputs_by_out_loc,
             &spent_utxos_by_outpoint,
             &spent_utxos_by_out_loc,
             address_balances,
         )?;
-        self.prepare_shielded_transaction_batch(db, &finalized)?;
+        self.prepare_shielded_transaction_batch(db, &finalized.verified)?;

-        self.prepare_note_commitment_batch(db, &finalized, note_commitment_trees, history_tree)?;
+        self.prepare_note_commitment_batch(db, finalized)?;

         // Commit UTXOs and value pools
-        self.prepare_chain_value_pools_batch(db, &finalized, spent_utxos_by_outpoint, value_pool)?;
+        self.prepare_chain_value_pools_batch(
+            db,
+            &finalized.verified,
+            spent_utxos_by_outpoint,
+            value_pool,
+        )?;

         // The block has passed contextual validation, so update the metrics
-        block_precommit_metrics(block, *hash, *height);
+        block_precommit_metrics(
+            &finalized.verified.block,
+            finalized.verified.hash,
+            finalized.verified.height,
+        );

         Ok(())
     }
10 changes: 5 additions & 5 deletions zebra-state/src/service/finalized_state/zebra_db/chain.rs
@@ -21,6 +21,7 @@ use zebra_chain::{
 };

 use crate::{
+    request::ContextuallyVerifiedBlockWithTrees,
     service::finalized_state::{
         disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
         zebra_db::ZebraDb,
@@ -69,15 +70,14 @@ impl DiskWriteBatch {
     pub fn prepare_history_batch(
         &mut self,
         db: &DiskDb,
-        finalized: &SemanticallyVerifiedBlock,
-        history_tree: Arc<HistoryTree>,
+        finalized: &ContextuallyVerifiedBlockWithTrees,
     ) -> Result<(), BoxError> {
         let history_tree_cf = db.cf_handle("history_tree").unwrap();

-        let SemanticallyVerifiedBlock { height, .. } = finalized;
+        let height = finalized.verified.height;

         // Update the tree in state
-        let current_tip_height = *height - 1;
+        let current_tip_height = height - 1;
         if let Some(h) = current_tip_height {
             self.zs_delete(&history_tree_cf, h);
         }
@@ -87,7 +87,7 @@
         // Otherwise, the ReadStateService could access a height
         // that was just deleted by a concurrent StateService write.
         // This requires a database version update.
-        if let Some(history_tree) = history_tree.as_ref().as_ref() {
+        if let Some(history_tree) = finalized.treestate.history_tree.as_ref().as_ref() {
             self.zs_insert(&history_tree_cf, height, history_tree);
         }

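An aside on the `let current_tip_height = height - 1;` change above (the same pattern recurs in shielded.rs below): in `zebra_chain`, subtracting from a `block::Height` is checked, returning `Option<Height>` instead of panicking on underflow, which is why the delete is guarded by `if let Some(h)`. A minimal sketch of the pattern, assuming that checked `Sub<i32>` impl:

```rust
use zebra_chain::block::Height;

fn main() {
    // Assumption: `Height - i32` returns `Option<Height>`, with `None` when
    // the result would fall below the genesis height 0.
    let genesis = Height(0);
    let next = Height(1);

    // The genesis block has no previous tip entry to delete.
    assert_eq!(genesis - 1, None);
    assert_eq!(next - 1, Some(Height(0)));
}
```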
16 changes: 8 additions & 8 deletions zebra-state/src/service/finalized_state/zebra_db/shielded.rs
@@ -15,11 +15,12 @@
 use std::sync::Arc;

 use zebra_chain::{
-    block::Height, history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees,
-    sapling, sprout, transaction::Transaction,
+    block::Height, orchard, parallel::tree::NoteCommitmentTrees, sapling, sprout,
+    transaction::Transaction,
 };

 use crate::{
+    request::ContextuallyVerifiedBlockWithTrees,
     service::finalized_state::{
         disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
         zebra_db::ZebraDb,
@@ -264,9 +265,7 @@
     pub fn prepare_note_commitment_batch(
         &mut self,
         db: &DiskDb,
-        finalized: &SemanticallyVerifiedBlock,
-        note_commitment_trees: NoteCommitmentTrees,
-        history_tree: Arc<HistoryTree>,
+        finalized: &ContextuallyVerifiedBlockWithTrees,
     ) -> Result<(), BoxError> {
         let sprout_anchors = db.cf_handle("sprout_anchors").unwrap();
         let sapling_anchors = db.cf_handle("sapling_anchors").unwrap();
@@ -276,7 +275,8 @@
         let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap();
         let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap();

-        let SemanticallyVerifiedBlock { height, .. } = finalized;
+        let height = finalized.verified.height;
+        let note_commitment_trees = finalized.treestate.note_commitment_trees.clone();

         // Use the cached values that were previously calculated in parallel.
         let sprout_root = note_commitment_trees.sprout.root();
@@ -290,7 +290,7 @@
         self.zs_insert(&orchard_anchors, orchard_root, ());

         // Delete the previously stored Sprout note commitment tree.
-        let current_tip_height = *height - 1;
+        let current_tip_height = height - 1;
         if let Some(h) = current_tip_height {
             self.zs_delete(&sprout_note_commitment_tree_cf, h);
         }
@@ -317,7 +317,7 @@
             note_commitment_trees.orchard,
         );

-        self.prepare_history_batch(db, finalized, history_tree)
+        self.prepare_history_batch(db, finalized)
     }

     /// Prepare a database batch containing the initial note commitment trees,
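Taken together, the refactor shrinks the write path's call surface: the history tree and the note commitment trees now travel inside the single block argument rather than as separate parameters. A before/after sketch of the call site, taken from the first hunk of this commit:

```rust
// Before: the caller unpacked the verified block and its trees into
// separate `write_block` arguments.
let result = self.db.write_block(
    finalized.verified,
    finalized.treestate.history_tree,
    finalized.treestate.note_commitment_trees,
    self.network,
    source,
);

// After: the verified block and its treestate travel as one value.
let result = self.db.write_block(finalized, self.network, source);
```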
