diff --git a/consensus/src/liveness/leader_reputation.rs b/consensus/src/liveness/leader_reputation.rs index 7ec800313d321..d78a5f15a1026 100644 --- a/consensus/src/liveness/leader_reputation.rs +++ b/consensus/src/liveness/leader_reputation.rs @@ -54,7 +54,7 @@ impl AptosDBBackend { fn refresh_db_result( &self, mut locked: MutexGuard<'_, (Vec, u64, bool)>, - lastest_db_version: u64, + latest_db_version: u64, ) -> Result<(Vec, u64, bool)> { // assumes target round is not too far from latest commit let limit = self.window_size + self.seek_len; @@ -72,7 +72,7 @@ impl AptosDBBackend { u64::max_value(), Order::Descending, limit as u64, - lastest_db_version, + latest_db_version, )?; let max_returned_version = events.first().map_or(0, |first| first.transaction_version); @@ -86,7 +86,7 @@ impl AptosDBBackend { let result = ( new_block_events, - std::cmp::max(lastest_db_version, max_returned_version), + std::cmp::max(latest_db_version, max_returned_version), hit_end, ); *locked = result.clone(); @@ -143,10 +143,10 @@ impl MetadataBackend for AptosDBBackend { let has_larger = events.first().map_or(false, |e| { (e.epoch(), e.round()) >= (target_epoch, target_round) }); - let lastest_db_version = self.aptos_db.get_latest_version().unwrap_or(0); + let latest_db_version = self.aptos_db.get_latest_version().unwrap_or(0); // check if fresher data has potential to give us different result - if !has_larger && version < lastest_db_version { - let fresh_db_result = self.refresh_db_result(locked, lastest_db_version); + if !has_larger && version < latest_db_version { + let fresh_db_result = self.refresh_db_result(locked, latest_db_version); match fresh_db_result { Ok((events, _version, hit_end)) => { self.get_from_db_result(target_epoch, target_round, &events, hit_end) diff --git a/documentation/specifications/trusted_computing_base/execution_correctness/README.md b/documentation/specifications/trusted_computing_base/execution_correctness/README.md index 5164838005a46..c649e11bfe30d 
100644 --- a/documentation/specifications/trusted_computing_base/execution_correctness/README.md +++ b/documentation/specifications/trusted_computing_base/execution_correctness/README.md @@ -355,7 +355,7 @@ The tree structure would look exactly the same as the one in [Overview and Archi /// This module implements `SpeculationCache` that is an in-memory representation of this tree. /// The tree is reprensented by a root block id, /// all the children of root and a global block map. Each block is an Arc> -/// with ref_count = 1. For the chidren of the root, the sole owner is `heads`. For the rest, the sole +/// with ref_count = 1. For the children of the root, the sole owner is `heads`. For the rest, the sole /// owner is their parent block. So when a block is dropped, all its descendants will be dropped /// recursively. In the meanwhile, wheir entries in the block map will be removed by each block's drop(). pub(crate) struct SpeculationCache { @@ -363,7 +363,7 @@ pub(crate) struct SpeculationCache { committed_trees: ExecutedTrees, // The id of root block. committed_block_id: HashValue, - // The chidren of root block. + // The children of root block. heads: Vec>>, // A pointer to the global block map keyed by id to achieve O(1) lookup time complexity. // It is optional but an optimization. 
diff --git a/execution/executor-benchmark/src/lib.rs b/execution/executor-benchmark/src/lib.rs index ffee53f11f7eb..69a12241fd4c6 100644 --- a/execution/executor-benchmark/src/lib.rs +++ b/execution/executor-benchmark/src/lib.rs @@ -18,7 +18,6 @@ use aptos_config::config::{ }; use aptos_jellyfish_merkle::metrics::{ APTOS_JELLYFISH_INTERNAL_ENCODED_BYTES, APTOS_JELLYFISH_LEAF_ENCODED_BYTES, - APTOS_JELLYFISH_STORAGE_READS, }; use aptosdb::AptosDB; @@ -178,10 +177,6 @@ fn add_accounts_impl( // Write metadata generator.write_meta(&output_dir, num_new_accounts); - println!( - "Total reads from storage: {}", - APTOS_JELLYFISH_STORAGE_READS.get() - ); println!( "Total written internal nodes value size: {} bytes", APTOS_JELLYFISH_INTERNAL_ENCODED_BYTES.get() diff --git a/storage/aptosdb/src/aptosdb_test.rs b/storage/aptosdb/src/aptosdb_test.rs index 2b0c086cfc52e..8b3dfe07fb599 100644 --- a/storage/aptosdb/src/aptosdb_test.rs +++ b/storage/aptosdb/src/aptosdb_test.rs @@ -147,7 +147,7 @@ fn test_get_latest_executed_trees() { let tmp_dir = TempPath::new(); let db = AptosDB::new_for_test(&tmp_dir); - // entirely emtpy db + // entirely empty db let empty = db.get_latest_executed_trees().unwrap(); assert!(empty.is_same_view(&ExecutedTrees::new_empty())); diff --git a/storage/aptosdb/src/event_store/mod.rs b/storage/aptosdb/src/event_store/mod.rs index b1d21e738b945..69c5586a2de1d 100644 --- a/storage/aptosdb/src/event_store/mod.rs +++ b/storage/aptosdb/src/event_store/mod.rs @@ -177,7 +177,7 @@ impl EventStore { let msg = if cur_seq == start_seq_num { "First requested event is probably pruned." } else { - "DB corruption: Sequence number not continous." + "DB corruption: Sequence number not continuous." 
}; bail!("{} expected: {}, actual: {}", msg, cur_seq, seq); } @@ -322,13 +322,16 @@ impl EventStore { batch.put::( &(*event.key(), version, event.sequence_number()), &(idx as u64), - )?; - Ok(()) + ) })?; // EventAccumulatorSchema updates let event_hashes: Vec = events.iter().map(ContractEvent::hash).collect(); - let (root_hash, writes) = EmptyAccumulator::append(&EmptyReader, 0, &event_hashes)?; + let (root_hash, writes) = MerkleAccumulator::::append( + &EmptyReader, + 0, + &event_hashes, + )?; writes.into_iter().try_for_each(|(pos, hash)| { batch.put::(&(version, pos), &hash) })?; @@ -414,7 +417,7 @@ impl EventStore { }, ledger_version, )?.ok_or_else(|| format_err!( - "No new block found beyond timestmap {}, so can't determine the last version before it.", + "No new block found beyond timestamp {}, so can't determine the last version before it.", timestamp, ))?; @@ -528,8 +531,6 @@ impl EventStore { } } -type Accumulator<'a> = MerkleAccumulator, EventAccumulatorHasher>; - struct EventHashReader<'a> { store: &'a EventStore, version: Version, @@ -550,8 +551,6 @@ impl<'a> HashReader for EventHashReader<'a> { } } -type EmptyAccumulator = MerkleAccumulator; - struct EmptyReader; // Asserts `get()` is never called. 
diff --git a/storage/aptosdb/src/ledger_store/ledger_info_test_utils.rs b/storage/aptosdb/src/ledger_store/ledger_info_test_utils.rs index 04040ee0239eb..520405b916142 100644 --- a/storage/aptosdb/src/ledger_store/ledger_info_test_utils.rs +++ b/storage/aptosdb/src/ledger_store/ledger_info_test_utils.rs @@ -1,6 +1,7 @@ // Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 use crate::AptosDB; +use anyhow::Result; use aptos_types::{ ledger_info::LedgerInfoWithSignatures, proptest_types::{AccountInfoUniverse, LedgerInfoWithSignaturesGen}, @@ -63,7 +64,7 @@ pub fn set_up( ledger_infos_with_sigs .iter() .map(|info| store.put_ledger_info(info, &mut batch)) - .collect::>>() + .collect::>>() .unwrap(); store.db.write_schemas(batch).unwrap(); store.set_latest_ledger_info(ledger_infos_with_sigs.last().unwrap().clone()); diff --git a/storage/aptosdb/src/ledger_store/transaction_info_test.rs b/storage/aptosdb/src/ledger_store/transaction_info_test.rs index f0875ac9d1074..d4d8e5a8fa07e 100644 --- a/storage/aptosdb/src/ledger_store/transaction_info_test.rs +++ b/storage/aptosdb/src/ledger_store/transaction_info_test.rs @@ -66,7 +66,7 @@ proptest! 
{ verify(store, &batch1, 0, ledger_version2, root_hash2); verify(store, &batch2, batch1.len() as u64, ledger_version2, root_hash2); - // retrieve batch1 and verify against root_hash after batch1 was interted + // retrieve batch1 and verify against root_hash after batch1 was inserted verify(store, &batch1, 0, ledger_version1, root_hash1); } diff --git a/storage/aptosdb/src/lib.rs b/storage/aptosdb/src/lib.rs index 6f2c4fc17452c..93cb0bc1f15bd 100644 --- a/storage/aptosdb/src/lib.rs +++ b/storage/aptosdb/src/lib.rs @@ -90,7 +90,6 @@ use aptos_types::{ TransactionOutput, TransactionOutputListWithProof, TransactionToCommit, TransactionWithProof, Version, }, - write_set::WriteSet, }; use aptos_vm::data_cache::AsMoveResolver; use aptosdb_indexer::Indexer; @@ -122,6 +121,8 @@ use storage_interface::{ pub const LEDGER_DB_NAME: &str = "ledger_db"; pub const STATE_MERKLE_DB_NAME: &str = "state_merkle_db"; +// This is the last line of defense against large queries slipping through external facing interfaces, +// like the API and State Sync, etc. const MAX_LIMIT: u64 = 10000; // TODO: Either implement an iteration API to allow a very old client to loop through a long history @@ -433,7 +434,8 @@ impl AptosDB { let state_merkle_db_secondary_path = secondary_db_root_path.as_ref().join(STATE_MERKLE_DB_NAME); - // Secondary needs `max_open_files = -1` per https://github.com/facebook/rocksdb/wiki/Secondary-instance + // Secondary needs `max_open_files = -1` per + // https://github.com/facebook/rocksdb/wiki/Read-only-and-Secondary-instances rocksdb_configs.ledger_db_config.max_open_files = -1; rocksdb_configs.state_merkle_db_config.max_open_files = -1; @@ -933,7 +935,7 @@ impl DbReader for AptosDB { }) } - /// This API is best-effort in that it CANNOT provide absense proof. + /// This API is best-effort in that it CANNOT provide absence proof.
fn get_transaction_by_hash( &self, hash: HashValue, @@ -1129,23 +1131,6 @@ impl DbReader for AptosDB { }) } - /// Returns write sets for range [begin_version, end_version). - /// - /// Used by the executor to build in memory state after a state checkpoint. - /// Any missing write set in the entire range results in an error. - fn get_write_sets( - &self, - begin_version: Version, - end_version: Version, - ) -> Result> { - gauged_api("get_write_sets", || { - self.error_if_ledger_pruned("Write set", begin_version)?; - - self.transaction_store - .get_write_sets(begin_version, end_version) - }) - } - fn get_events( &self, event_key: &EventKey, @@ -1489,7 +1474,7 @@ impl DbWriter for AptosDB { /// `first_version` is the version of the first transaction in `txns_to_commit`. /// When `ledger_info_with_sigs` is provided, verify that the transaction accumulator root hash /// it carries is generated after the `txns_to_commit` are applied. - /// Note that even if `txns_to_commit` is empty, `frist_version` is checked to be + /// Note that even if `txns_to_commit` is empty, `first_version` is checked to be /// `ledger_info_with_sigs.ledger_info.version + 1` if `ledger_info_with_sigs` is not `None`. fn save_transactions( &self, diff --git a/storage/aptosdb/src/pruner/db_pruner.rs b/storage/aptosdb/src/pruner/db_pruner.rs index 59e128b11ef14..e101e6d003154 100644 --- a/storage/aptosdb/src/pruner/db_pruner.rs +++ b/storage/aptosdb/src/pruner/db_pruner.rs @@ -42,7 +42,7 @@ pub trait DBPruner: Send + Sync { /// Returns the target version for the current pruning round - this might be different from the /// target_version() because we need to keep max_version in account. - fn get_currrent_batch_target(&self, max_versions: Version) -> Version { + fn get_current_batch_target(&self, max_versions: Version) -> Version { // Current target version might be less than the target version to ensure we don't prune // more than max_version in one go. 
min( diff --git a/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs b/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs index fb30990177fc0..bb2c9753ce625 100644 --- a/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs +++ b/storage/aptosdb/src/pruner/ledger_store/ledger_store_pruner.rs @@ -20,7 +20,6 @@ use crate::{ }, EventStore, StateStore, TransactionStore, }; - use aptos_logger::warn; use aptos_types::transaction::{AtomicVersion, Version}; use schemadb::{ReadOptions, SchemaBatch, DB}; @@ -173,7 +172,7 @@ impl LedgerPruner { // Current target version might be less than the target version to ensure we don't prune // more than max_version in one go. - let current_target_version = self.get_currrent_batch_target(max_versions as Version); + let current_target_version = self.get_current_batch_target(max_versions as Version); self.transaction_store_pruner.prune( db_batch, diff --git a/storage/aptosdb/src/schema/mod.rs b/storage/aptosdb/src/schema/mod.rs index 11b2aa1543901..f096aab9671c9 100644 --- a/storage/aptosdb/src/schema/mod.rs +++ b/storage/aptosdb/src/schema/mod.rs @@ -35,14 +35,12 @@ pub const EVENT_ACCUMULATOR_CF_NAME: ColumnFamilyName = "event_accumulator"; pub const EVENT_BY_KEY_CF_NAME: ColumnFamilyName = "event_by_key"; pub const EVENT_BY_VERSION_CF_NAME: ColumnFamilyName = "event_by_version"; pub const EVENT_CF_NAME: ColumnFamilyName = "event"; -pub const INDEXER_METADATA_CF_NAME: ColumnFamilyName = "indexer_metadata"; pub const JELLYFISH_MERKLE_NODE_CF_NAME: ColumnFamilyName = "jellyfish_merkle_node"; pub const LEDGER_INFO_CF_NAME: ColumnFamilyName = "ledger_info"; pub const STALE_NODE_INDEX_CF_NAME: ColumnFamilyName = "stale_node_index"; pub const STALE_NODE_INDEX_CROSS_EPOCH_CF_NAME: ColumnFamilyName = "stale_node_index_cross_epoch"; pub const STALE_STATE_VALUE_INDEX_CF_NAME: ColumnFamilyName = "stale_state_value_index"; pub const STATE_VALUE_CF_NAME: ColumnFamilyName = "state_value"; -pub const 
TABLE_INFO_CF_NAME: ColumnFamilyName = "table_info"; pub const TRANSACTION_CF_NAME: ColumnFamilyName = "transaction"; pub const TRANSACTION_ACCUMULATOR_CF_NAME: ColumnFamilyName = "transaction_accumulator"; pub const TRANSACTION_BY_ACCOUNT_CF_NAME: ColumnFamilyName = "transaction_by_account"; diff --git a/storage/aptosdb/src/schema/stale_node_index/mod.rs b/storage/aptosdb/src/schema/stale_node_index/mod.rs index 502f6d5ae51b7..b434f1a37efc2 100644 --- a/storage/aptosdb/src/schema/stale_node_index/mod.rs +++ b/storage/aptosdb/src/schema/stale_node_index/mod.rs @@ -11,7 +11,7 @@ //! //! ```text //! |<--------------key-------------->| -//! | stale_since_vesrion | node_key | +//! | stale_since_version | node_key | //! ``` //! //! `stale_since_version` is serialized in big endian so that records in RocksDB will be in order of diff --git a/storage/aptosdb/src/schema/stale_state_value_index/mod.rs b/storage/aptosdb/src/schema/stale_state_value_index/mod.rs index aa37091830047..643302cbd723c 100644 --- a/storage/aptosdb/src/schema/stale_state_value_index/mod.rs +++ b/storage/aptosdb/src/schema/stale_state_value_index/mod.rs @@ -12,7 +12,7 @@ //! //! ```text //! |<-------------------key------------------->| -//! | stale_since_vesrion | version | state_key | +//! | stale_since_version | version | state_key | //! ``` //! //! `stale_since_version` is serialized in big endian so that records in RocksDB will be in order of diff --git a/storage/aptosdb/src/schema/state_value/mod.rs b/storage/aptosdb/src/schema/state_value/mod.rs index a450dd4ea1c6e..dd65311fd89fb 100644 --- a/storage/aptosdb/src/schema/state_value/mod.rs +++ b/storage/aptosdb/src/schema/state_value/mod.rs @@ -7,7 +7,7 @@ //! An Index Key in this data set has 2 pieces of information: //! 1. The state key //! 2. The version associated with the key -//! The value associated with the key is the the serialized State Value. +//! The value associated with the key is the serialized State Value. //! //! ```text //! 
|<-------- key -------->|<--- value --->| diff --git a/storage/aptosdb/src/schema/write_set/mod.rs b/storage/aptosdb/src/schema/write_set/mod.rs index 233b1140603f1..d5250290d44ad 100644 --- a/storage/aptosdb/src/schema/write_set/mod.rs +++ b/storage/aptosdb/src/schema/write_set/mod.rs @@ -1,7 +1,7 @@ // Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 -//! This module defines physical storage schema for write set emited by each transaction +//! This module defines physical storage schema for write set emitted by each transaction //! saved to storage. //! //! Serialized signed transaction bytes identified by version. diff --git a/storage/aptosdb/src/state_store/mod.rs b/storage/aptosdb/src/state_store/mod.rs index 0542eafc8deb8..1a26c8d3be8a9 100644 --- a/storage/aptosdb/src/state_store/mod.rs +++ b/storage/aptosdb/src/state_store/mod.rs @@ -98,8 +98,8 @@ impl Deref for StateStore { } // "using an Arc as an Arc" is not allowed in stable Rust. Actually we -// want another trait, `StateReader`, which is a subset of `DbReaer` here but Rust does not support trait -// upcasting coercion for now. Should change it to a different trait once upcasting is stablized. +// want another trait, `StateReader`, which is a subset of `DbReader` here but Rust does not support trait +// upcasting coercion for now. Should change it to a different trait once upcasting is stabilized. // ref: https://github.com/rust-lang/rust/issues/65991 impl DbReader for StateDb { /// Returns the latest state snapshot strictly before `next_version` if any. diff --git a/storage/aptosdb/src/state_store/state_store_test.rs b/storage/aptosdb/src/state_store/state_store_test.rs index fcd9ad75ebac5..1213ca4197836 100644 --- a/storage/aptosdb/src/state_store/state_store_test.rs +++ b/storage/aptosdb/src/state_store/state_store_test.rs @@ -412,7 +412,7 @@ proptest! 
{ let store2 = &db2.state_store; let mut restore = - StateSnapshotRestore::new(&store2.state_merkle_db, store2, version, expected_root_hash, false, /* async_commit */).unwrap(); + StateSnapshotRestore::new(&store2.state_merkle_db, store2, version, expected_root_hash, true, /* async_commit */).unwrap(); let mut ordered_input: Vec<_> = input .into_iter() diff --git a/storage/backup/backup-cli/src/backup_types/epoch_ending/backup.rs b/storage/backup/backup-cli/src/backup_types/epoch_ending/backup.rs index c58418da03388..7e9d9186fe94e 100644 --- a/storage/backup/backup-cli/src/backup_types/epoch_ending/backup.rs +++ b/storage/backup/backup-cli/src/backup_types/epoch_ending/backup.rs @@ -56,7 +56,7 @@ impl EpochEndingBackupController { pub async fn run(self) -> Result { info!( - "Epoch ending backup started, starting from epoch {}, unill epoch {} (excluded).", + "Epoch ending backup started, starting from epoch {}, until epoch {} (excluded).", start_epoch = self.start_epoch, end_epoch = self.end_epoch, ); diff --git a/storage/backup/backup-cli/src/metadata/view.rs b/storage/backup/backup-cli/src/metadata/view.rs index c618b49e816eb..7a705ea32e65d 100644 --- a/storage/backup/backup-cli/src/metadata/view.rs +++ b/storage/backup/backup-cli/src/metadata/view.rs @@ -69,7 +69,7 @@ impl MetadataView { } ensure!( backup.first_version == next_ver, - "Transactioon backup ranges not continuous, expecting version {}, got {}.", + "Transaction backup ranges not continuous, expecting version {}, got {}.", next_ver, backup.first_version, ); diff --git a/storage/indexer/src/schema/indexer_metadata/mod.rs b/storage/indexer/src/schema/indexer_metadata/mod.rs index be1a59218db82..fbb7a1aa8e018 100644 --- a/storage/indexer/src/schema/indexer_metadata/mod.rs +++ b/storage/indexer/src/schema/indexer_metadata/mod.rs @@ -1,7 +1,7 @@ // Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 -//! This module defines physical storage schema storing medadata for the internal indexer +//! 
This module defines physical storage schema storing metadata for the internal indexer //! use crate::metadata::{MetadataKey, MetadataValue}; diff --git a/storage/jellyfish-merkle/src/iterator/mod.rs b/storage/jellyfish-merkle/src/iterator/mod.rs index 2a290e606707e..b2395bae77844 100644 --- a/storage/jellyfish-merkle/src/iterator/mod.rs +++ b/storage/jellyfish-merkle/src/iterator/mod.rs @@ -37,7 +37,7 @@ struct NodeVisitInfo { /// This integer always has exactly one 1-bit. The position of the 1-bit (from LSB) indicates /// the next child to visit in the iteration process. All the ones on the left have already - /// been visited. All the chilren on the right (including this one) have not been visited yet. + /// been visited. All the children on the right (including this one) have not been visited yet. next_child_to_visit: u16, } diff --git a/storage/jellyfish-merkle/src/metrics.rs b/storage/jellyfish-merkle/src/metrics.rs index 09f2c439d6cfb..a1ee5556be2a6 100644 --- a/storage/jellyfish-merkle/src/metrics.rs +++ b/storage/jellyfish-merkle/src/metrics.rs @@ -20,14 +20,6 @@ pub static APTOS_JELLYFISH_INTERNAL_ENCODED_BYTES: Lazy = Lazy::new( .unwrap() }); -pub static APTOS_JELLYFISH_STORAGE_READS: Lazy = Lazy::new(|| { - register_int_counter!( - "aptos_jellyfish_storage_reads", - "Aptos jellyfish reads from storage" - ) - .unwrap() -}); - pub static APTOS_JELLYFISH_LEAF_COUNT: Lazy = Lazy::new(|| { register_int_gauge!( "aptos_jellyfish_leaf_count", diff --git a/storage/jellyfish-merkle/src/node_type/mod.rs b/storage/jellyfish-merkle/src/node_type/mod.rs index a5dcfe961eab6..69ced3a0b96d0 100644 --- a/storage/jellyfish-merkle/src/node_type/mod.rs +++ b/storage/jellyfish-merkle/src/node_type/mod.rs @@ -7,7 +7,7 @@ //! and [`LeafNode`] as building blocks of a 256-bit //! [`JellyfishMerkleTree`](crate::JellyfishMerkleTree). [`InternalNode`] represents a 4-level //! binary tree to optimize for IOPS: it compresses a tree with 31 nodes into one node with 16 -//! 
chidren at the lowest level. [`LeafNode`] stores the full key and the value associated. +//! children at the lowest level. [`LeafNode`] stores the full key and the value associated. #[cfg(test)] mod node_type_test; @@ -242,11 +242,11 @@ pub struct InternalNode { /// height /// ``` /// -/// As illustrated above, at nibble height 0, `0..F` in hex denote 16 chidren hashes. Each `#` +/// As illustrated above, at nibble height 0, `0..F` in hex denote 16 children hashes. Each `#` /// means the hash of its two direct children, which will be used to generate the hash of its /// parent with the hash of its sibling. Finally, we can get the hash of this internal node. /// -/// However, if an internal node doesn't have all 16 chidren exist at height 0 but just a few of +/// However, if an internal node doesn't have all 16 children exist at height 0 but just a few of /// them, we have a modified hashing rule on top of what is stated above: /// 1. From top to bottom, a node will be replaced by a leaf child if the subtree rooted at this /// node has only one child at height 0 and it is a leaf child. @@ -655,7 +655,7 @@ pub struct LeafNode { account_key: HashValue, // The hash of the value. 
value_hash: HashValue, - // The key and version thats points to the value + // The key and version that points to the value value_index: (K, Version), } diff --git a/storage/schemadb/src/iterator.rs b/storage/schemadb/src/iterator.rs index 624a051d44b66..9603b25de1deb 100644 --- a/storage/schemadb/src/iterator.rs +++ b/storage/schemadb/src/iterator.rs @@ -77,8 +77,8 @@ where return Ok(None); } - let raw_key = self.db_iter.key().expect("Iterator must be valid."); - let raw_value = self.db_iter.value().expect("Iterator must be valid."); + let raw_key = self.db_iter.key().expect("db_iter.key() failed."); + let raw_value = self.db_iter.value().expect("db_iter.value() failed."); APTOS_SCHEMADB_ITER_BYTES .with_label_values(&[S::COLUMN_FAMILY_NAME]) .observe((raw_key.len() + raw_value.len()) as f64); diff --git a/storage/schemadb/src/metrics.rs b/storage/schemadb/src/metrics.rs index 94b8f1cb86f47..5302e41728940 100644 --- a/storage/schemadb/src/metrics.rs +++ b/storage/schemadb/src/metrics.rs @@ -25,7 +25,7 @@ pub static APTOS_SCHEMADB_ITER_BYTES: Lazy = Lazy::new(|| { // metric name "aptos_schemadb_iter_bytes", // metric description - "Aptos schemadb iter size in bytess", + "Aptos schemadb iter size in bytes", // metric labels (dimensions) &["cf_name"] ) diff --git a/storage/schemadb/src/schema.rs b/storage/schemadb/src/schema.rs index cfb0279bad58e..d79e8c40d7051 100644 --- a/storage/schemadb/src/schema.rs +++ b/storage/schemadb/src/schema.rs @@ -46,7 +46,7 @@ use std::fmt::Debug; /// /// // And finally define a schema type and associate it with key and value types, as well as the /// // column family name, by generating code that implements the `Schema` trait for the type.
-/// define_schema!(ExampleSchema, Key, Value, "exmaple_cf_name"); +/// define_schema!(ExampleSchema, Key, Value, "example_cf_name"); /// /// // SeekKeyCodec is automatically implemented for KeyCodec, /// // so you can seek an iterator with the Key type: diff --git a/storage/scratchpad/src/sparse_merkle/updater.rs b/storage/scratchpad/src/sparse_merkle/updater.rs index 8e8a12a334e8e..3725b2e77c46a 100644 --- a/storage/scratchpad/src/sparse_merkle/updater.rs +++ b/storage/scratchpad/src/sparse_merkle/updater.rs @@ -4,7 +4,7 @@ use crate::{ sparse_merkle::{ node::{InternalNode, Node, NodeHandle, NodeInner}, - utils::{partition, swap_if, Either}, + utils::{partition, swap_if}, UpdateError, }, ProofRead, @@ -305,8 +305,8 @@ impl<'a, V: Send + Sync + Clone + CryptoHash> SubTreeUpdater<'a, V> { let generation = self.generation; let depth = self.depth; match self.maybe_end_recursion()? { - Either::A(ended) => Ok(ended), - Either::B(myself) => { + MaybeEndRecursion::End(ended) => Ok(ended), + MaybeEndRecursion::Continue(myself) => { let (left, right) = myself.into_children(proof_reader)?; let (left_ret, right_ret) = if depth <= MAX_PARALLELIZABLE_DEPTH && left.updates.len() >= MIN_PARALLELIZABLE_SIZE @@ -322,22 +322,24 @@ impl<'a, V: Send + Sync + Clone + CryptoHash> SubTreeUpdater<'a, V> { } } - fn maybe_end_recursion(self) -> Result, Self>> { + fn maybe_end_recursion(self) -> Result, Self>> { Ok(match self.updates.len() { - 0 => Either::A(self.info.materialize(self.generation)), + 0 => MaybeEndRecursion::End(self.info.materialize(self.generation)), 1 => { let (key_to_update, update) = &self.updates[0]; match &self.info { SubTreeInfo::InMem(in_mem_info) => match in_mem_info { InMemSubTreeInfo::Empty => match update { - Some(value) => Either::A(InMemSubTreeInfo::create_leaf_with_update( - (*key_to_update, value), - self.generation, - )), - None => Either::A(self.info.materialize(self.generation)), + Some(value) => { + 
MaybeEndRecursion::End(InMemSubTreeInfo::create_leaf_with_update( + (*key_to_update, value), + self.generation, + )) + } + None => MaybeEndRecursion::End(self.info.materialize(self.generation)), }, InMemSubTreeInfo::Leaf { key, .. } => match update { - Some(value) => Either::or( + Some(value) => MaybeEndRecursion::or( key == key_to_update, InMemSubTreeInfo::create_leaf_with_update( (*key_to_update, value), @@ -347,16 +349,16 @@ impl<'a, V: Send + Sync + Clone + CryptoHash> SubTreeUpdater<'a, V> { ), None => { if key == key_to_update { - Either::A(InMemSubTreeInfo::Empty) + MaybeEndRecursion::End(InMemSubTreeInfo::Empty) } else { - Either::A(self.info.materialize(self.generation)) + MaybeEndRecursion::End(self.info.materialize(self.generation)) } } }, - _ => Either::B(self), + _ => MaybeEndRecursion::Continue(self), }, SubTreeInfo::Persisted(PersistedSubTreeInfo::Leaf { leaf }) => match update { - Some(value) => Either::or( + Some(value) => MaybeEndRecursion::or( leaf.key() == *key_to_update, InMemSubTreeInfo::create_leaf_with_update( (*key_to_update, value), @@ -366,16 +368,16 @@ impl<'a, V: Send + Sync + Clone + CryptoHash> SubTreeUpdater<'a, V> { ), None => { if leaf.key() == *key_to_update { - Either::A(InMemSubTreeInfo::Empty) + MaybeEndRecursion::End(InMemSubTreeInfo::Empty) } else { - Either::A(self.info.materialize(self.generation)) + MaybeEndRecursion::End(self.info.materialize(self.generation)) } } }, - _ => Either::B(self), + _ => MaybeEndRecursion::Continue(self), } } - _ => Either::B(self), + _ => MaybeEndRecursion::Continue(self), }) } @@ -403,3 +405,18 @@ impl<'a, V: Send + Sync + Clone + CryptoHash> SubTreeUpdater<'a, V> { )) } } + +pub(crate) enum MaybeEndRecursion { + End(A), + Continue(B), +} + +impl MaybeEndRecursion { + pub fn or(cond: bool, a: A, b: B) -> Self { + if cond { + MaybeEndRecursion::End(a) + } else { + MaybeEndRecursion::Continue(b) + } + } +} diff --git a/storage/scratchpad/src/sparse_merkle/utils.rs 
b/storage/scratchpad/src/sparse_merkle/utils.rs index 63c0ff547cf0c..64684a2805b8f 100644 --- a/storage/scratchpad/src/sparse_merkle/utils.rs +++ b/storage/scratchpad/src/sparse_merkle/utils.rs @@ -29,18 +29,3 @@ pub(crate) fn partition(updates: &[(HashValue, T)], depth: usize) -> usize { } i } - -pub(crate) enum Either { - A(A), - B(B), -} - -impl Either { - pub fn or(cond: bool, a: A, b: B) -> Self { - if cond { - Either::A(a) - } else { - Either::B(b) - } - } -} diff --git a/storage/storage-interface/src/lib.rs b/storage/storage-interface/src/lib.rs index 70ac56f78ca0f..a1aa7c865bdf5 100644 --- a/storage/storage-interface/src/lib.rs +++ b/storage/storage-interface/src/lib.rs @@ -31,7 +31,6 @@ use aptos_types::{ AccountTransactionsWithProof, TransactionInfo, TransactionListWithProof, TransactionOutputListWithProof, TransactionToCommit, TransactionWithProof, Version, }, - write_set::WriteSet, }; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, sync::Arc}; @@ -191,17 +190,6 @@ pub trait DbReader: Send + Sync { unimplemented!() } - /// See [`AptosDB::get_write_sets`]. - /// - /// [`AptosDB::get_write_sets`]: ../aptosdb/struct.AptosDB.html#method.get_write_sets - fn get_write_sets( - &self, - start_version: Version, - end_version: Version, - ) -> Result> { - unimplemented!() - } - /// Returns events by given event key fn get_events( &self, diff --git a/storage/storage-interface/src/state_delta.rs b/storage/storage-interface/src/state_delta.rs index 0d2693b3742d4..609cb44814a2d 100644 --- a/storage/storage-interface/src/state_delta.rs +++ b/storage/storage-interface/src/state_delta.rs @@ -14,7 +14,7 @@ use std::collections::HashMap; /// reflecting the difference of `current` on top of `base`. /// /// The `base` is the state SMT that current is based on. -/// The `current` is the state SMT that results from applying udpates_since_base on top of `base`. 
+/// The `current` is the state SMT that results from applying updates_since_base on top of `base`. /// `updates_since_base` tracks all those key-value pairs that's changed since `base`, useful /// when the next checkpoint is calculated. #[derive(Clone, Debug)] diff --git a/types/src/block_info.rs b/types/src/block_info.rs index 2384fbb617b3e..7118beba70c89 100644 --- a/types/src/block_info.rs +++ b/types/src/block_info.rs @@ -24,7 +24,7 @@ pub const GENESIS_TIMESTAMP_USECS: u64 = 0; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] #[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] pub struct BlockInfo { - /// Epoch number corresponds to the set of validators that are active for this block. + /// The epoch to which the block belongs. epoch: u64, /// The consensus protocol is executed in rounds, which monotonically increase per epoch. round: Round,