From aed5bceec987c12eced26dcab69edea2a2056804 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Tue, 29 Oct 2024 14:28:20 +0000 Subject: [PATCH 01/31] WIP --- .../content_addressed_append_only_tree.hpp | 29 +++++---- .../content_addressed_indexed_tree.hpp | 22 +++---- .../merkle_tree/indexed_tree/indexed_leaf.hpp | 2 +- .../merkle_tree/lmdb_store/callbacks.cpp | 12 ++-- .../merkle_tree/lmdb_store/callbacks.hpp | 2 +- .../merkle_tree/lmdb_store/lmdb_database.cpp | 2 +- .../merkle_tree/lmdb_store/lmdb_database.hpp | 3 +- .../lmdb_store/lmdb_transaction.cpp | 5 ++ .../lmdb_store/lmdb_transaction.hpp | 63 +++++++++++++++++++ .../lmdb_store/lmdb_tree_read_transaction.cpp | 16 ----- .../lmdb_store/lmdb_tree_read_transaction.hpp | 61 ------------------ .../lmdb_store/lmdb_tree_store.cpp | 3 +- .../lmdb_store/lmdb_tree_store.hpp | 5 +- .../lmdb_tree_write_transaction.cpp | 36 +---------- .../lmdb_tree_write_transaction.hpp | 43 +------------ .../crypto/merkle_tree/lmdb_store/queries.cpp | 52 +++++++++++++++ .../crypto/merkle_tree/lmdb_store/queries.hpp | 23 ++++++- .../cached_content_addressed_tree_store.hpp | 28 ++++----- 18 files changed, 204 insertions(+), 203 deletions(-) create mode 100644 barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index 8c34636f441..b1bb82e32a4 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -113,7 +113,7 @@ template class ContentAddressedAppendOn * @param on_completion Callback to be called on completion * @param includeUncommitted Whether to include uncommitted changes */ - void get_subtree_sibling_path(index_t leaf_index, + void get_subtree_sibling_path(const index_t& leaf_index, uint32_t subtree_depth, const HashPathCallback& on_completion, bool includeUncommitted) const; @@ -131,7 +131,9 @@ template class ContentAddressedAppendOn * @param includeUncommitted Whether to include uncommitted changes * @param on_completion Callback to be called on completion */ - void get_meta_data(index_t blockNumber, bool includeUncommitted, const MetaDataCallback& on_completion) const; + void get_meta_data(const index_t& blockNumber, + bool includeUncommitted, + const MetaDataCallback& on_completion) const; /** * @brief Returns the leaf value at the provided index @@ -226,12 +228,12 @@ template class ContentAddressedAppendOn const RequestContext& requestContext, ReadTransaction& tx) const; - std::optional find_leaf_hash(index_t leaf_index, + std::optional find_leaf_hash(const index_t& leaf_index, const RequestContext& requestContext, ReadTransaction& tx, bool updateNodesByIndexCache = false) const; - index_t get_batch_insertion_size(index_t treeSize, index_t remainingAppendSize); + index_t get_batch_insertion_size(const index_t& treeSize, const index_t& remainingAppendSize); void add_batch_internal( std::vector& values, fr& new_root, index_t& new_size, bool update_index, ReadTransaction& tx); @@ -323,7 +325,7 @@ void ContentAddressedAppendOnlyTree::get_meta_data(bool in } template -void ContentAddressedAppendOnlyTree::get_meta_data(index_t blockNumber, +void ContentAddressedAppendOnlyTree::get_meta_data(const index_t& blockNumber, bool 
includeUncommitted, const MetaDataCallback& on_completion) const { @@ -387,7 +389,7 @@ void ContentAddressedAppendOnlyTree::get_sibling_path(cons template void ContentAddressedAppendOnlyTree::get_subtree_sibling_path( - const uint32_t subtree_depth, const HashPathCallback& on_completion, bool includeUncommitted) const + uint32_t subtree_depth, const HashPathCallback& on_completion, bool includeUncommitted) const { auto job = [=, this]() { execute_and_report( @@ -409,8 +411,8 @@ void ContentAddressedAppendOnlyTree::get_subtree_sibling_p template void ContentAddressedAppendOnlyTree::get_subtree_sibling_path( - const index_t leaf_index, - const uint32_t subtree_depth, + const index_t& leaf_index, + uint32_t subtree_depth, const HashPathCallback& on_completion, bool includeUncommitted) const { @@ -450,7 +452,10 @@ fr_sibling_path ContentAddressedAppendOnlyTree::optional_s template std::optional ContentAddressedAppendOnlyTree::find_leaf_hash( - index_t leaf_index, const RequestContext& requestContext, ReadTransaction& tx, bool updateNodesByIndexCache) const + const index_t& leaf_index, + const RequestContext& requestContext, + ReadTransaction& tx, + bool updateNodesByIndexCache) const { fr hash = requestContext.root; // std::cout << "Finding leaf hash for root " << hash << " at index " << leaf_index << std::endl; @@ -510,7 +515,7 @@ template ContentAddressedAppendOnlyTree::OptionalSiblingPath ContentAddressedAppendOnlyTree< Store, HashingPolicy>::get_subtree_sibling_path_internal(const index_t& leaf_index, - const uint32_t subtree_depth, + uint32_t subtree_depth, const RequestContext& requestContext, ReadTransaction& tx) const { @@ -783,8 +788,8 @@ void ContentAddressedAppendOnlyTree::finalise_block(const } template -index_t ContentAddressedAppendOnlyTree::get_batch_insertion_size(index_t treeSize, - index_t remainingAppendSize) +index_t ContentAddressedAppendOnlyTree::get_batch_insertion_size( + const index_t& treeSize, const index_t& remainingAppendSize) { index_t minPower2 = 1; if (treeSize != 0U) { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp index e2e1434bcee..1c952a3ad19 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp @@ -55,7 +55,7 @@ class ContentAddressedIndexedTree : public ContentAddressedAppendOnlyTree store, std::shared_ptr workers, - index_t initial_size); + const index_t& initial_size); ContentAddressedIndexedTree(ContentAddressedIndexedTree const& other) = delete; ContentAddressedIndexedTree(ContentAddressedIndexedTree&& other) = delete; ~ContentAddressedIndexedTree() = default; @@ -122,7 +122,7 @@ class ContentAddressedIndexedTree : public ContentAddressedAppendOnlyTree::FindLeafCallback& on_completion) const; @@ -151,7 +151,7 @@ class ContentAddressedIndexedTree : public ContentAddressedAppendOnlyTree::FindLeafCallback& on_completion) const; @@ -230,7 +230,7 @@ class ContentAddressedIndexedTree : public ContentAddressedAppendOnlyTree> insertions, const InsertionCompletionCallback& completion); - void perform_insertions_without_witness(index_t highest_index, + void perform_insertions_without_witness(const index_t& highest_index, std::shared_ptr> insertions, const InsertionCompletionCallback& completion); @@ -257,7 +257,7 @@ class 
ContentAddressedIndexedTree : public ContentAddressedAppendOnlyTree ContentAddressedIndexedTree::ContentAddressedIndexedTree(std::unique_ptr store, std::shared_ptr workers, - index_t initial_size) + const index_t& initial_size) : ContentAddressedAppendOnlyTree(std::move(store), workers) { if (initial_size < 2) { @@ -415,7 +415,7 @@ void ContentAddressedIndexedTree::find_leaf_index( template void ContentAddressedIndexedTree::find_leaf_index_from( const LeafValueType& leaf, - index_t start_index, + const index_t& start_index, bool includeUncommitted, const ContentAddressedAppendOnlyTree::FindLeafCallback& on_completion) const { @@ -442,7 +442,7 @@ template void ContentAddressedIndexedTree::find_leaf_index_from( const LeafValueType& leaf, const index_t& blockNumber, - index_t start_index, + const index_t& start_index, bool includeUncommitted, const ContentAddressedAppendOnlyTree::FindLeafCallback& on_completion) const { @@ -557,7 +557,7 @@ void ContentAddressedIndexedTree::add_or_update_values(con template void ContentAddressedIndexedTree::add_or_update_values( const std::vector& values, - const uint32_t subtree_depth, + uint32_t subtree_depth, const AddCompletionCallbackWithWitness& completion) { add_or_update_values_internal(values, subtree_depth, completion, true); @@ -565,7 +565,7 @@ void ContentAddressedIndexedTree::add_or_update_values( template void ContentAddressedIndexedTree::add_or_update_values(const std::vector& values, - const uint32_t subtree_depth, + uint32_t subtree_depth, const AddCompletionCallback& completion) { auto final_completion = [=](const TypedResponse>& add_data_response) { @@ -584,7 +584,7 @@ void ContentAddressedIndexedTree::add_or_update_values(con template void ContentAddressedIndexedTree::add_or_update_values_internal( const std::vector& values, - const uint32_t subtree_depth, + uint32_t subtree_depth, const AddCompletionCallbackWithWitness& completion, bool capture_witness) { @@ -845,7 +845,7 @@ void ContentAddressedIndexedTree::perform_insertions( template void ContentAddressedIndexedTree::perform_insertions_without_witness( - index_t highest_index, + const index_t& highest_index, std::shared_ptr> insertions, const InsertionCompletionCallback& completion) { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp index 796abefc1a1..7ed7d76d102 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp @@ -128,7 +128,7 @@ template struct IndexedLeaf { IndexedLeaf() = default; - IndexedLeaf(const LeafType& val, index_t nextIdx, fr nextVal) + IndexedLeaf(const LeafType& val, const index_t& nextIdx, const fr& nextVal) : value(val) , nextIndex(nextIdx) , nextValue(nextVal) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp index 9c4c7b67f6e..1f39eaf2fb1 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -29,7 +30,7 @@ void deserialise_key(void* data, uint8_t& key) // 64 bit integers are stored in little endian byte order std::vector serialise_key(uint64_t key) { - uint64_t le = key; + uint64_t le = 
htole64(key); const uint8_t* p = reinterpret_cast(&le); return std::vector(p, p + sizeof(key)); } @@ -38,10 +39,10 @@ void deserialise_key(void* data, uint64_t& key) { uint64_t le = 0; std::memcpy(&le, data, sizeof(le)); - key = le; + key = le64toh(le); } -std::vector serialise_key(uint256_t key) +std::vector serialise_key(const uint256_t& key) { std::vector buf(32); std::memcpy(buf.data(), key.data, 32); @@ -58,10 +59,7 @@ int size_cmp(const MDB_val* a, const MDB_val* b) if (a->mv_size < b->mv_size) { return -1; } - if (a->mv_size > b->mv_size) { - return 1; - } - return 0; + return (a->mv_size > b->mv_size) ? 1 : 0; } std::vector mdb_val_to_vector(const MDB_val& dbVal) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp index bd9c8f56df6..cae491afa0b 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp @@ -19,7 +19,7 @@ int size_cmp(const MDB_val* a, const MDB_val* b); std::vector serialise_key(uint8_t key); std::vector serialise_key(uint64_t key); -std::vector serialise_key(uint256_t key); +std::vector serialise_key(const uint256_t& key); void deserialise_key(void* data, uint8_t& key); void deserialise_key(void* data, uint64_t& key); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp index 33ce9eeca3c..c761ec99bd9 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp @@ -1,6 +1,6 @@ #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" - #include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" #include diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp index 81538f50fc3..6443c996ec3 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp @@ -1,9 +1,10 @@ #pragma once #include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" namespace bb::crypto::merkle_tree { + +class LMDBDatabaseCreationTransaction; /** * RAII wrapper atound the opening and closing of an LMDB database * Contains a reference to its LMDB environment diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp index dcafbd1b5a7..3e1445ab706 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp @@ -29,4 +29,9 @@ void LMDBTransaction::abort() call_lmdb_func(mdb_txn_abort, _transaction); state = TransactionState::ABORTED; } + +bool LMDBTransaction::get_value(std::vector& key, std::vector& 
data, const LMDBDatabase& db) const +{ + return lmdb_queries::get_value(key, data, db, *this); +} } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp index c2931225d42..6ae56bd8f9f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp @@ -1,5 +1,9 @@ #pragma once +#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" +#include +#include namespace bb::crypto::merkle_tree { @@ -33,9 +37,68 @@ class LMDBTransaction { */ virtual void abort(); + template + bool get_value_or_previous(T& key, + std::vector& data, + const LMDBDatabase& db, + const std::function&)>& is_valid) const; + + template bool get_value_or_previous(T& key, std::vector& data, const LMDBDatabase& db) const; + + template bool get_value(T& key, std::vector& data, const LMDBDatabase& db) const; + + template + void get_all_values_greater_or_equal_key(const T& key, + std::vector>& data, + const LMDBDatabase& db) const; + + template + void get_all_values_lesser_or_equal_key(const T& key, + std::vector>& data, + const LMDBDatabase& db) const; + + bool get_value(std::vector& key, std::vector& data, const LMDBDatabase& db) const; + protected: std::shared_ptr _environment; MDB_txn* _transaction; TransactionState state; }; + +template bool LMDBTransaction::get_value(T& key, std::vector& data, const LMDBDatabase& db) const +{ + std::vector keyBuffer = serialise_key(key); + return get_value(keyBuffer, data, db); +} + +template +bool LMDBTransaction::get_value_or_previous(T& key, std::vector& data, const LMDBDatabase& db) const +{ + return lmdb_queries::get_value_or_previous(key, data, db, *this); +} + +template +bool LMDBTransaction::get_value_or_previous(T& key, + std::vector& data, + const LMDBDatabase& db, + const std::function&)>& is_valid) const +{ + return lmdb_queries::get_value_or_previous(key, data, db, is_valid, *this); +} + +template +void LMDBTransaction::get_all_values_greater_or_equal_key(const T& key, + std::vector>& data, + const LMDBDatabase& db) const +{ + lmdb_queries::get_all_values_greater_or_equal_key(key, data, db, *this); +} + +template +void LMDBTransaction::get_all_values_lesser_or_equal_key(const T& key, + std::vector>& data, + const LMDBDatabase& db) const +{ + lmdb_queries::get_all_values_lesser_or_equal_key(key, data, db, *this); +} } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp index 0df28587e7a..303e8f654ff 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp @@ -18,20 +18,4 @@ void LMDBTreeReadTransaction::abort() LMDBTransaction::abort(); _environment->release_reader(); } - -bool LMDBTreeReadTransaction::get_value(std::vector& key, - std::vector& data, - const LMDBDatabase& db) const -{ - MDB_val dbKey; - dbKey.mv_size = key.size(); - 
dbKey.mv_data = (void*)key.data(); - - MDB_val dbVal; - if (!call_lmdb_func(mdb_get, underlying(), db.underlying(), &dbKey, &dbVal)) { - return false; - } - copy_to_vector(dbVal, data); - return true; -} } // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp index 68e5b56aa2a..89a20df8e7a 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp @@ -31,67 +31,6 @@ class LMDBTreeReadTransaction : public LMDBTransaction { ~LMDBTreeReadTransaction() override; - template - bool get_value_or_previous(T& key, - std::vector& data, - const LMDBDatabase& db, - const std::function&)>& is_valid) const; - - template bool get_value_or_previous(T& key, std::vector& data, const LMDBDatabase& db) const; - - template bool get_value(T& key, std::vector& data, const LMDBDatabase& db) const; - - template - void get_all_values_greater_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const; - - template - void get_all_values_lesser_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const; - - bool get_value(std::vector& key, std::vector& data, const LMDBDatabase& db) const; - void abort() override; }; - -template -bool LMDBTreeReadTransaction::get_value(T& key, std::vector& data, const LMDBDatabase& db) const -{ - std::vector keyBuffer = serialise_key(key); - return get_value(keyBuffer, data, db); -} - -template -bool LMDBTreeReadTransaction::get_value_or_previous(T& key, std::vector& data, const LMDBDatabase& db) const -{ - return lmdb_queries::get_value_or_previous(key, data, db, *this); -} - -template -bool LMDBTreeReadTransaction::get_value_or_previous( - T& key, - std::vector& data, - const LMDBDatabase& db, - const std::function&)>& is_valid) const -{ - return lmdb_queries::get_value_or_previous(key, data, db, is_valid, *this); -} - -template -void LMDBTreeReadTransaction::get_all_values_greater_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const -{ - lmdb_queries::get_all_values_greater_or_equal_key(key, data, db, *this); -} - -template -void LMDBTreeReadTransaction::get_all_values_lesser_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const -{ - lmdb_queries::get_all_values_lesser_or_equal_key(key, data, db, *this); -} } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index c9fdef9a9f1..fc319a7a7c6 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -2,6 +2,7 @@ #include "barretenberg/common/serialize.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/numeric/uint128/uint128.hpp" @@ -237,7 +238,7 @@ void 
LMDBTreeStore::delete_leaf_by_hash(const fr& leafHash, WriteTransaction& tx fr LMDBTreeStore::find_low_leaf(const fr& leafValue, Indices& indices, - std::optional sizeLimit, + const std::optional& sizeLimit, ReadTransaction& tx) { std::vector data; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index e1cbeddb4ff..18e1749acab 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -142,7 +142,10 @@ class LMDBTreeStore { template bool read_leaf_indices(const fr& leafValue, Indices& indices, TxType& tx); - fr find_low_leaf(const fr& leafValue, Indices& indices, std::optional sizeLimit, ReadTransaction& tx); + fr find_low_leaf(const fr& leafValue, + Indices& indices, + const std::optional& sizeLimit, + ReadTransaction& tx); void write_leaf_indices(const fr& leafValue, const Indices& indices, WriteTransaction& tx); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp index 5065575ac69..4b4cd846a2f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp @@ -5,6 +5,7 @@ #include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" #include "lmdb.h" #include @@ -36,44 +37,13 @@ void LMDBTreeWriteTransaction::try_abort() LMDBTransaction::abort(); } -bool LMDBTreeWriteTransaction::get_value(std::vector& key, - std::vector& data, - const LMDBDatabase& db) const -{ - MDB_val dbKey; - dbKey.mv_size = key.size(); - dbKey.mv_data = (void*)key.data(); - - MDB_val dbVal; - if (!call_lmdb_func(mdb_get, underlying(), db.underlying(), &dbKey, &dbVal)) { - return false; - } - copy_to_vector(dbVal, data); - return true; -} - void LMDBTreeWriteTransaction::put_value(std::vector& key, std::vector& data, const LMDBDatabase& db) { - MDB_val dbKey; - dbKey.mv_size = key.size(); - dbKey.mv_data = (void*)key.data(); - - MDB_val dbVal; - dbVal.mv_size = data.size(); - dbVal.mv_data = (void*)data.data(); - call_lmdb_func("mdb_put", mdb_put, underlying(), db.underlying(), &dbKey, &dbVal, 0U); + lmdb_queries::put_value(key, data, db, *this); } void LMDBTreeWriteTransaction::delete_value(std::vector& key, const LMDBDatabase& db) { - MDB_val dbKey; - dbKey.mv_size = key.size(); - dbKey.mv_data = (void*)key.data(); - - MDB_val* dbVal = nullptr; - int code = call_lmdb_func_with_return(mdb_del, underlying(), db.underlying(), &dbKey, dbVal); - if (code != 0 && code != MDB_NOTFOUND) { - throw_error("mdb_del", code); - } + lmdb_queries::delete_value(key, db, *this); } } // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp index eb7a2f09df1..d12d5fdc3ad 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp 
+++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp @@ -38,57 +38,26 @@ class LMDBTreeWriteTransaction : public LMDBTransaction { void delete_value(std::vector& key, const LMDBDatabase& db); - // There are a the following two 'getters' here copied from LMDBTreeReadTransaction - // This could be rationalised to prevent the duplication - template bool get_value(T& key, std::vector& data, const LMDBDatabase& db) const; - - template - void get_all_values_greater_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const; - template void delete_all_values_greater_or_equal_key(const T& key, const LMDBDatabase& db) const; - template - void get_all_values_lesser_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const; - template void delete_all_values_lesser_or_equal_key(const T& key, const LMDBDatabase& db) const; - bool get_value(std::vector& key, std::vector& data, const LMDBDatabase& db) const; - void commit(); void try_abort(); }; -template -bool LMDBTreeWriteTransaction::get_value(T& key, std::vector& data, const LMDBDatabase& db) const -{ - std::vector keyBuffer = serialise_key(key); - return get_value(keyBuffer, data, db); -} - template void LMDBTreeWriteTransaction::put_value(T& key, std::vector& data, const LMDBDatabase& db) { std::vector keyBuffer = serialise_key(key); - put_value(keyBuffer, data, db); + lmdb_queries::put_value(keyBuffer, data, db, *this); } template void LMDBTreeWriteTransaction::delete_value(T& key, const LMDBDatabase& db) { std::vector keyBuffer = serialise_key(key); - delete_value(keyBuffer, db); -} - -template -void LMDBTreeWriteTransaction::get_all_values_greater_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const -{ - lmdb_queries::get_all_values_greater_or_equal_key(key, data, db, *this); + lmdb_queries::delete_value(keyBuffer, db, *this); } template @@ -97,14 +66,6 @@ void LMDBTreeWriteTransaction::delete_all_values_greater_or_equal_key(const T& k lmdb_queries::delete_all_values_greater_or_equal_key(key, db, *this); } -template -void LMDBTreeWriteTransaction::get_all_values_lesser_or_equal_key(const T& key, - std::vector>& data, - const LMDBDatabase& db) const -{ - lmdb_queries::get_all_values_lesser_or_equal_key(key, data, db, *this); -} - template void LMDBTreeWriteTransaction::delete_all_values_lesser_or_equal_key(const T& key, const LMDBDatabase& db) const { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp new file mode 100644 index 00000000000..311b7484d45 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp @@ -0,0 +1,52 @@ +#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp" + +namespace bb::crypto::merkle_tree::lmdb_queries { + +void put_value(std::vector& key, + std::vector& data, + const LMDBDatabase& db, + bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + dbVal.mv_size = data.size(); + dbVal.mv_data = (void*)data.data(); + call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, 0U); +} + +void delete_value(std::vector& key, + const LMDBDatabase& db, + bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx) +{ + MDB_val dbKey; + 
dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val* dbVal = nullptr; + int code = call_lmdb_func_with_return(mdb_del, tx.underlying(), db.underlying(), &dbKey, dbVal); + if (code != 0 && code != MDB_NOTFOUND) { + throw_error("mdb_del", code); + } +} + +bool get_value(std::vector& key, + std::vector& data, + const LMDBDatabase& db, + const bb::crypto::merkle_tree::LMDBTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) { + return false; + } + copy_to_vector(dbVal, data); + return true; +} +} // namespace bb::crypto::merkle_tree::lmdb_queries \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp index 2c9efc21e0e..3269dc13952 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp @@ -7,7 +7,13 @@ #include #include -namespace bb::crypto::merkle_tree::lmdb_queries { +namespace bb::crypto::merkle_tree { + +class LMDBTransaction; +class LMDBTreeWriteTransaction; + +namespace lmdb_queries { + template bool get_value_or_previous(TKey& key, std::vector& data, const LMDBDatabase& db, const TxType& tx) { @@ -394,4 +400,17 @@ void delete_all_values_lesser_or_equal_key(const TKey& key, const LMDBDatabase& } call_lmdb_func(mdb_cursor_close, cursor); } -} // namespace bb::crypto::merkle_tree::lmdb_queries + +void put_value(std::vector& key, + std::vector& data, + const LMDBDatabase& db, + LMDBTreeWriteTransaction& tx); + +void delete_value(std::vector& key, const LMDBDatabase& db, LMDBTreeWriteTransaction& tx); + +bool get_value(std::vector& key, + std::vector& data, + const LMDBDatabase& db, + const LMDBTransaction& tx); +} // namespace lmdb_queries +} // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 9b113365e1a..27b0cb29c7f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -125,12 +125,12 @@ template class ContentAddressedCachedTreeStore { /** * @brief Writes the provided data at the given node coordinates. Only writes to uncommitted data. */ - void put_cached_node_by_index(uint32_t level, index_t index, const fr& data, bool overwriteIfPresent = true); + void put_cached_node_by_index(uint32_t level, const index_t& index, const fr& data, bool overwriteIfPresent = true); /** * @brief Returns the data at the given node coordinates if available. */ - bool get_cached_node_by_index(uint32_t level, index_t index, fr& data) const; + bool get_cached_node_by_index(uint32_t level, const index_t& index, fr& data) const; /** * @brief Writes the provided meta data to uncommitted state @@ -159,7 +159,7 @@ template class ContentAddressedCachedTreeStore { * @brief Finds the index of the given leaf value in the tree if available. Includes uncommitted data if requested. 
*/ std::optional find_leaf_index_from(const LeafValueType& leaf, - index_t start_index, + const index_t& start_index, const RequestContext& requestContext, ReadTransaction& tx, bool includeUncommitted) const; @@ -202,7 +202,7 @@ template class ContentAddressedCachedTreeStore { std::optional get_fork_block() const; - void advance_finalised_block(index_t blockNumber); + void advance_finalised_block(const index_t& blockNumber); private: std::string name_; @@ -244,7 +244,7 @@ template class ContentAddressedCachedTreeStore { void persist_leaf_indices(WriteTransaction& tx); - void persist_leaf_keys(index_t startIndex, WriteTransaction& tx); + void persist_leaf_keys(const index_t& startIndex, WriteTransaction& tx); void persist_leaf_pre_image(const fr& hash, WriteTransaction& tx); @@ -252,10 +252,10 @@ template class ContentAddressedCachedTreeStore { void remove_node(const std::optional& optional_hash, uint32_t level, - std::optional maxIndex, + const std::optional& maxIndex, WriteTransaction& tx); - void remove_leaf(const fr& hash, std::optional maxIndex, WriteTransaction& tx); + void remove_leaf(const fr& hash, const std::optional& maxIndex, WriteTransaction& tx); void remove_leaf_indices(const fr& key, const index_t& maxIndex, WriteTransaction& tx); @@ -445,7 +445,7 @@ std::optional ContentAddressedCachedTreeStore::find_leaf template std::optional ContentAddressedCachedTreeStore::find_leaf_index_from( const LeafValueType& leaf, - index_t start_index, + const index_t& start_index, const RequestContext& requestContext, ReadTransaction& tx, bool includeUncommitted) const @@ -521,7 +521,7 @@ bool ContentAddressedCachedTreeStore::get_node_by_hash(const fr& template void ContentAddressedCachedTreeStore::put_cached_node_by_index(uint32_t level, - index_t index, + const index_t& index, const fr& data, bool overwriteIfPresent) { @@ -540,7 +540,7 @@ void ContentAddressedCachedTreeStore::put_cached_node_by_index(ui template bool ContentAddressedCachedTreeStore::get_cached_node_by_index(uint32_t level, - index_t index, + const index_t& index, fr& data) const { // Accessing nodes_by_index_ under a lock @@ -697,7 +697,7 @@ void ContentAddressedCachedTreeStore::persist_leaf_indices(WriteT } template -void ContentAddressedCachedTreeStore::persist_leaf_keys(index_t startIndex, WriteTransaction& tx) +void ContentAddressedCachedTreeStore::persist_leaf_keys(const index_t& startIndex, WriteTransaction& tx) { for (auto& idx : indices_) { FrKeyType key = idx.first; @@ -797,7 +797,7 @@ void ContentAddressedCachedTreeStore::persist_meta(TreeMeta& m, W } template -void ContentAddressedCachedTreeStore::advance_finalised_block(index_t blockNumber) +void ContentAddressedCachedTreeStore::advance_finalised_block(const index_t& blockNumber) { TreeMeta committedMeta; TreeMeta uncommittedMeta; @@ -1017,7 +1017,7 @@ void ContentAddressedCachedTreeStore::remove_leaf_indices(const f template void ContentAddressedCachedTreeStore::remove_leaf(const fr& hash, - std::optional maxIndex, + const std::optional& maxIndex, WriteTransaction& tx) { // std::cout << "Removing leaf " << hash << std::endl; @@ -1045,7 +1045,7 @@ void ContentAddressedCachedTreeStore::remove_leaf(const fr& hash, template void ContentAddressedCachedTreeStore::remove_node(const std::optional& optional_hash, uint32_t level, - std::optional maxIndex, + const std::optional& maxIndex, WriteTransaction& tx) { if (!optional_hash.has_value()) { From 0894c13fd3c8f7e439989d5e4a9be303bd4512ac Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Tue, 29 Oct 2024 18:01:02 +0000 
Subject: [PATCH 02/31] Updated logging --- .../content_addressed_append_only_tree.hpp | 40 ++++++--- ...ontent_addressed_append_only_tree.test.cpp | 5 +- .../content_addressed_indexed_tree.hpp | 47 ++++++++--- .../content_addressed_indexed_tree.test.cpp | 10 ++- .../merkle_tree/indexed_tree/indexed_leaf.hpp | 6 ++ .../cached_content_addressed_tree_store.hpp | 81 ++++++++++++++----- 6 files changed, 144 insertions(+), 45 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index b1bb82e32a4..47ca4f80eff 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -337,7 +337,9 @@ void ContentAddressedAppendOnlyTree::get_meta_data(const i BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() << "Unable to get meta data for block " << blockNumber + << ", failed to get block data.") + .str()); } response.inner.meta.size = blockData.size; @@ -367,12 +369,15 @@ void ContentAddressedAppendOnlyTree::get_sibling_path(cons execute_and_report( [=, this](TypedResponse& response) { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to get sibling path at block 0"); } ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() + << "Unable to get sibling path for index " << index << " at block " + << blockNumber << ", failed to get block data.") + .str()); } RequestContext requestContext; @@ -591,15 +596,21 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ execute_and_report( [=, this](TypedResponse& response) { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to get leaf at block 0"); } ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() + << "Unable to get leaf at index " << leaf_index << " for block " + << blockNumber << ", failed to get block data.") + .str()); } if (blockData.size < leaf_index) { - response.message = "Data for block unavailable"; + response.message = + (std::stringstream() << "Unable to get leaf at index " << leaf_index << " for block " + << blockNumber << ", leaf index is too high.") + .str(); response.success = false; return; } @@ -670,12 +681,15 @@ void ContentAddressedAppendOnlyTree::find_leaf_index_from( execute_and_report( [=, this](TypedResponse& response) { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to find leaf index for block number 0"); } ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() + << "Unable 
to find leaf from index " << start_index << " for block " + << blockNumber << ", failed to get block data.") + .str()); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -744,7 +758,7 @@ void ContentAddressedAppendOnlyTree::remove_historic_block execute_and_report( [=, this]() { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to remove historic block 0"); } store_->remove_historical_block(blockNumber); }, @@ -761,7 +775,7 @@ void ContentAddressedAppendOnlyTree::unwind_block( execute_and_report( [=, this]() { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to unwind block 0"); } store_->unwind_block(blockNumber); }, @@ -778,7 +792,7 @@ void ContentAddressedAppendOnlyTree::finalise_block(const execute_and_report( [=, this]() { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to finalise block 0"); } store_->advance_finalised_block(blockNumber); }, @@ -850,7 +864,9 @@ void ContentAddressedAppendOnlyTree::add_batch_internal( } if (new_size > max_size_) { - throw std::runtime_error("Tree is full"); + throw std::runtime_error((std::stringstream() << "Unable to append leaves to tree " << meta.name + << " new size: " << new_size << " max size: " << max_size_) + .str()); } // Add the values at the leaf nodes of the tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index b15c3c4df61..f0bf52f91df 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -455,10 +455,13 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, reports_an_error_if_tree_is_ } add_values(tree, values); + std::stringstream ss; + ss << "Unable to append leaves to tree " << name << " new size: 17 max size: 16"; + Signal signal; auto add_completion = [&](const TypedResponse& response) { EXPECT_EQ(response.success, false); - EXPECT_EQ(response.message, "Tree is full"); + EXPECT_EQ(response.message, ss.str()); signal.signal_level(); }; tree.add_value(VALUES[16], add_completion); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp index 1c952a3ad19..32fcaf93ce2 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp @@ -266,7 +266,6 @@ ContentAddressedIndexedTree::ContentAddressedIndexedTree(s zero_hashes_.resize(depth_ + 1); // Create the zero hashes for the tree - // Indexed_LeafType zero_leaf{ 0, 0, 0 }; auto current = fr::zero(); for (uint32_t i = depth_; i > 0; --i) { zero_hashes_[i] = current; @@ -363,12 +362,15 @@ void ContentAddressedIndexedTree::get_leaf(const index_t& execute_and_report>( [=, this](TypedResponse>& response) { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to get leaf for block number 0"); } ReadTransactionPtr tx = 
store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() + << "Unable to get leaf at index " << index << " for block " << blockNumber + << ", failed to get block data.") + .str()); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -450,12 +452,15 @@ void ContentAddressedIndexedTree::find_leaf_index_from( execute_and_report( [=, this](TypedResponse& response) { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to find leaf index from for block 0"); } typename Store::ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() + << "Unable to find leaf from index " << start_index << " for block " + << blockNumber << ", failed to get block data.") + .str()); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -505,12 +510,14 @@ void ContentAddressedIndexedTree::find_low_leaf(const fr& execute_and_report( [=, this](TypedResponse& response) { if (blockNumber == 0) { - throw std::runtime_error("Invalid block number"); + throw std::runtime_error("Unable to find low leaf for block 0"); } typename Store::ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Data for block unavailable"); + throw std::runtime_error((std::stringstream() << "Unable to find low leaf for block " << blockNumber + << ", failed to get block data.") + .str()); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -980,7 +987,10 @@ void ContentAddressedIndexedTree::generate_insertions( // Ensure that the tree is not going to be overfilled index_t new_total_size = num_leaves_to_be_inserted + meta.size; if (new_total_size > max_size_) { - throw std::runtime_error("Tree is full"); + throw std::runtime_error((std::stringstream() + << "Unable to insert values into tree " << meta.name + << " new size: " << new_total_size << " max size: " << max_size_) + .str()); } for (size_t i = 0; i < values.size(); ++i) { std::pair& value_pair = values[i]; @@ -992,7 +1002,10 @@ void ContentAddressedIndexedTree::generate_insertions( fr value = value_pair.first.get_key(); auto it = unique_values.insert(value); if (!it.second) { - throw std::runtime_error("Duplicate key not allowed in same batch"); + throw std::runtime_error((std::stringstream() + << "Duplicate key not allowed in same batch, key value: " << value + << ", tree: " << meta.name) + .str()); } // This gives us the leaf that need updating @@ -1020,7 +1033,10 @@ void ContentAddressedIndexedTree::generate_insertions( if (!low_leaf_hash.has_value()) { // std::cout << "Failed to find low leaf" << std::endl; - throw std::runtime_error("Failed to find low leaf"); + throw std::runtime_error((std::stringstream() + << "Unable to insert values into tree " << meta.name + << " failed to find low leaf at index " << low_leaf_index) + .str()); } // std::cout << "Low leaf hash " << low_leaf_hash.value() << std::endl; @@ -1029,7 +1045,11 @@ void ContentAddressedIndexedTree::generate_insertions( if (!low_leaf_option.has_value()) { // std::cout << "No pre-image" << std::endl; - throw 
std::runtime_error("Failed to find pre-image for low leaf"); + throw std::runtime_error((std::stringstream() + << "Unable to insert values into tree " << meta.name + << " failed to get leaf pre-image by hash for index " + << low_leaf_index) + .str()); } // std::cout << "Low leaf pre-image " << low_leaf_option.value() << std::endl; low_leaf = low_leaf_option.value(); @@ -1078,7 +1098,10 @@ void ContentAddressedIndexedTree::generate_insertions( // The set of appended leaves already has an empty leaf in the slot at index // 'index_into_appended_leaves' } else { - throw std::runtime_error("IndexedLeafValue is not updateable"); + throw std::runtime_error((std::stringstream() + << "Unable to insert values into tree " << meta.name << " leaf type " + << IndexedLeafValueType::name() << " is not updateable") + .str()); } response.inner.highest_index = std::max(response.inner.highest_index, low_leaf_index); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp index c7618d62031..fc195a7d828 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp @@ -505,10 +505,13 @@ TEST_F(PersistedContentAddressedIndexedTreeTest, reports_an_error_if_tree_is_ove } add_values(tree, values); + std::stringstream ss; + ss << "Unable to insert values into tree " << name << " new size: 17 max size: 16"; + Signal signal; auto add_completion = [&](const TypedResponse>& response) { EXPECT_EQ(response.success, false); - EXPECT_EQ(response.message, "Tree is full"); + EXPECT_EQ(response.message, ss.str()); signal.signal_level(); }; tree.add_or_update_value(NullifierLeafValue(VALUES[16]), add_completion); @@ -939,10 +942,13 @@ TEST_F(PersistedContentAddressedIndexedTreeTest, reports_an_error_if_batch_conta } values[8] = values[0]; + std::stringstream ss; + ss << "Duplicate key not allowed in same batch, key value: " << values[0].value << ", tree: " << name; + Signal signal; auto add_completion = [&](const TypedResponse>& response) { EXPECT_EQ(response.success, false); - EXPECT_EQ(response.message, "Duplicate key not allowed in same batch"); + EXPECT_EQ(response.message, ss.str()); signal.signal_level(); }; tree.add_or_update_values(values, add_completion); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp index 7ed7d76d102..615f2ce4cf2 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp @@ -59,6 +59,8 @@ struct NullifierLeafValue { static NullifierLeafValue empty() { return { fr::zero() }; } static NullifierLeafValue padding(index_t i) { return { i }; } + + static std::string name() { return std::string("NullifierLeafValue"); }; }; struct PublicDataLeafValue { @@ -117,6 +119,8 @@ struct PublicDataLeafValue { static PublicDataLeafValue empty() { return { fr::zero(), fr::zero() }; } static PublicDataLeafValue padding(index_t i) { return { i, fr::zero() }; } + + static std::string name() { return std::string("PublicDataLeafValue"); }; }; template struct IndexedLeaf { @@ -140,6 +144,8 @@ template struct IndexedLeaf { static bool 
is_updateable() { return LeafType::is_updateable(); } + static std::string name() { return LeafType::name(); } + bool operator==(IndexedLeaf const& other) const { return value == other.value && nextValue == other.nextValue && nextIndex == other.nextIndex; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 27b0cb29c7f..334e5e2932f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -10,6 +10,7 @@ #include "barretenberg/serialize/msgpack.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" #include "msgpack/assert.hpp" +#include #include #include #include @@ -679,7 +680,8 @@ template void ContentAddressedCachedTreeStorecommit(); } catch (std::exception& e) { tx->try_abort(); - throw; + throw std::runtime_error( + (std::stringstream() << "Unable to commit data to tree: " << name_ << " Error: " << e.what()).str()); } } @@ -803,7 +805,9 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con TreeMeta uncommittedMeta; BlockPayload blockPayload; if (blockNumber < 1) { - throw std::runtime_error("Unable to remove block"); + throw std::runtime_error( + (std::stringstream() << "Unable to advance finalised block: " << blockNumber << ". Tree name: " << name_) + .str()); } if (initialised_from_block_.has_value()) { throw std::runtime_error("Advancing the finalised block on a fork is forbidden"); @@ -814,7 +818,9 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con get_meta(uncommittedMeta, *tx, true); get_meta(committedMeta, *tx, false); if (!dataStore_->read_block_data(blockNumber, blockPayload, *tx)) { - throw std::runtime_error("Failed to retrieve block data"); + throw std::runtime_error((std::stringstream() << "Unable to advance finalised block: " << blockNumber + << ". Failed to read block data. Tree name: " << name_) + .str()); } } // can only finalise blocks that are not finalised @@ -846,7 +852,9 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw; + throw std::runtime_error((std::stringstream() << "Unable to commit advance of finalised block: " << blockNumber + << ". Tree name: " << name_ << " Error: " << e.what()) + .str()); } // commit successful, now also update the uncommitted meta @@ -862,7 +870,9 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& BlockPayload blockData; BlockPayload previousBlockData; if (blockNumber < 1) { - throw std::runtime_error("Unable to remove block"); + throw std::runtime_error( + (std::stringstream() << "Unable to remove historical block: " << blockNumber << ". 
Tree name: " << name_) + .str()); } if (initialised_from_block_.has_value()) { throw std::runtime_error("Removing a block on a fork is forbidden"); @@ -872,13 +882,23 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& get_meta(uncommittedMeta, *tx, true); get_meta(committedMeta, *tx, false); if (committedMeta != uncommittedMeta) { - throw std::runtime_error("Can't unwind with uncommitted data, first rollback before unwinding"); + throw std::runtime_error( + (std::stringstream() + << "Unable to unwind block: " << blockNumber + << " Can't unwind with uncommitted data, first rollback before unwinding. Tree name: " << name_) + .str()); } if (blockNumber != uncommittedMeta.unfinalisedBlockHeight) { - throw std::runtime_error("Block number is not the most recent"); + throw std::runtime_error((std::stringstream() + << "Unable to unwind block: " << blockNumber << " unfinalisedBlockHeight: " + << committedMeta.unfinalisedBlockHeight << ". Tree name: " << name_) + .str()); } if (blockNumber <= uncommittedMeta.finalisedBlockHeight) { - throw std::runtime_error("Can't unwind a finalised block"); + throw std::runtime_error((std::stringstream() + << "Unable to unwind block: " << blockNumber << " finalisedBlockHeight: " + << committedMeta.finalisedBlockHeight << ". Tree name: " << name_) + .str()); } // populate the required data for the previous block @@ -887,12 +907,17 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& previousBlockData.size = uncommittedMeta.initialSize; previousBlockData.blockNumber = 0; } else if (!dataStore_->read_block_data(blockNumber - 1, previousBlockData, *tx)) { - throw std::runtime_error("Failed to retrieve previous block data"); + throw std::runtime_error((std::stringstream() + << "Unable to unwind block: " << blockNumber + << ". Failed to read previous block data. Tree name: " << name_) + .str()); } // now get the root for the block we want to unwind if (!dataStore_->read_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Failed to retrieve block data for block to unwind"); + throw std::runtime_error((std::stringstream() << "Unable to unwind block: " << blockNumber + << ". Failed to read block data. Tree name: " << name_) + .str()); } } WriteTransactionPtr writeTx = create_write_transaction(); @@ -915,7 +940,9 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw; + throw std::runtime_error((std::stringstream() << "Unable to commit unwind of block: " << blockNumber + << ". Tree name: " << name_ << " Error: " << e.what()) + .str()); } // now update the uncommitted meta @@ -929,7 +956,9 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con TreeMeta uncommittedMeta; BlockPayload blockData; if (blockNumber < 1) { - throw std::runtime_error("Unable to remove block"); + throw std::runtime_error( + (std::stringstream() << "Unable to remove historical block: " << blockNumber << ". 
Tree name: " << name_) + .str()); } if (initialised_from_block_.has_value()) { throw std::runtime_error("Removing a block on a fork is forbidden"); @@ -941,14 +970,22 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con get_meta(uncommittedMeta, *tx, true); get_meta(committedMeta, *tx, false); if (blockNumber != committedMeta.oldestHistoricBlock) { - throw std::runtime_error("Block number is not the most historic"); + throw std::runtime_error( + (std::stringstream() << "Unable to remove historical block: " << blockNumber << " oldestHistoricBlock: " + << committedMeta.oldestHistoricBlock << ". Tree name: " << name_) + .str()); } if (blockNumber >= committedMeta.finalisedBlockHeight) { - throw std::runtime_error("Can't remove current finalised block"); + throw std::runtime_error( + (std::stringstream() << "Unable to remove historical block: " << blockNumber << " oldestHistoricBlock: " + << committedMeta.finalisedBlockHeight << ". Tree name: " << name_) + .str()); } if (!dataStore_->read_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error("Failed to retrieve block data for historical block"); + throw std::runtime_error((std::stringstream() << "Unable to remove historical block: " << blockNumber + << ". Failed to read block data. Tree name: " << name_) + .str()); } } WriteTransactionPtr writeTx = create_write_transaction(); @@ -964,7 +1001,9 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw; + throw std::runtime_error((std::stringstream() << "Unable to commit removal of historical block: " << blockNumber + << ". Tree name: " << name_ << " Error: " << e.what()) + .str()); } // commit was successful, update the uncommitted meta @@ -1129,10 +1168,16 @@ void ContentAddressedCachedTreeStore::initialise_from_block(const } if (meta_.unfinalisedBlockHeight < blockNumber) { - throw std::runtime_error("Unable to initialise from future block"); + throw std::runtime_error((std::stringstream() << "Unable to initialise from future block: " << blockNumber + << " unfinalisedBlockHeight: " << meta_.unfinalisedBlockHeight + << ". Tree name: " << name_) + .str()); } if (meta_.oldestHistoricBlock > blockNumber && blockNumber != 0) { - throw std::runtime_error("Unable to fork from expired historical block"); + throw std::runtime_error((std::stringstream() << "Unable to fork from expired historical block: " + << blockNumber << " unfinalisedBlockHeight: " + << meta_.oldestHistoricBlock << ". 
Tree name: " << name_) + .str()); } BlockPayload blockData; if (blockNumber == 0) { From 68f2c2524b18d0c56f1f7f3521ec44cce7586128 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Tue, 29 Oct 2024 22:28:17 +0000 Subject: [PATCH 03/31] Removed erroneous code --- .../node_store/cached_content_addressed_tree_store.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 334e5e2932f..9663c81d013 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -10,7 +10,6 @@ #include "barretenberg/serialize/msgpack.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" #include "msgpack/assert.hpp" -#include #include #include #include From 4962d966fde9187e1c2bfcc8ec902b2cfd94f079 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 30 Oct 2024 10:51:32 +0000 Subject: [PATCH 04/31] Detect if trees are out of sync --- .../src/barretenberg/world_state/types.hpp | 9 +- .../barretenberg/world_state/world_state.cpp | 86 +++++++++++++++++-- .../barretenberg/world_state/world_state.hpp | 8 +- .../world-state/src/native/message.ts | 2 + .../src/native/native_world_state.test.ts | 36 ++++++-- .../src/native/native_world_state.ts | 12 ++- .../src/native/native_world_state_instance.ts | 4 +- 7 files changed, 134 insertions(+), 23 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/world_state/types.hpp b/barretenberg/cpp/src/barretenberg/world_state/types.hpp index 69011a69392..2284c47d7ea 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/types.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/types.hpp @@ -20,6 +20,7 @@ enum MerkleTreeId { }; const uint64_t CANONICAL_FORK_ID = 0; +const uint64_t NUM_TREES = 5; std::string getMerkleTreeName(MerkleTreeId id); @@ -41,20 +42,22 @@ struct WorldStateStatus { index_t unfinalisedBlockNumber; index_t finalisedBlockNumber; index_t oldestHistoricalBlock; - MSGPACK_FIELDS(unfinalisedBlockNumber, finalisedBlockNumber, oldestHistoricalBlock); + bool treesAreSynched; + MSGPACK_FIELDS(unfinalisedBlockNumber, finalisedBlockNumber, oldestHistoricalBlock, treesAreSynched); bool operator==(const WorldStateStatus& other) const { return unfinalisedBlockNumber == other.unfinalisedBlockNumber && finalisedBlockNumber == other.finalisedBlockNumber && - oldestHistoricalBlock == other.oldestHistoricalBlock; + oldestHistoricalBlock == other.oldestHistoricalBlock && treesAreSynched == other.treesAreSynched; } friend std::ostream& operator<<(std::ostream& os, const WorldStateStatus& status) { os << "unfinalisedBlockNumber: " << status.unfinalisedBlockNumber << ", finalisedBlockNumber: " << status.finalisedBlockNumber - << ", oldestHistoricalBlock: " << status.oldestHistoricalBlock; + << ", oldestHistoricalBlock: " << status.oldestHistoricalBlock + << ", treesAreSynched: " << status.treesAreSynched; return os; } }; diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index e4761843c75..b1270dd12bb 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -14,6 +14,7 @@ #include "barretenberg/world_state/types.hpp" 
#include "barretenberg/world_state/world_state_stores.hpp" #include "barretenberg/world_state_napi/message.hpp" +#include #include #include #include @@ -258,6 +259,41 @@ TreeMetaResponse WorldState::get_tree_info(const WorldStateRevision& revision, M fork->_trees.at(tree_id)); } +void WorldState::get_all_tree_info(const WorldStateRevision& revision, std::array& responses) const +{ + Fork::SharedPtr fork = retrieve_fork(revision.forkId); + + std::vector tree_ids{ + MerkleTreeId::NULLIFIER_TREE, MerkleTreeId::NOTE_HASH_TREE, MerkleTreeId::PUBLIC_DATA_TREE, + MerkleTreeId::L1_TO_L2_MESSAGE_TREE, MerkleTreeId::ARCHIVE, + }; + + Signal signal(static_cast(tree_ids.size())); + std::mutex mutex; + + for (auto id : tree_ids) { + const auto& tree = fork->_trees.at(id); + auto callback = [&signal, &responses, &mutex, id](const TypedResponse& meta) { + { + std::lock_guard lock(mutex); + responses[id] = meta.inner; + } + signal.signal_decrement(); + }; + std::visit( + [&callback, &revision](auto&& wrapper) { + if (revision.blockNumber) { + wrapper.tree->get_meta_data(revision.blockNumber, revision.includeUncommitted, callback); + } else { + wrapper.tree->get_meta_data(revision.includeUncommitted, callback); + } + }, + tree); + } + + signal.wait_for_level(0); +} + StateReference WorldState::get_state_reference(const WorldStateRevision& revision) const { return get_state_reference(revision, retrieve_fork(revision.forkId)); @@ -372,17 +408,21 @@ void WorldState::update_archive(const StateReference& block_state_ref, } } -bool WorldState::commit() +std::pair WorldState::commit() { // NOTE: the calling code is expected to ensure no other reads or writes happen during commit Fork::SharedPtr fork = retrieve_fork(CANONICAL_FORK_ID); std::atomic_bool success = true; + std::string message; Signal signal(static_cast(fork->_trees.size())); for (auto& [id, tree] : fork->_trees) { std::visit( - [&signal, &success](auto&& wrapper) { + [&signal, &success, &message](auto&& wrapper) { wrapper.tree->commit([&](const Response& response) { - success = response.success && success; + bool expected = true; + if (!response.success && success.compare_exchange_strong(expected, false)) { + message = response.message; + } signal.signal_decrement(); }); }, @@ -390,7 +430,7 @@ bool WorldState::commit() } signal.wait_for_level(0); - return success; + return std::make_pair(success.load(), message); } void WorldState::rollback() @@ -416,11 +456,13 @@ WorldStateStatus WorldState::sync_block( const std::vector& nullifiers, const std::vector>& public_writes) { + validate_trees_are_equally_synched(); WorldStateStatus status; if (is_same_state_reference(WorldStateRevision::uncommitted(), block_state_ref) && is_archive_tip(WorldStateRevision::uncommitted(), block_header_hash)) { - if (!commit()) { - throw std::runtime_error("Commit failed"); + std::pair result = commit(); + if (!result.first) { + throw std::runtime_error(result.second); } get_status(status); return status; @@ -499,8 +541,9 @@ WorldStateStatus WorldState::sync_block( throw std::runtime_error("Can't synch block: block state does not match world state"); } - if (!commit()) { - throw std::runtime_error("Commit failed"); + std::pair result = commit(); + if (!result.first) { + throw std::runtime_error(result.second); } get_status(status); return status; @@ -694,10 +737,13 @@ bool WorldState::is_archive_tip(const WorldStateRevision& revision, const bb::fr void WorldState::get_status(WorldStateStatus& status) const { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, 
.blockNumber = 0, .includeUncommitted = false }; - TreeMetaResponse archive_state = get_tree_info(revision, MerkleTreeId::ARCHIVE); + std::array responses; + get_all_tree_info(revision, responses); + TreeMetaResponse& archive_state = responses[MerkleTreeId::ARCHIVE]; status.unfinalisedBlockNumber = archive_state.meta.unfinalisedBlockHeight; status.finalisedBlockNumber = archive_state.meta.finalisedBlockHeight; status.oldestHistoricalBlock = archive_state.meta.oldestHistoricBlock; + status.treesAreSynched = determine_if_synched(responses); } bool WorldState::is_same_state_reference(const WorldStateRevision& revision, const StateReference& state_ref) const @@ -705,4 +751,26 @@ bool WorldState::is_same_state_reference(const WorldStateRevision& revision, con return state_ref == get_state_reference(revision); } +void WorldState::validate_trees_are_equally_synched() +{ + WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; + std::array responses; + get_all_tree_info(revision, responses); + + if (!determine_if_synched(responses)) { + throw std::runtime_error("World state trees are out of sync"); + } +} + +bool WorldState::determine_if_synched(std::array& metaResponses) const +{ + index_t blockNumber = metaResponses[0].meta.unfinalisedBlockHeight; + for (size_t i = 1; i < metaResponses.size(); i++) { + if (blockNumber != metaResponses[i].meta.unfinalisedBlockHeight) { + return false; + } + } + return true; +} + } // namespace bb::world_state diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp index 7c09ccc0606..d2fb7240295 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp @@ -200,7 +200,7 @@ class WorldState { /** * @brief Commits the current state of the world state. */ - bool commit(); + std::pair commit(); /** * @brief Rolls back any uncommitted changes made to the world state. @@ -247,6 +247,12 @@ class WorldState { bool remove_historical_block(const index_t& blockNumber); bool set_finalised_block(const index_t& blockNumber); + void get_all_tree_info(const WorldStateRevision& revision, + std::array& responses) const; + bool determine_if_synched(std::array& metaResponses) const; + + void validate_trees_are_equally_synched(); + static bool block_state_matches_world_state(const StateReference& block_state_ref, const StateReference& tree_state_ref); diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index 64d0e0b3faa..8f60a774a16 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -91,6 +91,8 @@ export interface WorldStateStatus { finalisedBlockNumber: bigint; /** Oldest block still available for historical queries and forks. 
*/ oldestHistoricalBlock: bigint; + /** Whether the trees are in sync with each other */ + treesAreSynched: boolean; } interface WithForkId { diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 3892d3f1968..7abb85a17a1 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -12,6 +12,7 @@ import { NativeWorldStateService } from './native_world_state.js'; describe('NativeWorldState', () => { let dataDir: string; let rollupAddress: EthAddress; + const defaultDBMapSize = 10 * 1024 * 1024; beforeAll(async () => { dataDir = await mkdtemp(join(tmpdir(), 'world-state-test')); @@ -27,7 +28,7 @@ describe('NativeWorldState', () => { let messages: Fr[]; beforeAll(async () => { - const ws = await NativeWorldStateService.new(rollupAddress, dataDir); + const ws = await NativeWorldStateService.new(rollupAddress, dataDir, defaultDBMapSize); const fork = await ws.fork(); ({ block, messages } = await mockBlock(1, 2, fork)); await fork.close(); @@ -37,7 +38,7 @@ describe('NativeWorldState', () => { }, 30_000); it('correctly restores committed state', async () => { - const ws = await NativeWorldStateService.new(rollupAddress, dataDir); + const ws = await NativeWorldStateService.new(rollupAddress, dataDir, defaultDBMapSize); await expect( ws.getCommitted().findLeafIndex(MerkleTreeId.NOTE_HASH_TREE, block.body.txEffects[0].noteHashes[0]), ).resolves.toBeDefined(); @@ -46,7 +47,7 @@ describe('NativeWorldState', () => { it('clears the database if the rollup is different', async () => { // open ws against the same data dir but a different rollup - let ws = await NativeWorldStateService.new(EthAddress.random(), dataDir); + let ws = await NativeWorldStateService.new(EthAddress.random(), dataDir, defaultDBMapSize); // db should be empty await expect( ws.getCommitted().findLeafIndex(MerkleTreeId.NOTE_HASH_TREE, block.body.txEffects[0].noteHashes[0]), @@ -56,19 +57,44 @@ describe('NativeWorldState', () => { // later on, open ws against the original rollup and same data dir // db should be empty because we wiped all its files earlier - ws = await NativeWorldStateService.new(rollupAddress, dataDir); + ws = await NativeWorldStateService.new(rollupAddress, dataDir, defaultDBMapSize); await expect( ws.getCommitted().findLeafIndex(MerkleTreeId.NOTE_HASH_TREE, block.body.txEffects[0].noteHashes[0]), ).resolves.toBeUndefined(); await ws.close(); }); + + it('Fails to sync further blocks if trees are out of sync', async () => { + // open ws against the same data dir but a different rollup + const rollupAddress = EthAddress.random(); + let ws = await NativeWorldStateService.new(rollupAddress, dataDir, 1024); + const initialFork = await ws.fork(); + + const { block: block1, messages: messages1 } = await mockBlock(1, 8, initialFork); + const { block: block2, messages: messages2 } = await mockBlock(2, 8, initialFork); + const { block: block3, messages: messages3 } = await mockBlock(3, 8, initialFork); + + // The first block should succeed + await expect(ws.handleL2BlockAndMessages(block1, messages1)).resolves.toBeDefined(); + + // The second block should fail + await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow(); + + // Commits should always fail now, the trees are in an inconsistent state + await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow("World state trees are out of sync"); + await 
expect(ws.handleL2BlockAndMessages(block3, messages3)).rejects.toThrow("World state trees are out of sync"); + + // Creating another world state instance should fail + await ws.close(); + await expect(NativeWorldStateService.new(rollupAddress, dataDir, 1024)).rejects.toThrow("World state trees are out of sync"); + }); }); describe('Forks', () => { let ws: NativeWorldStateService; beforeEach(async () => { - ws = await NativeWorldStateService.new(EthAddress.random(), dataDir); + ws = await NativeWorldStateService.new(EthAddress.random(), dataDir, defaultDBMapSize); }); afterEach(async () => { diff --git a/yarn-project/world-state/src/native/native_world_state.ts b/yarn-project/world-state/src/native/native_world_state.ts index 8d1d83818c0..67a97cd1cb6 100644 --- a/yarn-project/world-state/src/native/native_world_state.ts +++ b/yarn-project/world-state/src/native/native_world_state.ts @@ -53,6 +53,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { static async new( rollupAddress: EthAddress, dataDir: string, + dbMapSizeKb: number, log = createDebugLogger('aztec:world-state:database'), cleanup = () => Promise.resolve(), ): Promise { @@ -68,7 +69,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { await mkdir(dataDir, { recursive: true }); await writeFile(rollupAddressFile, rollupAddress.toString(), 'utf8'); - const instance = new NativeWorldState(dataDir); + const instance = new NativeWorldState(dataDir, dbMapSizeKb); const worldState = new this(instance, log, cleanup); await worldState.init(); return worldState; @@ -77,7 +78,8 @@ export class NativeWorldStateService implements MerkleTreeDatabase { static async tmp(rollupAddress = EthAddress.ZERO, cleanupTmpDir = true): Promise { const log = createDebugLogger('aztec:world-state:database'); const dataDir = await mkdtemp(join(tmpdir(), 'aztec-world-state-')); - log.debug(`Created temporary world state database: ${dataDir}`); + const dbMapSizeKb = 10 * 1024 * 1024; + log.debug(`Created temporary world state database at: ${dataDir} with size: ${dbMapSizeKb}`); // pass a cleanup callback because process.on('beforeExit', cleanup) does not work under Jest const cleanup = async () => { @@ -89,10 +91,14 @@ export class NativeWorldStateService implements MerkleTreeDatabase { } }; - return this.new(rollupAddress, dataDir, log, cleanup); + return this.new(rollupAddress, dataDir, dbMapSizeKb, log, cleanup); } protected async init() { + const status = await this.getStatus(); + if (!status.treesAreSynched) { + throw new Error("World state trees are out of sync"); + } this.initialHeader = await this.buildInitialHeader(); const committed = this.getCommitted(); diff --git a/yarn-project/world-state/src/native/native_world_state_instance.ts b/yarn-project/world-state/src/native/native_world_state_instance.ts index e1dd6c88d6a..6ce0aea0bd0 100644 --- a/yarn-project/world-state/src/native/native_world_state_instance.ts +++ b/yarn-project/world-state/src/native/native_world_state_instance.ts @@ -82,7 +82,7 @@ export class NativeWorldState implements NativeWorldStateInstance { private queue = new SerialQueue(); /** Creates a new native WorldState instance */ - constructor(dataDir: string, private log = createDebugLogger('aztec:world-state:database')) { + constructor(dataDir: string, dbMapSizeKb: number, private log = createDebugLogger('aztec:world-state:database')) { this.instance = new NATIVE_MODULE[NATIVE_CLASS_NAME]( dataDir, { @@ -97,7 +97,7 @@ export class NativeWorldState implements NativeWorldStateInstance { 
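A note on units for the map size threaded through these TypeScript changes: the value is given in KB. The temporary-store default of 10 * 1024 * 1024 KB therefore corresponds to 10 GiB, and the worldStateDbMapSizeKb default of 1024 * 1024 * 1024 KB added later in this series (configurable via WS_DB_MAP_SIZE_KB) corresponds to 1 TiB.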
[MerkleTreeId.PUBLIC_DATA_TREE]: 2 * MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, }, GeneratorIndex.BLOCK_HASH, - 10 * 1024 * 1024, // 10 GB per tree (in KB) + dbMapSizeKb, Math.min(cpus().length, MAX_WORLD_STATE_THREADS), ); this.queue.start(); From 6511d8a213694a4d7ad078f045528350ff3040d4 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Thu, 31 Oct 2024 21:25:37 +0000 Subject: [PATCH 05/31] WIP --- .../barretenberg/crypto/merkle_tree/fixtures.hpp | 2 +- .../merkle_tree/lmdb_store/lmdb_tree_store.cpp | 14 +++++++------- .../merkle_tree/lmdb_store/lmdb_tree_store.hpp | 14 +++++++++++--- .../src/barretenberg/world_state/world_state.cpp | 2 +- yarn-project/foundation/src/config/env_var.ts | 3 ++- .../world-state/src/synchronizer/config.ts | 9 +++++++++ .../world-state/src/synchronizer/factory.ts | 4 ++-- 7 files changed, 33 insertions(+), 15 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp index d20378b1ac4..59a75801eb4 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp @@ -64,7 +64,7 @@ inline ThreadPoolPtr make_thread_pool(uint64_t numThreads) void inline print_store_data(LMDBTreeStore::SharedPtr db, std::ostream& os) { LMDBTreeStore::ReadTransaction::Ptr tx = db->create_read_transaction(); - StatsMap stats; + TreeDBStats stats; db->get_stats(stats, *tx); for (const auto& m : stats) { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index fc319a7a7c6..092557ed668 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -47,7 +47,7 @@ int index_key_cmp(const MDB_val* a, const MDB_val* b) return value_cmp(a, b); } -std::ostream& operator<<(std::ostream& os, const StatsMap& stats) +std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats) { for (const auto& it : stats) { os << it.second << std::endl; @@ -107,22 +107,22 @@ LMDBTreeStore::ReadTransaction::Ptr LMDBTreeStore::create_read_transaction() return std::make_unique(_environment); } -void LMDBTreeStore::get_stats(StatsMap& stats, ReadTransaction& tx) +void LMDBTreeStore::get_stats(TreeDBStats& stats, ReadTransaction& tx) { MDB_stat stat; MDB_envinfo info; call_lmdb_func(mdb_env_info, _environment->underlying(), &info); call_lmdb_func(mdb_stat, tx.underlying(), _blockDatabase->underlying(), &stat); - stats["blocks"] = DBStats("block", info, stat); + stats[BLOCKS_DB] = DBStats(BLOCKS_DB, info, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafHashToPreImageDatabase->underlying(), &stat); - stats["leaf preimages"] = DBStats("leaf preimages", info, stat); + stats[LEAF_PREIMAGES_DB] = DBStats(LEAF_PREIMAGES_DB, info, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafValueToIndexDatabase->underlying(), &stat); - stats["leaf indices"] = DBStats("leaf indices", info, stat); + stats[LEAF_INDICES_DB] = DBStats(LEAF_INDICES_DB, info, stat); call_lmdb_func(mdb_stat, tx.underlying(), _nodeDatabase->underlying(), &stat); - stats["nodes"] = DBStats("nodes", info, stat); + stats[NODES_DB] = DBStats(NODES_DB, info, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafIndexToKeyDatabase->underlying(), &stat); - stats["leaf keys"] = DBStats("leaf keys", info, stat); + 
stats[LEAF_KEYS_DB] = DBStats(LEAF_KEYS_DB, info, stat); } void LMDBTreeStore::write_block_data(uint64_t blockNumber, diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index 18e1749acab..1739c18ed3d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -10,6 +10,7 @@ #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/serialize/msgpack.hpp" +#include "barretenberg/world_state/types.hpp" #include "lmdb.h" #include #include @@ -62,6 +63,11 @@ struct NodePayload { return left == other.left && right == other.right && ref == other.ref; } }; +const std::string BLOCKS_DB = "blocks"; +const std::string NODES_DB = "nodes"; +const std::string LEAF_PREIMAGES_DB = "leaf preimages"; +const std::string LEAF_KEYS_DB = "leaf keys"; +const std::string LEAF_INDICES_DB = "leaf indices"; struct DBStats { std::string name; @@ -103,9 +109,11 @@ struct DBStats { } }; -using StatsMap = std::unordered_map; +using TreeDBStats = std::unordered_map; -std::ostream& operator<<(std::ostream& os, const StatsMap& stats); +using WorldStateDBStats = std::unordered_map; + +std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats); /** * Creates an abstraction against a collection of LMDB databases within a single environment used to store merkle tree @@ -128,7 +136,7 @@ class LMDBTreeStore { WriteTransaction::Ptr create_write_transaction() const; ReadTransaction::Ptr create_read_transaction(); - void get_stats(StatsMap& stats, ReadTransaction& tx); + void get_stats(TreeDBStats& stats, ReadTransaction& tx); void write_block_data(uint64_t blockNumber, const BlockPayload& blockData, WriteTransaction& tx); diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index b1270dd12bb..02f419c0545 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -754,7 +754,7 @@ bool WorldState::is_same_state_reference(const WorldStateRevision& revision, con void WorldState::validate_trees_are_equally_synched() { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; - std::array responses; + std::array responses; get_all_tree_info(revision, responses); if (!determine_if_synched(responses)) { diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 06a7745487b..fdf32902ac2 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -143,4 +143,5 @@ export type EnvVar = | 'VERIFIER_VIEM_POLLING_INTERVAL_MS' | 'L1_READER_VIEM_POLLING_INTERVAL_MS' | 'PROVER_VIEM_POLLING_INTERVAL_MS' - | 'SEQ_VIEM_POLLING_INTERVAL_MS'; + | 'SEQ_VIEM_POLLING_INTERVAL_MS' + | 'WS_DB_MAP_SIZE_KB'; diff --git a/yarn-project/world-state/src/synchronizer/config.ts b/yarn-project/world-state/src/synchronizer/config.ts index 92f126bbc9b..1308cd801a3 100644 --- a/yarn-project/world-state/src/synchronizer/config.ts +++ b/yarn-project/world-state/src/synchronizer/config.ts @@ -10,6 +10,9 @@ export interface WorldStateConfig { /** Size of the batch for each get-blocks request from the synchronizer to 
the archiver. */ worldStateBlockRequestBatchSize?: number; + + /** The maximum size of the combined world state db in KB*/ + worldStateDbMapSizeKb: number; } export const worldStateConfigMappings: ConfigMappingsType = { @@ -29,6 +32,12 @@ export const worldStateConfigMappings: ConfigMappingsType = { parseEnv: (val: string | undefined) => (val ? +val : undefined), description: 'Size of the batch for each get-blocks request from the synchronizer to the archiver.', }, + worldStateDbMapSizeKb: { + env: 'WS_DB_MAP_SIZE_KB', + parseEnv: (val: string | undefined) => (val ? +val : undefined), + defaultValue: 1024 * 1024 * 1024, // 1TB + description: 'The maximum possible size of the world state DB', + }, }; /** diff --git a/yarn-project/world-state/src/synchronizer/factory.ts b/yarn-project/world-state/src/synchronizer/factory.ts index 0e15ac07fd5..231822c473d 100644 --- a/yarn-project/world-state/src/synchronizer/factory.ts +++ b/yarn-project/world-state/src/synchronizer/factory.ts @@ -18,14 +18,14 @@ export async function createWorldStateSynchronizer( return new ServerWorldStateSynchronizer(merkleTrees, l2BlockSource, config); } -export async function createWorldState(config: DataStoreConfig, client: TelemetryClient = new NoopTelemetryClient()) { +export async function createWorldState(config: WorldStateConfig & DataStoreConfig, client: TelemetryClient = new NoopTelemetryClient()) { const merkleTrees = ['true', '1'].includes(process.env.USE_LEGACY_WORLD_STATE ?? '') ? await MerkleTrees.new( await createStore('world-state', config, createDebugLogger('aztec:world-state:lmdb')), client, ) : config.dataDirectory - ? await NativeWorldStateService.new(config.l1Contracts.rollupAddress, config.dataDirectory) + ? await NativeWorldStateService.new(config.l1Contracts.rollupAddress, config.dataDirectory, config.worldStateDbMapSizeKb) : await NativeWorldStateService.tmp( config.l1Contracts.rollupAddress, !['true', '1'].includes(process.env.DEBUG_WORLD_STATE!), From e536a597c2ca8d790388a635b87d76aff53eff69 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Mon, 11 Nov 2024 16:44:07 +0000 Subject: [PATCH 06/31] WIP --- .../content_addressed_append_only_tree.hpp | 30 +++++++++------ .../cached_content_addressed_tree_store.hpp | 38 ++++++++++++++++--- .../crypto/merkle_tree/response.hpp | 16 ++++++++ 3 files changed, 66 insertions(+), 18 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index 47ca4f80eff..b62d8fe2028 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -44,10 +44,10 @@ template class ContentAddressedAppendOn using HashPathCallback = std::function&)>; using FindLeafCallback = std::function&)>; using GetLeafCallback = std::function&)>; - using CommitCallback = std::function; + using CommitCallback = std::function)>; using RollbackCallback = std::function; - using RemoveHistoricBlockCallback = std::function; - using UnwindBlockCallback = std::function; + using RemoveHistoricBlockCallback = std::function)>; + using UnwindBlockCallback = std::function)>; using FinaliseBlockCallback = std::function; // Only construct from provided store and thread pool, no copies or moves @@ -739,7 +739,13 @@ void 
ContentAddressedAppendOnlyTree::add_values_internal( template void ContentAddressedAppendOnlyTree::commit(const CommitCallback& on_completion) { - auto job = [=, this]() { execute_and_report([=, this]() { store_->commit(); }, on_completion); }; + auto job = [=, this]() { + execute_and_report( + [=, this](TypedResponse& response) { + store_->commit(response.inner.meta, response.inner.stats); + }, + on_completion); + }; workers_->enqueue(job); } @@ -755,12 +761,12 @@ void ContentAddressedAppendOnlyTree::remove_historic_block const index_t& blockNumber, const RemoveHistoricBlockCallback& on_completion) { auto job = [=, this]() { - execute_and_report( - [=, this]() { + execute_and_report( + [=, this](TypedResponse& response) { if (blockNumber == 0) { throw std::runtime_error("Unable to remove historic block 0"); } - store_->remove_historical_block(blockNumber); + store_->remove_historical_block(blockNumber, response.inner.meta, response.inner.stats); }, on_completion); }; @@ -768,16 +774,16 @@ void ContentAddressedAppendOnlyTree::remove_historic_block } template -void ContentAddressedAppendOnlyTree::unwind_block( - const index_t& blockNumber, const RemoveHistoricBlockCallback& on_completion) +void ContentAddressedAppendOnlyTree::unwind_block(const index_t& blockNumber, + const UnwindBlockCallback& on_completion) { auto job = [=, this]() { - execute_and_report( - [=, this]() { + execute_and_report( + [=, this](TypedResponse& response) { if (blockNumber == 0) { throw std::runtime_error("Unable to unwind block 0"); } - store_->unwind_block(blockNumber); + store_->unwind_block(blockNumber, response.inner.meta, response.inner.stats); }, on_completion); }; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 9663c81d013..b4b62aed52f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -167,7 +167,7 @@ template class ContentAddressedCachedTreeStore { /** * @brief Commits the uncommitted data to the underlying store */ - void commit(bool asBlock = true); + void commit(TreeMeta& finalMeta, TreeDBStats& dbStats, bool asBlock = true); /** * @brief Rolls back the uncommitted state @@ -196,9 +196,9 @@ template class ContentAddressedCachedTreeStore { fr get_current_root(ReadTransaction& tx, bool includeUncommitted) const; - void remove_historical_block(const index_t& blockNumber); + void remove_historical_block(const index_t& blockNumber, TreeMeta& finalMeta, TreeDBStats& dbStats); - void unwind_block(const index_t& blockNumber); + void unwind_block(const index_t& blockNumber, TreeMeta& finalMeta, TreeDBStats& dbStats); std::optional get_fork_block() const; @@ -261,6 +261,8 @@ template class ContentAddressedCachedTreeStore { void remove_leaf_indices_after_or_equal_index(const index_t& maxIndex, WriteTransaction& tx); + void extract_db_stats(TreeDBStats& stats); + index_t constrain_tree_size(const RequestContext& requestContext, ReadTransaction& tx) const; WriteTransactionPtr create_write_transaction() const { return dataStore_->create_write_transaction(); } @@ -622,7 +624,8 @@ fr ContentAddressedCachedTreeStore::get_current_root(ReadTransact // It is assumed that when these operations are being executed that no other state accessing operations // are in 
progress, hence no data synchronisation is used. -template void ContentAddressedCachedTreeStore::commit(bool asBlock) +template +void ContentAddressedCachedTreeStore::commit(TreeMeta& finalMeta, TreeDBStats& dbStats, bool asBlock) { bool dataPresent = false; TreeMeta uncommittedMeta; @@ -683,9 +686,22 @@ template void ContentAddressedCachedTreeStore +void ContentAddressedCachedTreeStore::extract_db_stats(TreeDBStats& stats) +{ + try { + ReadTransactionPtr tx = create_read_transaction(); + dataStore_->get_stats(stats, *tx); + } catch (std::exception&) { + } } template @@ -862,7 +878,9 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con } template -void ContentAddressedCachedTreeStore::unwind_block(const index_t& blockNumber) +void ContentAddressedCachedTreeStore::unwind_block(const index_t& blockNumber, + TreeMeta& finalMeta, + TreeDBStats& dbStats) { TreeMeta uncommittedMeta; TreeMeta committedMeta; @@ -946,10 +964,15 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& // now update the uncommitted meta put_meta(uncommittedMeta); + finalMeta = uncommittedMeta; + + extract_db_stats(dbStats); } template -void ContentAddressedCachedTreeStore::remove_historical_block(const index_t& blockNumber) +void ContentAddressedCachedTreeStore::remove_historical_block(const index_t& blockNumber, + TreeMeta& finalMeta, + TreeDBStats& dbStats) { TreeMeta committedMeta; TreeMeta uncommittedMeta; @@ -1008,6 +1031,9 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con // commit was successful, update the uncommitted meta uncommittedMeta.oldestHistoricBlock = committedMeta.oldestHistoricBlock; put_meta(uncommittedMeta); + finalMeta = uncommittedMeta; + + extract_db_stats(dbStats); } template diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp index 5ecf3c8c75d..7a1ee06e0c0 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp @@ -2,6 +2,7 @@ #include "barretenberg/crypto/merkle_tree/hash_path.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" @@ -65,6 +66,21 @@ struct GetLowIndexedLeafResponse { } }; +struct CommitResponse { + TreeMeta meta; + TreeDBStats stats; +}; + +struct UnwindResponse { + TreeMeta meta; + TreeDBStats stats; +}; + +struct RemoveHistoricResponse { + TreeMeta meta; + TreeDBStats stats; +}; + template struct TypedResponse { ResponseType inner; bool success{ true }; From 5019a35cf76497ac46b9f6e48ee1915c1bf3510a Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Mon, 11 Nov 2024 17:02:32 +0000 Subject: [PATCH 07/31] WIP --- .../content_addressed_append_only_tree.hpp | 6 +++--- .../content_addressed_append_only_tree.test.cpp | 10 +++++----- .../src/barretenberg/world_state/world_state.cpp | 13 +++++++------ 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index b62d8fe2028..4430fde606c 100644 --- 
a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -44,10 +44,10 @@ template class ContentAddressedAppendOn using HashPathCallback = std::function&)>; using FindLeafCallback = std::function&)>; using GetLeafCallback = std::function&)>; - using CommitCallback = std::function)>; + using CommitCallback = std::function&)>; using RollbackCallback = std::function; - using RemoveHistoricBlockCallback = std::function)>; - using UnwindBlockCallback = std::function)>; + using RemoveHistoricBlockCallback = std::function&)>; + using UnwindBlockCallback = std::function&)>; using FinaliseBlockCallback = std::function; // Only construct from provided store and thread pool, no copies or moves diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index f0bf52f91df..cb718ff3253 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -141,7 +141,7 @@ void check_historic_sibling_path(TreeType& tree, void commit_tree(TreeType& tree, bool expected_success = true) { Signal signal; - auto completion = [&](const Response& response) -> void { + TreeType::CommitCallback completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, expected_success); signal.signal_level(); }; @@ -163,7 +163,7 @@ void rollback_tree(TreeType& tree) void remove_historic_block(TreeType& tree, const index_t& blockNumber, bool expected_success = true) { Signal signal; - auto completion = [&](const Response& response) -> void { + auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, expected_success); signal.signal_level(); }; @@ -174,7 +174,7 @@ void remove_historic_block(TreeType& tree, const index_t& blockNumber, bool expe void unwind_block(TreeType& tree, const index_t& blockNumber, bool expected_success = true) { Signal signal; - auto completion = [&](const Response& response) -> void { + auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, expected_success); signal.signal_level(); }; @@ -504,7 +504,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, errors_are_caught_and_handle // trying to commit that should fail Signal signal; - auto completion = [&](const Response& response) -> void { + auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, false); signal.signal_level(); }; @@ -1044,7 +1044,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_add_single_whilst_readin Signal signal(1 + num_reads); auto add_completion = [&](const TypedResponse&) { - auto commit_completion = [&](const Response&) { signal.signal_decrement(); }; + auto commit_completion = [&](const TypedResponse&) { signal.signal_decrement(); }; tree.commit(commit_completion); }; tree.add_value(VALUES[0], add_completion); diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index 02f419c0545..8cb8a5225be 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ 
b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -418,7 +418,7 @@ std::pair WorldState::commit() for (auto& [id, tree] : fork->_trees) { std::visit( [&signal, &success, &message](auto&& wrapper) { - wrapper.tree->commit([&](const Response& response) { + wrapper.tree->commit([&](const TypedResponse& response) { bool expected = true; if (!response.success && success.compare_exchange_strong(expected, false)) { message = response.message; @@ -656,7 +656,7 @@ bool WorldState::unwind_block(const index_t& blockNumber) for (auto& [id, tree] : fork->_trees) { std::visit( [&signal, &success, blockNumber](auto&& wrapper) { - wrapper.tree->unwind_block(blockNumber, [&signal, &success](const Response& resp) { + wrapper.tree->unwind_block(blockNumber, [&signal, &success](const TypedResponse& resp) { success = success && resp.success; signal.signal_decrement(); }); @@ -675,10 +675,11 @@ bool WorldState::remove_historical_block(const index_t& blockNumber) for (auto& [id, tree] : fork->_trees) { std::visit( [&signal, &success, blockNumber](auto&& wrapper) { - wrapper.tree->remove_historic_block(blockNumber, [&signal, &success](const Response& resp) { - success = success && resp.success; - signal.signal_decrement(); - }); + wrapper.tree->remove_historic_block( + blockNumber, [&signal, &success](const TypedResponse& resp) { + success = success && resp.success; + signal.signal_decrement(); + }); }, tree); } From eb72f4f79209d34c1b940d308e8e08ded9f3f34e Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Tue, 12 Nov 2024 20:50:29 +0000 Subject: [PATCH 08/31] WIP --- .../content_addressed_append_only_tree.hpp | 11 +- .../crypto/merkle_tree/fixtures.hpp | 4 +- .../content_addressed_indexed_tree.hpp | 3 +- .../content_addressed_indexed_tree.test.cpp | 8 +- .../lmdb_store/lmdb_tree_store.cpp | 19 +- .../lmdb_store/lmdb_tree_store.hpp | 52 ---- .../crypto/merkle_tree/response.hpp | 2 +- .../barretenberg/crypto/merkle_tree/types.hpp | 101 +++++++ .../src/barretenberg/world_state/types.hpp | 113 +++++++- .../barretenberg/world_state/world_state.cpp | 273 +++++++++++++----- .../barretenberg/world_state/world_state.hpp | 114 +++++++- .../world_state/world_state.test.cpp | 48 ++- .../barretenberg/world_state_napi/addon.cpp | 29 +- .../barretenberg/world_state_napi/message.hpp | 2 +- .../world-state/src/native/message.ts | 60 +++- .../src/native/native_world_state.ts | 17 +- .../server_world_state_synchronizer.ts | 4 +- .../src/world-state-db/merkle_tree_db.ts | 12 +- .../src/world-state-db/merkle_trees.ts | 16 +- 19 files changed, 665 insertions(+), 223 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index 4430fde606c..b15f4666160 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -44,10 +44,10 @@ template class ContentAddressedAppendOn using HashPathCallback = std::function&)>; using FindLeafCallback = std::function&)>; using GetLeafCallback = std::function&)>; - using CommitCallback = std::function&)>; + using CommitCallback = std::function&)>; using RollbackCallback = std::function; - using RemoveHistoricBlockCallback = std::function&)>; - using UnwindBlockCallback = std::function&)>; + using RemoveHistoricBlockCallback = 
std::function&)>; + using UnwindBlockCallback = std::function&)>; using FinaliseBlockCallback = std::function; // Only construct from provided store and thread pool, no copies or moves @@ -280,7 +280,8 @@ ContentAddressedAppendOnlyTree::ContentAddressedAppendOnly meta.initialRoot = meta.root = current; meta.initialSize = meta.size = 0; store_->put_meta(meta); - store_->commit(false); + TreeDBStats stats; + store_->commit(meta, stats, false); // if we were given initial values to insert then we do that now if (!initial_values.empty()) { @@ -305,7 +306,7 @@ ContentAddressedAppendOnlyTree::ContentAddressedAppendOnly meta.initialSize = meta.size = result.inner.size; store_->put_meta(meta); - store_->commit(false); + store_->commit(meta, stats, false); } } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp index 59a75801eb4..d7774730aac 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp @@ -67,8 +67,6 @@ void inline print_store_data(LMDBTreeStore::SharedPtr db, std::ostream& os) TreeDBStats stats; db->get_stats(stats, *tx); - for (const auto& m : stats) { - os << m.first << m.second << std::endl; - } + os << stats; } } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp index 32fcaf93ce2..055ac0a8c5f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp @@ -318,7 +318,8 @@ ContentAddressedIndexedTree::ContentAddressedIndexedTree(s meta.initialRoot = result.inner.root; meta.initialSize = result.inner.size; store_->put_meta(meta); - store_->commit(false); + TreeDBStats stats; + store_->commit(meta, stats, false); } template diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp index fc195a7d828..52a2296a086 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.test.cpp @@ -336,7 +336,7 @@ template void check_unfinalised_block_height(TypeOfTree& t template void commit_tree(TypeOfTree& tree, bool expectedSuccess = true) { Signal signal; - auto completion = [&](const Response& response) -> void { + auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, expectedSuccess); signal.signal_level(); }; @@ -387,7 +387,7 @@ template void remove_historic_block(TypeOfTree& tree, const index_t& blockNumber, bool expected_success = true) { Signal signal; - auto completion = [&](const Response& response) -> void { + auto completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, expected_success); signal.signal_level(); }; @@ -411,7 +411,7 @@ template void unwind_block(TypeOfTree& tree, const index_t& blockNumber, bool expected_success = true) { Signal signal; - auto completion = [&](const Response& response) -> void { + auto 
completion = [&](const TypedResponse& response) -> void { EXPECT_EQ(response.success, expected_success); signal.signal_level(); }; @@ -1211,7 +1211,7 @@ TEST_F(PersistedContentAddressedIndexedTreeTest, can_add_single_whilst_reading) Signal signal(1 + num_reads); auto add_completion = [&](const TypedResponse>&) { - auto commit_completion = [&](const Response&) { signal.signal_decrement(); }; + auto commit_completion = [&](const TypedResponse&) { signal.signal_decrement(); }; tree.commit(commit_completion); }; tree.add_or_update_value(VALUES[0], add_completion); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index 092557ed668..742f36d0395 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -47,14 +47,6 @@ int index_key_cmp(const MDB_val* a, const MDB_val* b) return value_cmp(a, b); } -std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats) -{ - for (const auto& it : stats) { - os << it.second << std::endl; - } - return os; -} - LMDBTreeStore::LMDBTreeStore(std::string directory, std::string name, uint64_t mapSizeKb, uint64_t maxNumReaders) : _name(std::move(name)) , _directory(std::move(directory)) @@ -113,16 +105,17 @@ void LMDBTreeStore::get_stats(TreeDBStats& stats, ReadTransaction& tx) MDB_stat stat; MDB_envinfo info; call_lmdb_func(mdb_env_info, _environment->underlying(), &info); + stats.mapSize = info.me_mapsize; call_lmdb_func(mdb_stat, tx.underlying(), _blockDatabase->underlying(), &stat); - stats[BLOCKS_DB] = DBStats(BLOCKS_DB, info, stat); + stats.blocksDBStats = DBStats(BLOCKS_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafHashToPreImageDatabase->underlying(), &stat); - stats[LEAF_PREIMAGES_DB] = DBStats(LEAF_PREIMAGES_DB, info, stat); + stats.leafPreimagesDBStats = DBStats(LEAF_PREIMAGES_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafValueToIndexDatabase->underlying(), &stat); - stats[LEAF_INDICES_DB] = DBStats(LEAF_INDICES_DB, info, stat); + stats.leafIndicesDBStats = DBStats(LEAF_INDICES_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _nodeDatabase->underlying(), &stat); - stats[NODES_DB] = DBStats(NODES_DB, info, stat); + stats.nodesDBStats = DBStats(NODES_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafIndexToKeyDatabase->underlying(), &stat); - stats[LEAF_KEYS_DB] = DBStats(LEAF_KEYS_DB, info, stat); + stats.leafKeysDBStats = DBStats(LEAF_KEYS_DB, stat); } void LMDBTreeStore::write_block_data(uint64_t blockNumber, diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index 1739c18ed3d..760a948dd6f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -63,58 +63,6 @@ struct NodePayload { return left == other.left && right == other.right && ref == other.ref; } }; -const std::string BLOCKS_DB = "blocks"; -const std::string NODES_DB = "nodes"; -const std::string LEAF_PREIMAGES_DB = "leaf preimages"; -const std::string LEAF_KEYS_DB = "leaf keys"; -const std::string LEAF_INDICES_DB = "leaf indices"; - -struct DBStats { - std::string name; - uint64_t mapSize; - uint64_t numDataItems; - uint64_t 
totalUsedSize; - - DBStats() = default; - DBStats(const DBStats& other) = default; - DBStats(DBStats&& other) noexcept - : name(std::move(other.name)) - , mapSize(other.mapSize) - , numDataItems(other.numDataItems) - , totalUsedSize(other.totalUsedSize) - {} - ~DBStats() = default; - DBStats(std::string name, MDB_envinfo& env, MDB_stat& stat) - : name(std::move(name)) - , mapSize(env.me_mapsize) - , numDataItems(stat.ms_entries) - , totalUsedSize(stat.ms_psize * (stat.ms_branch_pages + stat.ms_leaf_pages + stat.ms_overflow_pages)) - {} - - MSGPACK_FIELDS(name, mapSize, numDataItems, totalUsedSize) - - bool operator==(const DBStats& other) const - { - return name == other.name && mapSize == other.mapSize && numDataItems == other.numDataItems && - totalUsedSize == other.totalUsedSize; - } - - DBStats& operator=(const DBStats& other) = default; - - friend std::ostream& operator<<(std::ostream& os, const DBStats& stats) - { - os << "DB " << stats.name << ", map size: " << stats.mapSize << ", num items: " << stats.numDataItems - << ", total used size: " << stats.totalUsedSize; - return os; - } -}; - -using TreeDBStats = std::unordered_map; - -using WorldStateDBStats = std::unordered_map; - -std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats); - /** * Creates an abstraction against a collection of LMDB databases within a single environment used to store merkle tree * data diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp index 7a1ee06e0c0..6d7765e520f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp @@ -94,7 +94,7 @@ struct Response { template void execute_and_report(const std::function&)>& f, - const std::function&)>& on_completion) + const std::function&)>& on_completion) { TypedResponse response; try { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp index 4637a111c43..ad27ae15142 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp @@ -1,6 +1,7 @@ #pragma once #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "lmdb.h" #include #include namespace bb::crypto::merkle_tree { @@ -11,4 +12,104 @@ struct RequestContext { std::optional blockNumber; bb::fr root; }; + +const std::string BLOCKS_DB = "blocks"; +const std::string NODES_DB = "nodes"; +const std::string LEAF_PREIMAGES_DB = "leaf preimages"; +const std::string LEAF_KEYS_DB = "leaf keys"; +const std::string LEAF_INDICES_DB = "leaf indices"; + +struct DBStats { + std::string name; + uint64_t numDataItems; + uint64_t totalUsedSize; + + DBStats() = default; + DBStats(const DBStats& other) = default; + DBStats(DBStats&& other) noexcept { *this = std::move(other); } + ~DBStats() = default; + DBStats(std::string name, MDB_stat& stat) + : name(std::move(name)) + , numDataItems(stat.ms_entries) + , totalUsedSize(stat.ms_psize * (stat.ms_branch_pages + stat.ms_leaf_pages + stat.ms_overflow_pages)) + {} + + MSGPACK_FIELDS(name, numDataItems, totalUsedSize) + + bool operator==(const DBStats& other) const + { + return name == other.name && numDataItems == other.numDataItems && totalUsedSize == other.totalUsedSize; + } + + DBStats& operator=(const DBStats& other) = default; + + DBStats& operator=(DBStats&& other) noexcept + { + if (this != &other) { + 
name = std::move(other.name); + numDataItems = other.numDataItems; + totalUsedSize = other.totalUsedSize; + } + return *this; + } + + friend std::ostream& operator<<(std::ostream& os, const DBStats& stats) + { + os << "DB " << stats.name << ", num items: " << stats.numDataItems + << ", total used size: " << stats.totalUsedSize; + return os; + } +}; + +struct TreeDBStats { + uint64_t mapSize; + DBStats blocksDBStats; + DBStats nodesDBStats; + DBStats leafPreimagesDBStats; + DBStats leafKeysDBStats; + DBStats leafIndicesDBStats; + + TreeDBStats() = default; + TreeDBStats(uint64_t mapSize) + : mapSize(mapSize) + {} + TreeDBStats(const TreeDBStats& other) = default; + TreeDBStats(TreeDBStats&& other) noexcept { *this = std::move(other); } + + ~TreeDBStats() = default; + + MSGPACK_FIELDS(mapSize, blocksDBStats, nodesDBStats, leafPreimagesDBStats, leafKeysDBStats, leafIndicesDBStats) + + bool operator==(const TreeDBStats& other) const + { + return mapSize == other.mapSize && blocksDBStats == other.blocksDBStats && nodesDBStats == other.nodesDBStats && + leafPreimagesDBStats == other.leafPreimagesDBStats && leafKeysDBStats == other.leafPreimagesDBStats && + leafIndicesDBStats == other.leafIndicesDBStats; + } + + TreeDBStats& operator=(TreeDBStats&& other) noexcept + { + if (this != &other) { + mapSize = other.mapSize; + blocksDBStats = std::move(other.blocksDBStats); + nodesDBStats = std::move(other.nodesDBStats); + leafPreimagesDBStats = std::move(other.leafPreimagesDBStats); + leafKeysDBStats = std::move(other.leafKeysDBStats); + leafIndicesDBStats = std::move(other.leafIndicesDBStats); + } + return *this; + } + + TreeDBStats& operator=(const TreeDBStats& other) = default; + + friend std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats) + { + os << "Map Size: " << stats.mapSize << " Blocks DB " << stats.blocksDBStats << ", Nodes DB " + << stats.nodesDBStats << ", Leaf Pre-images DB " << stats.leafPreimagesDBStats << ", Leaf Keys DB " + << stats.leafKeysDBStats << ", Leaf Indices DB " << stats.leafIndicesDBStats; + return os; + } +}; + +std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats); } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/world_state/types.hpp b/barretenberg/cpp/src/barretenberg/world_state/types.hpp index 2284c47d7ea..faee6cac09f 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/types.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/types.hpp @@ -1,10 +1,12 @@ #pragma once #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/serialize/msgpack.hpp" #include +#include #include namespace bb::world_state { @@ -38,21 +40,46 @@ struct WorldStateRevision { static WorldStateRevision uncommitted() { return WorldStateRevision{ .includeUncommitted = true }; } }; -struct WorldStateStatus { +struct WorldStateStatusSummary { index_t unfinalisedBlockNumber; index_t finalisedBlockNumber; index_t oldestHistoricalBlock; bool treesAreSynched; MSGPACK_FIELDS(unfinalisedBlockNumber, finalisedBlockNumber, oldestHistoricalBlock, treesAreSynched); - bool operator==(const WorldStateStatus& other) const + WorldStateStatusSummary() = default; + WorldStateStatusSummary(const index_t& unfinalisedBlockNumber, + const index_t& finalisedBlockNumber, + const index_t& 
oldestHistoricBlock, + bool treesAreSynched) + : unfinalisedBlockNumber(unfinalisedBlockNumber) + , finalisedBlockNumber(finalisedBlockNumber) + , oldestHistoricalBlock(oldestHistoricBlock) + , treesAreSynched(treesAreSynched) + {} + WorldStateStatusSummary(const WorldStateStatusSummary& other) = default; + WorldStateStatusSummary(WorldStateStatusSummary&& other) noexcept { *this = std::move(other); } + + WorldStateStatusSummary& operator=(WorldStateStatusSummary&& other) noexcept + { + if (this != &other) { + *this = other; + } + return *this; + } + + ~WorldStateStatusSummary() = default; + + WorldStateStatusSummary& operator=(const WorldStateStatusSummary& other) = default; + + bool operator==(const WorldStateStatusSummary& other) const { return unfinalisedBlockNumber == other.unfinalisedBlockNumber && finalisedBlockNumber == other.finalisedBlockNumber && oldestHistoricalBlock == other.oldestHistoricalBlock && treesAreSynched == other.treesAreSynched; } - friend std::ostream& operator<<(std::ostream& os, const WorldStateStatus& status) + friend std::ostream& operator<<(std::ostream& os, const WorldStateStatusSummary& status) { os << "unfinalisedBlockNumber: " << status.unfinalisedBlockNumber << ", finalisedBlockNumber: " << status.finalisedBlockNumber @@ -61,4 +88,84 @@ struct WorldStateStatus { return os; } }; + +struct WorldStateDBStats { + TreeDBStats noteHashTreeStats; + TreeDBStats messageTreeStats; + TreeDBStats archiveTreeStats; + TreeDBStats publicDataTreeStats; + TreeDBStats nullifierTreeStats; + + MSGPACK_FIELDS(noteHashTreeStats, messageTreeStats, archiveTreeStats, publicDataTreeStats, nullifierTreeStats); + + WorldStateDBStats() = default; + WorldStateDBStats(const WorldStateDBStats& other) = default; + WorldStateDBStats(WorldStateDBStats&& other) noexcept { *this = std::move(other); } + + WorldStateDBStats& operator=(WorldStateDBStats&& other) noexcept + { + if (this != &other) { + noteHashTreeStats = std::move(other.noteHashTreeStats); + messageTreeStats = std::move(other.messageTreeStats); + archiveTreeStats = std::move(other.archiveTreeStats); + publicDataTreeStats = std::move(other.publicDataTreeStats); + nullifierTreeStats = std::move(other.nullifierTreeStats); + } + return *this; + } + + ~WorldStateDBStats() = default; + + bool operator==(const WorldStateDBStats& other) const + { + return noteHashTreeStats == other.noteHashTreeStats && messageTreeStats == other.messageTreeStats && + archiveTreeStats == other.archiveTreeStats && publicDataTreeStats == other.publicDataTreeStats && + nullifierTreeStats == other.nullifierTreeStats; + } + + WorldStateDBStats& operator=(const WorldStateDBStats& other) = default; + + friend std::ostream& operator<<(std::ostream& os, const WorldStateDBStats& stats) + { + os << "Note hash tree stats " << stats.noteHashTreeStats << ", Message tree stats " << stats.messageTreeStats + << ", Archive tree stats " << stats.archiveTreeStats << ", Public Data tree stats " + << stats.publicDataTreeStats << ", Nullifier tree stats " << stats.nullifierTreeStats; + return os; + } +}; + +struct WorldStateStatusFull { + WorldStateStatusSummary summary; + WorldStateDBStats dbStats; + + MSGPACK_FIELDS(summary, dbStats); + + WorldStateStatusFull() = default; + WorldStateStatusFull(const WorldStateStatusFull& other) = default; + WorldStateStatusFull(WorldStateStatusFull&& other) noexcept { *this = std::move(other); } + + WorldStateStatusFull& operator=(WorldStateStatusFull&& other) noexcept + { + if (this != &other) { + summary = std::move(other.summary); + 
dbStats = std::move(other.dbStats); + } + return *this; + } + + ~WorldStateStatusFull() = default; + + WorldStateStatusFull& operator=(const WorldStateStatusFull& other) = default; + + bool operator==(const WorldStateStatusFull& other) const + { + return summary == other.summary && dbStats == other.dbStats; + } + + friend std::ostream& operator<<(std::ostream& os, const WorldStateStatusFull& status) + { + os << "Summary: " << status.summary << ", DB Stats " << status.dbStats; + return os; + } +}; } // namespace bb::world_state diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index 8cb8a5225be..39c9e7478a4 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -5,6 +5,7 @@ #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" +#include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" #include "barretenberg/crypto/merkle_tree/signal.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" @@ -144,9 +145,9 @@ uint64_t WorldState::create_fork(const std::optional& blockNumber) index_t blockNumberForFork = 0; if (!blockNumber.has_value()) { // we are forking at latest - WorldStateStatus currentStatus; - get_status(currentStatus); - blockNumberForFork = currentStatus.unfinalisedBlockNumber; + WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; + TreeMetaResponse archiveMeta = get_tree_info(revision, MerkleTreeId::ARCHIVE); + blockNumberForFork = archiveMeta.meta.unfinalisedBlockHeight; } else { blockNumberForFork = blockNumber.value(); } @@ -259,7 +260,7 @@ TreeMetaResponse WorldState::get_tree_info(const WorldStateRevision& revision, M fork->_trees.at(tree_id)); } -void WorldState::get_all_tree_info(const WorldStateRevision& revision, std::array& responses) const +void WorldState::get_all_tree_info(const WorldStateRevision& revision, std::array& responses) const { Fork::SharedPtr fork = retrieve_fork(revision.forkId); @@ -276,7 +277,7 @@ void WorldState::get_all_tree_info(const WorldStateRevision& revision, std::arra auto callback = [&signal, &responses, &mutex, id](const TypedResponse& meta) { { std::lock_guard lock(mutex); - responses[id] = meta.inner; + responses[id] = meta.inner.meta; } signal.signal_decrement(); }; @@ -408,25 +409,62 @@ void WorldState::update_archive(const StateReference& block_state_ref, } } -std::pair WorldState::commit() +std::pair WorldState::commit(WorldStateStatusFull& status, + std::array& metaResponses) { // NOTE: the calling code is expected to ensure no other reads or writes happen during commit Fork::SharedPtr fork = retrieve_fork(CANONICAL_FORK_ID); std::atomic_bool success = true; std::string message; Signal signal(static_cast(fork->_trees.size())); - for (auto& [id, tree] : fork->_trees) { - std::visit( - [&signal, &success, &message](auto&& wrapper) { - wrapper.tree->commit([&](const TypedResponse& response) { - bool expected = true; - if (!response.success && success.compare_exchange_strong(expected, false)) { - message = response.message; - } - signal.signal_decrement(); - }); - }, - tree); + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NULLIFIER_TREE)); + 
commit_tree(status.dbStats.nullifierTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::NULLIFIER_TREE]); + } + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::PUBLIC_DATA_TREE)); + commit_tree(status.dbStats.publicDataTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::PUBLIC_DATA_TREE]); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NOTE_HASH_TREE)); + commit_tree(status.dbStats.noteHashTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::NOTE_HASH_TREE]); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::L1_TO_L2_MESSAGE_TREE)); + commit_tree(status.dbStats.messageTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::L1_TO_L2_MESSAGE_TREE]); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::ARCHIVE)); + commit_tree(status.dbStats.archiverTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::ARCHIVE]); } signal.wait_for_level(0); @@ -448,7 +486,7 @@ void WorldState::rollback() signal.wait_for_level(); } -WorldStateStatus WorldState::sync_block( +WorldStateStatusFull WorldState::sync_block( const StateReference& block_state_ref, const bb::fr& block_header_hash, const std::vector& notes, @@ -457,14 +495,15 @@ WorldStateStatus WorldState::sync_block( const std::vector>& public_writes) { validate_trees_are_equally_synched(); - WorldStateStatus status; + WorldStateStatusFull status; if (is_same_state_reference(WorldStateRevision::uncommitted(), block_state_ref) && is_archive_tip(WorldStateRevision::uncommitted(), block_header_hash)) { - std::pair result = commit(); + std::array metaResponses; + std::pair result = commit(status, metaResponses); if (!result.first) { throw std::runtime_error(result.second); } - get_status(status); + get_status_summary_from_meta_responses(status.summary, metaResponses); return status; } rollback(); @@ -541,11 +580,12 @@ WorldStateStatus WorldState::sync_block( throw std::runtime_error("Can't synch block: block state does not match world state"); } - std::pair result = commit(); + std::array metaResponses; + std::pair result = commit(status, metaResponses); if (!result.first) { throw std::runtime_error(result.second); } - get_status(status); + get_status_summary_from_meta_responses(status.summary, metaResponses); return status; } @@ -583,7 +623,7 @@ GetLowIndexedLeafResponse WorldState::find_low_leaf_index(const WorldStateRevisi return low_leaf_info; } -WorldStateStatus WorldState::set_finalised_blocks(const index_t& toBlockNumber) +WorldStateStatusSummary WorldState::set_finalised_blocks(const index_t& toBlockNumber) { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; TreeMetaResponse archive_state = get_tree_info(revision, MerkleTreeId::ARCHIVE); @@ -593,40 +633,42 @@ WorldStateStatus WorldState::set_finalised_blocks(const index_t& toBlockNumber) if (!set_finalised_block(toBlockNumber)) { throw std::runtime_error("Failed to set finalised block"); } - WorldStateStatus status; - get_status(status); + WorldStateStatusSummary status; + get_status_summary(status); return status; } -WorldStateStatus WorldState::unwind_blocks(const index_t& toBlockNumber) +WorldStateStatusFull WorldState::unwind_blocks(const index_t& toBlockNumber) { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; TreeMetaResponse 
archive_state = get_tree_info(revision, MerkleTreeId::ARCHIVE); if (toBlockNumber >= archive_state.meta.unfinalisedBlockHeight) { throw std::runtime_error("Unable to unwind block, block not found"); } + WorldStateStatusFull status; + std::array metaResponses; for (index_t blockNumber = archive_state.meta.unfinalisedBlockHeight; blockNumber > toBlockNumber; blockNumber--) { - if (!unwind_block(blockNumber)) { + if (!unwind_block(blockNumber, status, metaResponses)) { throw std::runtime_error("Failed to unwind block"); } } - WorldStateStatus status; - get_status(status); + get_status_summary_from_meta_responses(status.summary, metaResponses); return status; } -WorldStateStatus WorldState::remove_historical_blocks(const index_t& toBlockNumber) +WorldStateStatusFull WorldState::remove_historical_blocks(const index_t& toBlockNumber) { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; TreeMetaResponse archive_state = get_tree_info(revision, MerkleTreeId::ARCHIVE); if (toBlockNumber <= archive_state.meta.oldestHistoricBlock) { throw std::runtime_error("Unable to remove historical block, block not found"); } + WorldStateStatusFull status; + std::array metaResponses; for (index_t blockNumber = archive_state.meta.oldestHistoricBlock; blockNumber < toBlockNumber; blockNumber++) { - if (!remove_historical_block(blockNumber)) { + if (!remove_historical_block(blockNumber, status, metaResponses)) { throw std::runtime_error("Failed to remove historical block"); } } - WorldStateStatus status; - get_status(status); + get_status_summary_from_meta_responses(status.summary, metaResponses); return status; } @@ -648,40 +690,131 @@ bool WorldState::set_finalised_block(const index_t& blockNumber) signal.wait_for_level(); return success; } -bool WorldState::unwind_block(const index_t& blockNumber) +bool WorldState::unwind_block(const index_t& blockNumber, + WorldStateStatusFull& status, + std::array& metaResponses) { std::atomic_bool success = true; + std::string message; Fork::SharedPtr fork = retrieve_fork(CANONICAL_FORK_ID); Signal signal(static_cast(fork->_trees.size())); - for (auto& [id, tree] : fork->_trees) { - std::visit( - [&signal, &success, blockNumber](auto&& wrapper) { - wrapper.tree->unwind_block(blockNumber, [&signal, &success](const TypedResponse& resp) { - success = success && resp.success; - signal.signal_decrement(); - }); - }, - tree); + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NULLIFIER_TREE)); + unwind_tree(status.dbStats.nullifierTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::NULLIFIER_TREE], + blockNumber); + } + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::PUBLIC_DATA_TREE)); + unwind_tree(status.dbStats.publicDataTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::PUBLIC_DATA_TREE], + blockNumber); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NOTE_HASH_TREE)); + unwind_tree(status.dbStats.noteHashTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::NOTE_HASH_TREE], + blockNumber); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::L1_TO_L2_MESSAGE_TREE)); + unwind_tree(status.dbStats.messageTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::L1_TO_L2_MESSAGE_TREE], + blockNumber); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::ARCHIVE)); + unwind_tree(status.dbStats.archiverTreeStats, + 
signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::ARCHIVE], + blockNumber); } signal.wait_for_level(); remove_forks_for_block(blockNumber); return success; } -bool WorldState::remove_historical_block(const index_t& blockNumber) +bool WorldState::remove_historical_block(const index_t& blockNumber, + WorldStateStatusFull& status, + std::array& metaResponses) { std::atomic_bool success = true; + std::string message; Fork::SharedPtr fork = retrieve_fork(CANONICAL_FORK_ID); Signal signal(static_cast(fork->_trees.size())); - for (auto& [id, tree] : fork->_trees) { - std::visit( - [&signal, &success, blockNumber](auto&& wrapper) { - wrapper.tree->remove_historic_block( - blockNumber, [&signal, &success](const TypedResponse& resp) { - success = success && resp.success; - signal.signal_decrement(); - }); - }, - tree); + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NULLIFIER_TREE)); + remove_historic_block_for_tree(status.dbStats.nullifierTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::NULLIFIER_TREE], + blockNumber); + } + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::PUBLIC_DATA_TREE)); + remove_historic_block_for_tree(status.dbStats.publicDataTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::PUBLIC_DATA_TREE], + blockNumber); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NOTE_HASH_TREE)); + remove_historic_block_for_tree(status.dbStats.noteHashTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::NOTE_HASH_TREE], + blockNumber); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::L1_TO_L2_MESSAGE_TREE)); + remove_historic_block_for_tree(status.dbStats.messageTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::L1_TO_L2_MESSAGE_TREE], + blockNumber); + } + + { + auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::ARCHIVE)); + remove_historic_block_for_tree(status.dbStats.archiverTreeStats, + signal, + *wrapper.tree, + success, + message, + metaResponses[MerkleTreeId::ARCHIVE], + blockNumber); } signal.wait_for_level(); remove_forks_for_block(blockNumber); @@ -735,16 +868,22 @@ bool WorldState::is_archive_tip(const WorldStateRevision& revision, const bb::fr return archive_state.meta.size == leaf_index.value() + 1; } -void WorldState::get_status(WorldStateStatus& status) const +void WorldState::get_status_summary(WorldStateStatusSummary& status) const { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; - std::array responses; + std::array responses; get_all_tree_info(revision, responses); - TreeMetaResponse& archive_state = responses[MerkleTreeId::ARCHIVE]; - status.unfinalisedBlockNumber = archive_state.meta.unfinalisedBlockHeight; - status.finalisedBlockNumber = archive_state.meta.finalisedBlockHeight; - status.oldestHistoricalBlock = archive_state.meta.oldestHistoricBlock; - status.treesAreSynched = determine_if_synched(responses); + get_status_summary_from_meta_responses(status, responses); +} + +void WorldState::get_status_summary_from_meta_responses(WorldStateStatusSummary& status, + std::array& metaResponses) +{ + TreeMeta& archive_state = metaResponses[MerkleTreeId::ARCHIVE]; + status.unfinalisedBlockNumber = archive_state.unfinalisedBlockHeight; + status.finalisedBlockNumber = archive_state.finalisedBlockHeight; + status.oldestHistoricalBlock = archive_state.oldestHistoricBlock; + 
status.treesAreSynched = determine_if_synched(metaResponses); } bool WorldState::is_same_state_reference(const WorldStateRevision& revision, const StateReference& state_ref) const @@ -755,7 +894,7 @@ bool WorldState::is_same_state_reference(const WorldStateRevision& revision, con void WorldState::validate_trees_are_equally_synched() { WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; - std::array responses; + std::array responses; get_all_tree_info(revision, responses); if (!determine_if_synched(responses)) { @@ -763,11 +902,11 @@ void WorldState::validate_trees_are_equally_synched() } } -bool WorldState::determine_if_synched(std::array& metaResponses) const +bool WorldState::determine_if_synched(std::array& metaResponses) { - index_t blockNumber = metaResponses[0].meta.unfinalisedBlockHeight; + index_t blockNumber = metaResponses[0].unfinalisedBlockHeight; for (size_t i = 1; i < metaResponses.size(); i++) { - if (blockNumber != metaResponses[i].meta.unfinalisedBlockHeight) { + if (blockNumber != metaResponses[i].unfinalisedBlockHeight) { return false; } } diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp index d2fb7240295..0c884d14527 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp @@ -200,7 +200,7 @@ class WorldState { /** * @brief Commits the current state of the world state. */ - std::pair commit(); + std::pair commit(WorldStateStatusFull& status, std::array& metaResponses); /** * @brief Rolls back any uncommitted changes made to the world state. @@ -210,12 +210,12 @@ class WorldState { uint64_t create_fork(const std::optional& blockNumber); void delete_fork(const uint64_t& forkId); - WorldStateStatus set_finalised_blocks(const index_t& toBlockNumber); - WorldStateStatus unwind_blocks(const index_t& toBlockNumber); - WorldStateStatus remove_historical_blocks(const index_t& toBlockNumber); + WorldStateStatusSummary set_finalised_blocks(const index_t& toBlockNumber); + WorldStateStatusFull unwind_blocks(const index_t& toBlockNumber); + WorldStateStatusFull remove_historical_blocks(const index_t& toBlockNumber); - void get_status(WorldStateStatus& status) const; - WorldStateStatus sync_block( + void get_status_summary(WorldStateStatusSummary& status) const; + WorldStateStatusFull sync_block( const StateReference& block_state_ref, const bb::fr& block_header_hash, const std::vector& notes, @@ -243,13 +243,15 @@ class WorldState { Fork::SharedPtr create_new_fork(const index_t& blockNumber); void remove_forks_for_block(const index_t& blockNumber); - bool unwind_block(const index_t& blockNumber); - bool remove_historical_block(const index_t& blockNumber); + bool unwind_block(const index_t& blockNumber, + WorldStateStatusFull& status, + std::array& metaResponses); + bool remove_historical_block(const index_t& blockNumber, + WorldStateStatusFull& status, + std::array& metaResponses); bool set_finalised_block(const index_t& blockNumber); - void get_all_tree_info(const WorldStateRevision& revision, - std::array& responses) const; - bool determine_if_synched(std::array& metaResponses) const; + void get_all_tree_info(const WorldStateRevision& revision, std::array& responses) const; void validate_trees_are_equally_synched(); @@ -264,8 +266,98 @@ class WorldState { static StateReference get_state_reference(const WorldStateRevision& revision, Fork::SharedPtr fork, 
bool initial_state = false); + + static bool determine_if_synched(std::array& metaResponses); + + static void get_status_summary_from_meta_responses(WorldStateStatusSummary& status, + std::array& metaResponses); + + template + void commit_tree(TreeDBStats& dbStats, + Signal& signal, + TreeType& tree, + std::atomic_bool& success, + std::string& message, + TreeMeta& meta); + + template + void unwind_tree(TreeDBStats& dbStats, + Signal& signal, + TreeType& tree, + std::atomic_bool& success, + std::string& message, + TreeMeta& meta, + const index_t& blockNumber); + + template + void remove_historic_block_for_tree(TreeDBStats& dbStats, + Signal& signal, + TreeType& tree, + std::atomic_bool& success, + std::string& message, + TreeMeta& meta, + const index_t& blockNumber); }; +template +void WorldState::commit_tree(TreeDBStats& dbStats, + Signal& signal, + TreeType& tree, + std::atomic_bool& success, + std::string& message, + TreeMeta& meta) +{ + tree.commit([&](TypedResponse& response) { + bool expected = true; + if (!response.success && success.compare_exchange_strong(expected, false)) { + message = response.message; + } + dbStats = std::move(response.inner.stats); + meta = std::move(response.inner.meta); + signal.signal_decrement(); + }); +} + +template +void WorldState::unwind_tree(TreeDBStats& dbStats, + Signal& signal, + TreeType& tree, + std::atomic_bool& success, + std::string& message, + TreeMeta& meta, + const index_t& blockNumber) +{ + tree.unwind_block(blockNumber, [&](TypedResponse& response) { + bool expected = true; + if (!response.success && success.compare_exchange_strong(expected, false)) { + message = response.message; + } + dbStats = std::move(response.inner.stats); + meta = std::move(response.inner.meta); + signal.signal_decrement(); + }); +} + +template +void WorldState::remove_historic_block_for_tree(TreeDBStats& dbStats, + Signal& signal, + TreeType& tree, + std::atomic_bool& success, + std::string& message, + TreeMeta& meta, + const index_t& blockNumber) +{ + tree.remove_historic_block(blockNumber, [&](TypedResponse& response) { + bool expected = true; + if (!response.success && success.compare_exchange_strong(expected, false)) { + message = response.message; + } + dbStats = std::move(response.inner.stats); + meta = std::move(response.inner.meta); + signal.signal_decrement(); + }); +} + template std::optional> WorldState::get_indexed_leaf(const WorldStateRevision& rev, MerkleTreeId id, diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp index f52718c7c71..78ff9945cc2 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp @@ -1,11 +1,13 @@ #include "barretenberg/world_state/world_state.hpp" #include "barretenberg/crypto/merkle_tree/fixtures.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" +#include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/vm/aztec_constants.hpp" #include "barretenberg/world_state/fork.hpp" #include "barretenberg/world_state/types.hpp" +#include #include #include #include @@ -247,7 +249,9 @@ TEST_F(WorldStateTest, GetInitialStateReference) auto before_commit = ws.get_initial_state_reference(); ws.append_leaves(MerkleTreeId::NOTE_HASH_TREE, { 1 }); - ws.commit(); + WorldStateStatusFull status; + 
std::array metaResponses; + ws.commit(status, metaResponses); auto after_commit = ws.get_initial_state_reference(); @@ -281,7 +285,9 @@ TEST_F(WorldStateTest, AppendOnlyTrees) EXPECT_EQ(committed.meta.size, initial.meta.size); EXPECT_EQ(committed.meta.root, initial.meta.root); - ws.commit(); + WorldStateStatusFull status; + std::array metaResponses; + ws.commit(status, metaResponses); assert_leaf_value(ws, WorldStateRevision::committed(), tree_id, 0, fr(42)); assert_leaf_index(ws, WorldStateRevision::committed(), tree_id, fr(42), 0); @@ -330,7 +336,9 @@ TEST_F(WorldStateTest, AppendOnlyAllowDuplicates) assert_leaf_value(ws, WorldStateRevision::uncommitted(), tree_id, 1, fr(42)); assert_leaf_value(ws, WorldStateRevision::uncommitted(), tree_id, 2, fr(42)); - ws.commit(); + WorldStateStatusFull status; + std::array metaRespoonses; + ws.commit(status, metaRespoonses); assert_leaf_value(ws, WorldStateRevision::committed(), tree_id, 0, fr(42)); assert_leaf_value(ws, WorldStateRevision::committed(), tree_id, 1, fr(42)); @@ -351,7 +359,9 @@ TEST_F(WorldStateTest, NullifierTree) ws.append_leaves(tree_id, { test_nullifier }); assert_leaf_value(ws, WorldStateRevision::uncommitted(), tree_id, 128, test_nullifier); - ws.commit(); + WorldStateStatusFull status; + std::array metaRespoonses; + ws.commit(status, metaRespoonses); auto test_leaf = ws.get_indexed_leaf(WorldStateRevision::committed(), tree_id, 128); // at this point 142 should be the biggest leaf so it wraps back to 0 @@ -381,7 +391,9 @@ TEST_F(WorldStateTest, NullifierTreeDuplicates) NullifierLeafValue test_nullifier(142); ws.append_leaves(tree_id, { test_nullifier }); - ws.commit(); + WorldStateStatusFull status; + std::array metaRespoonses; + ws.commit(status, metaRespoonses); assert_tree_size(ws, WorldStateRevision::committed(), tree_id, 129); EXPECT_THROW(ws.append_leaves(tree_id, { test_nullifier }), std::runtime_error); @@ -458,7 +470,9 @@ TEST_F(WorldStateTest, CommitsAndRollsBackAllTrees) ws.append_leaves(MerkleTreeId::NULLIFIER_TREE, { NullifierLeafValue(142) }); ws.append_leaves(MerkleTreeId::PUBLIC_DATA_TREE, { PublicDataLeafValue(142, 1) }); - ws.commit(); + WorldStateStatusFull status; + std::array metaRespoonses; + ws.commit(status, metaRespoonses); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::NOTE_HASH_TREE, 0, fr(42)); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::L1_TO_L2_MESSAGE_TREE, 0, fr(42)); @@ -498,10 +512,10 @@ TEST_F(WorldStateTest, SyncExternalBlockFromEmpty) { fr("0x20ea8ca97f96508aaed2d6cdc4198a41c77c640bfa8785a51bb905b9a672ba0b"), 1 } }, }; - WorldStateStatus status = ws.sync_block( + WorldStateStatusFull status = ws.sync_block( block_state_ref, fr(1), { 42 }, { 43 }, { NullifierLeafValue(144) }, { { PublicDataLeafValue(145, 1) } }); - WorldStateStatus expected{ .unfinalisedBlockNumber = 1, .finalisedBlockNumber = 0, .oldestHistoricalBlock = 1 }; - EXPECT_EQ(status, expected); + WorldStateStatusSummary expected(1, 0, 1, true); + EXPECT_EQ(status.summary, expected); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::NOTE_HASH_TREE, 0, fr(42)); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::L1_TO_L2_MESSAGE_TREE, 0, fr(43)); @@ -540,10 +554,10 @@ TEST_F(WorldStateTest, SyncBlockFromDirtyState) EXPECT_NE(uncommitted_state_ref.at(tree_id), snapshot); } - WorldStateStatus status = ws.sync_block( + WorldStateStatusFull status = ws.sync_block( block_state_ref, fr(1), { 42 }, { 43 }, { NullifierLeafValue(144) }, { { 
PublicDataLeafValue(145, 1) } }); - WorldStateStatus expected{ .unfinalisedBlockNumber = 1, .finalisedBlockNumber = 0, .oldestHistoricalBlock = 1 }; - EXPECT_EQ(status, expected); + WorldStateStatusSummary expected{ 1, 0, 1, true }; + EXPECT_EQ(status.summary, expected); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::NOTE_HASH_TREE, 0, fr(42)); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::L1_TO_L2_MESSAGE_TREE, 0, fr(43)); @@ -584,10 +598,10 @@ TEST_F(WorldStateTest, SyncCurrentBlock) EXPECT_EQ(uncommitted_state_ref.at(tree_id), snapshot); } - WorldStateStatus status = ws.sync_block( + WorldStateStatusFull status = ws.sync_block( block_state_ref, fr(1), { 42 }, { 43 }, { NullifierLeafValue(144) }, { { PublicDataLeafValue(145, 1) } }); - WorldStateStatus expected{ .unfinalisedBlockNumber = 1, .finalisedBlockNumber = 0, .oldestHistoricalBlock = 1 }; - EXPECT_EQ(status, expected); + WorldStateStatusSummary expected{ 1, 0, 1, true }; + EXPECT_EQ(status.summary, expected); assert_leaf_value(ws, WorldStateRevision::uncommitted(), MerkleTreeId::ARCHIVE, 1, fr(1)); @@ -731,7 +745,9 @@ TEST_F(WorldStateTest, ForkingAtBlock0AndAdvancingCanonicalState) EXPECT_NE(fork_archive_state_after_insert.meta, fork_archive_state_before_insert.meta); EXPECT_NE(fork_archive_state_after_insert.meta, canonical_archive_state_after_insert.meta); - ws.commit(); + WorldStateStatusFull status; + std::array metaResponses; + ws.commit(status, metaResponses); auto canonical_archive_state_after_commit = ws.get_tree_info(WorldStateRevision::committed(), MerkleTreeId::ARCHIVE); auto fork_archive_state_after_commit = ws.get_tree_info( diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp b/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp index 0e6604cd58a..0911764b5b9 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp @@ -554,12 +554,12 @@ bool WorldStateAddon::sync_block(msgpack::object& obj, msgpack::sbuffer& buf) TypedMessage request; obj.convert(request); - WorldStateStatus status = _ws->sync_block(request.value.blockStateRef, - request.value.blockHeaderHash, - request.value.paddedNoteHashes, - request.value.paddedL1ToL2Messages, - request.value.paddedNullifiers, - request.value.batchesOfPaddedPublicDataWrites); + WorldStateStatusFull status = _ws->sync_block(request.value.blockStateRef, + request.value.blockHeaderHash, + request.value.paddedNoteHashes, + request.value.paddedL1ToL2Messages, + request.value.paddedNullifiers, + request.value.batchesOfPaddedPublicDataWrites); MsgHeader header(request.header.messageId); messaging::TypedMessage resp_msg(WorldStateMessageType::SYNC_BLOCK, header, { status }); @@ -619,9 +619,10 @@ bool WorldStateAddon::set_finalised(msgpack::object& obj, msgpack::sbuffer& buf) { TypedMessage request; obj.convert(request); - WorldStateStatus status = _ws->set_finalised_blocks(request.value.toBlockNumber); + WorldStateStatusSummary status = _ws->set_finalised_blocks(request.value.toBlockNumber); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg(WorldStateMessageType::FINALISE_BLOCKS, header, { status }); + messaging::TypedMessage resp_msg( + WorldStateMessageType::FINALISE_BLOCKS, header, { status }); msgpack::pack(buf, resp_msg); return true; @@ -632,10 +633,10 @@ bool WorldStateAddon::unwind(msgpack::object& obj, msgpack::sbuffer& buf) const TypedMessage request; obj.convert(request); - WorldStateStatus 
status = _ws->unwind_blocks(request.value.toBlockNumber); + WorldStateStatusFull status = _ws->unwind_blocks(request.value.toBlockNumber); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg(WorldStateMessageType::UNWIND_BLOCKS, header, { status }); + messaging::TypedMessage resp_msg(WorldStateMessageType::UNWIND_BLOCKS, header, { status }); msgpack::pack(buf, resp_msg); return true; @@ -645,10 +646,10 @@ bool WorldStateAddon::remove_historical(msgpack::object& obj, msgpack::sbuffer& { TypedMessage request; obj.convert(request); - WorldStateStatus status = _ws->remove_historical_blocks(request.value.toBlockNumber); + WorldStateStatusFull status = _ws->remove_historical_blocks(request.value.toBlockNumber); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg( + messaging::TypedMessage resp_msg( WorldStateMessageType::REMOVE_HISTORICAL_BLOCKS, header, { status }); msgpack::pack(buf, resp_msg); @@ -660,11 +661,11 @@ bool WorldStateAddon::get_status(msgpack::object& obj, msgpack::sbuffer& buf) co HeaderOnlyMessage request; obj.convert(request); - WorldStateStatus status; + WorldStateStatusSummary status; _ws->get_status(status); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg(WorldStateMessageType::GET_STATUS, header, { status }); + messaging::TypedMessage resp_msg(WorldStateMessageType::GET_STATUS, header, { status }); msgpack::pack(buf, resp_msg); return true; diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp b/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp index 4868af1b473..fa544ce8c89 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp @@ -193,7 +193,7 @@ struct SyncBlockRequest { }; struct SyncBlockResponse { - WorldStateStatus status; + WorldStateStatusSummary status; MSGPACK_FIELDS(status); }; diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index 8f60a774a16..bb8f2f265dc 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -1,5 +1,5 @@ import { MerkleTreeId } from '@aztec/circuit-types'; -import { AppendOnlyTreeSnapshot, Fr, type StateReference, type UInt32 } from '@aztec/circuits.js'; +import { AppendOnlyTreeSnapshot, Fr, TreeLeafReadRequest, type StateReference, type UInt32 } from '@aztec/circuits.js'; import { type Tuple } from '@aztec/foundation/serialize'; export type MessageHeaderInit = { @@ -84,7 +84,7 @@ interface WithTreeId { treeId: MerkleTreeId; } -export interface WorldStateStatus { +export interface WorldStateStatusSummary { /** Last block number that can still be unwound. */ unfinalisedBlockNumber: bigint; /** Last block number that is finalised and cannot be unwound. 
*/ @@ -95,6 +95,48 @@ export interface WorldStateStatus { treesAreSynched: boolean; } +export interface DBStats { + /** The name of the DB */ + name: string; + /** The total number of key/value pairs in the DB */ + numDataItems: bigint; + /** The current mapped size of the DB */ + totalUsedSize: bigint; +} + +export interface TreeDBStats { + /** The configured max size of the DB mapping file (effectively the max possible size of the DB) */ + mapSize: bigint; + /** Stats for the 'blocks' DB */ + blocksDBStats: DBStats; + /** Stats for the 'nodes' DB */ + nodesDBStats: DBStats; + /** Stats for the 'leaf pre-images' DB */ + leafPreimagesDBStats: DBStats; + /** Stats for the 'leaf keys' DB */ + leafKeysDBStats: DBStats; + /** Stats for the 'leaf indices' DB */ + leafIndicesDBStats: DBStats; +} + +export interface WorldStateDBStats { + /** Full stats for the note hash tree */ + noteHashTreeStats: TreeDBStats; + /** Full stats for the message tree */ + messageTreeStats: TreeDBStats; + /** Full stats for the archive tree */ + archiveTreeStats: TreeDBStats; + /** Full stats for the public data tree */ + publicDataTreeStats: TreeDBStats; + /** Full stats for the nullifier tree */ + nullifierTreeStats: TreeDBStats; +} + +export interface WorldStateStatusFull { + summary: WorldStateStatusSummary; + dbStats: WorldStateDBStats; +} + interface WithForkId { forkId: number; } @@ -195,10 +237,6 @@ interface SyncBlockRequest { batchesOfPaddedPublicDataWrites: readonly SerializedLeafValue[][]; } -interface SyncBlockResponse { - status: WorldStateStatus; -} - interface CreateForkRequest { latest: boolean; blockNumber: number; @@ -274,16 +312,16 @@ export type WorldStateResponse = { [WorldStateMessageType.COMMIT]: void; [WorldStateMessageType.ROLLBACK]: void; - [WorldStateMessageType.SYNC_BLOCK]: SyncBlockResponse; + [WorldStateMessageType.SYNC_BLOCK]: WorldStateStatusFull; [WorldStateMessageType.CREATE_FORK]: CreateForkResponse; [WorldStateMessageType.DELETE_FORK]: void; - [WorldStateMessageType.REMOVE_HISTORICAL_BLOCKS]: WorldStateStatus; - [WorldStateMessageType.UNWIND_BLOCKS]: WorldStateStatus; - [WorldStateMessageType.FINALISE_BLOCKS]: WorldStateStatus; + [WorldStateMessageType.REMOVE_HISTORICAL_BLOCKS]: WorldStateStatusFull; + [WorldStateMessageType.UNWIND_BLOCKS]: WorldStateStatusFull; + [WorldStateMessageType.FINALISE_BLOCKS]: WorldStateStatusFull; - [WorldStateMessageType.GET_STATUS]: WorldStateStatus; + [WorldStateMessageType.GET_STATUS]: WorldStateStatusSummary; [WorldStateMessageType.CLOSE]: void; }; diff --git a/yarn-project/world-state/src/native/native_world_state.ts b/yarn-project/world-state/src/native/native_world_state.ts index aaec50a4d68..5b944c0823b 100644 --- a/yarn-project/world-state/src/native/native_world_state.ts +++ b/yarn-project/world-state/src/native/native_world_state.ts @@ -32,7 +32,8 @@ import { type MerkleTreeAdminDatabase as MerkleTreeDatabase } from '../world-sta import { MerkleTreesFacade, MerkleTreesForkFacade, serializeLeaf } from './merkle_trees_facade.js'; import { WorldStateMessageType, - type WorldStateStatus, + WorldStateStatusFull, + type WorldStateStatusSummary, blockStateReference, treeStateReferenceToSnapshot, worldStateRevision, @@ -71,7 +72,13 @@ export class NativeWorldStateService implements MerkleTreeDatabase { const instance = new NativeWorldState(dataDir, dbMapSizeKb); const worldState = new this(instance, log, cleanup); - await worldState.init(); + try { + await worldState.init(); + } catch(e) { + log.error(`Error initialising world state: ${e}`); + 
throw e; + } + return worldState; } @@ -97,7 +104,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { protected async init() { const status = await this.getStatus(); if (!status.treesAreSynched) { - throw new Error("World state trees are out of sync"); + throw new Error("World state trees are out of sync, please delete your data directory and re-sync"); } this.initialHeader = await this.buildInitialHeader(); const committed = this.getCommitted(); @@ -134,7 +141,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { return this.initialHeader!; } - public async handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { + public async handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { // We have to pad both the tx effects and the values within tx effects because that's how the trees are built // by circuits. const paddedTxEffects = padArrayEnd( @@ -174,7 +181,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { batchesOfPaddedPublicDataWrites: batchesOfPaddedPublicDataWrites.map(batch => batch.map(serializeLeaf)), blockStateRef: blockStateReference(l2Block.header.state), }); - return response.status; + return response.summary; } public async close(): Promise { diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts index 9ae8380797a..f13ab087b9d 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts @@ -24,7 +24,7 @@ import { promiseWithResolvers } from '@aztec/foundation/promise'; import { elapsed } from '@aztec/foundation/timer'; import { SHA256Trunc } from '@aztec/merkle-tree'; -import { type WorldStateStatus } from '../native/message.js'; +import { type WorldStateStatusSummary } from '../native/message.js'; import { type MerkleTreeAdminDatabase } from '../world-state-db/merkle_tree_db.js'; import { type WorldStateConfig } from './config.js'; @@ -225,7 +225,7 @@ export class ServerWorldStateSynchronizer * @param l1ToL2Messages - The L1 to L2 messages for the block. * @returns Whether the block handled was produced by this same node. */ - private async handleL2Block(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { + private async handleL2Block(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { // First we check that the L1 to L2 messages hash to the block inHash. 
// Note that we cannot optimize this check by checking the root of the subtree after inserting the messages // to the real L1_TO_L2_MESSAGE_TREE (like we do in merkleTreeDb.handleL2BlockAndMessages(...)) because that diff --git a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts index 9ca9e586f9d..edc73b6eeb4 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts @@ -3,7 +3,7 @@ import { type MerkleTreeReadOperations, type MerkleTreeWriteOperations } from '@ import { type Fr, MAX_NULLIFIERS_PER_TX, MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX } from '@aztec/circuits.js'; import { type IndexedTreeSnapshot, type TreeSnapshot } from '@aztec/merkle-tree'; -import { type WorldStateStatus } from '../native/message.js'; +import { type WorldStateStatusSummary } from '../native/message.js'; /** * @@ -38,7 +38,7 @@ export interface MerkleTreeAdminDatabase { * @param block - The L2 block to handle. * @param l1ToL2Messages - The L1 to L2 messages for the block. */ - handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise; + handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise; /** * Gets a handle that allows reading the latest committed state @@ -62,27 +62,27 @@ export interface MerkleTreeAdminDatabase { * @param toBlockNumber The block number of the new oldest historical block * @returns The new WorldStateStatus */ - removeHistoricalBlocks(toBlockNumber: bigint): Promise; + removeHistoricalBlocks(toBlockNumber: bigint): Promise; /** * Removes all pending blocks down to but not including the given block number * @param toBlockNumber The block number of the new tip of the pending chain, * @returns The new WorldStateStatus */ - unwindBlocks(toBlockNumber: bigint): Promise; + unwindBlocks(toBlockNumber: bigint): Promise; /** * Advances the finalised block number to be the number provided * @param toBlockNumber The block number that is now the tip of the finalised chain * @returns The new WorldStateStatus */ - setFinalised(toBlockNumber: bigint): Promise; + setFinalised(toBlockNumber: bigint): Promise; /** * Gets the current status of the database. * @returns The current WorldStateStatus. 
*/ - getStatus(): Promise; + getStatus(): Promise; /** Stops the database */ close(): Promise; diff --git a/yarn-project/world-state/src/world-state-db/merkle_trees.ts b/yarn-project/world-state/src/world-state-db/merkle_trees.ts index 7b0f9214d44..18576f89f5f 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_trees.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_trees.ts @@ -51,7 +51,7 @@ import { type TelemetryClient } from '@aztec/telemetry-client'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; import { type Hasher } from '@aztec/types/interfaces'; -import { type WorldStateStatus } from '../native/message.js'; +import { type WorldStateStatusSummary } from '../native/message.js'; import { INITIAL_NULLIFIER_TREE_SIZE, INITIAL_PUBLIC_DATA_TREE_SIZE, @@ -198,19 +198,19 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { } } - public removeHistoricalBlocks(_toBlockNumber: bigint): Promise { + public removeHistoricalBlocks(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } - public unwindBlocks(_toBlockNumber: bigint): Promise { + public unwindBlocks(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } - public setFinalised(_toBlockNumber: bigint): Promise { + public setFinalised(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } - public getStatus(): Promise { + public getStatus(): Promise { throw new Error('Method not implemented.'); } @@ -467,7 +467,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { * @param l1ToL2Messages - The L1 to L2 messages for the block. * @returns Whether the block handled was produced by this same node. */ - public async handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise { + public async handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise { return await this.synchronize(() => this.#handleL2BlockAndMessages(block, l1ToL2Messages)); } @@ -617,7 +617,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { * @param l2Block - The L2 block to handle. * @param l1ToL2Messages - The L1 to L2 messages for the block. 
*/ - async #handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { + async #handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { const timer = new Timer(); const treeRootWithIdPairs = [ @@ -710,7 +710,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { this.metrics.recordDbSize(this.store.estimateSize().bytes); this.metrics.recordSyncDuration('commit', timer); - return { unfinalisedBlockNumber: 0n, finalisedBlockNumber: 0n, oldestHistoricalBlock: 0n } as WorldStateStatus; + return { unfinalisedBlockNumber: 0n, finalisedBlockNumber: 0n, oldestHistoricalBlock: 0n } as WorldStateStatusSummary; } #isDbPopulated(): boolean { From c24b940301549708b4229964f85b406bdba54993 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 10:56:49 +0000 Subject: [PATCH 09/31] WIP --- .../cached_content_addressed_tree_store.hpp | 163 ++++++++++-------- .../barretenberg/crypto/merkle_tree/types.hpp | 18 ++ .../src/barretenberg/world_state/types.hpp | 83 ++++++++- .../barretenberg/world_state/world_state.cpp | 101 +++++------ .../barretenberg/world_state/world_state.hpp | 12 +- .../world_state/world_state.test.cpp | 21 +-- .../barretenberg/world_state_napi/addon.cpp | 9 +- .../barretenberg/world_state_napi/message.hpp | 5 - .../world-state/src/native/message.ts | 29 +++- 9 files changed, 275 insertions(+), 166 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index b4b62aed52f..d4596c77d01 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -682,8 +682,7 @@ void ContentAddressedCachedTreeStore::commit(TreeMeta& finalMeta, tx->commit(); } catch (std::exception& e) { tx->try_abort(); - throw std::runtime_error( - (std::stringstream() << "Unable to commit data to tree: " << name_ << " Error: " << e.what()).str()); + throw std::runtime_error(format("Unable to commit data to tree: ", name_, " Error: ", e.what())); } } finalMeta = uncommittedMeta; @@ -820,9 +819,7 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con TreeMeta uncommittedMeta; BlockPayload blockPayload; if (blockNumber < 1) { - throw std::runtime_error( - (std::stringstream() << "Unable to advance finalised block: " << blockNumber << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to advance finalised block: ", blockNumber, ". Tree name: ", name_)); } if (initialised_from_block_.has_value()) { throw std::runtime_error("Advancing the finalised block on a fork is forbidden"); @@ -833,25 +830,24 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con get_meta(uncommittedMeta, *tx, true); get_meta(committedMeta, *tx, false); if (!dataStore_->read_block_data(blockNumber, blockPayload, *tx)) { - throw std::runtime_error((std::stringstream() << "Unable to advance finalised block: " << blockNumber - << ". Failed to read block data. Tree name: " << name_) - .str()); + throw std::runtime_error(format( + "Unable to advance finalised block: ", blockNumber, ". Failed to read block data. 
Tree name: ", name_)); } } // can only finalise blocks that are not finalised if (committedMeta.finalisedBlockHeight >= blockNumber) { - std::stringstream ss; - ss << "Unable to finalise block " << blockNumber << " currently finalised block height " - << committedMeta.finalisedBlockHeight << std::endl; - throw std::runtime_error(ss.str()); + throw std::runtime_error(format("Unable to finalise block ", + blockNumber, + " currently finalised block height ", + committedMeta.finalisedBlockHeight)); } // can currently only finalise up to the unfinalised block height if (committedMeta.finalisedBlockHeight > committedMeta.unfinalisedBlockHeight) { - std::stringstream ss; - ss << "Unable to finalise block " << blockNumber << " currently unfinalised block height " - << committedMeta.unfinalisedBlockHeight << std::endl; - throw std::runtime_error(ss.str()); + throw std::runtime_error(format("Unable to finalise block ", + blockNumber, + " currently unfinalised block height ", + committedMeta.finalisedBlockHeight)); } // commit the new finalised block @@ -867,9 +863,12 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw std::runtime_error((std::stringstream() << "Unable to commit advance of finalised block: " << blockNumber - << ". Tree name: " << name_ << " Error: " << e.what()) - .str()); + throw std::runtime_error(format("Unable to commit advance of finalised block: ", + blockNumber, + ". Tree name: ", + name_, + " Error: ", + e.what())); } // commit successful, now also update the uncommitted meta @@ -887,9 +886,7 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& BlockPayload blockData; BlockPayload previousBlockData; if (blockNumber < 1) { - throw std::runtime_error( - (std::stringstream() << "Unable to remove historical block: " << blockNumber << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to remove historical block: ", blockNumber, ". Tree name: ", name_)); } if (initialised_from_block_.has_value()) { throw std::runtime_error("Removing a block on a fork is forbidden"); @@ -900,22 +897,26 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& get_meta(committedMeta, *tx, false); if (committedMeta != uncommittedMeta) { throw std::runtime_error( - (std::stringstream() - << "Unable to unwind block: " << blockNumber - << " Can't unwind with uncommitted data, first rollback before unwinding. Tree name: " << name_) - .str()); + format("Unable to unwind block: ", + blockNumber, + " Can't unwind with uncommitted data, first rollback before unwinding. Tree name: ", + name_)); } if (blockNumber != uncommittedMeta.unfinalisedBlockHeight) { - throw std::runtime_error((std::stringstream() - << "Unable to unwind block: " << blockNumber << " unfinalisedBlockHeight: " - << committedMeta.unfinalisedBlockHeight << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to unwind block: ", + blockNumber, + " unfinalisedBlockHeight: ", + committedMeta.unfinalisedBlockHeight, + ". Tree name: ", + name_)); } if (blockNumber <= uncommittedMeta.finalisedBlockHeight) { - throw std::runtime_error((std::stringstream() - << "Unable to unwind block: " << blockNumber << " finalisedBlockHeight: " - << committedMeta.finalisedBlockHeight << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to unwind block: ", + blockNumber, + " finalisedBlockHeight: ", + committedMeta.finalisedBlockHeight, + ". 
Tree name: ", + name_)); } // populate the required data for the previous block @@ -924,17 +925,14 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& previousBlockData.size = uncommittedMeta.initialSize; previousBlockData.blockNumber = 0; } else if (!dataStore_->read_block_data(blockNumber - 1, previousBlockData, *tx)) { - throw std::runtime_error((std::stringstream() - << "Unable to unwind block: " << blockNumber - << ". Failed to read previous block data. Tree name: " << name_) - .str()); + throw std::runtime_error(format( + "Unable to unwind block: ", blockNumber, ". Failed to read previous block data. Tree name: ", name_)); } // now get the root for the block we want to unwind if (!dataStore_->read_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() << "Unable to unwind block: " << blockNumber - << ". Failed to read block data. Tree name: " << name_) - .str()); + throw std::runtime_error( + format("Unable to unwind block: ", blockNumber, ". Failed to read block data. Tree name: ", name_)); } } WriteTransactionPtr writeTx = create_write_transaction(); @@ -957,9 +955,8 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw std::runtime_error((std::stringstream() << "Unable to commit unwind of block: " << blockNumber - << ". Tree name: " << name_ << " Error: " << e.what()) - .str()); + throw std::runtime_error( + format("Unable to commit unwind of block: ", blockNumber, ". Tree name: ", name_, " Error: ", e.what())); } // now update the uncommitted meta @@ -978,9 +975,7 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con TreeMeta uncommittedMeta; BlockPayload blockData; if (blockNumber < 1) { - throw std::runtime_error( - (std::stringstream() << "Unable to remove historical block: " << blockNumber << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to remove historical block: ", blockNumber, ". Tree name: ", name_)); } if (initialised_from_block_.has_value()) { throw std::runtime_error("Removing a block on a fork is forbidden"); @@ -992,22 +987,25 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con get_meta(uncommittedMeta, *tx, true); get_meta(committedMeta, *tx, false); if (blockNumber != committedMeta.oldestHistoricBlock) { - throw std::runtime_error( - (std::stringstream() << "Unable to remove historical block: " << blockNumber << " oldestHistoricBlock: " - << committedMeta.oldestHistoricBlock << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to remove historical block: ", + blockNumber, + " oldestHistoricBlock: ", + committedMeta.oldestHistoricBlock, + ". Tree name: ", + name_)); } if (blockNumber >= committedMeta.finalisedBlockHeight) { - throw std::runtime_error( - (std::stringstream() << "Unable to remove historical block: " << blockNumber << " oldestHistoricBlock: " - << committedMeta.finalisedBlockHeight << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to remove historical block: ", + blockNumber, + " oldestHistoricBlock: ", + committedMeta.finalisedBlockHeight, + ". Tree name: ", + name_)); } if (!dataStore_->read_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() << "Unable to remove historical block: " << blockNumber - << ". Failed to read block data. 
Tree name: " << name_) - .str()); + throw std::runtime_error(format( + "Unable to remove historical block: ", blockNumber, ". Failed to read block data. Tree name: ", name_)); } } WriteTransactionPtr writeTx = create_write_transaction(); @@ -1023,9 +1021,12 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con writeTx->commit(); } catch (std::exception& e) { writeTx->try_abort(); - throw std::runtime_error((std::stringstream() << "Unable to commit removal of historical block: " << blockNumber - << ". Tree name: " << name_ << " Error: " << e.what()) - .str()); + throw std::runtime_error(format("Unable to commit removal of historical block: ", + blockNumber, + ". Tree name: ", + name_, + " Error: ", + e.what())); } // commit was successful, update the uncommitted meta @@ -1149,7 +1150,7 @@ template void ContentAddressedCachedTreeStore::initialise_from_block(const bool success = read_persisted_meta(meta_, *tx); if (success) { if (name_ != meta_.name || depth_ != meta_.depth) { - throw std::runtime_error("Invalid tree meta data"); + throw std::runtime_error(format("Inconsistent tree meta data when initialising ", + name_, + " with depth ", + depth_, + " from block ", + blockNumber, + " stored name: ", + meta_.name, + "stored depth: ", + meta_.depth)); } } else { - throw std::runtime_error("Tree must be initialised"); + throw std::runtime_error(format( + "Tree found to be uninitialised when attempting to create ", name_, " from block ", blockNumber)); } if (meta_.unfinalisedBlockHeight < blockNumber) { - throw std::runtime_error((std::stringstream() << "Unable to initialise from future block: " << blockNumber - << " unfinalisedBlockHeight: " << meta_.unfinalisedBlockHeight - << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to initialise from future block: ", + blockNumber, + " unfinalisedBlockHeight: ", + meta_.unfinalisedBlockHeight, + ". Tree name: ", + name_)); } if (meta_.oldestHistoricBlock > blockNumber && blockNumber != 0) { - throw std::runtime_error((std::stringstream() << "Unable to fork from expired historical block: " - << blockNumber << " unfinalisedBlockHeight: " - << meta_.oldestHistoricBlock << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Unable to fork from expired historical block: ", + blockNumber, + " unfinalisedBlockHeight: ", + meta_.oldestHistoricBlock, + ". Tree name: ", + name_)); } BlockPayload blockData; if (blockNumber == 0) { @@ -1210,9 +1225,7 @@ void ContentAddressedCachedTreeStore::initialise_from_block(const blockData.root = meta_.initialRoot; blockData.size = meta_.initialSize; } else if (get_block_data(blockNumber, blockData, *tx) == false) { - throw std::runtime_error( - (std::stringstream() << "Failed to retrieve block data: " << blockNumber << ". Tree name: " << name_) - .str()); + throw std::runtime_error(format("Failed to retrieve block data: ", blockNumber, ". 
Tree name: ", name_)); } initialised_from_block_ = blockData; enrich_meta_from_block(meta_); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp index ad27ae15142..6f2ce79c474 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp @@ -33,6 +33,11 @@ struct DBStats { , numDataItems(stat.ms_entries) , totalUsedSize(stat.ms_psize * (stat.ms_branch_pages + stat.ms_leaf_pages + stat.ms_overflow_pages)) {} + DBStats(const std::string& name, uint64_t numDataItems, uint64_t totalUsedSize) + : name(name) + , numDataItems(numDataItems) + , totalUsedSize(totalUsedSize) + {} MSGPACK_FIELDS(name, numDataItems, totalUsedSize) @@ -73,6 +78,19 @@ struct TreeDBStats { TreeDBStats(uint64_t mapSize) : mapSize(mapSize) {} + TreeDBStats(uint64_t mapSize, + const DBStats& blockStats, + const DBStats& nodesStats, + const DBStats& leafPreimagesDBStats, + const DBStats& leafKeysDBStats, + const DBStats& leafIndicesStats) + : mapSize(mapSize) + , blocksDBStats(blockStats) + , nodesDBStats(nodesStats) + , leafPreimagesDBStats(leafPreimagesDBStats) + , leafKeysDBStats(leafKeysDBStats) + , leafIndicesDBStats(leafIndicesStats) + {} TreeDBStats(const TreeDBStats& other) = default; TreeDBStats(TreeDBStats&& other) noexcept { *this = std::move(other); } diff --git a/barretenberg/cpp/src/barretenberg/world_state/types.hpp b/barretenberg/cpp/src/barretenberg/world_state/types.hpp index faee6cac09f..34e74a0631c 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/types.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/types.hpp @@ -2,6 +2,7 @@ #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" +#include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/serialize/msgpack.hpp" @@ -99,6 +100,17 @@ struct WorldStateDBStats { MSGPACK_FIELDS(noteHashTreeStats, messageTreeStats, archiveTreeStats, publicDataTreeStats, nullifierTreeStats); WorldStateDBStats() = default; + WorldStateDBStats(const TreeDBStats& noteHashStats, + const TreeDBStats& messageStats, + const TreeDBStats& archiveStats, + const TreeDBStats& publicDataStats, + const TreeDBStats& nullifierStats) + : noteHashTreeStats(noteHashStats) + , messageTreeStats(messageStats) + , archiveTreeStats(archiveStats) + , publicDataTreeStats(publicDataStats) + , nullifierTreeStats(nullifierStats) + {} WorldStateDBStats(const WorldStateDBStats& other) = default; WorldStateDBStats(WorldStateDBStats&& other) noexcept { *this = std::move(other); } @@ -134,13 +146,77 @@ struct WorldStateDBStats { } }; +struct WorldStateMeta { + TreeMeta noteHashTreeMeta; + TreeMeta messageTreeMeta; + TreeMeta archiveTreeMeta; + TreeMeta publicDataTreeMeta; + TreeMeta nullifierTreeMeta; + + MSGPACK_FIELDS(noteHashTreeMeta, messageTreeMeta, archiveTreeMeta, publicDataTreeMeta, nullifierTreeMeta); + + WorldStateMeta() = default; + WorldStateMeta(const TreeMeta& noteHashMeta, + const TreeMeta& messageMeta, + const TreeMeta& archiveMeta, + const TreeMeta& publicDataMeta, + const TreeMeta& nullifierMeta) + : noteHashTreeMeta(noteHashMeta) + , messageTreeMeta(messageMeta) + , archiveTreeMeta(archiveMeta) + , publicDataTreeMeta(publicDataMeta) + , nullifierTreeMeta(nullifierMeta) + {} + 
WorldStateMeta(const WorldStateMeta& other) = default; + WorldStateMeta(WorldStateMeta&& other) noexcept { *this = std::move(other); } + + WorldStateMeta& operator=(WorldStateMeta&& other) noexcept + { + if (this != &other) { + noteHashTreeMeta = std::move(other.noteHashTreeMeta); + messageTreeMeta = std::move(other.messageTreeMeta); + archiveTreeMeta = std::move(other.archiveTreeMeta); + publicDataTreeMeta = std::move(other.publicDataTreeMeta); + nullifierTreeMeta = std::move(other.nullifierTreeMeta); + } + return *this; + } + + ~WorldStateMeta() = default; + + bool operator==(const WorldStateMeta& other) const + { + return noteHashTreeMeta == other.noteHashTreeMeta && messageTreeMeta == other.messageTreeMeta && + archiveTreeMeta == other.archiveTreeMeta && publicDataTreeMeta == other.publicDataTreeMeta && + nullifierTreeMeta == other.nullifierTreeMeta; + } + + WorldStateMeta& operator=(const WorldStateMeta& other) = default; + + friend std::ostream& operator<<(std::ostream& os, const WorldStateMeta& stats) + { + os << "Note hash tree meta " << stats.noteHashTreeMeta << ", Message tree meta " << stats.messageTreeMeta + << ", Archive tree meta " << stats.archiveTreeMeta << ", Public Data tree meta " << stats.publicDataTreeMeta + << ", Nullifier tree meta " << stats.nullifierTreeMeta; + return os; + } +}; + struct WorldStateStatusFull { WorldStateStatusSummary summary; WorldStateDBStats dbStats; + WorldStateMeta meta; - MSGPACK_FIELDS(summary, dbStats); + MSGPACK_FIELDS(summary, dbStats, meta); WorldStateStatusFull() = default; + WorldStateStatusFull(const WorldStateStatusSummary& summary, + const WorldStateDBStats& dbStats, + const WorldStateMeta& meta) + : summary(summary) + , dbStats(dbStats) + , meta(meta) + {} WorldStateStatusFull(const WorldStateStatusFull& other) = default; WorldStateStatusFull(WorldStateStatusFull&& other) noexcept { *this = std::move(other); } @@ -149,6 +225,7 @@ struct WorldStateStatusFull { if (this != &other) { summary = std::move(other.summary); dbStats = std::move(other.dbStats); + meta = std::move(other.meta); } return *this; } @@ -159,12 +236,12 @@ struct WorldStateStatusFull { bool operator==(const WorldStateStatusFull& other) const { - return summary == other.summary && dbStats == other.dbStats; + return summary == other.summary && dbStats == other.dbStats && meta == other.meta; } friend std::ostream& operator<<(std::ostream& os, const WorldStateStatusFull& status) { - os << "Summary: " << status.summary << ", DB Stats " << status.dbStats; + os << "Summary: " << status.summary << ", DB Stats " << status.dbStats << ", Meta " << status.meta; return os; } }; diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index 39c9e7478a4..a61b0fe1914 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -409,8 +409,7 @@ void WorldState::update_archive(const StateReference& block_state_ref, } } -std::pair WorldState::commit(WorldStateStatusFull& status, - std::array& metaResponses) +std::pair WorldState::commit(WorldStateStatusFull& status) { // NOTE: the calling code is expected to ensure no other reads or writes happen during commit Fork::SharedPtr fork = retrieve_fork(CANONICAL_FORK_ID); @@ -420,12 +419,8 @@ std::pair WorldState::commit(WorldStateStatusFull& status, { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NULLIFIER_TREE)); - commit_tree(status.dbStats.nullifierTreeStats, - 
signal, - *wrapper.tree, - success, - message, - metaResponses[MerkleTreeId::NULLIFIER_TREE]); + commit_tree( + status.dbStats.nullifierTreeStats, signal, *wrapper.tree, success, message, status.meta.nullifierTreeMeta); } { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::PUBLIC_DATA_TREE)); @@ -434,37 +429,25 @@ std::pair WorldState::commit(WorldStateStatusFull& status, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::PUBLIC_DATA_TREE]); + status.meta.publicDataTreeMeta); } { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::NOTE_HASH_TREE)); - commit_tree(status.dbStats.noteHashTreeStats, - signal, - *wrapper.tree, - success, - message, - metaResponses[MerkleTreeId::NOTE_HASH_TREE]); + commit_tree( + status.dbStats.noteHashTreeStats, signal, *wrapper.tree, success, message, status.meta.noteHashTreeMeta); } { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::L1_TO_L2_MESSAGE_TREE)); - commit_tree(status.dbStats.messageTreeStats, - signal, - *wrapper.tree, - success, - message, - metaResponses[MerkleTreeId::L1_TO_L2_MESSAGE_TREE]); + commit_tree( + status.dbStats.messageTreeStats, signal, *wrapper.tree, success, message, status.meta.messageTreeMeta); } { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::ARCHIVE)); - commit_tree(status.dbStats.archiverTreeStats, - signal, - *wrapper.tree, - success, - message, - metaResponses[MerkleTreeId::ARCHIVE]); + commit_tree( + status.dbStats.archiveTreeStats, signal, *wrapper.tree, success, message, status.meta.archiveTreeMeta); } signal.wait_for_level(0); @@ -498,12 +481,11 @@ WorldStateStatusFull WorldState::sync_block( WorldStateStatusFull status; if (is_same_state_reference(WorldStateRevision::uncommitted(), block_state_ref) && is_archive_tip(WorldStateRevision::uncommitted(), block_header_hash)) { - std::array metaResponses; - std::pair result = commit(status, metaResponses); + std::pair result = commit(status); if (!result.first) { throw std::runtime_error(result.second); } - get_status_summary_from_meta_responses(status.summary, metaResponses); + populate_status_summary(status); return status; } rollback(); @@ -580,12 +562,11 @@ WorldStateStatusFull WorldState::sync_block( throw std::runtime_error("Can't synch block: block state does not match world state"); } - std::array metaResponses; - std::pair result = commit(status, metaResponses); + std::pair result = commit(status); if (!result.first) { throw std::runtime_error(result.second); } - get_status_summary_from_meta_responses(status.summary, metaResponses); + populate_status_summary(status); return status; } @@ -645,13 +626,12 @@ WorldStateStatusFull WorldState::unwind_blocks(const index_t& toBlockNumber) throw std::runtime_error("Unable to unwind block, block not found"); } WorldStateStatusFull status; - std::array metaResponses; for (index_t blockNumber = archive_state.meta.unfinalisedBlockHeight; blockNumber > toBlockNumber; blockNumber--) { - if (!unwind_block(blockNumber, status, metaResponses)) { + if (!unwind_block(blockNumber, status)) { throw std::runtime_error("Failed to unwind block"); } } - get_status_summary_from_meta_responses(status.summary, metaResponses); + populate_status_summary(status); return status; } WorldStateStatusFull WorldState::remove_historical_blocks(const index_t& toBlockNumber) @@ -662,13 +642,12 @@ WorldStateStatusFull WorldState::remove_historical_blocks(const index_t& toBlock throw std::runtime_error("Unable to remove historical block, block not found"); } WorldStateStatusFull status; - std::array metaResponses; for 
(index_t blockNumber = archive_state.meta.oldestHistoricBlock; blockNumber < toBlockNumber; blockNumber++) { - if (!remove_historical_block(blockNumber, status, metaResponses)) { + if (!remove_historical_block(blockNumber, status)) { throw std::runtime_error("Failed to remove historical block"); } } - get_status_summary_from_meta_responses(status.summary, metaResponses); + populate_status_summary(status); return status; } @@ -690,9 +669,7 @@ bool WorldState::set_finalised_block(const index_t& blockNumber) signal.wait_for_level(); return success; } -bool WorldState::unwind_block(const index_t& blockNumber, - WorldStateStatusFull& status, - std::array& metaResponses) +bool WorldState::unwind_block(const index_t& blockNumber, WorldStateStatusFull& status) { std::atomic_bool success = true; std::string message; @@ -705,7 +682,7 @@ bool WorldState::unwind_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::NULLIFIER_TREE], + status.meta.nullifierTreeMeta, blockNumber); } { @@ -715,7 +692,7 @@ bool WorldState::unwind_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::PUBLIC_DATA_TREE], + status.meta.publicDataTreeMeta, blockNumber); } @@ -726,7 +703,7 @@ bool WorldState::unwind_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::NOTE_HASH_TREE], + status.meta.noteHashTreeMeta, blockNumber); } @@ -737,27 +714,25 @@ bool WorldState::unwind_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::L1_TO_L2_MESSAGE_TREE], + status.meta.messageTreeMeta, blockNumber); } { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::ARCHIVE)); - unwind_tree(status.dbStats.archiverTreeStats, + unwind_tree(status.dbStats.archiveTreeStats, signal, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::ARCHIVE], + status.meta.archiveTreeMeta, blockNumber); } signal.wait_for_level(); remove_forks_for_block(blockNumber); return success; } -bool WorldState::remove_historical_block(const index_t& blockNumber, - WorldStateStatusFull& status, - std::array& metaResponses) +bool WorldState::remove_historical_block(const index_t& blockNumber, WorldStateStatusFull& status) { std::atomic_bool success = true; std::string message; @@ -770,7 +745,7 @@ bool WorldState::remove_historical_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::NULLIFIER_TREE], + status.meta.nullifierTreeMeta, blockNumber); } { @@ -780,7 +755,7 @@ bool WorldState::remove_historical_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::PUBLIC_DATA_TREE], + status.meta.publicDataTreeMeta, blockNumber); } @@ -791,7 +766,7 @@ bool WorldState::remove_historical_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::NOTE_HASH_TREE], + status.meta.noteHashTreeMeta, blockNumber); } @@ -802,18 +777,18 @@ bool WorldState::remove_historical_block(const index_t& blockNumber, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::L1_TO_L2_MESSAGE_TREE], + status.meta.messageTreeMeta, blockNumber); } { auto& wrapper = std::get>(fork->_trees.at(MerkleTreeId::ARCHIVE)); - remove_historic_block_for_tree(status.dbStats.archiverTreeStats, + remove_historic_block_for_tree(status.dbStats.archiveTreeStats, signal, *wrapper.tree, success, message, - metaResponses[MerkleTreeId::ARCHIVE], + status.meta.archiveTreeMeta, blockNumber); } signal.wait_for_level(); 
@@ -886,6 +861,18 @@ void WorldState::get_status_summary_from_meta_responses(WorldStateStatusSummary& status.treesAreSynched = determine_if_synched(metaResponses); } +void WorldState::populate_status_summary(WorldStateStatusFull& status) +{ + status.summary.finalisedBlockNumber = status.meta.archiveTreeMeta.finalisedBlockHeight; + status.summary.unfinalisedBlockNumber = status.meta.archiveTreeMeta.unfinalisedBlockHeight; + status.summary.oldestHistoricalBlock = status.meta.archiveTreeMeta.oldestHistoricBlock; + status.summary.treesAreSynched = + status.meta.messageTreeMeta.unfinalisedBlockHeight == status.summary.unfinalisedBlockNumber && + status.meta.noteHashTreeMeta.unfinalisedBlockHeight == status.summary.unfinalisedBlockNumber && + status.meta.nullifierTreeMeta.unfinalisedBlockHeight == status.summary.unfinalisedBlockNumber && + status.meta.publicDataTreeMeta.unfinalisedBlockHeight == status.summary.unfinalisedBlockNumber; +} + bool WorldState::is_same_state_reference(const WorldStateRevision& revision, const StateReference& state_ref) const { return state_ref == get_state_reference(revision); diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp index 0c884d14527..5c08a3d6559 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp @@ -200,7 +200,7 @@ class WorldState { /** * @brief Commits the current state of the world state. */ - std::pair commit(WorldStateStatusFull& status, std::array& metaResponses); + std::pair commit(WorldStateStatusFull& status); /** * @brief Rolls back any uncommitted changes made to the world state. @@ -243,12 +243,8 @@ class WorldState { Fork::SharedPtr create_new_fork(const index_t& blockNumber); void remove_forks_for_block(const index_t& blockNumber); - bool unwind_block(const index_t& blockNumber, - WorldStateStatusFull& status, - std::array& metaResponses); - bool remove_historical_block(const index_t& blockNumber, - WorldStateStatusFull& status, - std::array& metaResponses); + bool unwind_block(const index_t& blockNumber, WorldStateStatusFull& status); + bool remove_historical_block(const index_t& blockNumber, WorldStateStatusFull& status); bool set_finalised_block(const index_t& blockNumber); void get_all_tree_info(const WorldStateRevision& revision, std::array& responses) const; @@ -272,6 +268,8 @@ class WorldState { static void get_status_summary_from_meta_responses(WorldStateStatusSummary& status, std::array& metaResponses); + static void populate_status_summary(WorldStateStatusFull& status); + template void commit_tree(TreeDBStats& dbStats, Signal& signal, diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp index 78ff9945cc2..1967ec6c8b8 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp @@ -250,8 +250,7 @@ TEST_F(WorldStateTest, GetInitialStateReference) auto before_commit = ws.get_initial_state_reference(); ws.append_leaves(MerkleTreeId::NOTE_HASH_TREE, { 1 }); WorldStateStatusFull status; - std::array metaResponses; - ws.commit(status, metaResponses); + ws.commit(status); auto after_commit = ws.get_initial_state_reference(); @@ -286,8 +285,7 @@ TEST_F(WorldStateTest, AppendOnlyTrees) EXPECT_EQ(committed.meta.root, initial.meta.root); WorldStateStatusFull status; - std::array 
metaResponses; - ws.commit(status, metaResponses); + ws.commit(status); assert_leaf_value(ws, WorldStateRevision::committed(), tree_id, 0, fr(42)); assert_leaf_index(ws, WorldStateRevision::committed(), tree_id, fr(42), 0); @@ -337,8 +335,7 @@ TEST_F(WorldStateTest, AppendOnlyAllowDuplicates) assert_leaf_value(ws, WorldStateRevision::uncommitted(), tree_id, 2, fr(42)); WorldStateStatusFull status; - std::array metaRespoonses; - ws.commit(status, metaRespoonses); + ws.commit(status); assert_leaf_value(ws, WorldStateRevision::committed(), tree_id, 0, fr(42)); assert_leaf_value(ws, WorldStateRevision::committed(), tree_id, 1, fr(42)); @@ -360,8 +357,7 @@ TEST_F(WorldStateTest, NullifierTree) assert_leaf_value(ws, WorldStateRevision::uncommitted(), tree_id, 128, test_nullifier); WorldStateStatusFull status; - std::array metaRespoonses; - ws.commit(status, metaRespoonses); + ws.commit(status); auto test_leaf = ws.get_indexed_leaf(WorldStateRevision::committed(), tree_id, 128); // at this point 142 should be the biggest leaf so it wraps back to 0 @@ -392,8 +388,7 @@ TEST_F(WorldStateTest, NullifierTreeDuplicates) ws.append_leaves(tree_id, { test_nullifier }); WorldStateStatusFull status; - std::array metaRespoonses; - ws.commit(status, metaRespoonses); + ws.commit(status); assert_tree_size(ws, WorldStateRevision::committed(), tree_id, 129); EXPECT_THROW(ws.append_leaves(tree_id, { test_nullifier }), std::runtime_error); @@ -471,8 +466,7 @@ TEST_F(WorldStateTest, CommitsAndRollsBackAllTrees) ws.append_leaves(MerkleTreeId::PUBLIC_DATA_TREE, { PublicDataLeafValue(142, 1) }); WorldStateStatusFull status; - std::array metaRespoonses; - ws.commit(status, metaRespoonses); + ws.commit(status); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::NOTE_HASH_TREE, 0, fr(42)); assert_leaf_value(ws, WorldStateRevision::committed(), MerkleTreeId::L1_TO_L2_MESSAGE_TREE, 0, fr(42)); @@ -746,8 +740,7 @@ TEST_F(WorldStateTest, ForkingAtBlock0AndAdvancingCanonicalState) EXPECT_NE(fork_archive_state_after_insert.meta, canonical_archive_state_after_insert.meta); WorldStateStatusFull status; - std::array metaResponses; - ws.commit(status, metaResponses); + ws.commit(status); auto canonical_archive_state_after_commit = ws.get_tree_info(WorldStateRevision::committed(), MerkleTreeId::ARCHIVE); auto fork_archive_state_after_commit = ws.get_tree_info( diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp b/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp index 0911764b5b9..ce0e8c301a4 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp @@ -526,10 +526,11 @@ bool WorldStateAddon::commit(msgpack::object& obj, msgpack::sbuffer& buf) HeaderOnlyMessage request; obj.convert(request); - _ws->commit(); + WorldStateStatusFull status; + _ws->commit(status); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg(WorldStateMessageType::COMMIT, header, {}); + messaging::TypedMessage resp_msg(WorldStateMessageType::COMMIT, header, { status }); msgpack::pack(buf, resp_msg); return true; @@ -562,7 +563,7 @@ bool WorldStateAddon::sync_block(msgpack::object& obj, msgpack::sbuffer& buf) request.value.batchesOfPaddedPublicDataWrites); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg(WorldStateMessageType::SYNC_BLOCK, header, { status }); + messaging::TypedMessage resp_msg(WorldStateMessageType::SYNC_BLOCK, header, { status }); msgpack::pack(buf, 
resp_msg); return true; @@ -662,7 +663,7 @@ bool WorldStateAddon::get_status(msgpack::object& obj, msgpack::sbuffer& buf) co obj.convert(request); WorldStateStatusSummary status; - _ws->get_status(status); + _ws->get_status_summary(status); MsgHeader header(request.header.messageId); messaging::TypedMessage resp_msg(WorldStateMessageType::GET_STATUS, header, { status }); diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp b/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp index fa544ce8c89..4d686c9362e 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp @@ -192,11 +192,6 @@ struct SyncBlockRequest { batchesOfPaddedPublicDataWrites); }; -struct SyncBlockResponse { - WorldStateStatusSummary status; - MSGPACK_FIELDS(status); -}; - } // namespace bb::world_state MSGPACK_ADD_ENUM(bb::world_state::WorldStateMessageType) diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index bb8f2f265dc..d5c81f1813a 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -1,5 +1,5 @@ import { MerkleTreeId } from '@aztec/circuit-types'; -import { AppendOnlyTreeSnapshot, Fr, TreeLeafReadRequest, type StateReference, type UInt32 } from '@aztec/circuits.js'; +import { AppendOnlyTreeSnapshot, Fr, type StateReference, TreeLeafReadRequest, type UInt32 } from '@aztec/circuits.js'; import { type Tuple } from '@aztec/foundation/serialize'; export type MessageHeaderInit = { @@ -95,6 +95,19 @@ export interface WorldStateStatusSummary { treesAreSynched: boolean; } +export interface TreeMeta { + name: string; + depth: number; + size: bigint; + committedSize: bigint; + root: Fr; + initialSize: bigint; + initialRoot: Fr; + oldestHistoricBlock: bigint; + unfinalisedBlockHeight: bigint; + finalisedBlockHeight: bigint; +} + export interface DBStats { /** The name of the DB */ name: string; @@ -119,6 +132,19 @@ export interface TreeDBStats { leafIndicesDBStats: DBStats; } +export interface WorldStateMeta { + /** Tree meta for the note hash tree */ + noteHashTreeMeta: TreeMeta; + /** Tree meta for the message tree */ + messageTreeMeta: TreeMeta; + /** Tree meta for the archive tree */ + archiveTreeMeta: TreeMeta; + /** Tree meta for the public data tree */ + publicDataTreeMeta: TreeMeta; + /** Tree meta for the nullifier tree */ + nullifierTreeMeta: TreeMeta; +} + export interface WorldStateDBStats { /** Full stats for the note hash tree */ noteHashTreeStats: TreeDBStats; @@ -135,6 +161,7 @@ export interface WorldStateDBStats { export interface WorldStateStatusFull { summary: WorldStateStatusSummary; dbStats: WorldStateDBStats; + meta: WorldStateMeta; } interface WithForkId { From 75e847206cbc31e9cf97586b020c9885fc614369 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 11:42:00 +0000 Subject: [PATCH 10/31] WIP --- .../world-state/src/native/message.ts | 83 ++++++++++++++++++- .../src/native/native_world_state.test.ts | 44 +++++----- .../src/native/native_world_state.ts | 15 ++-- .../src/native/native_world_state_cmp.test.ts | 2 +- .../server_world_state_synchronizer.test.ts | 4 +- .../server_world_state_synchronizer.ts | 4 +- .../world-state/src/test/integration.test.ts | 1 + .../src/world-state-db/merkle_tree_db.ts | 14 ++-- .../src/world-state-db/merkle_trees.ts | 20 +++-- 9 files changed, 137 insertions(+), 50 deletions(-) diff --git 
a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index d5c81f1813a..deb6d7c11a4 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -96,15 +96,25 @@ export interface WorldStateStatusSummary { } export interface TreeMeta { + /** The name of the tree */ name: string; + /** The depth of the tree */ depth: number; + /** The current size of the tree (number of leaves) */ size: bigint; + /** The committed size of the tree */ committedSize: bigint; + /** The current root of the tree */ root: Fr; + /** The tree's initial size */ initialSize: bigint; + /** The tree's initial root value */ initialRoot: Fr; + /** The current oldest historical block number of the tree */ oldestHistoricBlock: bigint; + /** The current unfinalised block number of the tree */ unfinalisedBlockHeight: bigint; + /** The current finalised block number of the tree */ finalisedBlockHeight: bigint; } @@ -164,6 +174,77 @@ export interface WorldStateStatusFull { meta: WorldStateMeta; } +export function buildEmptyDBStats() { + return { + name: '', + numDataItems: 0n, + totalUsedSize: 0n, + } as DBStats; +} + +export function buildEmptyTreeDBStats() { + return { + mapSize: 0n, + blocksDBStats: buildEmptyDBStats(), + nodesDBStats: buildEmptyDBStats(), + leafIndicesDBStats: buildEmptyDBStats(), + leafKeysDBStats: buildEmptyDBStats(), + leafPreimagesDBStats: buildEmptyDBStats(), + } as TreeDBStats; +} + +export function buildEmptyTreeMeta() { + return { + name: '', + depth: 0, + size: 0n, + committedSize: 0n, + unfinalisedBlockHeight: 0n, + finalisedBlockHeight: 0n, + oldestHistoricBlock: 0n, + root: Fr.ZERO, + initialRoot: Fr.ZERO, + initialSize: 0n, + } as TreeMeta; +} + +export function buildEmptyWorldStateMeta() { + return { + noteHashTreeMeta: buildEmptyTreeMeta(), + messageTreeMeta: buildEmptyTreeMeta(), + publicDataTreeMeta: buildEmptyTreeMeta(), + nullifierTreeMeta: buildEmptyTreeMeta(), + archiveTreeMeta: buildEmptyTreeMeta(), + } as WorldStateMeta; +} + +export function buildEmptyWorldStateDBStats() { + return { + noteHashTreeStats: buildEmptyTreeDBStats(), + archiveTreeStats: buildEmptyTreeDBStats(), + messageTreeStats: buildEmptyTreeDBStats(), + publicDataTreeStats: buildEmptyTreeDBStats(), + nullifierTreeStats: buildEmptyTreeDBStats(), + } as WorldStateDBStats; +} + +export function buildEmptyWorldStateSummary() { + return { + unfinalisedBlockNumber: 0n, + finalisedBlockNumber: 0n, + oldestHistoricalBlock: 0n, + treesAreSynched: true, + } as WorldStateStatusSummary; +} + +export function buildEmptyWorldStateStatusFull() { + return { + meta: buildEmptyWorldStateMeta(), + dbStats: buildEmptyWorldStateDBStats(), + summary: buildEmptyWorldStateSummary(), + } as WorldStateStatusFull; +} + interface WithForkId { forkId: number; } @@ -346,7 +427,7 @@ export type WorldStateResponse = { [WorldStateMessageType.REMOVE_HISTORICAL_BLOCKS]: WorldStateStatusFull; [WorldStateMessageType.UNWIND_BLOCKS]: WorldStateStatusFull; - [WorldStateMessageType.FINALISE_BLOCKS]: WorldStateStatusFull; + [WorldStateMessageType.FINALISE_BLOCKS]: WorldStateStatusSummary; [WorldStateMessageType.GET_STATUS]: WorldStateStatusSummary; diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 7abb85a17a1..f51a0c7ecb1 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts 
@@ -67,7 +67,7 @@ describe('NativeWorldState', () => { it('Fails to sync further blocks if trees are out of sync', async () => { // open ws against the same data dir but a different rollup const rollupAddress = EthAddress.random(); - let ws = await NativeWorldStateService.new(rollupAddress, dataDir, 1024); + const ws = await NativeWorldStateService.new(rollupAddress, dataDir, 1024); const initialFork = await ws.fork(); const { block: block1, messages: messages1 } = await mockBlock(1, 8, initialFork); @@ -81,12 +81,14 @@ describe('NativeWorldState', () => { await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow(); // Commits should always fail now, the trees are in an inconsistent state - await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow("World state trees are out of sync"); - await expect(ws.handleL2BlockAndMessages(block3, messages3)).rejects.toThrow("World state trees are out of sync"); + await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow('World state trees are out of sync'); + await expect(ws.handleL2BlockAndMessages(block3, messages3)).rejects.toThrow('World state trees are out of sync'); // Creating another world state instance should fail await ws.close(); - await expect(NativeWorldStateService.new(rollupAddress, dataDir, 1024)).rejects.toThrow("World state trees are out of sync"); + await expect(NativeWorldStateService.new(rollupAddress, dataDir, 1024)).rejects.toThrow( + 'World state trees are out of sync', + ); }); }); @@ -163,7 +165,7 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.unfinalisedBlockNumber).toBe(blockNumber); + expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); } const forkAtZero = await ws.fork(0); @@ -191,8 +193,8 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.unfinalisedBlockNumber).toBe(blockNumber); - expect(status.oldestHistoricalBlock).toBe(1); + expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); + expect(status.summary.oldestHistoricalBlock).toBe(1); if (provenBlock > 0) { const provenStatus = await ws.setFinalised(BigInt(provenBlock)); @@ -200,7 +202,7 @@ describe('NativeWorldState', () => { expect(provenStatus.finalisedBlockNumber).toBe(provenBlock); expect(provenStatus.oldestHistoricalBlock).toBe(1); } else { - expect(status.finalisedBlockNumber).toBe(0); + expect(status.summary.finalisedBlockNumber).toBe(0); } } }, 30_000); @@ -213,9 +215,9 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.unfinalisedBlockNumber).toBe(blockNumber); - expect(status.oldestHistoricalBlock).toBe(1); - expect(status.finalisedBlockNumber).toBe(0); + expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); + expect(status.summary.oldestHistoricalBlock).toBe(1); + expect(status.summary.finalisedBlockNumber).toBe(0); } const status = await ws.setFinalised(8n); @@ -237,7 +239,7 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.unfinalisedBlockNumber).toBe(blockNumber); + 
expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); const blockFork = await ws.fork(); forks.push(blockFork); @@ -246,14 +248,14 @@ describe('NativeWorldState', () => { const provenStatus = await ws.setFinalised(BigInt(provenBlock)); expect(provenStatus.finalisedBlockNumber).toBe(provenBlock); } else { - expect(status.finalisedBlockNumber).toBe(0); + expect(status.summary.finalisedBlockNumber).toBe(0); } if (prunedBlockNumber > 0) { const prunedStatus = await ws.removeHistoricalBlocks(BigInt(prunedBlockNumber + 1)); - expect(prunedStatus.oldestHistoricalBlock).toBe(prunedBlockNumber + 1); + expect(prunedStatus.summary.oldestHistoricalBlock).toBe(prunedBlockNumber + 1); } else { - expect(status.oldestHistoricalBlock).toBe(1); + expect(status.summary.oldestHistoricalBlock).toBe(1); } } @@ -299,10 +301,8 @@ describe('NativeWorldState', () => { siblingPaths.push(siblingPath); if (blockNumber < 9) { - await nonReorgState.handleL2BlockAndMessages(block, messages); - const statusNonReorg = await nonReorgState.handleL2BlockAndMessages(block, messages); - expect(status).toEqual(statusNonReorg); + expect(status.summary).toEqual(statusNonReorg.summary); const treeInfoNonReorg = await nonReorgState.getCommitted().getTreeInfo(MerkleTreeId.NULLIFIER_TREE); expect(treeInfo).toEqual(treeInfoNonReorg); @@ -321,7 +321,7 @@ describe('NativeWorldState', () => { .getSiblingPath(MerkleTreeId.NULLIFIER_TREE, 0n); expect(unwindTreeInfo).toEqual(blockTreeInfos[blockNumber - 2]); - expect(unwindStatus).toEqual(blockStats[blockNumber - 2]); + expect(unwindStatus.summary).toEqual(blockStats[blockNumber - 2].summary); expect(await unwindFork.getTreeInfo(MerkleTreeId.NULLIFIER_TREE)).toEqual( await blockForks[blockNumber - 2].getTreeInfo(MerkleTreeId.NULLIFIER_TREE), ); @@ -338,10 +338,10 @@ describe('NativeWorldState', () => { const unwoundFork = await ws.fork(); const unwoundTreeInfo = await ws.getCommitted().getTreeInfo(MerkleTreeId.NULLIFIER_TREE); - const unwoundStatus = await ws.getStatus(); + const unwoundStatus = await ws.getStatusSummary(); const unwoundSiblingPath = await ws.getCommitted().getSiblingPath(MerkleTreeId.NULLIFIER_TREE, 0n); - expect(unwoundStatus).toEqual(blockStats[7]); + expect(unwoundStatus).toEqual(blockStats[7].summary); expect(unwoundTreeInfo).toEqual(blockTreeInfos[7]); expect(await ws.getCommitted().getTreeInfo(MerkleTreeId.NULLIFIER_TREE)).toEqual(blockTreeInfos[7]); expect(await unwoundFork.getTreeInfo(MerkleTreeId.NULLIFIER_TREE)).toEqual(blockTreeInfos[7]); @@ -363,7 +363,7 @@ describe('NativeWorldState', () => { siblingPaths[i] = siblingPath; const statusNonReorg = await nonReorgState.handleL2BlockAndMessages(block, messages); - expect(status).toEqual(statusNonReorg); + expect(status.summary).toEqual(statusNonReorg.summary); } // compare snapshot across the chains diff --git a/yarn-project/world-state/src/native/native_world_state.ts b/yarn-project/world-state/src/native/native_world_state.ts index 5b944c0823b..3ecf2326b82 100644 --- a/yarn-project/world-state/src/native/native_world_state.ts +++ b/yarn-project/world-state/src/native/native_world_state.ts @@ -32,8 +32,7 @@ import { type MerkleTreeAdminDatabase as MerkleTreeDatabase } from '../world-sta import { MerkleTreesFacade, MerkleTreesForkFacade, serializeLeaf } from './merkle_trees_facade.js'; import { WorldStateMessageType, - WorldStateStatusFull, - type WorldStateStatusSummary, + type WorldStateStatusFull, blockStateReference, treeStateReferenceToSnapshot, worldStateRevision, @@ -74,7 +73,7 @@ export class 
NativeWorldStateService implements MerkleTreeDatabase { const worldState = new this(instance, log, cleanup); try { await worldState.init(); - } catch(e) { + } catch (e) { log.error(`Error initialising world state: ${e}`); throw e; } @@ -102,9 +101,9 @@ export class NativeWorldStateService implements MerkleTreeDatabase { } protected async init() { - const status = await this.getStatus(); + const status = await this.getStatusSummary(); if (!status.treesAreSynched) { - throw new Error("World state trees are out of sync, please delete your data directory and re-sync"); + throw new Error('World state trees are out of sync, please delete your data directory and re-sync'); } this.initialHeader = await this.buildInitialHeader(); const committed = this.getCommitted(); @@ -141,7 +140,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { return this.initialHeader!; } - public async handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { + public async handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { // We have to pad both the tx effects and the values within tx effects because that's how the trees are built // by circuits. const paddedTxEffects = padArrayEnd( @@ -181,7 +180,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { batchesOfPaddedPublicDataWrites: batchesOfPaddedPublicDataWrites.map(batch => batch.map(serializeLeaf)), blockStateRef: blockStateReference(l2Block.header.state), }); - return response.summary; + return response; } public async close(): Promise { @@ -227,7 +226,7 @@ export class NativeWorldStateService implements MerkleTreeDatabase { }); } - public async getStatus() { + public async getStatusSummary() { return await this.instance.call(WorldStateMessageType.GET_STATUS, void 0); } diff --git a/yarn-project/world-state/src/native/native_world_state_cmp.test.ts b/yarn-project/world-state/src/native/native_world_state_cmp.test.ts index 803ea9f28e9..32e5a77e01e 100644 --- a/yarn-project/world-state/src/native/native_world_state_cmp.test.ts +++ b/yarn-project/world-state/src/native/native_world_state_cmp.test.ts @@ -48,7 +48,7 @@ describe('NativeWorldState', () => { }); beforeAll(async () => { - nativeWS = await NativeWorldStateService.new(EthAddress.random(), nativeDataDir); + nativeWS = await NativeWorldStateService.new(EthAddress.random(), nativeDataDir, 1024 * 1024); legacyWS = await MerkleTrees.new(AztecLmdbStore.open(legacyDataDir), new NoopTelemetryClient()); }); diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts index e4e0ec7384a..a8e0a3098bb 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts @@ -17,6 +17,7 @@ import { jest } from '@jest/globals'; import { type MockProxy, mock } from 'jest-mock-extended'; import { type MerkleTreeAdminDatabase, type WorldStateConfig } from '../index.js'; +import { buildEmptyWorldStateStatusFull } from '../native/message.js'; import { ServerWorldStateSynchronizer } from './server_world_state_synchronizer.js'; describe('ServerWorldStateSynchronizer', () => { @@ -62,7 +63,7 @@ describe('ServerWorldStateSynchronizer', () => { merkleTreeDb.getCommitted.mockReturnValue(merkleTreeRead); merkleTreeDb.handleL2BlockAndMessages.mockImplementation((l2Block: L2Block) => { latestHandledBlockNumber = l2Block.number; - 
return Promise.resolve({ unfinalisedBlockNumber: 0n, finalisedBlockNumber: 0n, oldestHistoricalBlock: 0n }); + return Promise.resolve(buildEmptyWorldStateStatusFull()); }); latestHandledBlockNumber = 0; @@ -73,6 +74,7 @@ describe('ServerWorldStateSynchronizer', () => { const config: WorldStateConfig = { worldStateBlockCheckIntervalMS: 100, worldStateProvenBlocksOnly: false, + worldStateDbMapSizeKb: 1024 * 1024, }; server = new TestWorldStateSynchronizer(merkleTreeDb, blockAndMessagesSource, config, l2BlockStream); diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts index f13ab087b9d..1678b22e41a 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts @@ -163,7 +163,7 @@ export class ServerWorldStateSynchronizer /** Returns the latest L2 block number for each tip of the chain (latest, proven, finalized). */ public async getL2Tips(): Promise { - const status = await this.merkleTreeDb.getStatus(); + const status = await this.merkleTreeDb.getStatusSummary(); const unfinalisedBlockHash = await this.getL2BlockHash(Number(status.unfinalisedBlockNumber)); const latestBlockId: L2BlockId = { number: Number(status.unfinalisedBlockNumber), hash: unfinalisedBlockHash! }; @@ -240,7 +240,7 @@ export class ServerWorldStateSynchronizer this.syncPromise.resolve(); } - return result; + return result.summary; } private async handleChainFinalized(blockNumber: number) { diff --git a/yarn-project/world-state/src/test/integration.test.ts b/yarn-project/world-state/src/test/integration.test.ts index 259a5de9839..3a6752f011f 100644 --- a/yarn-project/world-state/src/test/integration.test.ts +++ b/yarn-project/world-state/src/test/integration.test.ts @@ -42,6 +42,7 @@ describe('world-state integration', () => { worldStateBlockCheckIntervalMS: 20, worldStateProvenBlocksOnly: false, worldStateBlockRequestBatchSize: 5, + worldStateDbMapSizeKb: 1024 * 1024, }; archiver = new MockPrefilledArchiver(blocks, messages); diff --git a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts index edc73b6eeb4..33dd884996e 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts @@ -3,7 +3,7 @@ import { type MerkleTreeReadOperations, type MerkleTreeWriteOperations } from '@ import { type Fr, MAX_NULLIFIERS_PER_TX, MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX } from '@aztec/circuits.js'; import { type IndexedTreeSnapshot, type TreeSnapshot } from '@aztec/merkle-tree'; -import { type WorldStateStatusSummary } from '../native/message.js'; +import { WorldStateStatusFull, type WorldStateStatusSummary } from '../native/message.js'; /** * @@ -38,7 +38,7 @@ export interface MerkleTreeAdminDatabase { * @param block - The L2 block to handle. * @param l1ToL2Messages - The L1 to L2 messages for the block. 
*/ - handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise; + handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise; /** * Gets a handle that allows reading the latest committed state @@ -62,27 +62,27 @@ export interface MerkleTreeAdminDatabase { * @param toBlockNumber The block number of the new oldest historical block * @returns The new WorldStateStatus */ - removeHistoricalBlocks(toBlockNumber: bigint): Promise; + removeHistoricalBlocks(toBlockNumber: bigint): Promise; /** * Removes all pending blocks down to but not including the given block number * @param toBlockNumber The block number of the new tip of the pending chain, * @returns The new WorldStateStatus */ - unwindBlocks(toBlockNumber: bigint): Promise; + unwindBlocks(toBlockNumber: bigint): Promise; /** * Advances the finalised block number to be the number provided * @param toBlockNumber The block number that is now the tip of the finalised chain * @returns The new WorldStateStatus */ - setFinalised(toBlockNumber: bigint): Promise; + setFinalised(toBlockNumber: bigint): Promise; /** - * Gets the current status of the database. + * Gets the current status summary of the database. * @returns The current WorldStateStatus. */ - getStatus(): Promise; + getStatusSummary(): Promise; /** Stops the database */ close(): Promise; diff --git a/yarn-project/world-state/src/world-state-db/merkle_trees.ts b/yarn-project/world-state/src/world-state-db/merkle_trees.ts index 18576f89f5f..2d3d2a4171e 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_trees.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_trees.ts @@ -51,7 +51,11 @@ import { type TelemetryClient } from '@aztec/telemetry-client'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; import { type Hasher } from '@aztec/types/interfaces'; -import { type WorldStateStatusSummary } from '../native/message.js'; +import { + type WorldStateStatusFull, + type WorldStateStatusSummary, + buildEmptyWorldStateStatusFull, +} from '../native/message.js'; import { INITIAL_NULLIFIER_TREE_SIZE, INITIAL_PUBLIC_DATA_TREE_SIZE, @@ -198,19 +202,19 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { } } - public removeHistoricalBlocks(_toBlockNumber: bigint): Promise { + public removeHistoricalBlocks(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } - public unwindBlocks(_toBlockNumber: bigint): Promise { + public unwindBlocks(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } - public setFinalised(_toBlockNumber: bigint): Promise { + public setFinalised(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } - public getStatus(): Promise { + public getStatusSummary(): Promise { throw new Error('Method not implemented.'); } @@ -467,7 +471,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { * @param l1ToL2Messages - The L1 to L2 messages for the block. * @returns Whether the block handled was produced by this same node. */ - public async handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise { + public async handleL2BlockAndMessages(block: L2Block, l1ToL2Messages: Fr[]): Promise { return await this.synchronize(() => this.#handleL2BlockAndMessages(block, l1ToL2Messages)); } @@ -617,7 +621,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { * @param l2Block - The L2 block to handle. * @param l1ToL2Messages - The L1 to L2 messages for the block. 
*/ - async #handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { + async #handleL2BlockAndMessages(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise { const timer = new Timer(); const treeRootWithIdPairs = [ @@ -710,7 +714,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { this.metrics.recordDbSize(this.store.estimateSize().bytes); this.metrics.recordSyncDuration('commit', timer); - return { unfinalisedBlockNumber: 0n, finalisedBlockNumber: 0n, oldestHistoricalBlock: 0n } as WorldStateStatusSummary; + return buildEmptyWorldStateStatusFull(); } #isDbPopulated(): boolean { From 14fa37bfa246dd217d7b02d6e18c2eedbe7de8f0 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 12:54:45 +0000 Subject: [PATCH 11/31] WIP --- .../world-state/src/native/message.ts | 58 ++++++++ .../src/native/native_world_state.test.ts | 134 +++++++++++++++++- .../src/native/native_world_state.ts | 9 +- .../src/world-state-db/merkle_tree_db.ts | 2 +- .../src/world-state-db/merkle_trees.ts | 2 +- 5 files changed, 199 insertions(+), 6 deletions(-) diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index deb6d7c11a4..c520746c27f 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -245,6 +245,64 @@ export function buildEmptyWorldStateStatusFull() { } as WorldStateStatusFull; } +export function sanitiseSummary(summary: WorldStateStatusSummary) { + summary.finalisedBlockNumber = BigInt(summary.finalisedBlockNumber); + summary.unfinalisedBlockNumber = BigInt(summary.unfinalisedBlockNumber); + summary.oldestHistoricalBlock = BigInt(summary.oldestHistoricalBlock); + return summary; +} + +export function sanitiseDBStats(stats: DBStats) { + stats.numDataItems = BigInt(stats.numDataItems); + stats.totalUsedSize = BigInt(stats.totalUsedSize); + return stats; +} + +export function sanitiseMeta(meta: TreeMeta) { + meta.committedSize = BigInt(meta.committedSize); + meta.finalisedBlockHeight = BigInt(meta.finalisedBlockHeight); + meta.initialSize = BigInt(meta.initialSize); + meta.oldestHistoricBlock = BigInt(meta.oldestHistoricBlock); + meta.size = BigInt(meta.size); + meta.unfinalisedBlockHeight = BigInt(meta.unfinalisedBlockHeight); + return meta; +} + +export function sanitiseTreeDBStats(stats: TreeDBStats) { + stats.blocksDBStats = sanitiseDBStats(stats.blocksDBStats); + stats.leafIndicesDBStats = sanitiseDBStats(stats.leafIndicesDBStats); + stats.leafKeysDBStats = sanitiseDBStats(stats.leafKeysDBStats); + stats.leafPreimagesDBStats = sanitiseDBStats(stats.leafPreimagesDBStats); + stats.nodesDBStats = sanitiseDBStats(stats.nodesDBStats); + stats.mapSize = BigInt(stats.mapSize); + return stats; +} + +export function sanitiseWorldStateDBStats(stats: WorldStateDBStats) { + stats.archiveTreeStats = sanitiseTreeDBStats(stats.archiveTreeStats); + stats.messageTreeStats = sanitiseTreeDBStats(stats.messageTreeStats); + stats.noteHashTreeStats = sanitiseTreeDBStats(stats.noteHashTreeStats); + stats.nullifierTreeStats = sanitiseTreeDBStats(stats.nullifierTreeStats); + stats.publicDataTreeStats = sanitiseTreeDBStats(stats.publicDataTreeStats); + return stats; +} + +export function sanitiseWorldStateTreeMeta(meta: WorldStateMeta) { + meta.archiveTreeMeta = sanitiseMeta(meta.archiveTreeMeta); + meta.messageTreeMeta = sanitiseMeta(meta.messageTreeMeta); + meta.noteHashTreeMeta = sanitiseMeta(meta.noteHashTreeMeta); + meta.nullifierTreeMeta = 
sanitiseMeta(meta.nullifierTreeMeta); + meta.publicDataTreeMeta = sanitiseMeta(meta.publicDataTreeMeta); + return meta; +} + +export function sanitiseFullStatus(status: WorldStateStatusFull) { + status.dbStats = sanitiseWorldStateDBStats(status.dbStats); + status.summary = sanitiseSummary(status.summary); + status.meta = sanitiseWorldStateTreeMeta(status.meta); + return status; +} + interface WithForkId { forkId: number; } diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index f51a0c7ecb1..5707d0be35e 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -1,5 +1,19 @@ import { type L2Block, MerkleTreeId } from '@aztec/circuit-types'; -import { AppendOnlyTreeSnapshot, EthAddress, Fr, Header } from '@aztec/circuits.js'; +import { + ARCHIVE_HEIGHT, + AppendOnlyTreeSnapshot, + EthAddress, + Fr, + Header, + L1_TO_L2_MSG_TREE_HEIGHT, + MAX_L2_TO_L1_MSGS_PER_TX, + MAX_NOTE_HASHES_PER_TX, + MAX_NULLIFIERS_PER_TX, + MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, + NOTE_HASH_TREE_HEIGHT, + NULLIFIER_TREE_HEIGHT, + PUBLIC_DATA_TREE_HEIGHT, +} from '@aztec/circuits.js'; import { makeContentCommitment, makeGlobalVariables } from '@aztec/circuits.js/testing'; import { mkdtemp, rm } from 'fs/promises'; @@ -7,6 +21,8 @@ import { tmpdir } from 'os'; import { join } from 'path'; import { assertSameState, compareChains, mockBlock } from '../test/utils.js'; +import { INITIAL_NULLIFIER_TREE_SIZE, INITIAL_PUBLIC_DATA_TREE_SIZE } from '../world-state-db/merkle_tree_db.js'; +import { type WorldStateStatusSummary } from './message.js'; import { NativeWorldStateService } from './native_world_state.js'; describe('NativeWorldState', () => { @@ -409,4 +425,120 @@ describe('NativeWorldState', () => { } }, 30_000); }); + + describe('status reporting', () => { + let block: L2Block; + let messages: Fr[]; + + it('correctly reports status', async () => { + const ws = await NativeWorldStateService.new(rollupAddress, dataDir, defaultDBMapSize); + const statuses = []; + for (let i = 0; i < 2; i++) { + const fork = await ws.fork(); + ({ block, messages } = await mockBlock(1, 2, fork)); + await fork.close(); + const status = await ws.handleL2BlockAndMessages(block, messages); + statuses.push(status); + + expect(status.summary).toEqual({ + unfinalisedBlockNumber: BigInt(i + 1), + finalisedBlockNumber: 0n, + oldestHistoricalBlock: 1n, + treesAreSynched: true, + } as WorldStateStatusSummary); + + expect(status.meta.archiveTreeMeta).toMatchObject({ + depth: ARCHIVE_HEIGHT, + size: BigInt(i + 2), + committedSize: BigInt(i + 2), + initialSize: BigInt(1), + oldestHistoricBlock: 1n, + unfinalisedBlockHeight: BigInt(i + 1), + finalisedBlockHeight: 0n, + }); + + expect(status.meta.noteHashTreeMeta).toMatchObject({ + depth: NOTE_HASH_TREE_HEIGHT, + size: BigInt(2 * MAX_NOTE_HASHES_PER_TX * (i + 1)), + committedSize: BigInt(2 * MAX_NOTE_HASHES_PER_TX * (i + 1)), + initialSize: BigInt(0), + oldestHistoricBlock: 1n, + unfinalisedBlockHeight: BigInt(i + 1), + finalisedBlockHeight: 0n, + }); + + expect(status.meta.nullifierTreeMeta).toMatchObject({ + depth: NULLIFIER_TREE_HEIGHT, + size: BigInt(2 * MAX_NULLIFIERS_PER_TX * (i + 1) + INITIAL_NULLIFIER_TREE_SIZE), + committedSize: BigInt(2 * MAX_NULLIFIERS_PER_TX * (i + 1) + INITIAL_NULLIFIER_TREE_SIZE), + initialSize: BigInt(INITIAL_NULLIFIER_TREE_SIZE), + oldestHistoricBlock: 1n, + unfinalisedBlockHeight: BigInt(i + 1), 
+ finalisedBlockHeight: 0n, + }); + + expect(status.meta.publicDataTreeMeta).toMatchObject({ + depth: PUBLIC_DATA_TREE_HEIGHT, + size: BigInt(2 * (MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX + 1) * (i + 1) + INITIAL_PUBLIC_DATA_TREE_SIZE), + committedSize: BigInt( + 2 * (MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX + 1) * (i + 1) + INITIAL_PUBLIC_DATA_TREE_SIZE, + ), + initialSize: BigInt(INITIAL_PUBLIC_DATA_TREE_SIZE), + oldestHistoricBlock: 1n, + unfinalisedBlockHeight: BigInt(i + 1), + finalisedBlockHeight: 0n, + }); + + expect(status.meta.messageTreeMeta).toMatchObject({ + depth: L1_TO_L2_MSG_TREE_HEIGHT, + size: BigInt(2 * MAX_L2_TO_L1_MSGS_PER_TX * (i + 1)), + committedSize: BigInt(2 * MAX_L2_TO_L1_MSGS_PER_TX * (i + 1)), + initialSize: BigInt(0), + oldestHistoricBlock: 1n, + unfinalisedBlockHeight: BigInt(i + 1), + finalisedBlockHeight: 0n, + }); + } + + expect(statuses[1].dbStats.archiveTreeStats.nodesDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.archiveTreeStats.nodesDBStats.numDataItems, + ); + expect(statuses[1].dbStats.archiveTreeStats.blocksDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.archiveTreeStats.blocksDBStats.numDataItems, + ); + expect(statuses[1].dbStats.messageTreeStats.nodesDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.messageTreeStats.nodesDBStats.numDataItems, + ); + expect(statuses[1].dbStats.messageTreeStats.blocksDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.messageTreeStats.blocksDBStats.numDataItems, + ); + expect(statuses[1].dbStats.noteHashTreeStats.nodesDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.noteHashTreeStats.nodesDBStats.numDataItems, + ); + expect(statuses[1].dbStats.noteHashTreeStats.blocksDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.noteHashTreeStats.blocksDBStats.numDataItems, + ); + expect(statuses[1].dbStats.nullifierTreeStats.nodesDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.nullifierTreeStats.nodesDBStats.numDataItems, + ); + expect(statuses[1].dbStats.nullifierTreeStats.blocksDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.nullifierTreeStats.blocksDBStats.numDataItems, + ); + expect(statuses[1].dbStats.publicDataTreeStats.nodesDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.publicDataTreeStats.nodesDBStats.numDataItems, + ); + expect(statuses[1].dbStats.publicDataTreeStats.blocksDBStats.numDataItems).toBeGreaterThan( + statuses[0].dbStats.publicDataTreeStats.blocksDBStats.numDataItems, + ); + + const mapSizeBytes = BigInt(1024 * defaultDBMapSize); + expect(statuses[0].dbStats.archiveTreeStats.mapSize).toBe(mapSizeBytes); + expect(statuses[0].dbStats.messageTreeStats.mapSize).toBe(mapSizeBytes); + expect(statuses[0].dbStats.nullifierTreeStats.mapSize).toBe(mapSizeBytes); + expect(statuses[0].dbStats.noteHashTreeStats.mapSize).toBe(mapSizeBytes); + expect(statuses[0].dbStats.publicDataTreeStats.mapSize).toBe(mapSizeBytes); + + await ws.close(); + }, 30_000); + }); }); diff --git a/yarn-project/world-state/src/native/native_world_state.ts b/yarn-project/world-state/src/native/native_world_state.ts index 3ecf2326b82..0de27f10c37 100644 --- a/yarn-project/world-state/src/native/native_world_state.ts +++ b/yarn-project/world-state/src/native/native_world_state.ts @@ -34,6 +34,7 @@ import { WorldStateMessageType, type WorldStateStatusFull, blockStateReference, + sanitiseFullStatus, treeStateReferenceToSnapshot, worldStateRevision, } from './message.js'; @@ -180,7 +181,7 @@ export class NativeWorldStateService implements 
MerkleTreeDatabase { batchesOfPaddedPublicDataWrites: batchesOfPaddedPublicDataWrites.map(batch => batch.map(serializeLeaf)), blockStateRef: blockStateReference(l2Block.header.state), }); - return response; + return sanitiseFullStatus(response); } public async close(): Promise { @@ -210,9 +211,10 @@ export class NativeWorldStateService implements MerkleTreeDatabase { * @returns The new WorldStateStatus */ public async removeHistoricalBlocks(toBlockNumber: bigint) { - return await this.instance.call(WorldStateMessageType.REMOVE_HISTORICAL_BLOCKS, { + const response = await this.instance.call(WorldStateMessageType.REMOVE_HISTORICAL_BLOCKS, { toBlockNumber, }); + return sanitiseFullStatus(response); } /** @@ -221,9 +223,10 @@ export class NativeWorldStateService implements MerkleTreeDatabase { * @returns The new WorldStateStatus */ public async unwindBlocks(toBlockNumber: bigint) { - return await this.instance.call(WorldStateMessageType.UNWIND_BLOCKS, { + const response = await this.instance.call(WorldStateMessageType.UNWIND_BLOCKS, { toBlockNumber, }); + return sanitiseFullStatus(response); } public async getStatusSummary() { diff --git a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts index 33dd884996e..2adabfd5835 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts @@ -76,7 +76,7 @@ export interface MerkleTreeAdminDatabase { * @param toBlockNumber The block number that is now the tip of the finalised chain * @returns The new WorldStateStatus */ - setFinalised(toBlockNumber: bigint): Promise; + setFinalised(toBlockNumber: bigint): Promise; /** * Gets the current status summary of the database. 
diff --git a/yarn-project/world-state/src/world-state-db/merkle_trees.ts b/yarn-project/world-state/src/world-state-db/merkle_trees.ts index 2d3d2a4171e..335efdee061 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_trees.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_trees.ts @@ -210,7 +210,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { throw new Error('Method not implemented.'); } - public setFinalised(_toBlockNumber: bigint): Promise { + public setFinalised(_toBlockNumber: bigint): Promise { throw new Error('Method not implemented.'); } From 21a5a1caa8cab2545792da86fd0f792ea636f0e3 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 13:01:18 +0000 Subject: [PATCH 12/31] WIP --- cspell.json | 7 ++-- .../src/native/native_world_state.test.ts | 36 +++++++++---------- .../src/native/native_world_state.ts | 7 ++-- 3 files changed, 28 insertions(+), 22 deletions(-) diff --git a/cspell.json b/cspell.json index 1e9853b9d54..def40448ad5 100644 --- a/cspell.json +++ b/cspell.json @@ -6,7 +6,6 @@ "acvm", "addrs", "alphanet", - "Governance", "archiver", "assignement", "asyncify", @@ -106,6 +105,7 @@ "gitrepo", "Gossipable", "gossipsub", + "Governance", "grumpkin", "gtest", "gzipped", @@ -224,6 +224,7 @@ "rollup", "rollups", "rushstack", + "sanitise", "schnorr", "secp", "SEMRESATTRS", @@ -313,5 +314,7 @@ "lib", "*.cmake" ], - "flagWords": ["anonymous"] + "flagWords": [ + "anonymous" + ] } diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 5707d0be35e..4ed555b5b98 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -181,7 +181,7 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); + expect(status.summary.unfinalisedBlockNumber).toBe(BigInt(blockNumber)); } const forkAtZero = await ws.fork(0); @@ -209,16 +209,16 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); - expect(status.summary.oldestHistoricalBlock).toBe(1); + expect(status.summary.unfinalisedBlockNumber).toBe(BigInt(blockNumber)); + expect(status.summary.oldestHistoricalBlock).toBe(1n); if (provenBlock > 0) { const provenStatus = await ws.setFinalised(BigInt(provenBlock)); - expect(provenStatus.unfinalisedBlockNumber).toBe(blockNumber); - expect(provenStatus.finalisedBlockNumber).toBe(provenBlock); - expect(provenStatus.oldestHistoricalBlock).toBe(1); + expect(provenStatus.unfinalisedBlockNumber).toBe(BigInt(blockNumber)); + expect(provenStatus.finalisedBlockNumber).toBe(BigInt(provenBlock)); + expect(provenStatus.oldestHistoricalBlock).toBe(1n); } else { - expect(status.summary.finalisedBlockNumber).toBe(0); + expect(status.summary.finalisedBlockNumber).toBe(0n); } } }, 30_000); @@ -231,15 +231,15 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); - expect(status.summary.oldestHistoricalBlock).toBe(1); - 
expect(status.summary.finalisedBlockNumber).toBe(0); + expect(status.summary.unfinalisedBlockNumber).toBe(BigInt(blockNumber)); + expect(status.summary.oldestHistoricalBlock).toBe(1n); + expect(status.summary.finalisedBlockNumber).toBe(0n); } const status = await ws.setFinalised(8n); - expect(status.unfinalisedBlockNumber).toBe(16); - expect(status.oldestHistoricalBlock).toBe(1); - expect(status.finalisedBlockNumber).toBe(8); + expect(status.unfinalisedBlockNumber).toBe(16n); + expect(status.oldestHistoricalBlock).toBe(1n); + expect(status.finalisedBlockNumber).toBe(8n); }, 30_000); it('Can prune historic blocks', async () => { @@ -255,23 +255,23 @@ describe('NativeWorldState', () => { const { block, messages } = await mockBlock(blockNumber, 1, fork); const status = await ws.handleL2BlockAndMessages(block, messages); - expect(status.summary.unfinalisedBlockNumber).toBe(blockNumber); + expect(status.summary.unfinalisedBlockNumber).toBe(BigInt(blockNumber)); const blockFork = await ws.fork(); forks.push(blockFork); if (provenBlock > 0) { const provenStatus = await ws.setFinalised(BigInt(provenBlock)); - expect(provenStatus.finalisedBlockNumber).toBe(provenBlock); + expect(provenStatus.finalisedBlockNumber).toBe(BigInt(provenBlock)); } else { - expect(status.summary.finalisedBlockNumber).toBe(0); + expect(status.summary.finalisedBlockNumber).toBe(0n); } if (prunedBlockNumber > 0) { const prunedStatus = await ws.removeHistoricalBlocks(BigInt(prunedBlockNumber + 1)); - expect(prunedStatus.summary.oldestHistoricalBlock).toBe(prunedBlockNumber + 1); + expect(prunedStatus.summary.oldestHistoricalBlock).toBe(BigInt(prunedBlockNumber + 1)); } else { - expect(status.summary.oldestHistoricalBlock).toBe(1); + expect(status.summary.oldestHistoricalBlock).toBe(1n); } } diff --git a/yarn-project/world-state/src/native/native_world_state.ts b/yarn-project/world-state/src/native/native_world_state.ts index 0de27f10c37..260bfc15a31 100644 --- a/yarn-project/world-state/src/native/native_world_state.ts +++ b/yarn-project/world-state/src/native/native_world_state.ts @@ -35,6 +35,7 @@ import { type WorldStateStatusFull, blockStateReference, sanitiseFullStatus, + sanitiseSummary, treeStateReferenceToSnapshot, worldStateRevision, } from './message.js'; @@ -200,9 +201,10 @@ export class NativeWorldStateService implements MerkleTreeDatabase { * @returns The new WorldStateStatus */ public async setFinalised(toBlockNumber: bigint) { - return await this.instance.call(WorldStateMessageType.FINALISE_BLOCKS, { + const response = await this.instance.call(WorldStateMessageType.FINALISE_BLOCKS, { toBlockNumber, }); + return sanitiseSummary(response); } /** @@ -230,7 +232,8 @@ export class NativeWorldStateService implements MerkleTreeDatabase { } public async getStatusSummary() { - return await this.instance.call(WorldStateMessageType.GET_STATUS, void 0); + const response = await this.instance.call(WorldStateMessageType.GET_STATUS, void 0); + return sanitiseSummary(response); } updateLeaf( From 0ad1bdb6a7d25d98c0cf381be51f7dfbfdbc1a7a Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 13:04:20 +0000 Subject: [PATCH 13/31] WIP --- yarn-project/world-state/src/native/native_world_state.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 4ed555b5b98..2712ec8b652 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ 
b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -28,7 +28,7 @@ import { NativeWorldStateService } from './native_world_state.js'; describe('NativeWorldState', () => { let dataDir: string; let rollupAddress: EthAddress; - const defaultDBMapSize = 10 * 1024 * 1024; + const defaultDBMapSize = 25 * 1024 * 1024; beforeAll(async () => { dataDir = await mkdtemp(join(tmpdir(), 'world-state-test')); From 5cc95c5976b97d34f45a388d2b80cb1a9eb6f811 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 13:11:19 +0000 Subject: [PATCH 14/31] Fixes --- .../end-to-end/src/composed/integration_l1_publisher.test.ts | 1 + yarn-project/world-state/src/world-state-db/merkle_tree_db.ts | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts b/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts index fa6691d7d51..194839cda36 100644 --- a/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts @@ -144,6 +144,7 @@ describe('L1Publisher integration', () => { const worldStateConfig: WorldStateConfig = { worldStateBlockCheckIntervalMS: 10000, worldStateProvenBlocksOnly: false, + worldStateDbMapSizeKb: 10 * 1024 * 1024, }; worldStateSynchronizer = new ServerWorldStateSynchronizer(builderDb, blockSource, worldStateConfig); await worldStateSynchronizer.start(); diff --git a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts index 2adabfd5835..a9e9389b687 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts @@ -3,7 +3,7 @@ import { type MerkleTreeReadOperations, type MerkleTreeWriteOperations } from '@ import { type Fr, MAX_NULLIFIERS_PER_TX, MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX } from '@aztec/circuits.js'; import { type IndexedTreeSnapshot, type TreeSnapshot } from '@aztec/merkle-tree'; -import { WorldStateStatusFull, type WorldStateStatusSummary } from '../native/message.js'; +import { type WorldStateStatusFull, type WorldStateStatusSummary } from '../native/message.js'; /** * From 0ac899ff51b0af5b697b79a01ba0c105243b9b45 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 13:58:17 +0000 Subject: [PATCH 15/31] Formatting --- .../src/native/native_world_state_instance.ts | 2 +- yarn-project/world-state/src/synchronizer/factory.ts | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/yarn-project/world-state/src/native/native_world_state_instance.ts b/yarn-project/world-state/src/native/native_world_state_instance.ts index 6ce0aea0bd0..01f6871c0b4 100644 --- a/yarn-project/world-state/src/native/native_world_state_instance.ts +++ b/yarn-project/world-state/src/native/native_world_state_instance.ts @@ -82,7 +82,7 @@ export class NativeWorldState implements NativeWorldStateInstance { private queue = new SerialQueue(); /** Creates a new native WorldState instance */ - constructor(dataDir: string, dbMapSizeKb: number, private log = createDebugLogger('aztec:world-state:database')) { + constructor(dataDir: string, dbMapSizeKb: number, private log = createDebugLogger('aztec:world-state:database')) { this.instance = new NATIVE_MODULE[NATIVE_CLASS_NAME]( dataDir, { diff --git a/yarn-project/world-state/src/synchronizer/factory.ts b/yarn-project/world-state/src/synchronizer/factory.ts index 
231822c473d..7a846652ca4 100644 --- a/yarn-project/world-state/src/synchronizer/factory.ts +++ b/yarn-project/world-state/src/synchronizer/factory.ts @@ -18,14 +18,21 @@ export async function createWorldStateSynchronizer( return new ServerWorldStateSynchronizer(merkleTrees, l2BlockSource, config); } -export async function createWorldState(config: WorldStateConfig & DataStoreConfig, client: TelemetryClient = new NoopTelemetryClient()) { +export async function createWorldState( + config: WorldStateConfig & DataStoreConfig, + client: TelemetryClient = new NoopTelemetryClient(), +) { const merkleTrees = ['true', '1'].includes(process.env.USE_LEGACY_WORLD_STATE ?? '') ? await MerkleTrees.new( await createStore('world-state', config, createDebugLogger('aztec:world-state:lmdb')), client, ) : config.dataDirectory - ? await NativeWorldStateService.new(config.l1Contracts.rollupAddress, config.dataDirectory, config.worldStateDbMapSizeKb) + ? await NativeWorldStateService.new( + config.l1Contracts.rollupAddress, + config.dataDirectory, + config.worldStateDbMapSizeKb, + ) : await NativeWorldStateService.tmp( config.l1Contracts.rollupAddress, !['true', '1'].includes(process.env.DEBUG_WORLD_STATE!), From ed8be9f8c0a5a73c8846b394996c80123b46b744 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 14:02:18 +0000 Subject: [PATCH 16/31] Additional test case --- .../src/native/native_world_state.test.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 2712ec8b652..069c081eb3c 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -93,9 +93,28 @@ describe('NativeWorldState', () => { // The first block should succeed await expect(ws.handleL2BlockAndMessages(block1, messages1)).resolves.toBeDefined(); + // The trees should be synched at block 1 + const goodSummary = await ws.getStatusSummary(); + expect(goodSummary).toEqual({ + unfinalisedBlockNumber: 1n, + finalisedBlockNumber: 0n, + oldestHistoricalBlock: 1n, + treesAreSynched: true, + } as WorldStateStatusSummary); + // The second block should fail await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow(); + // The summary should indicate that the unfinalised block number (that of the archive tree) is 2 + // But it should also tell us that the trees are not synched + const badSummary = await ws.getStatusSummary(); + expect(badSummary).toEqual({ + unfinalisedBlockNumber: 2n, + finalisedBlockNumber: 0n, + oldestHistoricalBlock: 1n, + treesAreSynched: false, + } as WorldStateStatusSummary); + // Commits should always fail now, the trees are in an inconsistent state await expect(ws.handleL2BlockAndMessages(block2, messages2)).rejects.toThrow('World state trees are out of sync'); await expect(ws.handleL2BlockAndMessages(block3, messages3)).rejects.toThrow('World state trees are out of sync'); From c7e64bb5150d66b53d4c9d2dec25f94beca5bcb1 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 14:15:45 +0000 Subject: [PATCH 17/31] Log formatting --- .../content_addressed_indexed_tree.hpp | 76 +++++++++++-------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp 
index 055ac0a8c5f..8aa2412208a 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp @@ -309,7 +309,7 @@ ContentAddressedIndexedTree::ContentAddressedIndexedTree(s ContentAddressedAppendOnlyTree::add_values_internal(appended_hashes, completion, false); signal.wait_for_level(0); if (!result.success) { - throw std::runtime_error("Failed to initialise tree: " + result.message); + throw std::runtime_error(format("Failed to initialise tree: ", result.message)); } { ReadTransactionPtr tx = store_->create_read_transaction(); @@ -337,12 +337,14 @@ void ContentAddressedIndexedTree::get_leaf(const index_t& std::optional leaf_hash = find_leaf_hash(index, requestContext, *tx); if (!leaf_hash.has_value()) { response.success = false; + response.message = "Failed to find leaf hash for current root"; return; } std::optional leaf = store_->get_leaf_by_hash(leaf_hash.value(), *tx, includeUncommitted); if (!leaf.has_value()) { response.success = false; + response.message = "Failed to find leaf by it's hash"; return; } response.success = true; @@ -368,10 +370,11 @@ void ContentAddressedIndexedTree::get_leaf(const index_t& ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() - << "Unable to get leaf at index " << index << " for block " << blockNumber - << ", failed to get block data.") - .str()); + throw std::runtime_error(format("Unable to get leaf at index ", + index, + " for block ", + blockNumber, + ", failed to get block data.")); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -380,12 +383,14 @@ void ContentAddressedIndexedTree::get_leaf(const index_t& std::optional leaf_hash = find_leaf_hash(index, requestContext, *tx); if (!leaf_hash.has_value()) { response.success = false; + response.message = format("Failed to find leaf hash for root of block ", blockNumber); return; } std::optional leaf = store_->get_leaf_by_hash(leaf_hash.value(), *tx, includeUncommitted); if (!leaf.has_value()) { response.success = false; + response.message = format("Unable to get leaf at index ", index, " for block ", blockNumber); return; } response.success = true; @@ -434,6 +439,8 @@ void ContentAddressedIndexedTree::find_leaf_index_from( response.success = leaf_index.has_value(); if (response.success) { response.inner.leaf_index = leaf_index.value(); + } else { + response.message = format("Index not found for leaf ", leaf); } }, on_completion); @@ -458,10 +465,11 @@ void ContentAddressedIndexedTree::find_leaf_index_from( typename Store::ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() - << "Unable to find leaf from index " << start_index << " for block " - << blockNumber << ", failed to get block data.") - .str()); + throw std::runtime_error(format("Unable to find leaf from index ", + start_index, + " for block ", + blockNumber, + ", failed to get block data.")); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -472,6 +480,9 @@ void ContentAddressedIndexedTree::find_leaf_index_from( response.success = leaf_index.has_value(); if (response.success) { response.inner.leaf_index = leaf_index.value(); + } else { + response.message = + 
format("Unable to find leaf from index ", start_index, " for block ", blockNumber); } }, on_completion); @@ -516,9 +527,8 @@ void ContentAddressedIndexedTree::find_low_leaf(const fr& typename Store::ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() << "Unable to find low leaf for block " << blockNumber - << ", failed to get block data.") - .str()); + throw std::runtime_error( + format("Unable to find low leaf for block ", blockNumber, ", failed to get block data.")); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -988,10 +998,12 @@ void ContentAddressedIndexedTree::generate_insertions( // Ensure that the tree is not going to be overfilled index_t new_total_size = num_leaves_to_be_inserted + meta.size; if (new_total_size > max_size_) { - throw std::runtime_error((std::stringstream() - << "Unable to insert values into tree " << meta.name - << " new size: " << new_total_size << " max size: " << max_size_) - .str()); + throw std::runtime_error(format("Unable to insert values into tree ", + meta.name, + " new size: ", + new_total_size, + " max size: ", + max_size_)); } for (size_t i = 0; i < values.size(); ++i) { std::pair& value_pair = values[i]; @@ -1003,10 +1015,8 @@ void ContentAddressedIndexedTree::generate_insertions( fr value = value_pair.first.get_key(); auto it = unique_values.insert(value); if (!it.second) { - throw std::runtime_error((std::stringstream() - << "Duplicate key not allowed in same batch, key value: " << value - << ", tree: " << meta.name) - .str()); + throw std::runtime_error(format( + "Duplicate key not allowed in same batch, key value: ", value, ", tree: ", meta.name)); } // This gives us the leaf that need updating @@ -1034,10 +1044,10 @@ void ContentAddressedIndexedTree::generate_insertions( if (!low_leaf_hash.has_value()) { // std::cout << "Failed to find low leaf" << std::endl; - throw std::runtime_error((std::stringstream() - << "Unable to insert values into tree " << meta.name - << " failed to find low leaf at index " << low_leaf_index) - .str()); + throw std::runtime_error(format("Unable to insert values into tree ", + meta.name, + " failed to find low leaf at index ", + low_leaf_index)); } // std::cout << "Low leaf hash " << low_leaf_hash.value() << std::endl; @@ -1046,11 +1056,10 @@ void ContentAddressedIndexedTree::generate_insertions( if (!low_leaf_option.has_value()) { // std::cout << "No pre-image" << std::endl; - throw std::runtime_error((std::stringstream() - << "Unable to insert values into tree " << meta.name - << " failed to get leaf pre-image by hash for index " - << low_leaf_index) - .str()); + throw std::runtime_error(format("Unable to insert values into tree ", + meta.name, + " failed to get leaf pre-image by hash for index ", + low_leaf_index)); } // std::cout << "Low leaf pre-image " << low_leaf_option.value() << std::endl; low_leaf = low_leaf_option.value(); @@ -1099,10 +1108,11 @@ void ContentAddressedIndexedTree::generate_insertions( // The set of appended leaves already has an empty leaf in the slot at index // 'index_into_appended_leaves' } else { - throw std::runtime_error((std::stringstream() - << "Unable to insert values into tree " << meta.name << " leaf type " - << IndexedLeafValueType::name() << " is not updateable") - .str()); + throw std::runtime_error(format("Unable to insert values into tree ", + meta.name, + " leaf type ", + IndexedLeafValueType::name(), + " is 
not updateable")); } response.inner.highest_index = std::max(response.inner.highest_index, low_leaf_index); From b0b25bc6581e14a8450d816dd826cf0691a984aa Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 14:30:12 +0000 Subject: [PATCH 18/31] More log formatting --- .../content_addressed_append_only_tree.hpp | 58 +++++++++++-------- 1 file changed, 35 insertions(+), 23 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index b15f4666160..4a303ccc362 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -294,7 +294,7 @@ ContentAddressedAppendOnlyTree::ContentAddressedAppendOnly signal.wait_for_level(0); if (!result.success) { - throw std::runtime_error("Failed to initialise tree: " + result.message); + throw std::runtime_error(format("Failed to initialise tree: ", result.message)); } { @@ -338,9 +338,8 @@ void ContentAddressedAppendOnlyTree::get_meta_data(const i BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() << "Unable to get meta data for block " << blockNumber - << ", failed to get block data.") - .str()); + throw std::runtime_error( + format("Unable to get meta data for block ", blockNumber, ", failed to get block data.")); } response.inner.meta.size = blockData.size; @@ -375,10 +374,11 @@ void ContentAddressedAppendOnlyTree::get_sibling_path(cons ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() - << "Unable to get sibling path for index " << index << " at block " - << blockNumber << ", failed to get block data.") - .str()); + throw std::runtime_error(format("Unable to get sibling path for index ", + index, + " at block ", + blockNumber, + ", failed to get block data.")); } RequestContext requestContext; @@ -580,6 +580,8 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ response.success = leaf_hash.has_value(); if (response.success) { response.inner.leaf = leaf_hash.value(); + } else { + response.message = format("Failed to find leaf hash at index ", leaf_index); } }, on_completion); @@ -602,16 +604,18 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() - << "Unable to get leaf at index " << leaf_index << " for block " - << blockNumber << ", failed to get block data.") - .str()); + throw std::runtime_error(format("Unable to get leaf at index ", + leaf_index, + " for block ", + blockNumber, + ", failed to get block data.")); } if (blockData.size < leaf_index) { - response.message = - (std::stringstream() << "Unable to get leaf at index " << leaf_index << " for block " - << blockNumber << ", leaf index is too high.") - .str(); + response.message = format("Unable to get leaf at index ", + leaf_index, + " for block ", + blockNumber, + ", leaf index is too high."); response.success = false; return; } @@ -623,6 +627,9 @@ void ContentAddressedAppendOnlyTree::get_leaf(const 
index_ response.success = leaf_hash.has_value(); if (response.success) { response.inner.leaf = leaf_hash.value(); + } else { + response.message = + format("Failed to find leaf hash at index ", leaf_index, " for block number ", blockNumber); } }, on_completion); @@ -663,6 +670,8 @@ void ContentAddressedAppendOnlyTree::find_leaf_index_from( response.success = leaf_index.has_value(); if (response.success) { response.inner.leaf_index = leaf_index.value(); + } else { + response.message = format("Failed to find index from ", start_index, " for leaf ", leaf); } }, on_completion); @@ -687,10 +696,11 @@ void ContentAddressedAppendOnlyTree::find_leaf_index_from( ReadTransactionPtr tx = store_->create_read_transaction(); BlockPayload blockData; if (!store_->get_block_data(blockNumber, blockData, *tx)) { - throw std::runtime_error((std::stringstream() - << "Unable to find leaf from index " << start_index << " for block " - << blockNumber << ", failed to get block data.") - .str()); + throw std::runtime_error(format("Unable to find leaf from index ", + start_index, + " for block ", + blockNumber, + ", failed to get block data.")); } RequestContext requestContext; requestContext.blockNumber = blockNumber; @@ -701,6 +711,9 @@ void ContentAddressedAppendOnlyTree::find_leaf_index_from( response.success = leaf_index.has_value(); if (response.success) { response.inner.leaf_index = leaf_index.value(); + } else { + response.message = format( + "Failed to find index from ", start_index, " for leaf ", leaf, " at block ", blockNumber); } }, on_completion); @@ -871,9 +884,8 @@ void ContentAddressedAppendOnlyTree::add_batch_internal( } if (new_size > max_size_) { - throw std::runtime_error((std::stringstream() << "Unable to append leaves to tree " << meta.name - << " new size: " << new_size << " max size: " << max_size_) - .str()); + throw std::runtime_error( + format("Unable to append leaves to tree ", meta.name, " new size: ", new_size, " max size: ", max_size_)); } // Add the values at the leaf nodes of the tree From 6224fa8a6389fc5ee8a085b9125a167a5a931460 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Wed, 13 Nov 2024 15:22:47 +0000 Subject: [PATCH 19/31] Formatting --- yarn-project/world-state/src/native/message.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index c520746c27f..51b6163c223 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -1,5 +1,5 @@ import { MerkleTreeId } from '@aztec/circuit-types'; -import { AppendOnlyTreeSnapshot, Fr, type StateReference, TreeLeafReadRequest, type UInt32 } from '@aztec/circuits.js'; +import { AppendOnlyTreeSnapshot, Fr, type StateReference, type UInt32 } from '@aztec/circuits.js'; import { type Tuple } from '@aztec/foundation/serialize'; export type MessageHeaderInit = { From 4704900e2941e2153ffa7a65ef1aa5e70efe01a7 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Thu, 14 Nov 2024 17:24:59 +0000 Subject: [PATCH 20/31] Data directory config --- yarn-project/archiver/src/archiver/config.ts | 11 +----- yarn-project/archiver/src/factory.ts | 3 +- .../aztec-node/src/aztec-node/config.ts | 4 ++- .../aztec/src/cli/cmds/start_archiver.ts | 10 +++++- .../src/benchmarks/bench_prover.test.ts | 25 +++++++------- .../end-to-end/src/benchmarks/utils.ts | 14 +++++--- yarn-project/foundation/src/config/env_var.ts | 2 ++ yarn-project/kv-store/package.json | 4 ++- yarn-project/kv-store/src/config.ts | 34 
+++++++++++++++++++ yarn-project/kv-store/src/lmdb/store.test.ts | 8 +++-- yarn-project/kv-store/src/lmdb/store.ts | 4 ++- yarn-project/kv-store/src/utils.ts | 12 ++++--- yarn-project/kv-store/tsconfig.json | 3 ++ yarn-project/p2p/src/client/index.ts | 3 +- yarn-project/p2p/src/config.ts | 9 ----- yarn-project/p2p/src/mocks/index.ts | 2 +- .../reqresp/reqresp.integration.test.ts | 3 +- yarn-project/p2p/src/util.ts | 2 +- yarn-project/prover-node/src/config.ts | 4 +++ yarn-project/prover-node/src/factory.ts | 3 +- .../src/prover-coordination/factory.ts | 3 +- yarn-project/pxe/src/config/index.ts | 11 +++--- .../pxe/src/pxe_service/create_pxe_service.ts | 6 ++-- .../src/pxe_service/test/pxe_service.test.ts | 17 ++++++++-- .../world-state/src/synchronizer/config.ts | 12 +++++-- .../world-state/src/synchronizer/factory.ts | 15 +++++--- .../world-state/src/test/integration.test.ts | 4 +-- 27 files changed, 152 insertions(+), 76 deletions(-) create mode 100644 yarn-project/kv-store/src/config.ts diff --git a/yarn-project/archiver/src/archiver/config.ts b/yarn-project/archiver/src/archiver/config.ts index a3115f7da67..6aa953cd087 100644 --- a/yarn-project/archiver/src/archiver/config.ts +++ b/yarn-project/archiver/src/archiver/config.ts @@ -38,11 +38,6 @@ export type ArchiverConfig = { */ l1Contracts: L1ContractAddresses; - /** - * Optional dir to store data. If omitted will store in memory. - */ - dataDirectory: string | undefined; - /** The max number of logs that can be obtained in 1 "getUnencryptedLogs" call. */ maxLogs?: number; } & L1ReaderConfig & @@ -57,11 +52,7 @@ export const archiverConfigMappings: ConfigMappingsType = { archiverPollingIntervalMS: { env: 'ARCHIVER_POLLING_INTERVAL_MS', description: 'The polling interval in ms for retrieving new L2 blocks and encrypted logs.', - ...numberConfigHelper(1000), - }, - dataDirectory: { - env: 'DATA_DIRECTORY', - description: 'Optional dir to store data. 
If omitted will store in memory.', + ...numberConfigHelper(1_000), }, maxLogs: { env: 'ARCHIVER_MAX_LOGS', diff --git a/yarn-project/archiver/src/factory.ts b/yarn-project/archiver/src/factory.ts index ac5b2786757..e439ab370d5 100644 --- a/yarn-project/archiver/src/factory.ts +++ b/yarn-project/archiver/src/factory.ts @@ -2,6 +2,7 @@ import { type ArchiverApi, type Service } from '@aztec/circuit-types'; import { type ContractClassPublic } from '@aztec/circuits.js'; import { createDebugLogger } from '@aztec/foundation/log'; import { type Maybe } from '@aztec/foundation/types'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; import { createStore } from '@aztec/kv-store/utils'; import { getCanonicalProtocolContract, protocolContractNames } from '@aztec/protocol-contracts'; import { type TelemetryClient } from '@aztec/telemetry-client'; @@ -13,7 +14,7 @@ import { KVArchiverDataStore } from './archiver/index.js'; import { createArchiverClient } from './rpc/index.js'; export async function createArchiver( - config: ArchiverConfig, + config: ArchiverConfig & DataStoreConfig, telemetry: TelemetryClient = new NoopTelemetryClient(), opts: { blockUntilSync: boolean } = { blockUntilSync: true }, ): Promise> { diff --git a/yarn-project/aztec-node/src/aztec-node/config.ts b/yarn-project/aztec-node/src/aztec-node/config.ts index 2bd36b1052e..c083a518315 100644 --- a/yarn-project/aztec-node/src/aztec-node/config.ts +++ b/yarn-project/aztec-node/src/aztec-node/config.ts @@ -1,5 +1,6 @@ import { type ArchiverConfig, archiverConfigMappings } from '@aztec/archiver'; import { type ConfigMappingsType, booleanConfigHelper, getConfigFromMappings } from '@aztec/foundation/config'; +import { type DataStoreConfig, dataConfigMappings } from '@aztec/kv-store/config'; import { type P2PConfig, p2pConfigMappings } from '@aztec/p2p'; import { type ProverClientConfig, proverClientConfigMappings } from '@aztec/prover-client'; import { type SequencerClientConfig, sequencerClientConfigMappings } from '@aztec/sequencer-client'; @@ -24,7 +25,7 @@ export type AztecNodeConfig = ArchiverConfig & P2PConfig & { /** Whether the validator is disabled for this node */ disableValidator: boolean; - }; + } & DataStoreConfig; export const aztecNodeConfigMappings: ConfigMappingsType = { ...archiverConfigMappings, @@ -33,6 +34,7 @@ export const aztecNodeConfigMappings: ConfigMappingsType = { ...proverClientConfigMappings, ...worldStateConfigMappings, ...p2pConfigMappings, + ...dataConfigMappings, disableValidator: { env: 'VALIDATOR_DISABLED', description: 'Whether the validator is disabled for this node.', diff --git a/yarn-project/aztec/src/cli/cmds/start_archiver.ts b/yarn-project/aztec/src/cli/cmds/start_archiver.ts index cdee55cc214..541ec726db5 100644 --- a/yarn-project/aztec/src/cli/cmds/start_archiver.ts +++ b/yarn-project/aztec/src/cli/cmds/start_archiver.ts @@ -2,6 +2,7 @@ import { Archiver, type ArchiverConfig, KVArchiverDataStore, archiverConfigMappi import { createDebugLogger } from '@aztec/aztec.js'; import { ArchiverApiSchema } from '@aztec/circuit-types'; import { type NamespacedApiHandlers } from '@aztec/foundation/json-rpc/server'; +import { type DataStoreConfig, dataConfigMappings } from '@aztec/kv-store/config'; import { createStore } from '@aztec/kv-store/utils'; import { createAndStartTelemetryClient, @@ -16,7 +17,14 @@ export async function startArchiver( signalHandlers: (() => Promise)[], services: NamespacedApiHandlers, ) { - const archiverConfig = extractRelevantOptions(options, 
archiverConfigMappings, 'archiver'); + const archiverConfig = extractRelevantOptions( + options, + { + ...archiverConfigMappings, + ...dataConfigMappings, + }, + 'archiver', + ); const storeLog = createDebugLogger('aztec:archiver:lmdb'); const store = await createStore('archiver', archiverConfig, storeLog); diff --git a/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts b/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts index 5aa2b0c33fc..04755b0b341 100644 --- a/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts +++ b/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts @@ -5,7 +5,7 @@ import { BBCircuitVerifier } from '@aztec/bb-prover'; import { CompleteAddress, Fq, Fr, GasSettings } from '@aztec/circuits.js'; import { FPCContract, FeeJuiceContract, TestContract, TokenContract } from '@aztec/noir-contracts.js'; import { ProtocolContractAddress } from '@aztec/protocol-contracts'; -import { type PXEService, createPXEService } from '@aztec/pxe'; +import { type PXEService, PXEServiceConfig, createPXEService } from '@aztec/pxe'; import { jest } from '@jest/globals'; @@ -142,17 +142,18 @@ describe('benchmarks/proving', () => { ctx.logger.info('Starting PXEs configured with real proofs'); provingPxes = []; for (let i = 0; i < 4; i++) { - const pxe = await createPXEService( - ctx.aztecNode, - { - proverEnabled: true, - bbBinaryPath: bbConfig.bbBinaryPath, - bbWorkingDirectory: bbConfig.bbWorkingDirectory, - l2BlockPollingIntervalMS: 1000, - l2StartingBlock: 1, - }, - `proving-pxe-${i}`, - ); + const l1Contracts = await ctx.aztecNode.getL1ContractAddresses(); + const pxeConfig = { + proverEnabled: true, + bbBinaryPath: bbConfig.bbBinaryPath, + bbWorkingDirectory: bbConfig.bbWorkingDirectory, + l2BlockPollingIntervalMS: 1000, + l2StartingBlock: 1, + dataDirectory: undefined, + dataStoreMapSizeKB: 1024 * 1024, + l1Contracts, + } as PXEServiceConfig; + const pxe = await createPXEService(ctx.aztecNode, pxeConfig, `proving-pxe-${i}`); await getSchnorrAccount(pxe, schnorrWalletEncKey, schnorrWalletSigningKey, schnorrWalletSalt).register(); await pxe.registerContract(initialTokenContract); diff --git a/yarn-project/end-to-end/src/benchmarks/utils.ts b/yarn-project/end-to-end/src/benchmarks/utils.ts index 36c5f1fed76..e801f0a33d9 100644 --- a/yarn-project/end-to-end/src/benchmarks/utils.ts +++ b/yarn-project/end-to-end/src/benchmarks/utils.ts @@ -12,8 +12,9 @@ import { } from '@aztec/aztec.js'; import { times } from '@aztec/foundation/collection'; import { randomInt } from '@aztec/foundation/crypto'; +import { DataStoreConfig } from '@aztec/kv-store/config'; import { BenchmarkingContract } from '@aztec/noir-contracts.js/Benchmarking'; -import { type PXEService, createPXEService } from '@aztec/pxe'; +import { type PXEService, PXEServiceConfig, createPXEService } from '@aztec/pxe'; import { mkdirpSync } from 'fs-extra'; import { globSync } from 'glob'; @@ -112,10 +113,15 @@ export async function waitNewPXESynced( contract: BenchmarkingContract, startingBlock: number = INITIAL_L2_BLOCK_NUM, ): Promise { - const pxe = await createPXEService(node, { - l2BlockPollingIntervalMS: 100, + const l1Contracts = await node.getL1ContractAddresses(); + const pxeConfig = { l2StartingBlock: startingBlock, - }); + l2BlockPollingIntervalMS: 100, + dataDirectory: undefined, + dataStoreMapSizeKB: 1024 * 1024, + l1Contracts, + } as PXEServiceConfig; + const pxe = await createPXEService(node, pxeConfig); await pxe.registerContract(contract); await retryUntil(() => 
pxe.isGlobalStateSynchronized(), 'pxe-global-sync'); return pxe; diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 7b2469bca97..4dd7da63d04 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -37,6 +37,7 @@ export type EnvVar = | 'BOT_STOP_WHEN_UNHEALTHY' | 'COINBASE' | 'DATA_DIRECTORY' + | 'DATA_STORE_MAP_SIZE_KB' | 'DEBUG' | 'DEPLOY_AZTEC_CONTRACTS_SALT' | 'DEPLOY_AZTEC_CONTRACTS' @@ -152,6 +153,7 @@ export type EnvVar = | 'PROVER_VIEM_POLLING_INTERVAL_MS' | 'SEQ_VIEM_POLLING_INTERVAL_MS' | 'WS_DB_MAP_SIZE_KB' + | 'WS_DATA_DIRECTORY' | 'ETHEREUM_SLOT_DURATION' | 'AZTEC_SLOT_DURATION' | 'AZTEC_EPOCH_DURATION' diff --git a/yarn-project/kv-store/package.json b/yarn-project/kv-store/package.json index ae05345eeaa..870afec3c63 100644 --- a/yarn-project/kv-store/package.json +++ b/yarn-project/kv-store/package.json @@ -5,7 +5,8 @@ "exports": { ".": "./dest/interfaces/index.js", "./lmdb": "./dest/lmdb/index.js", - "./utils": "./dest/utils.js" + "./utils": "./dest/utils.js", + "./config": "./dest/config.js" }, "scripts": { "build": "yarn clean && tsc -b", @@ -55,6 +56,7 @@ ] }, "dependencies": { + "@aztec/ethereum": "workspace:^", "@aztec/foundation": "workspace:^", "lmdb": "^3.0.6" }, diff --git a/yarn-project/kv-store/src/config.ts b/yarn-project/kv-store/src/config.ts new file mode 100644 index 00000000000..41592bd10b1 --- /dev/null +++ b/yarn-project/kv-store/src/config.ts @@ -0,0 +1,34 @@ +import { l1ContractAddressesMapping } from '@aztec/ethereum'; +import { type ConfigMappingsType, getConfigFromMappings, numberConfigHelper } from '@aztec/foundation/config'; +import { type EthAddress } from '@aztec/foundation/eth-address'; + +export type DataStoreConfig = { + dataDirectory: string | undefined; + dataStoreMapSizeKB: number; + l1Contracts: { rollupAddress: EthAddress }; +}; + +export const dataConfigMappings: ConfigMappingsType = { + dataDirectory: { + env: 'DATA_DIRECTORY', + description: 'Optional dir to store data. If omitted will store in memory.', + }, + dataStoreMapSizeKB: { + env: 'DATA_STORE_MAP_SIZE_KB', + description: 'DB mapping size to be applied to all key/value stores', + ...numberConfigHelper(1_024 * 1_024 * 1_024), // Defaulted to 1TB + }, + l1Contracts: { + description: 'The deployed L1 contract addresses', + defaultValue: l1ContractAddressesMapping, + }, +}; + +/** + * Returns the archiver configuration from the environment variables. + * Note: If an environment variable is not set, the default value is used. + * @returns The archiver configuration. 
+ */ +export function getDataConfigFromEnv(): DataStoreConfig { + return getConfigFromMappings(dataConfigMappings); +} diff --git a/yarn-project/kv-store/src/lmdb/store.test.ts b/yarn-project/kv-store/src/lmdb/store.test.ts index b443b630528..c09953cb55b 100644 --- a/yarn-project/kv-store/src/lmdb/store.test.ts +++ b/yarn-project/kv-store/src/lmdb/store.test.ts @@ -4,6 +4,8 @@ import { join } from 'path'; import { AztecLmdbStore } from './store.js'; +const defaultMapSize = 1024 * 1024 * 1024 * 10; + describe('AztecLmdbStore', () => { const itForks = async (store: AztecLmdbStore) => { const singleton = store.openSingleton('singleton'); @@ -21,17 +23,17 @@ describe('AztecLmdbStore', () => { it('forks a persistent store', async () => { const path = await mkdtemp(join(tmpdir(), 'aztec-store-test-')); - const store = AztecLmdbStore.open(path, false); + const store = AztecLmdbStore.open(path, defaultMapSize, false); await itForks(store); }); it('forks a persistent store with no path', async () => { - const store = AztecLmdbStore.open(undefined, false); + const store = AztecLmdbStore.open(undefined, defaultMapSize, false); await itForks(store); }); it('forks an ephemeral store', async () => { - const store = AztecLmdbStore.open(undefined, true); + const store = AztecLmdbStore.open(undefined, defaultMapSize, true); await itForks(store); }); }); diff --git a/yarn-project/kv-store/src/lmdb/store.ts b/yarn-project/kv-store/src/lmdb/store.ts index 1c5e53f71ff..fa48c822842 100644 --- a/yarn-project/kv-store/src/lmdb/store.ts +++ b/yarn-project/kv-store/src/lmdb/store.ts @@ -58,6 +58,7 @@ export class AztecLmdbStore implements AztecKVStore { */ static open( path?: string, + mapSizeKb?: number, ephemeral: boolean = false, log = createDebugLogger('aztec:kv-store:lmdb'), ): AztecLmdbStore { @@ -65,7 +66,8 @@ export class AztecLmdbStore implements AztecKVStore { if (path) { mkdirSync(path, { recursive: true }); } - const rootDb = open({ path, noSync: ephemeral }); + const mapSize = mapSizeKb === undefined ? undefined : 1024 * mapSizeKb; + const rootDb = open({ path, noSync: ephemeral, mapSize }); return new AztecLmdbStore(rootDb, ephemeral, path); } diff --git a/yarn-project/kv-store/src/utils.ts b/yarn-project/kv-store/src/utils.ts index 640f50932c3..2a205c778a1 100644 --- a/yarn-project/kv-store/src/utils.ts +++ b/yarn-project/kv-store/src/utils.ts @@ -3,11 +3,10 @@ import { type Logger, createDebugLogger } from '@aztec/foundation/log'; import { join } from 'path'; +import { type DataStoreConfig } from './config.js'; import { type AztecKVStore } from './interfaces/store.js'; import { AztecLmdbStore } from './lmdb/store.js'; -export type DataStoreConfig = { dataDirectory: string | undefined; l1Contracts: { rollupAddress: EthAddress } }; - export function createStore(name: string, config: DataStoreConfig, log: Logger = createDebugLogger('aztec:kv-store')) { let { dataDirectory } = config; if (typeof dataDirectory !== 'undefined') { @@ -19,7 +18,11 @@ export function createStore(name: string, config: DataStoreConfig, log: Logger = ? 
`Creating ${name} data store at directory ${dataDirectory}` : `Creating ${name} ephemeral data store`, ); - return initStoreForRollup(AztecLmdbStore.open(dataDirectory, false), config.l1Contracts.rollupAddress, log); + return initStoreForRollup( + AztecLmdbStore.open(dataDirectory, config.dataStoreMapSizeKB, false), + config.l1Contracts.rollupAddress, + log, + ); } /** @@ -60,5 +63,6 @@ async function initStoreForRollup( * @returns A new store */ export function openTmpStore(ephemeral: boolean = false): AztecKVStore { - return AztecLmdbStore.open(undefined, ephemeral); + const mapSize = 1024 * 1024 * 1024 * 10; // 10 GB map size + return AztecLmdbStore.open(undefined, mapSize, ephemeral); } diff --git a/yarn-project/kv-store/tsconfig.json b/yarn-project/kv-store/tsconfig.json index 63f8ab3e9f7..18fc3bcf3f2 100644 --- a/yarn-project/kv-store/tsconfig.json +++ b/yarn-project/kv-store/tsconfig.json @@ -8,6 +8,9 @@ "references": [ { "path": "../foundation" + }, + { + "path": "../ethereum" } ], "include": ["src"] diff --git a/yarn-project/p2p/src/client/index.ts b/yarn-project/p2p/src/client/index.ts index 2b4c498ca59..1c0dd17bb2d 100644 --- a/yarn-project/p2p/src/client/index.ts +++ b/yarn-project/p2p/src/client/index.ts @@ -1,7 +1,8 @@ import type { ClientProtocolCircuitVerifier, L2BlockSource, WorldStateSynchronizer } from '@aztec/circuit-types'; import { createDebugLogger } from '@aztec/foundation/log'; import { type AztecKVStore } from '@aztec/kv-store'; -import { type DataStoreConfig, createStore } from '@aztec/kv-store/utils'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; +import { createStore } from '@aztec/kv-store/utils'; import { type TelemetryClient } from '@aztec/telemetry-client'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index c98bc9d741a..7cff1711b48 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -83,11 +83,6 @@ export interface P2PConfig extends P2PReqRespConfig { */ maxPeerCount: number; - /** - * Data directory for peer & tx databases. - */ - dataDirectory?: string; - /** * If announceUdpAddress or announceTcpAddress are not provided, query for the IP address of the machine. Default is false. */ @@ -222,10 +217,6 @@ export const p2pConfigMappings: ConfigMappingsType = { description: 'The maximum number of peers to connect to.', ...numberConfigHelper(100), }, - dataDirectory: { - env: 'DATA_DIRECTORY', - description: 'Data directory for peer & tx databases. 
Will use temporary location if not set.', - }, queryForIp: { env: 'P2P_QUERY_FOR_IP', description: diff --git a/yarn-project/p2p/src/mocks/index.ts b/yarn-project/p2p/src/mocks/index.ts index 27c37a475fa..8703ba3286b 100644 --- a/yarn-project/p2p/src/mocks/index.ts +++ b/yarn-project/p2p/src/mocks/index.ts @@ -4,7 +4,7 @@ import { type Tx, type WorldStateSynchronizer, } from '@aztec/circuit-types'; -import { type DataStoreConfig } from '@aztec/kv-store/utils'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; import { type TelemetryClient } from '@aztec/telemetry-client'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; diff --git a/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts b/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts index 3a1dc6f502e..3e28c031a0d 100644 --- a/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts +++ b/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts @@ -4,7 +4,8 @@ import { type ClientProtocolCircuitVerifier, type WorldStateSynchronizer, mockTx import { createDebugLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; import { type AztecKVStore } from '@aztec/kv-store'; -import { type DataStoreConfig, openTmpStore } from '@aztec/kv-store/utils'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; +import { openTmpStore } from '@aztec/kv-store/utils'; import { SignableENR } from '@chainsafe/enr'; import { describe, expect, it, jest } from '@jest/globals'; diff --git a/yarn-project/p2p/src/util.ts b/yarn-project/p2p/src/util.ts index 0550ad5ec92..38654557483 100644 --- a/yarn-project/p2p/src/util.ts +++ b/yarn-project/p2p/src/util.ts @@ -1,4 +1,4 @@ -import { type DataStoreConfig } from '@aztec/kv-store/utils'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; import type { GossipSub } from '@chainsafe/libp2p-gossipsub'; import { resolve } from 'dns/promises'; diff --git a/yarn-project/prover-node/src/config.ts b/yarn-project/prover-node/src/config.ts index ce0f8676065..12894b5cd0d 100644 --- a/yarn-project/prover-node/src/config.ts +++ b/yarn-project/prover-node/src/config.ts @@ -5,6 +5,7 @@ import { getConfigFromMappings, numberConfigHelper, } from '@aztec/foundation/config'; +import { type DataStoreConfig, dataConfigMappings, getDataConfigFromEnv } from '@aztec/kv-store/config'; import { type P2PConfig, getP2PConfigFromEnv, p2pConfigMappings } from '@aztec/p2p'; import { type ProverClientConfig, getProverEnvVars, proverClientConfigMappings } from '@aztec/prover-client'; import { @@ -30,6 +31,7 @@ export type ProverNodeConfig = ArchiverConfig & WorldStateConfig & PublisherConfig & TxSenderConfig & + DataStoreConfig & ProverCoordinationConfig & ProverBondManagerConfig & QuoteProviderConfig & { @@ -77,6 +79,7 @@ const quoteProviderConfigMappings: ConfigMappingsType = { }; export const proverNodeConfigMappings: ConfigMappingsType = { + ...dataConfigMappings, ...archiverConfigMappings, ...proverClientConfigMappings, ...p2pConfigMappings, @@ -91,6 +94,7 @@ export const proverNodeConfigMappings: ConfigMappingsType = { export function getProverNodeConfigFromEnv(): ProverNodeConfig { return { + ...getDataConfigFromEnv(), ...getArchiverConfigFromEnv(), ...getProverEnvVars(), ...getP2PConfigFromEnv(), diff --git a/yarn-project/prover-node/src/factory.ts b/yarn-project/prover-node/src/factory.ts index 3d9cec10579..5632ba0428d 100644 --- a/yarn-project/prover-node/src/factory.ts +++ 
b/yarn-project/prover-node/src/factory.ts @@ -3,6 +3,7 @@ import { type ProverCoordination } from '@aztec/circuit-types'; import { createEthereumChain } from '@aztec/ethereum'; import { Buffer32 } from '@aztec/foundation/buffer'; import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; import { RollupAbi } from '@aztec/l1-artifacts'; import { createProverClient } from '@aztec/prover-client'; import { L1Publisher } from '@aztec/sequencer-client'; @@ -25,7 +26,7 @@ import { QuoteSigner } from './quote-signer.js'; /** Creates a new prover node given a config. */ export async function createProverNode( - config: ProverNodeConfig, + config: ProverNodeConfig & DataStoreConfig, deps: { telemetry?: TelemetryClient; log?: DebugLogger; diff --git a/yarn-project/prover-node/src/prover-coordination/factory.ts b/yarn-project/prover-node/src/prover-coordination/factory.ts index 1c44d2673dc..a6353294dc2 100644 --- a/yarn-project/prover-node/src/prover-coordination/factory.ts +++ b/yarn-project/prover-node/src/prover-coordination/factory.ts @@ -2,6 +2,7 @@ import { type ArchiveSource, type Archiver } from '@aztec/archiver'; import { BBCircuitVerifier, TestCircuitVerifier } from '@aztec/bb-prover'; import { type ProverCoordination, type WorldStateSynchronizer, createAztecNodeClient } from '@aztec/circuit-types'; import { createDebugLogger } from '@aztec/foundation/log'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; import { createP2PClient } from '@aztec/p2p'; import { type TelemetryClient } from '@aztec/telemetry-client'; @@ -22,7 +23,7 @@ type ProverCoordinationDeps = { * If an aztec node is provided, it is returned directly. */ export async function createProverCoordination( - config: ProverNodeConfig, + config: ProverNodeConfig & DataStoreConfig, deps: ProverCoordinationDeps, ): Promise { const log = createDebugLogger('aztec:createProverCoordination'); diff --git a/yarn-project/pxe/src/config/index.ts b/yarn-project/pxe/src/config/index.ts index 0873a845414..d97cacbdb92 100644 --- a/yarn-project/pxe/src/config/index.ts +++ b/yarn-project/pxe/src/config/index.ts @@ -5,6 +5,7 @@ import { getConfigFromMappings, numberConfigHelper, } from '@aztec/foundation/config'; +import { type DataStoreConfig, dataConfigMappings } from '@aztec/kv-store/config'; import { type Network } from '@aztec/types/network'; import { readFileSync } from 'fs'; @@ -35,11 +36,9 @@ export interface PXEConfig { l2BlockPollingIntervalMS: number; /** L2 block to start scanning from for new accounts */ l2StartingBlock: number; - /** Where to store PXE data. If not set, will store in memory */ - dataDirectory?: string; } -export type PXEServiceConfig = PXEConfig & KernelProverConfig & BBProverConfig; +export type PXEServiceConfig = PXEConfig & KernelProverConfig & BBProverConfig & DataStoreConfig; export type CliPXEOptions = { /** External Aztec network to connect to. e.g. devnet */ @@ -51,6 +50,7 @@ export type CliPXEOptions = { }; export const pxeConfigMappings: ConfigMappingsType = { + ...dataConfigMappings, l2BlockPollingIntervalMS: { env: 'PXE_BLOCK_POLLING_INTERVAL_MS', description: 'The interval to wait between polling for new blocks.', @@ -61,10 +61,6 @@ export const pxeConfigMappings: ConfigMappingsType = { ...numberConfigHelper(INITIAL_L2_BLOCK_NUM), description: 'L2 block to start scanning from for new accounts', }, - dataDirectory: { - env: 'PXE_DATA_DIRECTORY', - description: 'Where to store PXE data. 
If not set, will store in memory', - }, bbBinaryPath: { env: 'BB_BINARY_PATH', description: 'Path to the BB binary', @@ -111,6 +107,7 @@ export const pxeCliConfigMappings: ConfigMappingsType = { export const allPxeConfigMappings: ConfigMappingsType = { ...pxeConfigMappings, ...pxeCliConfigMappings, + ...dataConfigMappings, proverEnabled: { env: 'PXE_PROVER_ENABLED', parseEnv: (val: string) => ['1', 'true', 'TRUE'].includes(val) || !!process.env.NETWORK, diff --git a/yarn-project/pxe/src/pxe_service/create_pxe_service.ts b/yarn-project/pxe/src/pxe_service/create_pxe_service.ts index bb608b93873..9d400b48615 100644 --- a/yarn-project/pxe/src/pxe_service/create_pxe_service.ts +++ b/yarn-project/pxe/src/pxe_service/create_pxe_service.ts @@ -30,12 +30,10 @@ export async function createPXEService( const logSuffix = typeof useLogSuffix === 'boolean' ? (useLogSuffix ? randomBytes(3).toString('hex') : undefined) : useLogSuffix; - const l1Contracts = await aztecNode.getL1ContractAddresses(); - const storeConfig = { dataDirectory: config.dataDirectory, l1Contracts }; const keyStore = new KeyStore( - await createStore('pxe_key_store', storeConfig, createDebugLogger('aztec:pxe:keystore:lmdb')), + await createStore('pxe_key_store', config, createDebugLogger('aztec:pxe:keystore:lmdb')), ); - const db = new KVPxeDatabase(await createStore('pxe_data', storeConfig, createDebugLogger('aztec:pxe:data:lmdb'))); + const db = new KVPxeDatabase(await createStore('pxe_data', config, createDebugLogger('aztec:pxe:data:lmdb'))); const prover = proofCreator ?? (await createProver(config, logSuffix)); const server = new PXEService(keyStore, aztecNode, db, prover, config, logSuffix); diff --git a/yarn-project/pxe/src/pxe_service/test/pxe_service.test.ts b/yarn-project/pxe/src/pxe_service/test/pxe_service.test.ts index ca0d00de0db..c4fd6d2a627 100644 --- a/yarn-project/pxe/src/pxe_service/test/pxe_service.test.ts +++ b/yarn-project/pxe/src/pxe_service/test/pxe_service.test.ts @@ -19,7 +19,13 @@ function createPXEService(): Promise { const keyStore = new KeyStore(kvStore); const node = mock(); const db = new KVPxeDatabase(kvStore); - const config: PXEServiceConfig = { l2BlockPollingIntervalMS: 100, l2StartingBlock: INITIAL_L2_BLOCK_NUM }; + const config: PXEServiceConfig = { + l2BlockPollingIntervalMS: 100, + l2StartingBlock: INITIAL_L2_BLOCK_NUM, + dataDirectory: undefined, + dataStoreMapSizeKB: 1024 * 1024, + l1Contracts: { rollupAddress: EthAddress.random() }, + }; // Setup the relevant mocks node.getBlockNumber.mockResolvedValue(2); @@ -55,7 +61,14 @@ describe('PXEService', () => { keyStore = new KeyStore(kvStore); node = mock(); db = new KVPxeDatabase(kvStore); - config = { l2BlockPollingIntervalMS: 100, l2StartingBlock: INITIAL_L2_BLOCK_NUM, proverEnabled: false }; + config = { + l2BlockPollingIntervalMS: 100, + l2StartingBlock: INITIAL_L2_BLOCK_NUM, + proverEnabled: false, + dataDirectory: undefined, + dataStoreMapSizeKB: 1024 * 1024, + l1Contracts: { rollupAddress: EthAddress.random() }, + }; }); it('throws when submitting a tx with a nullifier of already settled tx', async () => { diff --git a/yarn-project/world-state/src/synchronizer/config.ts b/yarn-project/world-state/src/synchronizer/config.ts index 1308cd801a3..4b90127e952 100644 --- a/yarn-project/world-state/src/synchronizer/config.ts +++ b/yarn-project/world-state/src/synchronizer/config.ts @@ -11,8 +11,11 @@ export interface WorldStateConfig { /** Size of the batch for each get-blocks request from the synchronizer to the archiver. 
*/ worldStateBlockRequestBatchSize?: number; - /** The maximum size of the combined world state db in KB*/ - worldStateDbMapSizeKb: number; + /** The maximum size of the combined world state db in KB, optional, will inherit from the general dataStoreMapSizeKB if not specified*/ + worldStateDbMapSizeKb?: number; + + /** Optional directory for the world state DB, if unspecified will default to the general data directory */ + worldStateDataDirectory?: string; } export const worldStateConfigMappings: ConfigMappingsType = { @@ -35,9 +38,12 @@ export const worldStateConfigMappings: ConfigMappingsType = { worldStateDbMapSizeKb: { env: 'WS_DB_MAP_SIZE_KB', parseEnv: (val: string | undefined) => (val ? +val : undefined), - defaultValue: 1024 * 1024 * 1024, // 1TB description: 'The maximum possible size of the world state DB', }, + worldStateDataDirectory: { + env: 'WS_DATA_DIRECTORY', + description: 'Optional directory for the world state database', + }, }; /** diff --git a/yarn-project/world-state/src/synchronizer/factory.ts b/yarn-project/world-state/src/synchronizer/factory.ts index 7a846652ca4..c68c691cf83 100644 --- a/yarn-project/world-state/src/synchronizer/factory.ts +++ b/yarn-project/world-state/src/synchronizer/factory.ts @@ -1,6 +1,7 @@ import { type L1ToL2MessageSource, type L2BlockSource } from '@aztec/circuit-types'; import { createDebugLogger } from '@aztec/foundation/log'; -import { type DataStoreConfig, createStore } from '@aztec/kv-store/utils'; +import { DataStoreConfig } from '@aztec/kv-store/config'; +import { createStore } from '@aztec/kv-store/utils'; import { type TelemetryClient } from '@aztec/telemetry-client'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; @@ -22,16 +23,20 @@ export async function createWorldState( config: WorldStateConfig & DataStoreConfig, client: TelemetryClient = new NoopTelemetryClient(), ) { + const newConfig = { + dataDirectory: config.worldStateDataDirectory ?? config.dataDirectory, + dataStoreMapSizeKB: config.worldStateDbMapSizeKb ?? config.dataStoreMapSizeKB, + } as DataStoreConfig; const merkleTrees = ['true', '1'].includes(process.env.USE_LEGACY_WORLD_STATE ?? '') ? await MerkleTrees.new( - await createStore('world-state', config, createDebugLogger('aztec:world-state:lmdb')), + await createStore('world-state', newConfig, createDebugLogger('aztec:world-state:lmdb')), client, ) - : config.dataDirectory + : newConfig.dataDirectory ? 
await NativeWorldStateService.new( config.l1Contracts.rollupAddress, - config.dataDirectory, - config.worldStateDbMapSizeKb, + newConfig.dataDirectory, + newConfig.dataStoreMapSizeKB, ) : await NativeWorldStateService.tmp( config.l1Contracts.rollupAddress, diff --git a/yarn-project/world-state/src/test/integration.test.ts b/yarn-project/world-state/src/test/integration.test.ts index 3a6752f011f..7ea223260c3 100644 --- a/yarn-project/world-state/src/test/integration.test.ts +++ b/yarn-project/world-state/src/test/integration.test.ts @@ -3,7 +3,7 @@ import { type L2Block, MerkleTreeId } from '@aztec/circuit-types'; import { EthAddress, type Fr } from '@aztec/circuits.js'; import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; -import { type DataStoreConfig } from '@aztec/kv-store/utils'; +import { type DataStoreConfig } from '@aztec/kv-store/config'; import { jest } from '@jest/globals'; @@ -38,11 +38,11 @@ describe('world-state integration', () => { beforeEach(async () => { config = { dataDirectory: undefined, + dataStoreMapSizeKB: 1024 * 1024, l1Contracts: { rollupAddress }, worldStateBlockCheckIntervalMS: 20, worldStateProvenBlocksOnly: false, worldStateBlockRequestBatchSize: 5, - worldStateDbMapSizeKb: 1024 * 1024, }; archiver = new MockPrefilledArchiver(blocks, messages); From 7a0ed9b54ec63a0d85e59e5c53f1b7f38dccde0b Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Thu, 14 Nov 2024 17:27:53 +0000 Subject: [PATCH 21/31] WIP --- yarn-project/aztec/src/cli/aztec_start_options.ts | 6 ------ yarn-project/foundation/src/config/env_var.ts | 1 - yarn-project/yarn.lock | 1 + 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/yarn-project/aztec/src/cli/aztec_start_options.ts b/yarn-project/aztec/src/cli/aztec_start_options.ts index 9d9c00a7d98..53b0ab01949 100644 --- a/yarn-project/aztec/src/cli/aztec_start_options.ts +++ b/yarn-project/aztec/src/cli/aztec_start_options.ts @@ -218,12 +218,6 @@ export const aztecStartOptions: { [key: string]: AztecStartOption[] } = { defaultValue: undefined, envVar: undefined, }, - { - flag: '--pxe.dataDirectory ', - description: 'Where to store PXE data. 
If not set, will store in memory', - defaultValue: undefined, - envVar: 'PXE_DATA_DIRECTORY', - }, ...getOptions('pxe', allPxeConfigMappings), ], ARCHIVER: [ diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 4dd7da63d04..c62e2a03c1d 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -113,7 +113,6 @@ export type EnvVar = | 'PROVER_REQUIRED_CONFIRMATIONS' | 'PROVER_TEST_DELAY_MS' | 'PXE_BLOCK_POLLING_INTERVAL_MS' - | 'PXE_DATA_DIRECTORY' | 'PXE_L2_STARTING_BLOCK' | 'PXE_PROVER_ENABLED' | 'QUOTE_PROVIDER_BASIS_POINT_FEE' diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index 449d4424f8e..1ad1e7413d6 100644 --- a/yarn-project/yarn.lock +++ b/yarn-project/yarn.lock @@ -763,6 +763,7 @@ __metadata: version: 0.0.0-use.local resolution: "@aztec/kv-store@workspace:kv-store" dependencies: + "@aztec/ethereum": "workspace:^" "@aztec/foundation": "workspace:^" "@jest/globals": ^29.5.0 "@types/jest": ^29.5.0 From 5d229ebc75788e228213389dc7a8a070df3de982 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Thu, 14 Nov 2024 18:54:28 +0000 Subject: [PATCH 22/31] WIP --- cspell.json | 3 +++ yarn-project/kv-store/src/lmdb/store.ts | 2 +- yarn-project/kv-store/src/utils.ts | 4 ++-- .../pxe/src/pxe_service/create_pxe_service.ts | 12 ++++++++++-- .../world-state/src/native/native_world_state.ts | 9 +++++---- .../src/native/native_world_state_instance.ts | 1 + 6 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cspell.json b/cspell.json index def40448ad5..6073069b028 100644 --- a/cspell.json +++ b/cspell.json @@ -120,6 +120,7 @@ "ierc", "indexeddb", "initialise", + "initialising", "interruptible", "isequal", "ivpk", @@ -154,6 +155,7 @@ "monomorphize", "mplex", "msgpack", + "msgpackr", "muldiv", "multiaddr", "multiaddrs", @@ -163,6 +165,7 @@ "muxers", "nada", "namespacing", + "napi", "Nargo", "nixpkgs", "nodebuffer", diff --git a/yarn-project/kv-store/src/lmdb/store.ts b/yarn-project/kv-store/src/lmdb/store.ts index fa48c822842..22672421d47 100644 --- a/yarn-project/kv-store/src/lmdb/store.ts +++ b/yarn-project/kv-store/src/lmdb/store.ts @@ -62,11 +62,11 @@ export class AztecLmdbStore implements AztecKVStore { ephemeral: boolean = false, log = createDebugLogger('aztec:kv-store:lmdb'), ): AztecLmdbStore { - log.debug(`Opening LMDB database at ${path || 'temporary location'}`); if (path) { mkdirSync(path, { recursive: true }); } const mapSize = mapSizeKb === undefined ? undefined : 1024 * mapSizeKb; + log.debug(`Opening LMDB database at ${path || 'temporary location'} with map size ${mapSize}`); const rootDb = open({ path, noSync: ephemeral, mapSize }); return new AztecLmdbStore(rootDb, ephemeral, path); } diff --git a/yarn-project/kv-store/src/utils.ts b/yarn-project/kv-store/src/utils.ts index 2a205c778a1..b8637975e8a 100644 --- a/yarn-project/kv-store/src/utils.ts +++ b/yarn-project/kv-store/src/utils.ts @@ -15,8 +15,8 @@ export function createStore(name: string, config: DataStoreConfig, log: Logger = log.info( dataDirectory - ? `Creating ${name} data store at directory ${dataDirectory}` - : `Creating ${name} ephemeral data store`, + ? 
`Creating ${name} data store at directory ${dataDirectory} with map size ${config.dataStoreMapSizeKB} KB` + : `Creating ${name} ephemeral data store with map size ${config.dataStoreMapSizeKB} KB`, ); return initStoreForRollup( AztecLmdbStore.open(dataDirectory, config.dataStoreMapSizeKB, false), diff --git a/yarn-project/pxe/src/pxe_service/create_pxe_service.ts b/yarn-project/pxe/src/pxe_service/create_pxe_service.ts index 9d400b48615..3fa9abe5b1e 100644 --- a/yarn-project/pxe/src/pxe_service/create_pxe_service.ts +++ b/yarn-project/pxe/src/pxe_service/create_pxe_service.ts @@ -30,10 +30,18 @@ export async function createPXEService( const logSuffix = typeof useLogSuffix === 'boolean' ? (useLogSuffix ? randomBytes(3).toString('hex') : undefined) : useLogSuffix; + const l1Contracts = await aztecNode.getL1ContractAddresses(); + const configWithContracts = { + ...config, + l1Contracts, + } as PXEServiceConfig; + const keyStore = new KeyStore( - await createStore('pxe_key_store', config, createDebugLogger('aztec:pxe:keystore:lmdb')), + await createStore('pxe_key_store', configWithContracts, createDebugLogger('aztec:pxe:keystore:lmdb')), + ); + const db = new KVPxeDatabase( + await createStore('pxe_data', configWithContracts, createDebugLogger('aztec:pxe:data:lmdb')), ); - const db = new KVPxeDatabase(await createStore('pxe_data', config, createDebugLogger('aztec:pxe:data:lmdb'))); const prover = proofCreator ?? (await createProver(config, logSuffix)); const server = new PXEService(keyStore, aztecNode, db, prover, config, logSuffix); diff --git a/yarn-project/world-state/src/native/native_world_state.ts b/yarn-project/world-state/src/native/native_world_state.ts index 260bfc15a31..8dc112fecfc 100644 --- a/yarn-project/world-state/src/native/native_world_state.ts +++ b/yarn-project/world-state/src/native/native_world_state.ts @@ -59,19 +59,20 @@ export class NativeWorldStateService implements MerkleTreeDatabase { log = createDebugLogger('aztec:world-state:database'), cleanup = () => Promise.resolve(), ): Promise { - const rollupAddressFile = join(dataDir, ROLLUP_ADDRESS_FILE); + const worldStateDirectory = join(dataDir, 'world_state'); + const rollupAddressFile = join(worldStateDirectory, ROLLUP_ADDRESS_FILE); const currentRollupStr = await readFile(rollupAddressFile, 'utf8').catch(() => undefined); const currentRollupAddress = currentRollupStr ? 
EthAddress.fromString(currentRollupStr.trim()) : undefined; if (currentRollupAddress && !rollupAddress.equals(currentRollupAddress)) { log.warn('Rollup address changed, deleting database'); - await rm(dataDir, { recursive: true, force: true }); + await rm(worldStateDirectory, { recursive: true, force: true }); } - await mkdir(dataDir, { recursive: true }); + await mkdir(worldStateDirectory, { recursive: true }); await writeFile(rollupAddressFile, rollupAddress.toString(), 'utf8'); - const instance = new NativeWorldState(dataDir, dbMapSizeKb); + const instance = new NativeWorldState(worldStateDirectory, dbMapSizeKb); const worldState = new this(instance, log, cleanup); try { await worldState.init(); diff --git a/yarn-project/world-state/src/native/native_world_state_instance.ts b/yarn-project/world-state/src/native/native_world_state_instance.ts index 01f6871c0b4..671baaa4fe6 100644 --- a/yarn-project/world-state/src/native/native_world_state_instance.ts +++ b/yarn-project/world-state/src/native/native_world_state_instance.ts @@ -83,6 +83,7 @@ export class NativeWorldState implements NativeWorldStateInstance { /** Creates a new native WorldState instance */ constructor(dataDir: string, dbMapSizeKb: number, private log = createDebugLogger('aztec:world-state:database')) { + log.info(`Creating world state data store at directory ${dataDir} with map size ${dbMapSizeKb} KB`); this.instance = new NATIVE_MODULE[NATIVE_CLASS_NAME]( dataDir, { From 62d9a9a83854fa6df6dfb46694d8085934eb2024 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Fri, 15 Nov 2024 09:47:20 +0000 Subject: [PATCH 23/31] Changed default value --- yarn-project/kv-store/src/config.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yarn-project/kv-store/src/config.ts b/yarn-project/kv-store/src/config.ts index 41592bd10b1..0292bd0b487 100644 --- a/yarn-project/kv-store/src/config.ts +++ b/yarn-project/kv-store/src/config.ts @@ -16,7 +16,7 @@ export const dataConfigMappings: ConfigMappingsType = { dataStoreMapSizeKB: { env: 'DATA_STORE_MAP_SIZE_KB', description: 'DB mapping size to be applied to all key/value stores', - ...numberConfigHelper(1_024 * 1_024 * 1_024), // Defaulted to 1TB + ...numberConfigHelper(128 * 1_024 * 1_024), // Defaulted to 128 GB }, l1Contracts: { description: 'The deployed L1 contract addresses', From d8b6906988d579be92595f4c34bae932ad3eec1b Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Fri, 15 Nov 2024 14:33:23 +0000 Subject: [PATCH 24/31] WIP --- .../lmdb_store/lmdb_transaction.cpp | 5 + .../lmdb_store/lmdb_transaction.hpp | 29 ++-- .../lmdb_store/lmdb_tree_store.cpp | 26 ++-- .../lmdb_store/lmdb_tree_store.hpp | 28 +--- .../lmdb_store/lmdb_tree_store.test.cpp | 14 +- .../lmdb_tree_write_transaction.cpp | 5 + .../lmdb_tree_write_transaction.hpp | 12 +- .../crypto/merkle_tree/lmdb_store/queries.cpp | 38 ++++++ .../crypto/merkle_tree/lmdb_store/queries.hpp | 28 ++-- .../cached_content_addressed_tree_store.hpp | 127 ++++++++---------- 10 files changed, 172 insertions(+), 140 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp index 3e1445ab706..b41787138eb 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp @@ -34,4 +34,9 @@ bool LMDBTransaction::get_value(std::vector& key, std::vector& { return 
lmdb_queries::get_value(key, data, db, *this); } + +bool LMDBTransaction::get_value(std::vector& key, index_t& data, const LMDBDatabase& db) const +{ + return lmdb_queries::get_value(key, data, db, *this); +} } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp index 6ae56bd8f9f..cf2a55c1285 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp @@ -2,6 +2,7 @@ #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" +#include "lmdb.h" #include #include @@ -37,16 +38,18 @@ class LMDBTransaction { */ virtual void abort(); - template + template bool get_value_or_previous(T& key, - std::vector& data, + K& data, const LMDBDatabase& db, - const std::function&)>& is_valid) const; + const std::function& is_valid) const; - template bool get_value_or_previous(T& key, std::vector& data, const LMDBDatabase& db) const; + template bool get_value_or_previous(T& key, K& data, const LMDBDatabase& db) const; template bool get_value(T& key, std::vector& data, const LMDBDatabase& db) const; + template bool get_value(T& key, index_t& data, const LMDBDatabase& db) const; + template void get_all_values_greater_or_equal_key(const T& key, std::vector>& data, @@ -59,6 +62,8 @@ class LMDBTransaction { bool get_value(std::vector& key, std::vector& data, const LMDBDatabase& db) const; + bool get_value(std::vector& key, index_t& data, const LMDBDatabase& db) const; + protected: std::shared_ptr _environment; MDB_txn* _transaction; @@ -71,17 +76,23 @@ template bool LMDBTransaction::get_value(T& key, std::vector -bool LMDBTransaction::get_value_or_previous(T& key, std::vector& data, const LMDBDatabase& db) const +template bool LMDBTransaction::get_value(T& key, index_t& data, const LMDBDatabase& db) const +{ + std::vector keyBuffer = serialise_key(key); + return get_value(keyBuffer, data, db); +} + +template +bool LMDBTransaction::get_value_or_previous(T& key, K& data, const LMDBDatabase& db) const { return lmdb_queries::get_value_or_previous(key, data, db, *this); } -template +template bool LMDBTransaction::get_value_or_previous(T& key, - std::vector& data, + K& data, const LMDBDatabase& db, - const std::function&)>& is_valid) const + const std::function& is_valid) const { return lmdb_queries::get_value_or_previous(key, data, db, is_valid, *this); } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index 742f36d0395..cab4cc54e15 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -166,17 +166,14 @@ bool LMDBTreeStore::read_meta_data(TreeMeta& metaData, LMDBTreeStore::ReadTransa return success; } -void LMDBTreeStore::write_leaf_indices(const fr& leafValue, const Indices& indices, LMDBTreeStore::WriteTransaction& tx) +void LMDBTreeStore::write_leaf_index(const fr& leafValue, const index_t& index, LMDBTreeStore::WriteTransaction& tx) { - msgpack::sbuffer buffer; - 
msgpack::pack(buffer, indices); - std::vector encoded(buffer.data(), buffer.data() + buffer.size()); FrKeyType key(leafValue); // std::cout << "Writing leaf indices by key " << key << std::endl; - tx.put_value(key, encoded, *_leafValueToIndexDatabase); + tx.put_value(key, index, *_leafValueToIndexDatabase); } -void LMDBTreeStore::delete_leaf_indices(const fr& leafValue, LMDBTreeStore::WriteTransaction& tx) +void LMDBTreeStore::delete_leaf_index(const fr& leafValue, LMDBTreeStore::WriteTransaction& tx) { FrKeyType key(leafValue); // std::cout << "Deleting leaf indices by key " << key << std::endl; @@ -230,23 +227,20 @@ void LMDBTreeStore::delete_leaf_by_hash(const fr& leafHash, WriteTransaction& tx } fr LMDBTreeStore::find_low_leaf(const fr& leafValue, - Indices& indices, + index_t& index, const std::optional& sizeLimit, ReadTransaction& tx) { - std::vector data; FrKeyType key(leafValue); - auto is_valid = [&](const std::vector& data) { - Indices tmp; - msgpack::unpack((const char*)data.data(), data.size()).get().convert(tmp); - return tmp.indices[0] < sizeLimit.value(); + auto is_valid = [&](const MDB_val& data) { + index_t tmp = 0; + deserialise_key(data.mv_data, tmp); + return tmp < sizeLimit.value(); }; if (!sizeLimit.has_value()) { - tx.get_value_or_previous(key, data, *_leafValueToIndexDatabase); - msgpack::unpack((const char*)data.data(), data.size()).get().convert(indices); + tx.get_value_or_previous(key, index, *_leafValueToIndexDatabase); } else { - tx.get_value_or_previous(key, data, *_leafValueToIndexDatabase, is_valid); - msgpack::unpack((const char*)data.data(), data.size()).get().convert(indices); + tx.get_value_or_previous(key, index, *_leafValueToIndexDatabase, is_valid); } return key; } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index 760a948dd6f..6eda2b7eade 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -43,14 +43,6 @@ inline std::ostream& operator<<(std::ostream& os, const BlockPayload& block) return os; } -struct Indices { - std::vector indices; - - MSGPACK_FIELDS(indices); - - bool operator==(const Indices& other) const { return indices == other.indices; } -}; - struct NodePayload { std::optional left; std::optional right; @@ -96,16 +88,13 @@ class LMDBTreeStore { bool read_meta_data(TreeMeta& metaData, ReadTransaction& tx); - template bool read_leaf_indices(const fr& leafValue, Indices& indices, TxType& tx); + template bool read_leaf_index(const fr& leafValue, index_t& leafIndex, TxType& tx); - fr find_low_leaf(const fr& leafValue, - Indices& indices, - const std::optional& sizeLimit, - ReadTransaction& tx); + fr find_low_leaf(const fr& leafValue, index_t& index, const std::optional& sizeLimit, ReadTransaction& tx); - void write_leaf_indices(const fr& leafValue, const Indices& indices, WriteTransaction& tx); + void write_leaf_index(const fr& leafValue, const index_t& leafIndex, WriteTransaction& tx); - void delete_leaf_indices(const fr& leafValue, WriteTransaction& tx); + void delete_leaf_index(const fr& leafValue, WriteTransaction& tx); bool read_node(const fr& nodeHash, NodePayload& nodeData, ReadTransaction& tx); @@ -152,15 +141,10 @@ class LMDBTreeStore { template bool get_node_data(const fr& nodeHash, NodePayload& nodeData, TxType& tx); }; -template bool 
LMDBTreeStore::read_leaf_indices(const fr& leafValue, Indices& indices, TxType& tx) +template bool LMDBTreeStore::read_leaf_index(const fr& leafValue, index_t& leafIndex, TxType& tx) { FrKeyType key(leafValue); - std::vector data; - bool success = tx.template get_value(key, data, *_leafValueToIndexDatabase); - if (success) { - msgpack::unpack((const char*)data.data(), data.size()).get().convert(indices); - } - return success; + return tx.template get_value(key, leafIndex, *_leafValueToIndexDatabase); } template diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp index f04ec2a0f39..f7b10f9f4a8 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp @@ -104,25 +104,23 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_meta_data) TEST_F(LMDBTreeStoreTest, can_write_and_read_leaf_indices) { - Indices indices; - indices.indices.push_back(47); - indices.indices.push_back(86); + index_t index = 47; bb::fr key = VALUES[5]; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.write_leaf_indices(key, indices, *transaction); + store.write_leaf_index(key, index, *transaction); transaction->commit(); } { LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - Indices readBack; - bool success = store.read_leaf_indices(key, readBack, *transaction); + index_t readBack = 0; + bool success = store.read_leaf_index(key, readBack, *transaction); EXPECT_TRUE(success); - EXPECT_EQ(readBack, indices); + EXPECT_EQ(readBack, index); - success = store.read_leaf_indices(VALUES[6], readBack, *transaction); + success = store.read_leaf_index(VALUES[6], readBack, *transaction); EXPECT_FALSE(success); } } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp index 4b4cd846a2f..5e524ca2fff 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp @@ -42,6 +42,11 @@ void LMDBTreeWriteTransaction::put_value(std::vector& key, std::vector< lmdb_queries::put_value(key, data, db, *this); } +void LMDBTreeWriteTransaction::put_value(std::vector& key, const index_t& data, const LMDBDatabase& db) +{ + lmdb_queries::put_value(key, data, db, *this); +} + void LMDBTreeWriteTransaction::delete_value(std::vector& key, const LMDBDatabase& db) { lmdb_queries::delete_value(key, db, *this); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp index d12d5fdc3ad..927e14fb4fa 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp @@ -32,8 +32,12 @@ class LMDBTreeWriteTransaction : public LMDBTransaction { template void put_value(T& key, std::vector& data, const LMDBDatabase& db); + template void put_value(T& key, const index_t& data, const 
LMDBDatabase& db); + void put_value(std::vector& key, std::vector& data, const LMDBDatabase& db); + void put_value(std::vector& key, const index_t& data, const LMDBDatabase& db); + template void delete_value(T& key, const LMDBDatabase& db); void delete_value(std::vector& key, const LMDBDatabase& db); @@ -51,7 +55,13 @@ template void LMDBTreeWriteTransaction::put_value(T& key, std::vector& data, const LMDBDatabase& db) { std::vector keyBuffer = serialise_key(key); - lmdb_queries::put_value(keyBuffer, data, db, *this); + put_value(keyBuffer, data, db); +} + +template void LMDBTreeWriteTransaction::put_value(T& key, const index_t& data, const LMDBDatabase& db) +{ + std::vector keyBuffer = serialise_key(key); + put_value(keyBuffer, data, db); } template void LMDBTreeWriteTransaction::delete_value(T& key, const LMDBDatabase& db) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp index 311b7484d45..939cd58dde1 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp @@ -1,5 +1,7 @@ #include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" +#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp" +#include namespace bb::crypto::merkle_tree::lmdb_queries { @@ -18,6 +20,24 @@ void put_value(std::vector& key, call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, 0U); } +void put_value(std::vector& key, + const index_t& data, + const LMDBDatabase& db, + bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + // use the serialise key method for serialising the index + std::vector serialised = serialise_key(data); + + MDB_val dbVal; + dbVal.mv_size = serialised.size(); + dbVal.mv_data = (void*)serialised.data(); + call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, 0U); +} + void delete_value(std::vector& key, const LMDBDatabase& db, bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx) @@ -49,4 +69,22 @@ bool get_value(std::vector& key, copy_to_vector(dbVal, data); return true; } + +bool get_value(std::vector& key, + index_t& data, + const LMDBDatabase& db, + const bb::crypto::merkle_tree::LMDBTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) { + return false; + } + // use the deserialise key method for deserialising the index + deserialise_key(dbVal.mv_data, data); + return true; +} } // namespace bb::crypto::merkle_tree::lmdb_queries \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp index 3269dc13952..aa97a2d2518 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp @@ -14,8 +14,8 @@ class LMDBTreeWriteTransaction; namespace lmdb_queries { -template -bool get_value_or_previous(TKey& key, std::vector& data, const LMDBDatabase& db, const TxType& tx) +template +bool get_value_or_previous(TKey& key, TValue& data, 
const LMDBDatabase& db, const TxType& tx) { std::vector keyBuffer = serialise_key(key); uint32_t keySize = static_cast(keyBuffer.size()); @@ -36,7 +36,7 @@ bool get_value_or_previous(TKey& key, std::vector& data, const LMDBData std::vector temp = mdb_val_to_vector(dbKey); if (keyBuffer == temp) { // we have the exact key - copy_to_vector(dbVal, data); + deserialise_key(dbVal.mv_data, data); success = true; } else { // We have a key of the same size but larger value OR a larger size @@ -48,7 +48,7 @@ bool get_value_or_previous(TKey& key, std::vector& data, const LMDBData if (dbKey.mv_size != keySize) { // There is no previous key, do nothing } else { - copy_to_vector(dbVal, data); + deserialise_key(dbVal.mv_data, data); deserialise_key(dbKey.mv_data, key); success = true; } @@ -66,7 +66,7 @@ bool get_value_or_previous(TKey& key, std::vector& data, const LMDBData if (dbKey.mv_size != keySize) { // The key is not the same size, same as not found, do nothing } else { - copy_to_vector(dbVal, data); + deserialise_key(dbVal.mv_data, data); deserialise_key(dbKey.mv_data, key); success = true; } @@ -86,11 +86,11 @@ bool get_value_or_previous(TKey& key, std::vector& data, const LMDBData return success; } -template +template bool get_value_or_previous(TKey& key, - std::vector& data, + TValue& data, const LMDBDatabase& db, - const std::function&)>& is_valid, + const std::function& is_valid, const TxType& tx) { std::vector keyBuffer = serialise_key(key); @@ -114,8 +114,8 @@ bool get_value_or_previous(TKey& key, std::vector temp = mdb_val_to_vector(dbKey); if (keyBuffer == temp || lower) { // We have the exact key, we need to determine if it is valid - copy_to_vector(dbVal, data); - if (is_valid(data)) { + if (is_valid(dbVal)) { + deserialise_key(dbVal.mv_data, data); deserialise_key(dbKey.mv_data, key); success = true; // It's valid @@ -151,8 +151,8 @@ bool get_value_or_previous(TKey& key, // The key is not the same size, same as not found, exit break; } - copy_to_vector(dbVal, data); - if (is_valid(data)) { + if (is_valid(dbVal)) { + deserialise_key(dbVal.mv_data, data); deserialise_key(dbKey.mv_data, key); success = true; // It's valid @@ -406,11 +406,15 @@ void put_value(std::vector& key, const LMDBDatabase& db, LMDBTreeWriteTransaction& tx); +void put_value(std::vector& key, const index_t& data, const LMDBDatabase& db, LMDBTreeWriteTransaction& tx); + void delete_value(std::vector& key, const LMDBDatabase& db, LMDBTreeWriteTransaction& tx); bool get_value(std::vector& key, std::vector& data, const LMDBDatabase& db, const LMDBTransaction& tx); + +bool get_value(std::vector& key, index_t& data, const LMDBDatabase& db, const LMDBTransaction& tx); } // namespace lmdb_queries } // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index d4596c77d01..a262de12ef3 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -215,9 +215,8 @@ template class ContentAddressedCachedTreeStore { std::unordered_map nodes_; // This is a store mapping the leaf key (e.g. 
slot for public data or nullifier value for nullifier tree) to the - // indices in the tree For indexed tress there is only ever one index against the key, for append-only trees there - // can be multiple - std::map indices_; + // index in the tree + std::map indices_; // This is a mapping from leaf hash to leaf pre-image. This will contain entries that need to be omitted when // commiting updates @@ -240,7 +239,7 @@ template class ContentAddressedCachedTreeStore { void persist_meta(TreeMeta& m, WriteTransaction& tx); - void hydrate_indices_from_persisted_store(ReadTransaction& tx); + // void hydrate_indices_from_persisted_store(ReadTransaction& tx); void persist_leaf_indices(WriteTransaction& tx); @@ -314,14 +313,14 @@ std::pair ContentAddressedCachedTreeStore::find_lo const fr& new_leaf_key, const RequestContext& requestContext, ReadTransaction& tx) const { auto new_value_as_number = uint256_t(new_leaf_key); - Indices committed; + index_t committed; std::optional sizeLimit = std::nullopt; if (initialised_from_block_.has_value() || requestContext.blockNumber.has_value()) { sizeLimit = constrain_tree_size(requestContext, tx); } fr found_key = dataStore_->find_low_leaf(new_leaf_key, committed, sizeLimit, tx); - auto db_index = committed.indices[0]; + index_t db_index = committed; uint256_t retrieved_value = found_key; // Accessing indices_ from here under a lock @@ -338,12 +337,12 @@ std::pair ContentAddressedCachedTreeStore::find_lo --it; // we need to return the larger of the db value or the cached value - return std::make_pair(false, it->first > retrieved_value ? it->second.indices[0] : db_index); + return std::make_pair(false, it->first > retrieved_value ? it->second : db_index); } if (it->first == uint256_t(new_value_as_number)) { // the value is already present and the iterator points to it - return std::make_pair(true, it->second.indices[0]); + return std::make_pair(true, it->second); } // the iterator points to the element immediately larger than the requested value // We need to return the highest value from @@ -355,7 +354,7 @@ std::pair ContentAddressedCachedTreeStore::find_lo } --it; // it now points to the value less than that requested - return std::make_pair(false, it->first > retrieved_value ? it->second.indices[0] : db_index); + return std::make_pair(false, it->first > retrieved_value ? 
it->second : db_index); } template @@ -427,14 +426,10 @@ void ContentAddressedCachedTreeStore::update_index(const index_t& // std::cout << "update_index at index " << index << " leaf " << leaf << std::endl; // Accessing indices_ under a lock std::unique_lock lock(mtx_); - auto it = indices_.find(uint256_t(leaf)); - if (it == indices_.end()) { - Indices ind; - ind.indices.push_back(index); - indices_[uint256_t(leaf)] = ind; - return; + const auto [it, success] = indices_.insert({ uint256_t(leaf), index }); + if (!success) { + std::cout << "FAILED TO INSERT LEAF INDEX" << std::endl; } - it->second.indices.push_back(index); } template @@ -452,47 +447,35 @@ std::optional ContentAddressedCachedTreeStore::find_leaf ReadTransaction& tx, bool includeUncommitted) const { - Indices committed; - std::optional result = std::nullopt; - FrKeyType key = leaf; - std::vector value; - bool success = dataStore_->read_leaf_indices(key, committed, tx); - if (success) { - index_t sizeLimit = constrain_tree_size(requestContext, tx); - if (!committed.indices.empty()) { - for (index_t ind : committed.indices) { - if (ind < start_index) { - continue; - } - if (ind >= sizeLimit) { - continue; - } - if (!result.has_value()) { - result = ind; - continue; - } - result = std::min(ind, result.value()); - } - } - } if (includeUncommitted) { // Accessing indices_ under a lock std::unique_lock lock(mtx_); auto it = indices_.find(uint256_t(leaf)); - if (it != indices_.end() && !it->second.indices.empty()) { - for (index_t ind : it->second.indices) { - if (ind < start_index) { - continue; - } - if (!result.has_value()) { - result = ind; - continue; - } - result = std::min(ind, result.value()); + if (it != indices_.end()) { + // we have an uncommitted value, we will return from here + if (it->second >= start_index) { + // we have a qualifying value + return std::make_optional(it->second); } + return std::nullopt; + } + } + + // we have been asked to not include uncommitted data, or there is none available + index_t committed; + FrKeyType key = leaf; + bool success = dataStore_->read_leaf_index(key, committed, tx); + if (success) { + index_t sizeLimit = constrain_tree_size(requestContext, tx); + if (committed < start_index) { + return std::nullopt; } + if (committed >= sizeLimit) { + return std::nullopt; + } + return std::make_optional(committed); } - return result; + return std::nullopt; } template @@ -654,7 +637,7 @@ void ContentAddressedCachedTreeStore::commit(TreeMeta& finalMeta, } } else { // data is present, hydrate persisted indices - hydrate_indices_from_persisted_store(*tx); + // hydrate_indices_from_persisted_store(*tx); } } { @@ -708,7 +691,7 @@ void ContentAddressedCachedTreeStore::persist_leaf_indices(WriteT { for (auto& idx : indices_) { FrKeyType key = idx.first; - dataStore_->write_leaf_indices(key, idx.second, tx); + dataStore_->write_leaf_index(key, idx.second, tx); } } @@ -719,12 +702,11 @@ void ContentAddressedCachedTreeStore::persist_leaf_keys(const ind FrKeyType key = idx.first; // write the leaf key against the indices, this is for the pending chain store of indices - for (index_t indexForKey : idx.second.indices) { - if (indexForKey < startIndex) { - continue; - } - dataStore_->write_leaf_key_by_index(key, indexForKey, tx); + index_t indexForKey = idx.second; + if (indexForKey < startIndex) { + continue; } + dataStore_->write_leaf_key_by_index(key, indexForKey, tx); } } @@ -777,20 +759,20 @@ void ContentAddressedCachedTreeStore::persist_node(const std::opt persist_node(nodePayloadIter->second.right, 
level + 1, tx); } -template -void ContentAddressedCachedTreeStore::hydrate_indices_from_persisted_store(ReadTransaction& tx) -{ - for (auto& idx : indices_) { - std::vector value; - FrKeyType key = idx.first; - Indices persistedIndices; - bool success = dataStore_->read_leaf_indices(key, persistedIndices, tx); - if (success) { - idx.second.indices.insert( - idx.second.indices.begin(), persistedIndices.indices.begin(), persistedIndices.indices.end()); - } - } -} +// template +// void ContentAddressedCachedTreeStore::hydrate_indices_from_persisted_store(ReadTransaction& tx) +// { +// for (auto& idx : indices_) { +// std::vector value; +// FrKeyType key = idx.first; +// Indices persistedIndices; +// bool success = dataStore_->read_leaf_indices(key, persistedIndices, tx); +// if (success) { +// idx.second.indices.insert( +// idx.second.indices.begin(), persistedIndices.indices.begin(), persistedIndices.indices.end()); +// } +// } +// } template void ContentAddressedCachedTreeStore::rollback() { @@ -800,7 +782,8 @@ template void ContentAddressedCachedTreeStore(); - indices_ = std::map(); + nodes_.clear(); + indices_ = std::map(); leaves_ = std::unordered_map(); nodes_by_index_ = std::vector>(depth_ + 1, std::unordered_map()); leaf_pre_image_by_index_ = std::unordered_map(); From 71edf8957db7dc88523d77663f4df2c50a53c758 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Fri, 15 Nov 2024 18:20:44 +0000 Subject: [PATCH 25/31] Initial work on duplicate removal --- ...ontent_addressed_append_only_tree.test.cpp | 124 +++--------------- .../lmdb_store/lmdb_tree_store.cpp | 12 +- .../lmdb_store/lmdb_tree_store.hpp | 4 +- .../cached_content_addressed_tree_store.hpp | 45 +++---- .../crypto/merkle_tree/test_fixtures.hpp | 7 +- 5 files changed, 45 insertions(+), 147 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index cb718ff3253..73198cb4537 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -475,6 +475,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, errors_are_caught_and_handle std::string name = random_string(); std::string directory = random_temp_directory(); std::filesystem::create_directories(directory); + auto& random_engine = numeric::get_randomness(); { LMDBTreeStore::SharedPtr db = std::make_shared(_directory, name, 50, _maxReaders); @@ -492,9 +493,10 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, errors_are_caught_and_handle // Add lots of values to the tree uint32_t num_values_to_add = 16 * 1024; - std::vector values(num_values_to_add, VALUES[0]); + std::vector values; for (uint32_t i = 0; i < num_values_to_add; i++) { - memdb.update_element(i, VALUES[0]); + values.emplace_back(random_engine.get_random_uint256()); + memdb.update_element(i, values[i]); } add_values(tree, values); @@ -714,46 +716,31 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, test_find_leaf_index) commit_tree(tree); - values = { 16, 4, 18, 22 }; + values = { 16, 4, 19, 22 }; add_values(tree, values); - // we now have duplicate leaf 18, one committed the other not - check_find_leaf_index(tree, 18, 5, true, true); - check_find_leaf_index(tree, 18, 5, true, false); - // verify the find index from api 
check_find_leaf_index_from(tree, 18, 0, 5, true, true); - check_find_leaf_index_from(tree, 18, 6, 10, true, true); - check_find_leaf_index_from(tree, 18, 6, 0, false, false); + check_find_leaf_index_from(tree, 19, 6, 10, true, true); + check_find_leaf_index_from(tree, 19, 0, 0, false, false); commit_tree(tree); - // add another leaf 18 - add_value(tree, 18); - - // should return the first index - check_find_leaf_index_from(tree, 18, 0, 5, true, false); - check_find_leaf_index_from(tree, 18, 0, 5, true, true); - add_value(tree, 88); - // and another uncommitted 18 - add_value(tree, 18); add_value(tree, 32); - // should return the first uncommitted - check_find_leaf_index_from(tree, 18, 12, 12, true, true); - check_find_leaf_index_from(tree, 18, 14, 14, true, true); - check_find_leaf_index_from(tree, 18, 15, 0, false, true); + check_size(tree, 14); + check_size(tree, 12, false); // look past the last instance of this leaf - check_find_leaf_index_from(tree, 18, 17, 0, false, true); + check_find_leaf_index_from(tree, 18, 6, 0, false, true); // look beyond the end of uncommitted - check_find_leaf_index_from(tree, 18, 18, 0, false, true); + check_find_leaf_index_from(tree, 18, 15, 0, false, true); // look beyond the end of committed and don't include uncomitted - check_find_leaf_index_from(tree, 18, 14, 0, false, false); + check_find_leaf_index_from(tree, 88, 13, 0, false, false); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_add_multiple_values) @@ -975,7 +962,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, test_find_historic_leaf_inde commit_tree(tree); - values = { 16, 4, 18, 22 }; + values = { 16, 4, 19, 22 }; add_values(tree, values); // should not be present at block 1 @@ -987,15 +974,15 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, test_find_historic_leaf_inde check_find_historic_leaf_index_from(tree, 1, 18, 2, 0, false, false); // at block 2 it should be check_find_historic_leaf_index_from(tree, 2, 18, 2, 5, true); - // at block 2, from index 6 it should not be found if looking only at committed - check_find_historic_leaf_index_from(tree, 2, 18, 6, 5, false, false); - // at block 2, from index 6 it should be found if looking at uncommitted too - check_find_historic_leaf_index_from(tree, 2, 18, 6, 10, true); + // at block 2, from index 6, 19 should not be found if looking only at committed + check_find_historic_leaf_index_from(tree, 2, 19, 6, 5, false, false); + // at block 2, from index 6, 19 should be found if looking at uncommitted too + check_find_historic_leaf_index_from(tree, 2, 19, 6, 10, true); commit_tree(tree); - // at block 3, from index 6 it should now be found in committed only - check_find_historic_leaf_index_from(tree, 3, 18, 6, 10, true, false); + // at block 3, from index 6, should now be found in committed only + check_find_historic_leaf_index_from(tree, 3, 19, 6, 10, true, false); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_be_filled) @@ -1418,79 +1405,6 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_unwind_all_blocks) test_unwind(_directory, "DB", _mapSize, _maxReaders, 10, 16, 16, 16, second); } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_unwind_blocks_with_duplicate_leaves) -{ - constexpr size_t depth = 4; - std::string name = random_string(); - LMDBTreeStore::SharedPtr db = std::make_shared(_directory, name, _mapSize, _maxReaders); - std::unique_ptr store = std::make_unique(name, depth, db); - ThreadPoolPtr pool = make_thread_pool(1); - TreeType tree(std::move(store), pool); - MemoryTree 
memdb(depth); - - constexpr size_t blockSize = 2; - constexpr size_t numBlocks = 2; - constexpr size_t numBlocksToUnwind = 1; - - std::vector values = create_values(blockSize); - - // Add the same batch of values many times - for (size_t i = 0; i < numBlocks; i++) { - for (size_t j = 0; j < values.size(); j++) { - size_t ind = i * blockSize + j; - memdb.update_element(ind, values[j]); - } - add_values(tree, values); - commit_tree(tree); - check_block_and_root_data(db, i + 1, memdb.root(), true); - - for (size_t j = 0; j < values.size(); j++) { - size_t ind = i * blockSize + j; - // query the indices db directly - check_indices_data(db, values[j], ind, true, true); - } - } - - for (size_t i = 0; i < numBlocks; i++) { - index_t startIndex = i * blockSize; - index_t expectedIndex = startIndex + 1; - - // search for the leaf from start of each batch - check_find_leaf_index_from(tree, values[1], startIndex, expectedIndex, true); - // search for the leaf from start of the next batch - check_find_leaf_index_from(tree, values[1], startIndex + 2, expectedIndex + blockSize, i < (numBlocks - 1)); - } - - const uint32_t blocksToRemove = numBlocksToUnwind; - for (uint32_t i = 0; i < blocksToRemove; i++) { - const index_t blockNumber = numBlocks - i; - unwind_block(tree, blockNumber); - - const index_t previousValidBlock = blockNumber - 1; - index_t deletedBlockStartIndex = previousValidBlock * blockSize; - - check_block_height(tree, previousValidBlock); - check_size(tree, deletedBlockStartIndex); - - for (size_t j = 0; j < numBlocks; j++) { - index_t startIndex = j * blockSize; - index_t expectedIndex = startIndex + 1; - - // search for the leaf from start of each batch - check_find_leaf_index_from(tree, values[1], startIndex, expectedIndex, j < previousValidBlock); - // search for the leaf from start of the next batch - check_find_leaf_index_from( - tree, values[1], startIndex + 2, expectedIndex + blockSize, j < (previousValidBlock - 1)); - - for (size_t k = 0; k < values.size(); k++) { - size_t ind = j * blockSize + k; - // query the indices db directly. 
If block number == 1 that means the entry should not be present - check_indices_data(db, values[k], ind, blockNumber > 1, ind < deletedBlockStartIndex); - } - } - } -} - TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_sync_and_unwind_large_blocks) { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index cab4cc54e15..0420669d305 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -69,7 +69,7 @@ LMDBTreeStore::LMDBTreeStore(std::string directory, std::string name, uint64_t m { LMDBDatabaseCreationTransaction tx(_environment); - _leafValueToIndexDatabase = std::make_unique( + _leafKeyToIndexDatabase = std::make_unique( _environment, tx, _name + std::string("leaf indices"), false, false, fr_key_cmp); tx.commit(); } @@ -110,7 +110,7 @@ void LMDBTreeStore::get_stats(TreeDBStats& stats, ReadTransaction& tx) stats.blocksDBStats = DBStats(BLOCKS_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _leafHashToPreImageDatabase->underlying(), &stat); stats.leafPreimagesDBStats = DBStats(LEAF_PREIMAGES_DB, stat); - call_lmdb_func(mdb_stat, tx.underlying(), _leafValueToIndexDatabase->underlying(), &stat); + call_lmdb_func(mdb_stat, tx.underlying(), _leafKeyToIndexDatabase->underlying(), &stat); stats.leafIndicesDBStats = DBStats(LEAF_INDICES_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _nodeDatabase->underlying(), &stat); stats.nodesDBStats = DBStats(NODES_DB, stat); @@ -170,14 +170,14 @@ void LMDBTreeStore::write_leaf_index(const fr& leafValue, const index_t& index, { FrKeyType key(leafValue); // std::cout << "Writing leaf indices by key " << key << std::endl; - tx.put_value(key, index, *_leafValueToIndexDatabase); + tx.put_value(key, index, *_leafKeyToIndexDatabase); } void LMDBTreeStore::delete_leaf_index(const fr& leafValue, LMDBTreeStore::WriteTransaction& tx) { FrKeyType key(leafValue); // std::cout << "Deleting leaf indices by key " << key << std::endl; - tx.delete_value(key, *_leafValueToIndexDatabase); + tx.delete_value(key, *_leafKeyToIndexDatabase); } void LMDBTreeStore::increment_node_reference_count(const fr& nodeHash, WriteTransaction& tx) @@ -238,9 +238,9 @@ fr LMDBTreeStore::find_low_leaf(const fr& leafValue, return tmp < sizeLimit.value(); }; if (!sizeLimit.has_value()) { - tx.get_value_or_previous(key, index, *_leafValueToIndexDatabase); + tx.get_value_or_previous(key, index, *_leafKeyToIndexDatabase); } else { - tx.get_value_or_previous(key, index, *_leafValueToIndexDatabase, is_valid); + tx.get_value_or_previous(key, index, *_leafKeyToIndexDatabase, is_valid); } return key; } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index 6eda2b7eade..03f42e6619e 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -134,7 +134,7 @@ class LMDBTreeStore { LMDBEnvironment::SharedPtr _environment; LMDBDatabase::Ptr _blockDatabase; LMDBDatabase::Ptr _nodeDatabase; - LMDBDatabase::Ptr _leafValueToIndexDatabase; + LMDBDatabase::Ptr _leafKeyToIndexDatabase; LMDBDatabase::Ptr _leafHashToPreImageDatabase; LMDBDatabase::Ptr _leafIndexToKeyDatabase; @@ 
-144,7 +144,7 @@ class LMDBTreeStore { template bool LMDBTreeStore::read_leaf_index(const fr& leafValue, index_t& leafIndex, TxType& tx) { FrKeyType key(leafValue); - return tx.template get_value(key, leafIndex, *_leafValueToIndexDatabase); + return tx.template get_value(key, leafIndex, *_leafKeyToIndexDatabase); } template diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index a262de12ef3..5baf21ddb53 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -256,7 +256,7 @@ template class ContentAddressedCachedTreeStore { void remove_leaf(const fr& hash, const std::optional& maxIndex, WriteTransaction& tx); - void remove_leaf_indices(const fr& key, const index_t& maxIndex, WriteTransaction& tx); + void remove_leaf_index(const fr& key, const index_t& maxIndex, WriteTransaction& tx); void remove_leaf_indices_after_or_equal_index(const index_t& maxIndex, WriteTransaction& tx); @@ -701,7 +701,7 @@ void ContentAddressedCachedTreeStore::persist_leaf_keys(const ind for (auto& idx : indices_) { FrKeyType key = idx.first; - // write the leaf key against the indices, this is for the pending chain store of indices + // write the leaf key against the index, this is for the pending chain store of indices index_t indexForKey = idx.second; if (indexForKey < startIndex) { continue; @@ -1027,38 +1027,23 @@ void ContentAddressedCachedTreeStore::remove_leaf_indices_after_o std::vector leafKeys; dataStore_->read_all_leaf_keys_after_or_equal_index(index, leafKeys, tx); for (const fr& key : leafKeys) { - remove_leaf_indices(key, index, tx); + remove_leaf_index(key, index, tx); } dataStore_->delete_all_leaf_keys_after_or_equal_index(index, tx); } template -void ContentAddressedCachedTreeStore::remove_leaf_indices(const fr& key, - const index_t& maxIndex, - WriteTransaction& tx) +void ContentAddressedCachedTreeStore::remove_leaf_index(const fr& key, + const index_t& maxIndex, + WriteTransaction& tx) { - // We now have the key, extract the indices - Indices indices; - // std::cout << "Reading indices for key " << key << std::endl; - dataStore_->read_leaf_indices(key, indices, tx); - // std::cout << "Indices length before removal " << indices.indices.size() << std::endl; - - size_t lengthBefore = indices.indices.size(); - - indices.indices.erase( - std::remove_if(indices.indices.begin(), indices.indices.end(), [&](index_t& ind) { return ind >= maxIndex; }), - indices.indices.end()); - - size_t lengthAfter = indices.indices.size(); - // std::cout << "Indices length after removal " << indices.indices.size() << std::endl; - - if (lengthBefore != lengthAfter) { - if (indices.indices.empty()) { - // std::cout << "Deleting indices" << std::endl; - dataStore_->delete_leaf_indices(key, tx); - } else { - // std::cout << "Writing indices" << std::endl; - dataStore_->write_leaf_indices(key, indices, tx); + // We now have the key, extract the index + index_t index = 0; + // std::cout << "Reading index for key " << key << std::endl; + if (dataStore_->read_leaf_index(key, index, tx)) { + if (index >= maxIndex) { + // std::cout << "Deleting index" << std::endl; + dataStore_->delete_leaf_index(key, tx); } } } @@ -1071,7 +1056,7 @@ void ContentAddressedCachedTreeStore::remove_leaf(const fr& 
hash, // std::cout << "Removing leaf " << hash << std::endl; if (maxIndex.has_value()) { // std::cout << "Max Index" << std::endl; - // We need to clear the entry from the leaf key to indices database as this leaf never existed + // We need to clear the entry from the leaf key to index database as this leaf never existed IndexedLeafValueType leaf; fr key; if (requires_preimage_for_key()) { @@ -1084,7 +1069,7 @@ void ContentAddressedCachedTreeStore::remove_leaf(const fr& hash, } else { key = hash; } - remove_leaf_indices(key, maxIndex.value(), tx); + remove_leaf_index(key, maxIndex.value(), tx); } // std::cout << "Deleting leaf by hash " << std::endl; dataStore_->delete_leaf_by_hash(hash, tx); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp index dbf7eaa44d6..291752e9775 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp @@ -56,13 +56,12 @@ void inline check_block_and_size_data(LMDBTreeStore::SharedPtr db, void inline check_indices_data( LMDBTreeStore::SharedPtr db, fr leaf, index_t index, bool entryShouldBePresent, bool indexShouldBePresent) { - Indices indices; + index_t retrieved = 0; LMDBTreeStore::ReadTransaction::Ptr tx = db->create_read_transaction(); - bool success = db->read_leaf_indices(leaf, indices, *tx); + bool success = db->read_leaf_index(leaf, retrieved, *tx); EXPECT_EQ(success, entryShouldBePresent); if (entryShouldBePresent) { - bool found = std::find(indices.indices.begin(), indices.indices.end(), index) != std::end(indices.indices); - EXPECT_EQ(found, indexShouldBePresent); + EXPECT_EQ(index == retrieved, indexShouldBePresent); } } From ede65ba52b33761c5efd88a0f6bd55d61a4b8d98 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Sun, 17 Nov 2024 18:27:24 +0000 Subject: [PATCH 26/31] Removed leaf keys DB --- ...ontent_addressed_append_only_tree.test.cpp | 26 +- .../lmdb_store/lmdb_tree_store.cpp | 28 - .../lmdb_store/lmdb_tree_store.hpp | 40 - .../lmdb_store/lmdb_tree_store.test.cpp | 738 +++++++++--------- .../cached_content_addressed_tree_store.hpp | 66 +- .../crypto/merkle_tree/test_fixtures.hpp | 44 +- .../barretenberg/crypto/merkle_tree/types.hpp | 14 +- .../world-state/src/native/message.ts | 3 - 8 files changed, 415 insertions(+), 544 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index 73198cb4537..7f596b0ee86 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -1448,11 +1448,11 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalised_blocks index_t expectedFinalisedBlock = i < finalisedBlockDelay ? 0 : i - finalisedBlockDelay; check_finalised_block_height(tree, expectedFinalisedBlock); - index_t expectedPresentStart = i < finalisedBlockDelay ? 
0 : (expectedFinalisedBlock * blockSize); - index_t expectedPresentEnd = ((i + 1) * blockSize) - 1; - std::vector toTest(values.begin() + static_cast(expectedPresentStart), - values.begin() + static_cast(expectedPresentEnd + 1)); - check_leaf_keys_are_present(db, expectedPresentStart, expectedPresentEnd, toTest); + // index_t expectedPresentStart = i < finalisedBlockDelay ? 0 : (expectedFinalisedBlock * blockSize); + // index_t expectedPresentEnd = ((i + 1) * blockSize) - 1; + // std::vector toTest(values.begin() + static_cast(expectedPresentStart), + // values.begin() + static_cast(expectedPresentEnd + 1)); + // check_leaf_keys_are_present(db, expectedPresentStart, expectedPresentEnd, toTest); if (i >= finalisedBlockDelay) { @@ -1463,8 +1463,8 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalised_blocks finalise_block(tree, blockToFinalise, true); - index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; - check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); + // index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; + // check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); } } } @@ -1499,12 +1499,12 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_finalise_multiple_blocks index_t blockToFinalise = 8; - check_leaf_keys_are_present(db, 0, (numBlocks * blockSize) - 1, values); + // check_leaf_keys_are_present(db, 0, (numBlocks * blockSize) - 1, values); finalise_block(tree, blockToFinalise); - index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; - check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); + // index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; + // check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyond_pending_chain) @@ -1544,12 +1544,12 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyon // finalise the entire chain index_t blockToFinalise = numBlocks; - check_leaf_keys_are_present(db, 0, (numBlocks * blockSize) - 1, values); + // check_leaf_keys_are_present(db, 0, (numBlocks * blockSize) - 1, values); finalise_block(tree, blockToFinalise); - index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; - check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); + // index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; + // check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_fork_from_unwound_blocks) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index 0420669d305..aff04986256 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -80,13 +80,6 @@ LMDBTreeStore::LMDBTreeStore(std::string directory, std::string name, uint64_t m _environment, tx, _name + std::string("leaf pre-images"), false, false, fr_key_cmp); tx.commit(); } - - { - LMDBDatabaseCreationTransaction tx(_environment); - _leafIndexToKeyDatabase = std::make_unique( - _environment, tx, _name + std::string("leaf keys"), false, false, index_key_cmp); - tx.commit(); - } } LMDBTreeStore::WriteTransaction::Ptr LMDBTreeStore::create_write_transaction() const @@ -114,8 +107,6 @@ void LMDBTreeStore::get_stats(TreeDBStats& stats, 
ReadTransaction& tx) stats.leafIndicesDBStats = DBStats(LEAF_INDICES_DB, stat); call_lmdb_func(mdb_stat, tx.underlying(), _nodeDatabase->underlying(), &stat); stats.nodesDBStats = DBStats(NODES_DB, stat); - call_lmdb_func(mdb_stat, tx.underlying(), _leafIndexToKeyDatabase->underlying(), &stat); - stats.leafKeysDBStats = DBStats(LEAF_KEYS_DB, stat); } void LMDBTreeStore::write_block_data(uint64_t blockNumber, @@ -245,25 +236,6 @@ fr LMDBTreeStore::find_low_leaf(const fr& leafValue, return key; } -void LMDBTreeStore::write_leaf_key_by_index(const fr& leafKey, const index_t& index, WriteTransaction& tx) -{ - std::vector data = to_buffer(leafKey); - LeafIndexKeyType key(index); - tx.put_value(key, data, *_leafIndexToKeyDatabase); -} - -void LMDBTreeStore::delete_all_leaf_keys_after_or_equal_index(const index_t& index, WriteTransaction& tx) -{ - LeafIndexKeyType key(index); - tx.delete_all_values_greater_or_equal_key(key, *_leafIndexToKeyDatabase); -} - -void LMDBTreeStore::delete_all_leaf_keys_before_or_equal_index(const index_t& index, WriteTransaction& tx) -{ - LeafIndexKeyType key(index); - tx.delete_all_values_lesser_or_equal_key(key, *_leafIndexToKeyDatabase); -} - bool LMDBTreeStore::read_node(const fr& nodeHash, NodePayload& nodeData, ReadTransaction& tx) { FrKeyType key(nodeHash); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index 03f42e6619e..ab4dfb7316c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -136,7 +136,6 @@ class LMDBTreeStore { LMDBDatabase::Ptr _nodeDatabase; LMDBDatabase::Ptr _leafKeyToIndexDatabase; LMDBDatabase::Ptr _leafHashToPreImageDatabase; - LMDBDatabase::Ptr _leafIndexToKeyDatabase; template bool get_node_data(const fr& nodeHash, NodePayload& nodeData, TxType& tx); }; @@ -179,43 +178,4 @@ template bool LMDBTreeStore::get_node_data(const fr& nodeHash, } return success; } - -template bool LMDBTreeStore::read_leaf_key_by_index(const index_t& index, fr& leafKey, TxType& tx) -{ - LeafIndexKeyType key(index); - std::vector data; - bool success = tx.template get_value(key, data, *_leafIndexToKeyDatabase); - if (success) { - leafKey = from_buffer(data); - } - return success; -} - -template -void LMDBTreeStore::read_all_leaf_keys_after_or_equal_index(const index_t& index, - std::vector& leafKeys, - TxType& tx) -{ - LeafIndexKeyType key(index); - std::vector> values; - tx.get_all_values_greater_or_equal_key(key, values, *_leafIndexToKeyDatabase); - for (const auto& value : values) { - fr leafKey = from_buffer(value); - leafKeys.push_back(leafKey); - } -} - -template -void LMDBTreeStore::read_all_leaf_keys_before_or_equal_index(const index_t& index, - std::vector& leafKeys, - TxType& tx) -{ - LeafIndexKeyType key(index); - std::vector> values; - tx.get_all_values_lesser_or_equal_key(key, values, *_leafIndexToKeyDatabase); - for (const auto& value : values) { - fr leafKey = from_buffer(value); - leafKeys.push_back(leafKey); - } -} } // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp index f7b10f9f4a8..721db0fed2c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp +++ 
b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp @@ -176,372 +176,372 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_leaves_by_hash) } } -TEST_F(LMDBTreeStoreTest, can_read_write_key_by_index) -{ - bb::fr leafKey = VALUES[0]; - index_t leafIndex = 45; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.write_leaf_key_by_index(leafKey, leafIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - bb::fr readBack; - bool success = store.read_leaf_key_by_index(leafIndex, readBack, *transaction); - EXPECT_TRUE(success); - EXPECT_EQ(readBack, leafKey); - - success = store.read_leaf_key_by_index(leafIndex + 1, readBack, *transaction); - EXPECT_FALSE(success); - } -} - -TEST_F(LMDBTreeStoreTest, can_retrieve_all_keys_greater_than_index) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - // Retrieve all but the first 150 keys - uint32_t offset = 150; - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(leafIndexStart + offset, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), values.size() - offset); - for (uint32_t i = offset; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[i + offset]); - } - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - // Retrieve all keys - uint32_t offset = 0; - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(leafIndexStart + offset, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), values.size() - offset); - for (uint32_t i = offset; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[i + offset]); - } - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - // Retrieve no keys - uint32_t offset = 10000; - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(leafIndexStart + offset, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_all_keys_greater_than_index) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - uint32_t deleteFromIndex = 150; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_after_or_equal_index(deleteFromIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(leafIndexStart, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), deleteFromIndex - leafIndexStart); - for (uint32_t i = 0; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[i]); - } - } - - { - 
LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - for (uint32_t i = 0; i < 1024 + leafIndexStart; i++) { - bb::fr leafKey; - bool success = store.read_leaf_key_by_index(i, leafKey, *transaction); - EXPECT_EQ(success, (i >= leafIndexStart && (i < deleteFromIndex))); - if (success) { - EXPECT_EQ(leafKey, values[i - leafIndexStart]); - } - } - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_all_keys_less_than_index) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - uint32_t deleteFromIndex = 150; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_before_or_equal_index(deleteFromIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(leafIndexStart + 1023, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 1024 - (deleteFromIndex - leafIndexStart + 1)); - for (uint32_t i = 0; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[1023 - i]); - } - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - for (uint32_t i = 0; i < 1024 + leafIndexStart; i++) { - bb::fr leafKey; - bool success = store.read_leaf_key_by_index(i, leafKey, *transaction); - EXPECT_EQ(success, (i > deleteFromIndex && (i <= leafIndexStart + 1023))); - if (success) { - EXPECT_EQ(leafKey, values[i - leafIndexStart]); - } - } - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_all_keys_greater_than) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - uint32_t deleteFromIndex = 0; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_after_or_equal_index(deleteFromIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(leafIndexStart, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(0, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(10000, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_all_keys_less_than) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - uint32_t deleteFromIndex = 2000; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < 
values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_before_or_equal_index(deleteFromIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(leafIndexStart + 1023, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(2000, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(10, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_no_keys_greater_than) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - uint32_t deleteFromIndex = 2000; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_after_or_equal_index(deleteFromIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(leafIndexStart, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 1024); - for (uint32_t i = 0; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[i]); - } - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(0, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 1024); - for (uint32_t i = 0; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[i]); - } - } - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(10000, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_no_keys_less_than) -{ - std::vector values = create_values(1024); - index_t leafIndexStart = 45; - uint32_t deleteFromIndex = 20; - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - for (uint32_t i = 0; i < values.size(); i++) { - store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); - } - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_before_or_equal_index(deleteFromIndex, *transaction); - transaction->commit(); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(leafIndexStart + 1023, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 1024); - for (uint32_t i = 0; i < leafKeys.size(); i++) { - 
EXPECT_EQ(leafKeys[i], values[1023 - i]); - } - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(2000, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 1024); - for (uint32_t i = 0; i < leafKeys.size(); i++) { - EXPECT_EQ(leafKeys[i], values[1023 - i]); - } - } - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(10, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } -} - -TEST_F(LMDBTreeStoreTest, can_retrieve_all_keys_when_none_are_present) -{ - std::vector values = create_values(1024); - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_after_or_equal_index(0, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } - - { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); - std::vector leafKeys; - store.read_all_leaf_keys_before_or_equal_index(0, leafKeys, *transaction); - EXPECT_EQ(leafKeys.size(), 0); - } -} - -TEST_F(LMDBTreeStoreTest, can_delete_all_keys_when_none_are_present) -{ - std::vector values = create_values(1024); - LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_after_or_equal_index(0, *transaction); - transaction->commit(); - } - - { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); - store.delete_all_leaf_keys_before_or_equal_index(0, *transaction); - transaction->commit(); - } -} +// TEST_F(LMDBTreeStoreTest, can_read_write_key_by_index) +// { +// bb::fr leafKey = VALUES[0]; +// index_t leafIndex = 45; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.write_leaf_key_by_index(leafKey, leafIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// bb::fr readBack; +// bool success = store.read_leaf_key_by_index(leafIndex, readBack, *transaction); +// EXPECT_TRUE(success); +// EXPECT_EQ(readBack, leafKey); + +// success = store.read_leaf_key_by_index(leafIndex + 1, readBack, *transaction); +// EXPECT_FALSE(success); +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_retrieve_all_keys_greater_than_index) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// // Retrieve all but the first 150 keys +// uint32_t offset = 150; +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(leafIndexStart + offset, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), values.size() - offset); +// for (uint32_t i = offset; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[i + offset]); +// } +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = 
store.create_read_transaction(); +// // Retrieve all keys +// uint32_t offset = 0; +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(leafIndexStart + offset, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), values.size() - offset); +// for (uint32_t i = offset; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[i + offset]); +// } +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// // Retrieve no keys +// uint32_t offset = 10000; +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(leafIndexStart + offset, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_delete_all_keys_greater_than_index) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// uint32_t deleteFromIndex = 150; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_after_or_equal_index(deleteFromIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(leafIndexStart, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), deleteFromIndex - leafIndexStart); +// for (uint32_t i = 0; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[i]); +// } +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// for (uint32_t i = 0; i < 1024 + leafIndexStart; i++) { +// bb::fr leafKey; +// bool success = store.read_leaf_key_by_index(i, leafKey, *transaction); +// EXPECT_EQ(success, (i >= leafIndexStart && (i < deleteFromIndex))); +// if (success) { +// EXPECT_EQ(leafKey, values[i - leafIndexStart]); +// } +// } +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_delete_all_keys_less_than_index) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// uint32_t deleteFromIndex = 150; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_before_or_equal_index(deleteFromIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(leafIndexStart + 1023, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 1024 - (deleteFromIndex - leafIndexStart + 1)); +// for (uint32_t i = 0; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[1023 - i]); +// } +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// for (uint32_t i = 0; i < 1024 + leafIndexStart; i++) { +// bb::fr leafKey; +// bool success = store.read_leaf_key_by_index(i, 
leafKey, *transaction); +// EXPECT_EQ(success, (i > deleteFromIndex && (i <= leafIndexStart + 1023))); +// if (success) { +// EXPECT_EQ(leafKey, values[i - leafIndexStart]); +// } +// } +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_delete_all_keys_greater_than) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// uint32_t deleteFromIndex = 0; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_after_or_equal_index(deleteFromIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(leafIndexStart, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(0, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(10000, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_delete_all_keys_less_than) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// uint32_t deleteFromIndex = 2000; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_before_or_equal_index(deleteFromIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(leafIndexStart + 1023, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(2000, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(10, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_delete_no_keys_greater_than) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// uint32_t deleteFromIndex = 2000; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, 
*transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_after_or_equal_index(deleteFromIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(leafIndexStart, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 1024); +// for (uint32_t i = 0; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[i]); +// } +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(0, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 1024); +// for (uint32_t i = 0; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[i]); +// } +// } +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(10000, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_delete_no_keys_less_than) +// { +// std::vector values = create_values(1024); +// index_t leafIndexStart = 45; +// uint32_t deleteFromIndex = 20; +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// for (uint32_t i = 0; i < values.size(); i++) { +// store.write_leaf_key_by_index(values[i], i + leafIndexStart, *transaction); +// } +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_before_or_equal_index(deleteFromIndex, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(leafIndexStart + 1023, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 1024); +// for (uint32_t i = 0; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[1023 - i]); +// } +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(2000, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 1024); +// for (uint32_t i = 0; i < leafKeys.size(); i++) { +// EXPECT_EQ(leafKeys[i], values[1023 - i]); +// } +// } +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(10, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// } + +// TEST_F(LMDBTreeStoreTest, can_retrieve_all_keys_when_none_are_present) +// { +// std::vector values = create_values(1024); +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_after_or_equal_index(0, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } + +// { +// LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); +// std::vector leafKeys; +// store.read_all_leaf_keys_before_or_equal_index(0, leafKeys, *transaction); +// EXPECT_EQ(leafKeys.size(), 0); +// } +// } + +// TEST_F(LMDBTreeStoreTest, 
can_delete_all_keys_when_none_are_present) +// { +// std::vector values = create_values(1024); +// LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_after_or_equal_index(0, *transaction); +// transaction->commit(); +// } + +// { +// LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); +// store.delete_all_leaf_keys_before_or_equal_index(0, *transaction); +// transaction->commit(); +// } +// } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 5baf21ddb53..12b2449088d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -239,12 +239,8 @@ template class ContentAddressedCachedTreeStore { void persist_meta(TreeMeta& m, WriteTransaction& tx); - // void hydrate_indices_from_persisted_store(ReadTransaction& tx); - void persist_leaf_indices(WriteTransaction& tx); - void persist_leaf_keys(const index_t& startIndex, WriteTransaction& tx); - void persist_leaf_pre_image(const fr& hash, WriteTransaction& tx); void persist_node(const std::optional& optional_hash, uint32_t level, WriteTransaction& tx); @@ -258,8 +254,6 @@ template class ContentAddressedCachedTreeStore { void remove_leaf_index(const fr& key, const index_t& maxIndex, WriteTransaction& tx); - void remove_leaf_indices_after_or_equal_index(const index_t& maxIndex, WriteTransaction& tx); - void extract_db_stats(TreeDBStats& stats); index_t constrain_tree_size(const RequestContext& requestContext, ReadTransaction& tx) const; @@ -426,10 +420,12 @@ void ContentAddressedCachedTreeStore::update_index(const index_t& // std::cout << "update_index at index " << index << " leaf " << leaf << std::endl; // Accessing indices_ under a lock std::unique_lock lock(mtx_); - const auto [it, success] = indices_.insert({ uint256_t(leaf), index }); - if (!success) { - std::cout << "FAILED TO INSERT LEAF INDEX" << std::endl; - } + indices_.insert({ uint256_t(leaf), index }); + // const auto [it, success] = indices_.insert({ uint256_t(leaf), index }); + // if (!success) { + // std::cout << "Attempting to set leaf " << leaf << " at index " << index << " failed, leaf already found at + // index " << it->second << std::endl; + // } } template @@ -462,7 +458,7 @@ std::optional ContentAddressedCachedTreeStore::find_leaf } // we have been asked to not include uncommitted data, or there is none available - index_t committed; + index_t committed = 0; FrKeyType key = leaf; bool success = dataStore_->read_leaf_index(key, committed, tx); if (success) { @@ -646,7 +642,6 @@ void ContentAddressedCachedTreeStore::commit(TreeMeta& finalMeta, if (dataPresent) { // std::cout << "Persisting data for block " << uncommittedMeta.unfinalisedBlockHeight + 1 << std::endl; persist_leaf_indices(*tx); - persist_leaf_keys(uncommittedMeta.committedSize, *tx); persist_node(std::optional(uncommittedMeta.root), 0, *tx); if (asBlock) { ++uncommittedMeta.unfinalisedBlockHeight; @@ -695,21 +690,6 @@ void ContentAddressedCachedTreeStore::persist_leaf_indices(WriteT } } -template -void ContentAddressedCachedTreeStore::persist_leaf_keys(const index_t& startIndex, WriteTransaction& tx) -{ - for (auto& 
idx : indices_) { - FrKeyType key = idx.first; - - // write the leaf key against the index, this is for the pending chain store of indices - index_t indexForKey = idx.second; - if (indexForKey < startIndex) { - continue; - } - dataStore_->write_leaf_key_by_index(key, indexForKey, tx); - } -} - template void ContentAddressedCachedTreeStore::persist_leaf_pre_image(const fr& hash, WriteTransaction& tx) { @@ -759,21 +739,6 @@ void ContentAddressedCachedTreeStore::persist_node(const std::opt persist_node(nodePayloadIter->second.right, level + 1, tx); } -// template -// void ContentAddressedCachedTreeStore::hydrate_indices_from_persisted_store(ReadTransaction& tx) -// { -// for (auto& idx : indices_) { -// std::vector value; -// FrKeyType key = idx.first; -// Indices persistedIndices; -// bool success = dataStore_->read_leaf_indices(key, persistedIndices, tx); -// if (success) { -// idx.second.indices.insert( -// idx.second.indices.begin(), persistedIndices.indices.begin(), persistedIndices.indices.end()); -// } -// } -// } - template void ContentAddressedCachedTreeStore::rollback() { // Extract the committed meta data and destroy the cache @@ -836,11 +801,7 @@ void ContentAddressedCachedTreeStore::advance_finalised_block(con // commit the new finalised block WriteTransactionPtr writeTx = create_write_transaction(); try { - // determine where we need to prune the leaf keys store up to - index_t highestIndexToRemove = blockPayload.size - 1; committedMeta.finalisedBlockHeight = blockNumber; - // clean up the leaf keys index table - dataStore_->delete_all_leaf_keys_before_or_equal_index(highestIndexToRemove, *writeTx); // persist the new meta data persist_meta(committedMeta, *writeTx); writeTx->commit(); @@ -927,7 +888,6 @@ void ContentAddressedCachedTreeStore::unwind_block(const index_t& remove_node(std::optional(blockData.root), 0, maxIndex, *writeTx); // remove the block from the block data table dataStore_->delete_block_data(blockNumber, *writeTx); - remove_leaf_indices_after_or_equal_index(previousBlockData.size, *writeTx); uncommittedMeta.unfinalisedBlockHeight = previousBlockData.blockNumber; uncommittedMeta.size = previousBlockData.size; uncommittedMeta.committedSize = previousBlockData.size; @@ -1020,18 +980,6 @@ void ContentAddressedCachedTreeStore::remove_historical_block(con extract_db_stats(dbStats); } -template -void ContentAddressedCachedTreeStore::remove_leaf_indices_after_or_equal_index(const index_t& index, - WriteTransaction& tx) -{ - std::vector leafKeys; - dataStore_->read_all_leaf_keys_after_or_equal_index(index, leafKeys, tx); - for (const fr& key : leafKeys) { - remove_leaf_index(key, index, tx); - } - dataStore_->delete_all_leaf_keys_after_or_equal_index(index, tx); -} - template void ContentAddressedCachedTreeStore::remove_leaf_index(const fr& key, const index_t& maxIndex, diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp index 291752e9775..6e7cc1c3995 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp @@ -77,28 +77,28 @@ void check_leaf_by_hash(LMDBTreeStore::SharedPtr db, IndexedLeaf leaf, } } -void inline check_leaf_keys_are_present(LMDBTreeStore::SharedPtr db, - uint64_t startIndex, - uint64_t endIndex, - const std::vector& keys) -{ - LMDBTreeStore::ReadTransaction::Ptr tx = db->create_read_transaction(); - for (uint64_t i = startIndex; i <= endIndex; i++) 
{ - fr leafKey; - bool success = db->read_leaf_key_by_index(i, leafKey, *tx); - EXPECT_TRUE(success); - EXPECT_EQ(leafKey, keys[i - startIndex]); - } -} +// void inline check_leaf_keys_are_present(LMDBTreeStore::SharedPtr db, +// uint64_t startIndex, +// uint64_t endIndex, +// const std::vector& keys) +// { +// LMDBTreeStore::ReadTransaction::Ptr tx = db->create_read_transaction(); +// for (uint64_t i = startIndex; i <= endIndex; i++) { +// fr leafKey; +// bool success = db->read_leaf_key_by_index(i, leafKey, *tx); +// EXPECT_TRUE(success); +// EXPECT_EQ(leafKey, keys[i - startIndex]); +// } +// } -void inline check_leaf_keys_are_not_present(LMDBTreeStore::SharedPtr db, uint64_t startIndex, uint64_t endIndex) -{ - LMDBTreeStore::ReadTransaction::Ptr tx = db->create_read_transaction(); - for (uint64_t i = startIndex; i < endIndex; i++) { - fr leafKey; - bool success = db->read_leaf_key_by_index(i, leafKey, *tx); - EXPECT_FALSE(success); - } -} +// void inline check_leaf_keys_are_not_present(LMDBTreeStore::SharedPtr db, uint64_t startIndex, uint64_t endIndex) +// { +// LMDBTreeStore::ReadTransaction::Ptr tx = db->create_read_transaction(); +// for (uint64_t i = startIndex; i < endIndex; i++) { +// fr leafKey; +// bool success = db->read_leaf_key_by_index(i, leafKey, *tx); +// EXPECT_FALSE(success); +// } +// } } // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp index 6f2ce79c474..8d9c5de4933 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp @@ -16,7 +16,6 @@ struct RequestContext { const std::string BLOCKS_DB = "blocks"; const std::string NODES_DB = "nodes"; const std::string LEAF_PREIMAGES_DB = "leaf preimages"; -const std::string LEAF_KEYS_DB = "leaf keys"; const std::string LEAF_INDICES_DB = "leaf indices"; struct DBStats { @@ -71,7 +70,6 @@ struct TreeDBStats { DBStats blocksDBStats; DBStats nodesDBStats; DBStats leafPreimagesDBStats; - DBStats leafKeysDBStats; DBStats leafIndicesDBStats; TreeDBStats() = default; @@ -82,13 +80,11 @@ struct TreeDBStats { const DBStats& blockStats, const DBStats& nodesStats, const DBStats& leafPreimagesDBStats, - const DBStats& leafKeysDBStats, const DBStats& leafIndicesStats) : mapSize(mapSize) , blocksDBStats(blockStats) , nodesDBStats(nodesStats) , leafPreimagesDBStats(leafPreimagesDBStats) - , leafKeysDBStats(leafKeysDBStats) , leafIndicesDBStats(leafIndicesStats) {} TreeDBStats(const TreeDBStats& other) = default; @@ -96,13 +92,12 @@ struct TreeDBStats { ~TreeDBStats() = default; - MSGPACK_FIELDS(mapSize, blocksDBStats, nodesDBStats, leafPreimagesDBStats, leafKeysDBStats, leafIndicesDBStats) + MSGPACK_FIELDS(mapSize, blocksDBStats, nodesDBStats, leafPreimagesDBStats, leafIndicesDBStats) bool operator==(const TreeDBStats& other) const { return mapSize == other.mapSize && blocksDBStats == other.blocksDBStats && nodesDBStats == other.nodesDBStats && - leafPreimagesDBStats == other.leafPreimagesDBStats && leafKeysDBStats == other.leafPreimagesDBStats && - leafIndicesDBStats == other.leafIndicesDBStats; + leafPreimagesDBStats == other.leafPreimagesDBStats && leafIndicesDBStats == other.leafIndicesDBStats; } TreeDBStats& operator=(TreeDBStats&& other) noexcept @@ -112,7 +107,6 @@ struct TreeDBStats { blocksDBStats = std::move(other.blocksDBStats); nodesDBStats = std::move(other.nodesDBStats); 
leafPreimagesDBStats = std::move(other.leafPreimagesDBStats); - leafKeysDBStats = std::move(other.leafKeysDBStats); leafIndicesDBStats = std::move(other.leafIndicesDBStats); } return *this; @@ -123,8 +117,8 @@ struct TreeDBStats { friend std::ostream& operator<<(std::ostream& os, const TreeDBStats& stats) { os << "Map Size: " << stats.mapSize << " Blocks DB " << stats.blocksDBStats << ", Nodes DB " - << stats.nodesDBStats << ", Leaf Pre-images DB " << stats.leafPreimagesDBStats << ", Leaf Keys DB " - << stats.leafKeysDBStats << ", Leaf Indices DB " << stats.leafIndicesDBStats; + << stats.nodesDBStats << ", Leaf Pre-images DB " << stats.leafPreimagesDBStats << ", Leaf Indices DB " + << stats.leafIndicesDBStats; return os; } }; diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index 51b6163c223..e40ef6011e1 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -136,8 +136,6 @@ export interface TreeDBStats { nodesDBStats: DBStats; /** Stats for the 'leaf pre-images' DB */ leafPreimagesDBStats: DBStats; - /** Stats for the 'leaf keys' DB */ - leafKeysDBStats: DBStats; /** Stats for the 'leaf indices' DB */ leafIndicesDBStats: DBStats; } @@ -271,7 +269,6 @@ export function sanitiseMeta(meta: TreeMeta) { export function sanitiseTreeDBStats(stats: TreeDBStats) { stats.blocksDBStats = sanitiseDBStats(stats.blocksDBStats); stats.leafIndicesDBStats = sanitiseDBStats(stats.leafIndicesDBStats); - stats.leafKeysDBStats = sanitiseDBStats(stats.leafKeysDBStats); stats.leafPreimagesDBStats = sanitiseDBStats(stats.leafPreimagesDBStats); stats.nodesDBStats = sanitiseDBStats(stats.nodesDBStats); stats.mapSize = BigInt(stats.mapSize); From 94f35d98d3b611081456a119c73b08636c566b6b Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Sun, 17 Nov 2024 18:48:44 +0000 Subject: [PATCH 27/31] Comments cleanup --- ...ontent_addressed_append_only_tree.test.cpp | 20 +------------------ .../world-state/src/synchronizer/config.ts | 2 +- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index 7f596b0ee86..52fc12daa9f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -1448,23 +1448,15 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_advance_finalised_blocks index_t expectedFinalisedBlock = i < finalisedBlockDelay ? 0 : i - finalisedBlockDelay; check_finalised_block_height(tree, expectedFinalisedBlock); - // index_t expectedPresentStart = i < finalisedBlockDelay ? 
0 : (expectedFinalisedBlock * blockSize); - // index_t expectedPresentEnd = ((i + 1) * blockSize) - 1; - // std::vector toTest(values.begin() + static_cast(expectedPresentStart), - // values.begin() + static_cast(expectedPresentEnd + 1)); - // check_leaf_keys_are_present(db, expectedPresentStart, expectedPresentEnd, toTest); if (i >= finalisedBlockDelay) { index_t blockToFinalise = expectedFinalisedBlock + 1; - // attemnpting to finalise a block that doesn't exist should fail + // attempting to finalise a block that doesn't exist should fail finalise_block(tree, blockToFinalise + numBlocks, false); finalise_block(tree, blockToFinalise, true); - - // index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; - // check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); } } } @@ -1499,12 +1491,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_finalise_multiple_blocks index_t blockToFinalise = 8; - // check_leaf_keys_are_present(db, 0, (numBlocks * blockSize) - 1, values); - finalise_block(tree, blockToFinalise); - - // index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; - // check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyond_pending_chain) @@ -1544,12 +1531,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_finalise_block_beyon // finalise the entire chain index_t blockToFinalise = numBlocks; - // check_leaf_keys_are_present(db, 0, (numBlocks * blockSize) - 1, values); - finalise_block(tree, blockToFinalise); - - // index_t expectedNotPresentEnd = (blockToFinalise * blockSize) - 1; - // check_leaf_keys_are_not_present(db, 0, expectedNotPresentEnd); } TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_not_fork_from_unwound_blocks) diff --git a/yarn-project/world-state/src/synchronizer/config.ts b/yarn-project/world-state/src/synchronizer/config.ts index 4b90127e952..bdd96365da9 100644 --- a/yarn-project/world-state/src/synchronizer/config.ts +++ b/yarn-project/world-state/src/synchronizer/config.ts @@ -11,7 +11,7 @@ export interface WorldStateConfig { /** Size of the batch for each get-blocks request from the synchronizer to the archiver. 
*/ worldStateBlockRequestBatchSize?: number; - /** The maximum size of the combined world state db in KB, optional, will inherit from the general dataStoreMapSizeKB if not specified*/ + /** The map size to be provided to LMDB for each world state tree DB, optional, will inherit from the general dataStoreMapSizeKB if not specified*/ worldStateDbMapSizeKb?: number; /** Optional directory for the world state DB, if unspecified will default to the general data directory */ From d7fdb5680047dede71e88f152f56525027379543 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Sun, 17 Nov 2024 18:52:09 +0000 Subject: [PATCH 28/31] Formatting --- .../node_store/cached_content_addressed_tree_store.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 7af3a1c3626..3d16de6bbea 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -308,7 +308,7 @@ std::pair ContentAddressedCachedTreeStore::find_lo const fr& new_leaf_key, const RequestContext& requestContext, ReadTransaction& tx) const { auto new_value_as_number = uint256_t(new_leaf_key); - index_t committed; + index_t committed = 0; std::optional sizeLimit = std::nullopt; if (initialised_from_block_.has_value() || requestContext.blockNumber.has_value()) { sizeLimit = constrain_tree_size(requestContext, tx); From 9c0c34934795bceb341f304c0ff5a25592e240b6 Mon Sep 17 00:00:00 2001 From: PhilWindle Date: Mon, 18 Nov 2024 17:00:32 +0000 Subject: [PATCH 29/31] Additional data store metrics for archiver, tx pool and world state --- .../archiver/src/archiver/archiver.ts | 6 + .../archiver/src/archiver/archiver_store.ts | 5 + .../archiver/src/archiver/instrumentation.ts | 22 +++ .../kv_archiver_store/kv_archiver_store.ts | 6 +- .../memory_archiver_store.ts | 4 + .../composed/integration_l1_publisher.test.ts | 7 +- yarn-project/kv-store/src/interfaces/store.ts | 2 +- yarn-project/kv-store/src/lmdb/store.ts | 47 +++++- .../p2p/src/mem_pools/instrumentation.ts | 31 +++- .../src/mem_pools/tx_pool/aztec_kv_tx_pool.ts | 2 + yarn-project/telemetry-client/src/index.ts | 1 + .../telemetry-client/src/lmdb_metrics.ts | 38 +++++ yarn-project/telemetry-client/src/metrics.ts | 92 +++++++++++ .../world-state/src/synchronizer/factory.ts | 2 +- .../src/synchronizer/instrumentation.ts | 150 ++++++++++++++++++ .../server_world_state_synchronizer.test.ts | 3 +- .../server_world_state_synchronizer.ts | 26 ++- .../world-state/src/test/integration.test.ts | 19 ++- .../src/world-state-db/merkle_trees.ts | 2 +- 19 files changed, 441 insertions(+), 24 deletions(-) create mode 100644 yarn-project/telemetry-client/src/lmdb_metrics.ts create mode 100644 yarn-project/world-state/src/synchronizer/instrumentation.ts diff --git a/yarn-project/archiver/src/archiver/archiver.ts b/yarn-project/archiver/src/archiver/archiver.ts index a2a10d8c7a2..451ecab15f8 100644 --- a/yarn-project/archiver/src/archiver/archiver.ts +++ b/yarn-project/archiver/src/archiver/archiver.ts @@ -265,6 +265,9 @@ export class Archiver implements ArchiveSource { // the chain locally before we start unwinding stuff. 
This can be optimized by figuring out // up to which point we're pruning, and then requesting L2 blocks up to that point only. await this.handleEpochPrune(provenBlockNumber, currentL1BlockNumber); + + const storeSizes = this.store.estimateSize(); + this.instrumentation.recordDBMetrics(storeSizes.mappingSize, storeSizes.numItems, storeSizes.actualSize); } } @@ -1006,6 +1009,9 @@ class ArchiverStoreHelper getTotalL1ToL2MessageCount(): Promise { return this.store.getTotalL1ToL2MessageCount(); } + estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { + return this.store.estimateSize(); + } } type L1RollupConstants = { diff --git a/yarn-project/archiver/src/archiver/archiver_store.ts b/yarn-project/archiver/src/archiver/archiver_store.ts index 4eb2c80ccc0..110e42f86f3 100644 --- a/yarn-project/archiver/src/archiver/archiver_store.ts +++ b/yarn-project/archiver/src/archiver/archiver_store.ts @@ -250,4 +250,9 @@ export interface ArchiverDataStore { addContractArtifact(address: AztecAddress, contract: ContractArtifact): Promise; getContractArtifact(address: AztecAddress): Promise; + + /** + * Estimates the size of the store in bytes. + */ + estimateSize(): { mappingSize: number; actualSize: number; numItems: number }; } diff --git a/yarn-project/archiver/src/archiver/instrumentation.ts b/yarn-project/archiver/src/archiver/instrumentation.ts index 6a53027f460..119bb14010a 100644 --- a/yarn-project/archiver/src/archiver/instrumentation.ts +++ b/yarn-project/archiver/src/archiver/instrumentation.ts @@ -4,6 +4,7 @@ import { Attributes, type Gauge, type Histogram, + LmdbMetrics, Metrics, type TelemetryClient, type UpDownCounter, @@ -18,6 +19,7 @@ export class ArchiverInstrumentation { private syncDuration: Histogram; private proofsSubmittedDelay: Histogram; private proofsSubmittedCount: UpDownCounter; + private dbMetrics: LmdbMetrics; private log = createDebugLogger('aztec:archiver:instrumentation'); @@ -55,6 +57,26 @@ export class ArchiverInstrumentation { explicitBucketBoundaries: millisecondBuckets(1, 80), // 10ms -> ~3hs }, }); + + this.dbMetrics = new LmdbMetrics( + meter, + { + name: Metrics.ARCHIVER_DB_MAP_SIZE, + description: 'Database map size for the archiver', + }, + { + name: Metrics.ARCHIVER_DB_USED_SIZE, + description: 'Database used size for the archiver', + }, + { + name: Metrics.ARCHIVER_DB_NUM_ITEMS, + description: 'Num items in the archiver database', + }, + ); + } + + public recordDBMetrics(dbMapSize: number, dbNumItems: number, dbUsedSize: number) { + this.dbMetrics.recordDBMetrics(dbMapSize, dbNumItems, dbUsedSize); } public isEnabled(): boolean { diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts index 8cbb627a5ce..313190e8767 100644 --- a/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts +++ b/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts @@ -47,7 +47,7 @@ export class KVArchiverDataStore implements ArchiverDataStore { #log = createDebugLogger('aztec:archiver:data-store'); - constructor(db: AztecKVStore, logsMaxPageSize: number = 1000) { + constructor(private db: AztecKVStore, logsMaxPageSize: number = 1000) { this.#blockStore = new BlockStore(db); this.#logStore = new LogStore(db, this.#blockStore, logsMaxPageSize); this.#messageStore = new MessageStore(db); @@ -324,4 +324,8 @@ export class KVArchiverDataStore implements ArchiverDataStore { messagesSynchedTo: 
this.#messageStore.getSynchedL1BlockNumber(), }); } + + public estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { + return this.db.estimateSize(); + } } diff --git a/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts b/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts index e49ab8eccc2..1f20956380b 100644 --- a/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts +++ b/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts @@ -686,4 +686,8 @@ export class MemoryArchiverStore implements ArchiverDataStore { public getContractArtifact(address: AztecAddress): Promise { return Promise.resolve(this.contractArtifacts.get(address.toString())); } + + public estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { + return { mappingSize: 0, actualSize: 0, numItems: 0 }; + } } diff --git a/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts b/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts index 194839cda36..f1339b66836 100644 --- a/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/composed/integration_l1_publisher.test.ts @@ -146,7 +146,12 @@ describe('L1Publisher integration', () => { worldStateProvenBlocksOnly: false, worldStateDbMapSizeKb: 10 * 1024 * 1024, }; - worldStateSynchronizer = new ServerWorldStateSynchronizer(builderDb, blockSource, worldStateConfig); + worldStateSynchronizer = new ServerWorldStateSynchronizer( + builderDb, + blockSource, + worldStateConfig, + new NoopTelemetryClient(), + ); await worldStateSynchronizer.start(); fork = await worldStateSynchronizer.fork(); builder = new LightweightBlockBuilder(fork, new NoopTelemetryClient()); diff --git a/yarn-project/kv-store/src/interfaces/store.ts b/yarn-project/kv-store/src/interfaces/store.ts index df37d45e0a6..9764a474546 100644 --- a/yarn-project/kv-store/src/interfaces/store.ts +++ b/yarn-project/kv-store/src/interfaces/store.ts @@ -72,5 +72,5 @@ export interface AztecKVStore { /** * Estimates the size of the store in bytes. */ - estimateSize(): { bytes: number }; + estimateSize(): { mappingSize: number; actualSize: number; numItems: number }; } diff --git a/yarn-project/kv-store/src/lmdb/store.ts b/yarn-project/kv-store/src/lmdb/store.ts index 22672421d47..726007937d6 100644 --- a/yarn-project/kv-store/src/lmdb/store.ts +++ b/yarn-project/kv-store/src/lmdb/store.ts @@ -182,15 +182,50 @@ export class AztecLmdbStore implements AztecKVStore { } } - estimateSize(): { bytes: number } { + estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { const stats = this.#rootDb.getStats(); - // `mapSize` represents to total amount of memory currently being used by the database. - // since the database is mmap'd, this is a good estimate of the size of the database for now. 
+ // The 'mapSize' is the total amount of virtual address space allocated to the DB (effectively the maximum possible size) // http://www.lmdb.tech/doc/group__mdb.html#a4bde3c8b676457342cba2fe27aed5fbd + let mapSize = 0; if ('mapSize' in stats && typeof stats.mapSize === 'number') { - return { bytes: stats.mapSize }; - } else { - return { bytes: 0 }; + mapSize = stats.mapSize; } + const dataResult = this.estimateSubDBSize(this.#data); + const multiResult = this.estimateSubDBSize(this.#multiMapData); + return { + mappingSize: mapSize, + actualSize: dataResult.actualSize + multiResult.actualSize, + numItems: dataResult.numItems + multiResult.numItems, + }; + } + + private estimateSubDBSize(db: Database): { actualSize: number; numItems: number } { + const stats = db.getStats(); + let branchPages = 0; + let leafPages = 0; + let overflowPages = 0; + let pageSize = 0; + let totalSize = 0; + let numItems = 0; + if ('entryCount' in stats && typeof stats.entryCount === 'number') { + numItems = stats.entryCount; + } + if ( + 'treeBranchPageCount' in stats && + typeof stats.treeBranchPageCount === 'number' && + 'treeLeafPageCount' in stats && + typeof stats.treeLeafPageCount === 'number' && + 'overflowPages' in stats && + typeof stats.overflowPages === 'number' && + 'pageSize' in stats && + typeof stats.pageSize === 'number' + ) { + branchPages = stats.treeBranchPageCount; + leafPages = stats.treeLeafPageCount; + overflowPages = stats.overflowPages; + pageSize = stats.pageSize; + totalSize = (branchPages + leafPages + overflowPages) * pageSize; + } + return { actualSize: totalSize, numItems }; + } } diff --git a/yarn-project/p2p/src/mem_pools/instrumentation.ts index 102235a406e..45e3cde4f6b 100644 --- a/yarn-project/p2p/src/mem_pools/instrumentation.ts +++ b/yarn-project/p2p/src/mem_pools/instrumentation.ts @@ -1,5 +1,12 @@ import { type Gossipable } from '@aztec/circuit-types'; -import { Attributes, type Histogram, Metrics, type TelemetryClient, type UpDownCounter } from '@aztec/telemetry-client'; +import { + Attributes, + type Histogram, + LmdbMetrics, + Metrics, + type TelemetryClient, + type UpDownCounter, +} from '@aztec/telemetry-client'; /** * Instrumentation class for the Pools (TxPool, AttestationPool, etc). 
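[Editorial aside, not part of the patch series.] The hunks above and below wire a store's `estimateSize()` output into the new `LmdbMetrics` gauges. The sketch below shows that composition on its own, assuming the `@aztec/telemetry-client` and `@aztec/kv-store` package entry points export the names introduced in this series; the standalone helper function and its name are illustrative only, not code from the PR.

// Minimal sketch: feed AztecKVStore.estimateSize() into LmdbMetrics, mirroring the
// archiver and tx-pool changes in this series. `meter` and `store` are supplied by the caller.
import { LmdbMetrics, type Meter, Metrics } from '@aztec/telemetry-client';
import { type AztecKVStore } from '@aztec/kv-store';

function reportLmdbStoreMetrics(meter: Meter, store: AztecKVStore) {
  // The three descriptors map onto the mempool metric names added in metrics.ts.
  const metrics = new LmdbMetrics(
    meter,
    { name: Metrics.MEMPOOL_DB_MAP_SIZE, description: 'Database map size for the Tx mempool' },
    { name: Metrics.MEMPOOL_DB_USED_SIZE, description: 'Database used size for the Tx mempool' },
    { name: Metrics.MEMPOOL_DB_NUM_ITEMS, description: 'Num items in database for the Tx mempool' },
  );
  // estimateSize() now reports the LMDB map size (virtual address space reserved),
  // the bytes actually used by pages, and the entry count across sub-DBs.
  const { mappingSize, actualSize, numItems } = store.estimateSize();
  // recordDBMetrics expects (map size, item count, used size), as in aztec_kv_tx_pool.ts.
  metrics.recordDBMetrics(mappingSize, numItems, actualSize);
}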
@@ -10,6 +17,8 @@ export class PoolInstrumentation { /** Tracks tx size */ private objectSize: Histogram; + private dbMetrics: LmdbMetrics; + private defaultAttributes; constructor(telemetry: TelemetryClient, name: string) { @@ -35,6 +44,26 @@ export class PoolInstrumentation { ], }, }); + + this.dbMetrics = new LmdbMetrics( + meter, + { + name: Metrics.MEMPOOL_DB_MAP_SIZE, + description: 'Database map size for the Tx mempool', + }, + { + name: Metrics.MEMPOOL_DB_USED_SIZE, + description: 'Database used size for the Tx mempool', + }, + { + name: Metrics.MEMPOOL_DB_NUM_ITEMS, + description: 'Num items in database for the Tx mempool', + }, + ); + } + + public recordDBMetrics(dbMapSize: number, dbNumItems: number, dbUsedSize: number) { + this.dbMetrics.recordDBMetrics(dbMapSize, dbNumItems, dbUsedSize); } public recordSize(poolObject: PoolObject) { diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts index 04d931c4240..5b409013cd2 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts @@ -53,6 +53,8 @@ export class AztecKVTxPool implements TxPool { } this.#metrics.recordRemovedObjects(deleted, 'pending'); this.#metrics.recordAddedObjects(txHashes.length, 'mined'); + const storeSizes = this.#store.estimateSize(); + this.#metrics.recordDBMetrics(storeSizes.mappingSize, storeSizes.numItems, storeSizes.actualSize); }); } diff --git a/yarn-project/telemetry-client/src/index.ts b/yarn-project/telemetry-client/src/index.ts index 962f158dcca..ce7d17939bf 100644 --- a/yarn-project/telemetry-client/src/index.ts +++ b/yarn-project/telemetry-client/src/index.ts @@ -2,3 +2,4 @@ export * from './telemetry.js'; export * from './histogram_utils.js'; export * from './with_tracer.js'; export * from './prom_otel_adapter.js'; +export * from './lmdb_metrics.js'; diff --git a/yarn-project/telemetry-client/src/lmdb_metrics.ts b/yarn-project/telemetry-client/src/lmdb_metrics.ts new file mode 100644 index 00000000000..62d98dcece6 --- /dev/null +++ b/yarn-project/telemetry-client/src/lmdb_metrics.ts @@ -0,0 +1,38 @@ +import { type Gauge, type Meter, type Metrics, ValueType } from './telemetry.js'; + +export type LmdbMetricDescriptor = { + name: Metrics; + description: string; +}; + +export class LmdbMetrics { + private dbMapSize: Gauge; + private dbUsedSize: Gauge; + private dbNumItems: Gauge; + + constructor( + meter: Meter, + dbMapSizeDescriptor: LmdbMetricDescriptor, + dbUsedSizeDescriptor: LmdbMetricDescriptor, + dbNumItemsDescriptor: LmdbMetricDescriptor, + ) { + this.dbMapSize = meter.createGauge(dbMapSizeDescriptor.name, { + description: dbMapSizeDescriptor.description, + valueType: ValueType.INT, + }); + this.dbUsedSize = meter.createGauge(dbUsedSizeDescriptor.name, { + description: dbUsedSizeDescriptor.description, + valueType: ValueType.INT, + }); + this.dbNumItems = meter.createGauge(dbNumItemsDescriptor.name, { + description: dbNumItemsDescriptor.description, + valueType: ValueType.INT, + }); + } + + public recordDBMetrics(dbMapSize: number, dbNumItems: number, dbUsedSize: number) { + this.dbMapSize.record(dbMapSize); + this.dbNumItems.record(dbNumItems); + this.dbUsedSize.record(dbUsedSize); + } +} diff --git a/yarn-project/telemetry-client/src/metrics.ts b/yarn-project/telemetry-client/src/metrics.ts index 32f2996487a..4ff83135235 100644 --- a/yarn-project/telemetry-client/src/metrics.ts +++ b/yarn-project/telemetry-client/src/metrics.ts @@ 
-25,12 +25,18 @@ export const CIRCUIT_SIZE = 'aztec.circuit.size'; export const MEMPOOL_TX_COUNT = 'aztec.mempool.tx_count'; export const MEMPOOL_TX_SIZE = 'aztec.mempool.tx_size'; +export const MEMPOOL_DB_NUM_ITEMS = 'aztec.mempool.db.num_items'; +export const MEMPOOL_DB_MAP_SIZE = 'aztec.mempool.db.map_size'; +export const MEMPOOL_DB_USED_SIZE = 'aztec.mempool.db.used_size'; export const ARCHIVER_SYNC_DURATION = 'aztec.archiver.sync_duration'; export const ARCHIVER_BLOCK_HEIGHT = 'aztec.archiver.block_height'; export const ARCHIVER_BLOCK_SIZE = 'aztec.archiver.block_size'; export const ARCHIVER_ROLLUP_PROOF_DELAY = 'aztec.archiver.rollup_proof_delay'; export const ARCHIVER_ROLLUP_PROOF_COUNT = 'aztec.archiver.rollup_proof_count'; +export const ARCHIVER_DB_NUM_ITEMS = 'aztec.archiver.db.num_items'; +export const ARCHIVER_DB_MAP_SIZE = 'aztec.archiver.db.map_size'; +export const ARCHIVER_DB_USED_SIZE = 'aztec.archiver.db.used_size'; export const NODE_RECEIVE_TX_DURATION = 'aztec.node.receive_tx.duration'; export const NODE_RECEIVE_TX_COUNT = 'aztec.node.receive_tx.count'; @@ -73,4 +79,90 @@ export const WORLD_STATE_SYNC_DURATION = 'aztec.world_state.sync.duration'; export const WORLD_STATE_MERKLE_TREE_SIZE = 'aztec.world_state.merkle_tree_size'; export const WORLD_STATE_DB_SIZE = 'aztec.world_state.db_size'; +export const WORLD_STATE_DB_MAP_SIZE_NULLIFIER = 'aztec.world_state.db_map_size.nullifier'; +export const WORLD_STATE_DB_MAP_SIZE_PUBLIC_DATA = 'aztec.world_state.db_map_size.public_data'; +export const WORLD_STATE_DB_MAP_SIZE_ARCHIVE = 'aztec.world_state.db_map_size.archive'; +export const WORLD_STATE_DB_MAP_SIZE_MESSAGE = 'aztec.world_state.db_map_size.message'; +export const WORLD_STATE_DB_MAP_SIZE_NOTE_HASH = 'aztec.world_state.db_map_size.note_hash'; + +export const WORLD_STATE_TREE_SIZE_NULLIFIER = 'aztec.world_state.tree_size.nullifier'; +export const WORLD_STATE_TREE_SIZE_PUBLIC_DATA = 'aztec.world_state.tree_size.public_data'; +export const WORLD_STATE_TREE_SIZE_ARCHIVE = 'aztec.world_state.tree_size.archive'; +export const WORLD_STATE_TREE_SIZE_MESSAGE = 'aztec.world_state.tree_size.message'; +export const WORLD_STATE_TREE_SIZE_NOTE_HASH = 'aztec.world_state.tree_size.note_hash'; + +export const WORLD_STATE_UNFINALISED_HEIGHT_NULLIFIER = 'aztec.world_state.unfinalised_height.nullifier'; +export const WORLD_STATE_UNFINALISED_HEIGHT_PUBLIC_DATA = 'aztec.world_state.unfinalised_height.public_data'; +export const WORLD_STATE_UNFINALISED_HEIGHT_ARCHIVE = 'aztec.world_state.unfinalised_height.archive'; +export const WORLD_STATE_UNFINALISED_HEIGHT_MESSAGE = 'aztec.world_state.unfinalised_height.message'; +export const WORLD_STATE_UNFINALISED_HEIGHT_NOTE_HASH = 'aztec.world_state.unfinalised_height.note_hash'; + +export const WORLD_STATE_FINALISED_HEIGHT_NULLIFIER = 'aztec.world_state.finalised_height.nullifier'; +export const WORLD_STATE_FINALISED_HEIGHT_PUBLIC_DATA = 'aztec.world_state.finalised_height.public_data'; +export const WORLD_STATE_FINALISED_HEIGHT_ARCHIVE = 'aztec.world_state.finalised_height.archive'; +export const WORLD_STATE_FINALISED_HEIGHT_MESSAGE = 'aztec.world_state.finalised_height.message'; +export const WORLD_STATE_FINALISED_HEIGHT_NOTE_HASH = 'aztec.world_state.finalised_height.note_hash'; + +export const WORLD_STATE_OLDEST_BLOCK_NULLIFIER = 'aztec.world_state.oldest_block.nullifier'; +export const WORLD_STATE_OLDEST_BLOCK_PUBLIC_DATA = 'aztec.world_state.oldest_block.public_data'; +export const WORLD_STATE_OLDEST_BLOCK_ARCHIVE = 
+export const WORLD_STATE_OLDEST_BLOCK_ARCHIVE = 'aztec.world_state.oldest_block.archive';
+export const WORLD_STATE_OLDEST_BLOCK_MESSAGE = 'aztec.world_state.oldest_block.message';
+export const WORLD_STATE_OLDEST_BLOCK_NOTE_HASH = 'aztec.world_state.oldest_block.note_hash';
+
+export const WORLD_STATE_BLOCKS_DB_USED_SIZE_NULLIFIER = 'aztec.world_state.db_used_size.blocks.nullifier';
+export const WORLD_STATE_BLOCKS_DB_USED_SIZE_PUBLIC_DATA = 'aztec.world_state.db_used_size.blocks.public_data';
+export const WORLD_STATE_BLOCKS_DB_USED_SIZE_ARCHIVE = 'aztec.world_state.db_used_size.blocks.archive';
+export const WORLD_STATE_BLOCKS_DB_USED_SIZE_MESSAGE = 'aztec.world_state.db_used_size.blocks.message';
+export const WORLD_STATE_BLOCKS_DB_USED_SIZE_NOTE_HASH = 'aztec.world_state.db_used_size.blocks.note_hash';
+
+export const WORLD_STATE_BLOCKS_DB_NUM_ITEMS_NULLIFIER = 'aztec.world_state.db_num_items.blocks.nullifier';
+export const WORLD_STATE_BLOCKS_DB_NUM_ITEMS_PUBLIC_DATA = 'aztec.world_state.db_num_items.blocks.public_data';
+export const WORLD_STATE_BLOCKS_DB_NUM_ITEMS_ARCHIVE = 'aztec.world_state.db_num_items.blocks.archive';
+export const WORLD_STATE_BLOCKS_DB_NUM_ITEMS_MESSAGE = 'aztec.world_state.db_num_items.blocks.message';
+export const WORLD_STATE_BLOCKS_DB_NUM_ITEMS_NOTE_HASH = 'aztec.world_state.db_num_items.blocks.note_hash';
+
+export const WORLD_STATE_NODES_DB_USED_SIZE_NULLIFIER = 'aztec.world_state.db_used_size.nodes.nullifier';
+export const WORLD_STATE_NODES_DB_USED_SIZE_PUBLIC_DATA = 'aztec.world_state.db_used_size.nodes.public_data';
+export const WORLD_STATE_NODES_DB_USED_SIZE_ARCHIVE = 'aztec.world_state.db_used_size.nodes.archive';
+export const WORLD_STATE_NODES_DB_USED_SIZE_MESSAGE = 'aztec.world_state.db_used_size.nodes.message';
+export const WORLD_STATE_NODES_DB_USED_SIZE_NOTE_HASH = 'aztec.world_state.db_used_size.nodes.note_hash';
+
+export const WORLD_STATE_NODES_DB_NUM_ITEMS_NULLIFIER = 'aztec.world_state.db_num_items.nodes.nullifier';
+export const WORLD_STATE_NODES_DB_NUM_ITEMS_PUBLIC_DATA = 'aztec.world_state.db_num_items.nodes.public_data';
+export const WORLD_STATE_NODES_DB_NUM_ITEMS_ARCHIVE = 'aztec.world_state.db_num_items.nodes.archive';
+export const WORLD_STATE_NODES_DB_NUM_ITEMS_MESSAGE = 'aztec.world_state.db_num_items.nodes.message';
+export const WORLD_STATE_NODES_DB_NUM_ITEMS_NOTE_HASH = 'aztec.world_state.db_num_items.nodes.note_hash';
+
+export const WORLD_STATE_LEAF_PREIMAGE_DB_USED_SIZE_NULLIFIER =
+  'aztec.world_state.db_used_size.leaf_preimage.nullifier';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_USED_SIZE_PUBLIC_DATA =
+  'aztec.world_state.db_used_size.leaf_preimage.public_data';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_USED_SIZE_ARCHIVE = 'aztec.world_state.db_used_size.leaf_preimage.archive';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_USED_SIZE_MESSAGE = 'aztec.world_state.db_used_size.leaf_preimage.message';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_USED_SIZE_NOTE_HASH =
+  'aztec.world_state.db_used_size.leaf_preimage.note_hash';
+
+export const WORLD_STATE_LEAF_PREIMAGE_DB_NUM_ITEMS_NULLIFIER =
+  'aztec.world_state.db_num_items.leaf_preimage.nullifier';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_NUM_ITEMS_PUBLIC_DATA =
+  'aztec.world_state.db_num_items.leaf_preimage.public_data';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_NUM_ITEMS_ARCHIVE = 'aztec.world_state.db_num_items.leaf_preimage.archive';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_NUM_ITEMS_MESSAGE = 'aztec.world_state.db_num_items.leaf_preimage.message';
+export const WORLD_STATE_LEAF_PREIMAGE_DB_NUM_ITEMS_NOTE_HASH =
+  'aztec.world_state.db_num_items.leaf_preimage.note_hash';
+
+export const WORLD_STATE_LEAF_INDICES_DB_USED_SIZE_NULLIFIER = 'aztec.world_state.db_used_size.leaf_indices.nullifier';
+export const WORLD_STATE_LEAF_INDICES_DB_USED_SIZE_PUBLIC_DATA =
+  'aztec.world_state.db_used_size.leaf_indices.public_data';
+export const WORLD_STATE_LEAF_INDICES_DB_USED_SIZE_ARCHIVE = 'aztec.world_state.db_used_size.leaf_indices.archive';
+export const WORLD_STATE_LEAF_INDICES_DB_USED_SIZE_MESSAGE = 'aztec.world_state.db_used_size.leaf_indices.message';
+export const WORLD_STATE_LEAF_INDICES_DB_USED_SIZE_NOTE_HASH = 'aztec.world_state.db_used_size.leaf_indices.note_hash';
+
+export const WORLD_STATE_LEAF_INDICES_DB_NUM_ITEMS_NULLIFIER = 'aztec.world_state.db_num_items.leaf_indices.nullifier';
+export const WORLD_STATE_LEAF_INDICES_DB_NUM_ITEMS_PUBLIC_DATA =
+  'aztec.world_state.db_num_items.leaf_indices.public_data';
+export const WORLD_STATE_LEAF_INDICES_DB_NUM_ITEMS_ARCHIVE = 'aztec.world_state.db_num_items.leaf_indices.archive';
+export const WORLD_STATE_LEAF_INDICES_DB_NUM_ITEMS_MESSAGE = 'aztec.world_state.db_num_items.leaf_indices.message';
+export const WORLD_STATE_LEAF_INDICES_DB_NUM_ITEMS_NOTE_HASH = 'aztec.world_state.db_num_items.leaf_indices.note_hash';
+
 export const PROOF_VERIFIER_COUNT = 'aztec.proof_verifier.count';
diff --git a/yarn-project/world-state/src/synchronizer/factory.ts b/yarn-project/world-state/src/synchronizer/factory.ts
index fa2a7c43ef4..3aff058e66e 100644
--- a/yarn-project/world-state/src/synchronizer/factory.ts
+++ b/yarn-project/world-state/src/synchronizer/factory.ts
@@ -16,7 +16,7 @@ export async function createWorldStateSynchronizer(
   client: TelemetryClient,
 ) {
   const merkleTrees = await createWorldState(config, client);
-  return new ServerWorldStateSynchronizer(merkleTrees, l2BlockSource, config);
+  return new ServerWorldStateSynchronizer(merkleTrees, l2BlockSource, config, client);
 }
 
 export async function createWorldState(
diff --git a/yarn-project/world-state/src/synchronizer/instrumentation.ts b/yarn-project/world-state/src/synchronizer/instrumentation.ts
new file mode 100644
index 00000000000..d52dca0aef4
--- /dev/null
+++ b/yarn-project/world-state/src/synchronizer/instrumentation.ts
@@ -0,0 +1,150 @@
+import { MerkleTreeId } from '@aztec/circuit-types';
+import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log';
+import { type Gauge, type Meter, type TelemetryClient, ValueType } from '@aztec/telemetry-client';
+
+import { type DBStats, type TreeDBStats, type TreeMeta, type WorldStateStatusFull } from '../native/message.js';
+
+type TreeTypeString = 'nullifier' | 'note_hash' | 'archive' | 'message' | 'public_data';
+type DBTypeString = 'leaf_preimage' | 'leaf_indices' | 'nodes' | 'blocks';
+
+class TreeDBInstrumentation {
+  private dbNumItems: Gauge;
+  private dbUsedSize: Gauge;
+
+  constructor(meter: Meter, treeName: TreeTypeString, dbName: DBTypeString) {
+    this.dbUsedSize = meter.createGauge(`aztec.world_state.db_used_size.${dbName}.${treeName}`, {
+      description: `The current used database size for the ${treeName} tree ${dbName} database`,
+      valueType: ValueType.INT,
+    });
+
+    this.dbNumItems = meter.createGauge(`aztec.world_state.db_num_items.${dbName}.${treeName}`, {
+      description: `The current number of items in the ${treeName} tree ${dbName} database`,
+      valueType: ValueType.INT,
+    });
+  }
+
+  public updateMetrics(treeDbStats: DBStats) {
+    this.dbNumItems.record(Number(treeDbStats.numDataItems));
+    this.dbUsedSize.record(Number(treeDbStats.totalUsedSize));
+  }
+}
+
+class TreeInstrumentation {
+  private treeDbInstrumentation: Map<DBTypeString, TreeDBInstrumentation> = new Map<
+    DBTypeString,
+    TreeDBInstrumentation
+  >();
+  private dbMapSize: Gauge;
+  private treeSize: Gauge;
+  private unfinalisedHeight: Gauge;
+  private finalisedHeight: Gauge;
+  private oldestBlock: Gauge;
+
+  constructor(meter: Meter, treeName: TreeTypeString, private log: DebugLogger) {
+    this.dbMapSize = meter.createGauge(`aztec.world_state.db_map_size.${treeName}`, {
+      description: `The current configured map size for the ${treeName} tree`,
+      valueType: ValueType.INT,
+    });
+
+    this.treeSize = meter.createGauge(`aztec.world_state.tree_size.${treeName}`, {
+      description: `The current number of leaves in the ${treeName} tree`,
+      valueType: ValueType.INT,
+    });
+
+    this.unfinalisedHeight = meter.createGauge(`aztec.world_state.unfinalised_height.${treeName}`, {
+      description: `The unfinalised block height of the ${treeName} tree`,
+      valueType: ValueType.INT,
+    });
+
+    this.finalisedHeight = meter.createGauge(`aztec.world_state.finalised_height.${treeName}`, {
+      description: `The finalised block height of the ${treeName} tree`,
+      valueType: ValueType.INT,
+    });
+
+    this.oldestBlock = meter.createGauge(`aztec.world_state.oldest_block.${treeName}`, {
+      description: `The oldest historical block of the ${treeName} tree`,
+      valueType: ValueType.INT,
+    });
+
+    this.treeDbInstrumentation.set('blocks', new TreeDBInstrumentation(meter, treeName, 'blocks'));
+    this.treeDbInstrumentation.set('nodes', new TreeDBInstrumentation(meter, treeName, 'nodes'));
+    this.treeDbInstrumentation.set('leaf_preimage', new TreeDBInstrumentation(meter, treeName, 'leaf_preimage'));
+    this.treeDbInstrumentation.set('leaf_indices', new TreeDBInstrumentation(meter, treeName, 'leaf_indices'));
+  }
+
+  private updateDBMetrics(dbName: DBTypeString, dbStats: DBStats) {
+    const inst = this.treeDbInstrumentation.get(dbName);
+    if (!inst) {
+      this.log.error(`Failed to find instrumentation for ${dbName}`);
+      return;
+    }
+    inst.updateMetrics(dbStats);
+  }
+
+  public updateMetrics(treeDbStats: TreeDBStats, treeMeta: TreeMeta) {
+    this.dbMapSize.record(Number(treeDbStats.mapSize));
+    this.treeSize.record(Number(treeMeta.committedSize));
+    this.finalisedHeight.record(Number(treeMeta.finalisedBlockHeight));
+    this.unfinalisedHeight.record(Number(treeMeta.unfinalisedBlockHeight));
+    this.oldestBlock.record(Number(treeMeta.oldestHistoricBlock));
+
+    this.updateDBMetrics('leaf_indices', treeDbStats.leafIndicesDBStats);
+    this.updateDBMetrics('leaf_preimage', treeDbStats.leafPreimagesDBStats);
+    this.updateDBMetrics('blocks', treeDbStats.blocksDBStats);
+    this.updateDBMetrics('nodes', treeDbStats.nodesDBStats);
+  }
+}
+
+export class WorldStateInstrumentation {
+  private treeInstrumentation: Map<MerkleTreeId, TreeInstrumentation> = new Map<MerkleTreeId, TreeInstrumentation>();
+
+  constructor(telemetry: TelemetryClient, private log = createDebugLogger('aztec:world-state:instrumentation')) {
+    const meter = telemetry.getMeter('World State');
+    this.treeInstrumentation.set(MerkleTreeId.ARCHIVE, new TreeInstrumentation(meter, 'archive', log));
+    this.treeInstrumentation.set(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, new TreeInstrumentation(meter, 'message', log));
+    this.treeInstrumentation.set(MerkleTreeId.NOTE_HASH_TREE, new TreeInstrumentation(meter, 'note_hash', log));
+    this.treeInstrumentation.set(MerkleTreeId.NULLIFIER_TREE, new TreeInstrumentation(meter, 'nullifier', log));
+    this.treeInstrumentation.set(MerkleTreeId.PUBLIC_DATA_TREE, new TreeInstrumentation(meter, 'public_data', log));
+  }
+
+  private updateTreeStats(treeDbStats: TreeDBStats, treeMeta: TreeMeta, tree: MerkleTreeId) {
+    const instrumentation = this.treeInstrumentation.get(tree);
+    if (!instrumentation) {
+      this.log.error(`Failed to retrieve instrumentation for tree ${MerkleTreeId[tree]}`);
+      return;
+    }
+    instrumentation.updateMetrics(treeDbStats, treeMeta);
+  }
+
+  public updateWorldStateMetrics(worldStateStatus: WorldStateStatusFull) {
+    this.updateTreeStats(
+      worldStateStatus.dbStats.archiveTreeStats,
+      worldStateStatus.meta.archiveTreeMeta,
+      MerkleTreeId.ARCHIVE,
+    );
+
+    this.updateTreeStats(
+      worldStateStatus.dbStats.messageTreeStats,
+      worldStateStatus.meta.messageTreeMeta,
+      MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
+    );
+
+    this.updateTreeStats(
+      worldStateStatus.dbStats.noteHashTreeStats,
+      worldStateStatus.meta.noteHashTreeMeta,
+      MerkleTreeId.NOTE_HASH_TREE,
+    );
+
+    this.updateTreeStats(
+      worldStateStatus.dbStats.nullifierTreeStats,
+      worldStateStatus.meta.nullifierTreeMeta,
+      MerkleTreeId.NULLIFIER_TREE,
+    );
+
+    this.updateTreeStats(
+      worldStateStatus.dbStats.publicDataTreeStats,
+      worldStateStatus.meta.publicDataTreeMeta,
+      MerkleTreeId.PUBLIC_DATA_TREE,
+    );
+  }
+}
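The gauge names built by TreeDBInstrumentation and TreeInstrumentation above are exactly the ones spelled out by the WORLD_STATE_* constants added to metrics.ts earlier in this diff. A tiny illustrative helper (not part of the patch) makes the naming scheme explicit:

// Illustrative only: the per-database, per-tree naming scheme used above.
type TreeName = 'nullifier' | 'note_hash' | 'archive' | 'message' | 'public_data';
type DbName = 'leaf_preimage' | 'leaf_indices' | 'nodes' | 'blocks';

function worldStateDbMetricName(kind: 'db_used_size' | 'db_num_items', db: DbName, tree: TreeName): string {
  return `aztec.world_state.${kind}.${db}.${tree}`;
}

// e.g. worldStateDbMetricName('db_used_size', 'nodes', 'nullifier')
//   -> 'aztec.world_state.db_used_size.nodes.nullifier' (WORLD_STATE_NODES_DB_USED_SIZE_NULLIFIER)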
diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts
index a8e0a3098bb..2cdcd42e1e1 100644
--- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts
+++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts
@@ -12,6 +12,7 @@ import { times } from '@aztec/foundation/collection';
 import { randomInt } from '@aztec/foundation/crypto';
 import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log';
 import { SHA256Trunc } from '@aztec/merkle-tree';
+import { NoopTelemetryClient } from '@aztec/telemetry-client/noop';
 
 import { jest } from '@jest/globals';
 import { type MockProxy, mock } from 'jest-mock-extended';
@@ -211,7 +212,7 @@ class TestWorldStateSynchronizer extends ServerWorldStateSynchronizer {
     worldStateConfig: WorldStateConfig,
     private mockBlockStream: L2BlockStream,
   ) {
-    super(merkleTrees, blockAndMessagesSource, worldStateConfig);
+    super(merkleTrees, blockAndMessagesSource, worldStateConfig, new NoopTelemetryClient());
   }
 
   protected override createBlockStream(): L2BlockStream {
diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts
index 1678b22e41a..7f5d67a3aa8 100644
--- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts
+++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts
@@ -23,10 +23,12 @@ import { createDebugLogger } from '@aztec/foundation/log';
 import { promiseWithResolvers } from '@aztec/foundation/promise';
 import { elapsed } from '@aztec/foundation/timer';
 import { SHA256Trunc } from '@aztec/merkle-tree';
+import { type TelemetryClient } from '@aztec/telemetry-client';
 
-import { type WorldStateStatusSummary } from '../native/message.js';
+import { type WorldStateStatusFull } from '../native/message.js';
 import { type MerkleTreeAdminDatabase } from '../world-state-db/merkle_tree_db.js';
 import { type WorldStateConfig } from './config.js';
+import { WorldStateInstrumentation } from './instrumentation.js';
 
 /**
  * Synchronizes the world state with the L2 blocks from a L2BlockSource via a block stream.
@@ -43,13 +45,16 @@ export class ServerWorldStateSynchronizer
   private syncPromise = promiseWithResolvers();
 
   protected blockStream: L2BlockStream | undefined;
+  private instrumentation: WorldStateInstrumentation;
 
   constructor(
     private readonly merkleTreeDb: MerkleTreeAdminDatabase,
     private readonly l2BlockSource: L2BlockSource & L1ToL2MessageSource,
     private readonly config: WorldStateConfig,
+    telemetry: TelemetryClient,
     private readonly log = createDebugLogger('aztec:world_state'),
   ) {
+    this.instrumentation = new WorldStateInstrumentation(telemetry);
     this.merkleTreeCommitted = this.merkleTreeDb.getCommitted();
   }
 
@@ -205,18 +210,24 @@ export class ServerWorldStateSynchronizer
     this.log.verbose(`Handling new L2 blocks from ${l2Blocks[0].number} to ${l2Blocks[l2Blocks.length - 1].number}`);
     const messagePromises = l2Blocks.map(block => this.l2BlockSource.getL1ToL2Messages(BigInt(block.number)));
     const l1ToL2Messages: Fr[][] = await Promise.all(messagePromises);
+    let updateStatus: WorldStateStatusFull | undefined = undefined;
 
     for (let i = 0; i < l2Blocks.length; i++) {
       const [duration, result] = await elapsed(() => this.handleL2Block(l2Blocks[i], l1ToL2Messages[i]));
       this.log.verbose(`Handled new L2 block`, {
         eventName: 'l2-block-handled',
         duration,
-        unfinalisedBlockNumber: result.unfinalisedBlockNumber,
-        finalisedBlockNumber: result.finalisedBlockNumber,
-        oldestHistoricBlock: result.oldestHistoricalBlock,
+        unfinalisedBlockNumber: result.summary.unfinalisedBlockNumber,
+        finalisedBlockNumber: result.summary.finalisedBlockNumber,
+        oldestHistoricBlock: result.summary.oldestHistoricalBlock,
         ...l2Blocks[i].getStats(),
       } satisfies L2BlockHandledStats);
+      updateStatus = result;
     }
+    if (!updateStatus) {
+      return;
+    }
+    this.instrumentation.updateWorldStateMetrics(updateStatus);
   }
 
   /**
@@ -225,7 +236,7 @@ export class ServerWorldStateSynchronizer
    * @param l1ToL2Messages - The L1 to L2 messages for the block.
    * @returns Whether the block handled was produced by this same node.
    */
-  private async handleL2Block(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise<WorldStateStatusSummary> {
+  private async handleL2Block(l2Block: L2Block, l1ToL2Messages: Fr[]): Promise<WorldStateStatusFull> {
     // First we check that the L1 to L2 messages hash to the block inHash.
     // Note that we cannot optimize this check by checking the root of the subtree after inserting the messages
     // to the real L1_TO_L2_MESSAGE_TREE (like we do in merkleTreeDb.handleL2BlockAndMessages(...)) because that
@@ -240,7 +251,7 @@ export class ServerWorldStateSynchronizer
       this.syncPromise.resolve();
     }
 
-    return result.summary;
+    return result;
   }
 
   private async handleChainFinalized(blockNumber: number) {
@@ -255,7 +266,8 @@ export class ServerWorldStateSynchronizer
 
   private async handleChainPruned(blockNumber: number) {
     this.log.info(`Chain pruned to block ${blockNumber}`);
-    await this.merkleTreeDb.unwindBlocks(BigInt(blockNumber));
+    const status = await this.merkleTreeDb.unwindBlocks(BigInt(blockNumber));
+    this.instrumentation.updateWorldStateMetrics(status);
   }
 
   /**
diff --git a/yarn-project/world-state/src/test/integration.test.ts b/yarn-project/world-state/src/test/integration.test.ts
index 8f839a4d9eb..20093e48e18 100644
--- a/yarn-project/world-state/src/test/integration.test.ts
+++ b/yarn-project/world-state/src/test/integration.test.ts
@@ -4,6 +4,7 @@ import { EthAddress, type Fr } from '@aztec/circuits.js';
 import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log';
 import { sleep } from '@aztec/foundation/sleep';
 import { type DataStoreConfig } from '@aztec/kv-store/config';
+import { NoopTelemetryClient } from '@aztec/telemetry-client/noop';
 
 import { jest } from '@jest/globals';
 
@@ -49,7 +50,7 @@ describe('world-state integration', () => {
     archiver = new MockPrefilledArchiver(blocks, messages);
 
     db = (await createWorldState(config)) as NativeWorldStateService;
-    synchronizer = new TestWorldStateSynchronizer(db, archiver, config);
+    synchronizer = new TestWorldStateSynchronizer(db, archiver, config, new NoopTelemetryClient());
     log.info(`Created synchronizer`);
   });
 
@@ -142,7 +143,7 @@ describe('world-state integration', () => {
       await expectSynchedToBlock(5);
       await synchronizer.stopBlockStream();
 
-      synchronizer = new TestWorldStateSynchronizer(db, archiver, config);
+      synchronizer = new TestWorldStateSynchronizer(db, archiver, config, new NoopTelemetryClient());
 
       archiver.createBlocks(3);
       await synchronizer.start();
@@ -159,7 +160,12 @@ describe('world-state integration', () => {
     });
 
     it('syncs only proven blocks when instructed', async () => {
-      synchronizer = new TestWorldStateSynchronizer(db, archiver, { ...config, worldStateProvenBlocksOnly: true });
+      synchronizer = new TestWorldStateSynchronizer(
+        db,
+        archiver,
+        { ...config, worldStateProvenBlocksOnly: true },
+        new NoopTelemetryClient(),
+      );
 
       archiver.createBlocks(5);
       archiver.setProvenBlockNumber(3);
@@ -193,7 +199,12 @@ describe('world-state integration', () => {
   describe('immediate sync', () => {
     beforeEach(() => {
       // Set up a synchronizer with a longer block check interval to avoid interference with immediate sync
-      synchronizer = new TestWorldStateSynchronizer(db, archiver, { ...config, worldStateBlockCheckIntervalMS: 1000 });
+      synchronizer = new TestWorldStateSynchronizer(
+        db,
+        archiver,
+        { ...config, worldStateBlockCheckIntervalMS: 1000 },
+        new NoopTelemetryClient(),
+      );
     });
 
     it('syncs immediately to the latest block', async () => {
diff --git a/yarn-project/world-state/src/world-state-db/merkle_trees.ts b/yarn-project/world-state/src/world-state-db/merkle_trees.ts
index 335efdee061..2842eebdae1 100644
--- a/yarn-project/world-state/src/world-state-db/merkle_trees.ts
+++ b/yarn-project/world-state/src/world-state-db/merkle_trees.ts
@@ -712,7 +712,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase {
     }
     await this.#snapshot(l2Block.number);
 
-    this.metrics.recordDbSize(this.store.estimateSize().bytes);
+    this.metrics.recordDbSize(this.store.estimateSize().actualSize);
     this.metrics.recordSyncDuration('commit', timer);
     return buildEmptyWorldStateStatusFull();
   }

From 25670558f36378ab3de090f2f10d85e40a12d223 Mon Sep 17 00:00:00 2001
From: PhilWindle
Date: Mon, 18 Nov 2024 17:12:14 +0000
Subject: [PATCH 30/31] Some comments

---
 yarn-project/kv-store/src/lmdb/store.ts | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/yarn-project/kv-store/src/lmdb/store.ts b/yarn-project/kv-store/src/lmdb/store.ts
index 726007937d6..91b1020a289 100644
--- a/yarn-project/kv-store/src/lmdb/store.ts
+++ b/yarn-project/kv-store/src/lmdb/store.ts
@@ -184,7 +184,7 @@ export class AztecLmdbStore implements AztecKVStore {
   estimateSize(): { mappingSize: number; actualSize: number; numItems: number } {
     const stats = this.#rootDb.getStats();
-    // The 'mapSize' is the total amount of vertual address space allocated to the DB (effectively the maximum possible size)
+    // The 'mapSize' is the total amount of virtual address space allocated to the DB (effectively the maximum possible size)
     // http://www.lmdb.tech/doc/group__mdb.html#a4bde3c8b676457342cba2fe27aed5fbd
     let mapSize = 0;
     if ('mapSize' in stats && typeof stats.mapSize === 'number') {
@@ -207,9 +207,11 @@ export class AztecLmdbStore implements AztecKVStore {
     let pageSize = 0;
     let totalSize = 0;
     let numItems = 0;
+    // This is the total number of key/value pairs present in the DB
     if ('entryCount' in stats && typeof stats.entryCount === 'number') {
       numItems = stats.entryCount;
     }
+    // The closest value we can get to the actual size of the database is the number of consumed pages * the page size
    if (
      'treeBranchPageCount' in stats &&
      typeof stats.treeBranchPageCount === 'number' &&
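The comments added in this commit summarise how AztecLmdbStore.estimateSize() interprets LMDB's getStats() output: mapSize is the configured ceiling, entryCount is the item count, and the best approximation of bytes actually used is consumed pages times the page size. A rough standalone sketch of that calculation follows (illustrative only; the treeLeafPageCount and overflowPages field names are assumed from lmdb-js and are not shown in the hunk above).

// Illustrative TypeScript sketch of the estimation described in the comments above.
type LmdbStats = {
  mapSize: number; // configured maximum size (reserved virtual address space)
  pageSize: number;
  treeBranchPageCount: number;
  treeLeafPageCount: number; // assumed field name
  overflowPages: number; // assumed field name
  entryCount: number; // total number of key/value pairs
};

function estimateLmdbSize(stats: LmdbStats): { mappingSize: number; actualSize: number; numItems: number } {
  // Closest approximation of bytes actually consumed: used pages * page size.
  const usedPages = stats.treeBranchPageCount + stats.treeLeafPageCount + stats.overflowPages;
  return {
    mappingSize: stats.mapSize,
    actualSize: usedPages * stats.pageSize,
    numItems: stats.entryCount,
  };
}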
From 15b7d8c35d91a5025720faa2035e651b7c617989 Mon Sep 17 00:00:00 2001
From: PhilWindle
Date: Wed, 20 Nov 2024 09:11:27 +0000
Subject: [PATCH 31/31] Review changes

---
 yarn-project/archiver/src/archiver/archiver.ts        | 2 +-
 yarn-project/archiver/src/archiver/instrumentation.ts | 4 ++--
 yarn-project/p2p/src/mem_pools/instrumentation.ts     | 4 ++--
 .../p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts     | 2 +-
 yarn-project/telemetry-client/src/lmdb_metrics.ts     | 8 ++++----
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/yarn-project/archiver/src/archiver/archiver.ts b/yarn-project/archiver/src/archiver/archiver.ts
index 451ecab15f8..9b59447ddf4 100644
--- a/yarn-project/archiver/src/archiver/archiver.ts
+++ b/yarn-project/archiver/src/archiver/archiver.ts
@@ -267,7 +267,7 @@ export class Archiver implements ArchiveSource {
       await this.handleEpochPrune(provenBlockNumber, currentL1BlockNumber);
 
       const storeSizes = this.store.estimateSize();
-      this.instrumentation.recordDBMetrics(storeSizes.mappingSize, storeSizes.numItems, storeSizes.actualSize);
+      this.instrumentation.recordDBMetrics(storeSizes);
     }
   }
 
diff --git a/yarn-project/archiver/src/archiver/instrumentation.ts b/yarn-project/archiver/src/archiver/instrumentation.ts
index 119bb14010a..1d6343b8f9d 100644
--- a/yarn-project/archiver/src/archiver/instrumentation.ts
+++ b/yarn-project/archiver/src/archiver/instrumentation.ts
@@ -75,8 +75,8 @@ export class ArchiverInstrumentation {
     );
   }
 
-  public recordDBMetrics(dbMapSize: number, dbNumItems: number, dbUsedSize: number) {
-    this.dbMetrics.recordDBMetrics(dbMapSize, dbNumItems, dbUsedSize);
+  public recordDBMetrics(metrics: { mappingSize: number; numItems: number; actualSize: number }) {
+    this.dbMetrics.recordDBMetrics(metrics);
   }
 
   public isEnabled(): boolean {
diff --git a/yarn-project/p2p/src/mem_pools/instrumentation.ts b/yarn-project/p2p/src/mem_pools/instrumentation.ts
index 45e3cde4f6b..8f335b149fb 100644
--- a/yarn-project/p2p/src/mem_pools/instrumentation.ts
+++ b/yarn-project/p2p/src/mem_pools/instrumentation.ts
@@ -62,8 +62,8 @@ export class PoolInstrumentation {
     );
   }
 
-  public recordDBMetrics(dbMapSize: number, dbNumItems: number, dbUsedSize: number) {
-    this.dbMetrics.recordDBMetrics(dbMapSize, dbNumItems, dbUsedSize);
+  public recordDBMetrics(metrics: { mappingSize: number; numItems: number; actualSize: number }) {
+    this.dbMetrics.recordDBMetrics(metrics);
   }
 
   public recordSize(poolObject: PoolObject) {
diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts
index 5b409013cd2..9a937284697 100644
--- a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts
+++ b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts
@@ -54,7 +54,7 @@ export class AztecKVTxPool implements TxPool {
       this.#metrics.recordRemovedObjects(deleted, 'pending');
       this.#metrics.recordAddedObjects(txHashes.length, 'mined');
       const storeSizes = this.#store.estimateSize();
-      this.#metrics.recordDBMetrics(storeSizes.mappingSize, storeSizes.numItems, storeSizes.actualSize);
+      this.#metrics.recordDBMetrics(storeSizes);
     });
   }
 
diff --git a/yarn-project/telemetry-client/src/lmdb_metrics.ts b/yarn-project/telemetry-client/src/lmdb_metrics.ts
index 62d98dcece6..c8efc91a801 100644
--- a/yarn-project/telemetry-client/src/lmdb_metrics.ts
+++ b/yarn-project/telemetry-client/src/lmdb_metrics.ts
@@ -30,9 +30,9 @@ export class LmdbMetrics {
     });
   }
 
-  public recordDBMetrics(dbMapSize: number, dbNumItems: number, dbUsedSize: number) {
-    this.dbMapSize.record(dbMapSize);
-    this.dbNumItems.record(dbNumItems);
-    this.dbUsedSize.record(dbUsedSize);
+  public recordDBMetrics(metrics: { mappingSize: number; numItems: number; actualSize: number }) {
+    this.dbMapSize.record(metrics.mappingSize);
+    this.dbNumItems.record(metrics.numItems);
+    this.dbUsedSize.record(metrics.actualSize);
   }
 }
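Taken together, the review changes in this final commit switch recordDBMetrics from three positional numbers to a single object, so call sites can forward the estimateSize() result unchanged. A minimal sketch (illustrative, not part of the patch) of the resulting shape and the field-to-gauge mapping:

// Sketch of the post-review call shape: the estimate object is passed through as-is and
// each field maps onto exactly one gauge, avoiding the positional-argument ordering that
// the earlier recordDBMetrics(dbMapSize, dbNumItems, dbUsedSize) form required.
type DbSizeEstimate = { mappingSize: number; actualSize: number; numItems: number };

interface LmdbGauges {
  dbMapSize: { record(value: number): void };
  dbUsedSize: { record(value: number): void };
  dbNumItems: { record(value: number): void };
}

function recordDbMetrics(gauges: LmdbGauges, metrics: DbSizeEstimate): void {
  gauges.dbMapSize.record(metrics.mappingSize); // configured map size
  gauges.dbUsedSize.record(metrics.actualSize); // bytes actually consumed
  gauges.dbNumItems.record(metrics.numItems); // key/value pair count
}

// Call sites (archiver, tx pool) then reduce to:
//   instrumentation.recordDBMetrics(store.estimateSize());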