From e1c03f4d5ae4418b87f3c63b7acaab70dafec260 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 17 May 2024 00:21:47 -0700 Subject: [PATCH 001/127] rough fcmp++ tree impl (lots of work remaining to clean it up and fix) --- .gitignore | 2 + .gitmodules | 3 + src/CMakeLists.txt | 1 + src/blockchain_db/lmdb/db_lmdb.cpp | 18 +- src/blockchain_db/lmdb/db_lmdb.h | 10 + src/crypto/crypto.cpp | 6 + src/crypto/crypto.h | 6 + src/fcmp/CMakeLists.txt | 49 ++ src/fcmp/fcmp.cpp | 86 +++ src/fcmp/fcmp.h | 921 +++++++++++++++++++++++++++++ src/fcmp/fcmp_rust/CMakeLists.txt | 70 +++ src/fcmp/fcmp_rust/Cargo.toml | 21 + src/fcmp/fcmp_rust/build.rs | 5 + src/fcmp/fcmp_rust/src/lib.rs | 226 +++++++ tests/unit_tests/CMakeLists.txt | 3 + tests/unit_tests/fcmp_tree.cpp | 304 ++++++++++ 16 files changed, 1730 insertions(+), 1 deletion(-) create mode 100644 src/fcmp/CMakeLists.txt create mode 100644 src/fcmp/fcmp.cpp create mode 100644 src/fcmp/fcmp.h create mode 100644 src/fcmp/fcmp_rust/CMakeLists.txt create mode 100644 src/fcmp/fcmp_rust/Cargo.toml create mode 100644 src/fcmp/fcmp_rust/build.rs create mode 100644 src/fcmp/fcmp_rust/src/lib.rs create mode 100644 tests/unit_tests/fcmp_tree.cpp diff --git a/.gitignore b/.gitignore index 9f62575e5ab..2fc767cca2a 100644 --- a/.gitignore +++ b/.gitignore @@ -120,3 +120,5 @@ nbproject __pycache__/ *.pyc *.log + +Cargo.lock diff --git a/.gitmodules b/.gitmodules index 721cce3b4bb..95bee114a20 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,3 +14,6 @@ path = external/supercop url = https://github.com/monero-project/supercop branch = monero +[submodule "external/fcmp-plus-plus"] + path = external/fcmp-plus-plus + url = https://github.com/kayabaNerve/fcmp-plus-plus.git diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6190b40f830..357fac0cb04 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -89,6 +89,7 @@ add_subdirectory(ringct) add_subdirectory(checkpoints) add_subdirectory(cryptonote_basic) 
add_subdirectory(cryptonote_core) +add_subdirectory(fcmp) add_subdirectory(lmdb) add_subdirectory(multisig) add_subdirectory(net) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index d01119249cc..af456b00dca 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -199,6 +199,9 @@ namespace * * spent_keys input hash - * + * leaves leaf_idx {O.x, I.x, C.x} + * branches layer_idx [{branch_idx, branch_hash}...] + * * txpool_meta txn hash txn metadata * txpool_blob txn hash txn blob * @@ -210,7 +213,7 @@ namespace * attached as a prefix on the Data to serve as the DUPSORT key. * (DUPFIXED saves 8 bytes per record.) * - * The output_amounts table doesn't use a dummy key, but uses DUPSORT. + * The output_amounts and branches tables don't use a dummy key, but use DUPSORT */ const char* const LMDB_BLOCKS = "blocks"; const char* const LMDB_BLOCK_HEIGHTS = "block_heights"; @@ -228,6 +231,10 @@ const char* const LMDB_OUTPUT_TXS = "output_txs"; const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts"; const char* const LMDB_SPENT_KEYS = "spent_keys"; +// Curve trees tree types +const char* const LMDB_LEAVES = "leaves"; +const char* const LMDB_BRANCHES = "branches"; + const char* const LMDB_TXPOOL_META = "txpool_meta"; const char* const LMDB_TXPOOL_BLOB = "txpool_blob"; @@ -1437,6 +1444,9 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); + lmdb_db_open(txn, LMDB_BRANCHES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_branches, "Failed to open db handle for m_branches"); + lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for 
m_txpool_meta"); lmdb_db_open(txn, LMDB_TXPOOL_BLOB, MDB_CREATE, m_txpool_blob, "Failed to open db handle for m_txpool_blob"); @@ -1456,6 +1466,8 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_block_heights, compare_hash32); mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); + mdb_set_dupsort(txn, m_leaves, compare_uint64); + mdb_set_dupsort(txn, m_branches, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); mdb_set_dupsort(txn, m_block_info, compare_uint64); if (!(mdb_flags & MDB_RDONLY)) @@ -1633,6 +1645,10 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str())); if (auto result = mdb_drop(txn, m_spent_keys, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str())); + if (auto result = mdb_drop(txn, m_leaves, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); + if (auto result = mdb_drop(txn, m_branches, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_branches: ", result).c_str())); (void)mdb_drop(txn, m_hf_starting_heights, 0); // this one is dropped in new code if (auto result = mdb_drop(txn, m_hf_versions, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_versions: ", result).c_str())); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 6eeb942dc25..c31250af2fa 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -64,6 +64,9 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; + MDB_cursor *m_txc_leaves; + MDB_cursor *m_txc_branches; + MDB_cursor *m_txc_txpool_meta; MDB_cursor *m_txc_txpool_blob; @@ -87,6 +90,8 @@ typedef struct mdb_txn_cursors #define m_cur_tx_indices m_cursors->m_txc_tx_indices #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys +#define m_cur_leaves 
m_cursors->m_txc_leaves +#define m_cur_branches m_cursors->m_txc_branches #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta #define m_cur_txpool_blob m_cursors->m_txc_txpool_blob #define m_cur_alt_blocks m_cursors->m_txc_alt_blocks @@ -109,6 +114,8 @@ typedef struct mdb_rflags bool m_rf_tx_indices; bool m_rf_tx_outputs; bool m_rf_spent_keys; + bool m_rf_leaves; + bool m_rf_branches; bool m_rf_txpool_meta; bool m_rf_txpool_blob; bool m_rf_alt_blocks; @@ -463,6 +470,9 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; + MDB_dbi m_leaves; + MDB_dbi m_branches; + MDB_dbi m_txpool_meta; MDB_dbi m_txpool_blob; diff --git a/src/crypto/crypto.cpp b/src/crypto/crypto.cpp index f6c94fa0393..00aedd288cc 100644 --- a/src/crypto/crypto.cpp +++ b/src/crypto/crypto.cpp @@ -618,6 +618,12 @@ namespace crypto { ge_p1p1_to_p3(&res, &point2); } + void crypto_ops::derive_key_image_generator(const public_key &pub, ec_point &ki_gen) { + ge_p3 point; + hash_to_ec(pub, point); + ge_p3_tobytes(&ki_gen, &point); + } + void crypto_ops::generate_key_image(const public_key &pub, const secret_key &sec, key_image &image) { ge_p3 point; ge_p2 point2; diff --git a/src/crypto/crypto.h b/src/crypto/crypto.h index 6b4126246d7..401af44c35d 100644 --- a/src/crypto/crypto.h +++ b/src/crypto/crypto.h @@ -145,6 +145,8 @@ namespace crypto { friend void generate_tx_proof_v1(const hash &, const public_key &, const public_key &, const boost::optional &, const public_key &, const secret_key &, signature &); static bool check_tx_proof(const hash &, const public_key &, const public_key &, const boost::optional &, const public_key &, const signature &, const int); friend bool check_tx_proof(const hash &, const public_key &, const public_key &, const boost::optional &, const public_key &, const signature &, const int); + static void derive_key_image_generator(const public_key &, ec_point &); + friend void derive_key_image_generator(const public_key &, ec_point &); static void 
generate_key_image(const public_key &, const secret_key &, key_image &); friend void generate_key_image(const public_key &, const secret_key &, key_image &); static void generate_ring_signature(const hash &, const key_image &, @@ -268,6 +270,10 @@ namespace crypto { return crypto_ops::check_tx_proof(prefix_hash, R, A, B, D, sig, version); } + inline void derive_key_image_generator(const public_key &pub, ec_point &ki_gen) { + crypto_ops::derive_key_image_generator(pub, ki_gen); + } + /* To send money to a key: * * The sender generates an ephemeral key and includes it in transaction output. * * To spend the money, the receiver generates a key image from it. diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt new file mode 100644 index 00000000000..d242361a8c2 --- /dev/null +++ b/src/fcmp/CMakeLists.txt @@ -0,0 +1,49 @@ +# Copyright (c) 2024, The Monero Project +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of +# conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be +# used to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +set(fcmp_sources + fcmp.cpp) + +monero_find_all_headers(fcmp_headers "${CMAKE_CURRENT_SOURCE_DIR}") + +add_subdirectory(fcmp_rust) + +monero_add_library_with_deps( + NAME fcmp + DEPENDS rust_cxx + SOURCES + ${fcmp_sources} + ${fcmp_headers}) + +target_link_libraries(fcmp + PUBLIC + crypto + epee + PRIVATE + fcmp_rust + ${EXTRA_LIBRARIES}) diff --git a/src/fcmp/fcmp.cpp b/src/fcmp/fcmp.cpp new file mode 100644 index 00000000000..de3c72ba730 --- /dev/null +++ b/src/fcmp/fcmp.cpp @@ -0,0 +1,86 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "fcmp.h" +#include "misc_log_ex.h" + +namespace fcmp +{ + +// TODO: move into its own fcmp_crypto file +static SeleneScalar ed_25519_point_to_selene_scalar(const crypto::ec_point &point) +{ + static_assert(sizeof(fcmp::RustEd25519Point) == sizeof(crypto::ec_point), + "expected same size ed25519 point to rust representation"); + + // TODO: implement reading just the x coordinate of ed25519 point in C/C++ + fcmp::RustEd25519Point rust_point; + memcpy(&rust_point, &point, sizeof(fcmp::RustEd25519Point)); + return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); +}; + +// TODO: move into its own fcmp_crypto file +LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) +{ + crypto::ec_point I; + crypto::derive_key_image_generator(O, I); + + return LeafTuple{ + .O_x = ed_25519_point_to_selene_scalar(O), + .I_x = ed_25519_point_to_selene_scalar(I), + .C_x = ed_25519_point_to_selene_scalar(C) + }; +} + +// TODO: move into its own fcmp_crypto file +std::vector flatten_leaves(const std::vector &leaves) +{ + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (const auto &l : leaves) + { + // TODO: 
implement without cloning + flattened_leaves.emplace_back(fcmp_rust::clone_selene_scalar(l.O_x)); + flattened_leaves.emplace_back(fcmp_rust::clone_selene_scalar(l.I_x)); + flattened_leaves.emplace_back(fcmp_rust::clone_selene_scalar(l.C_x)); + } + + return flattened_leaves; +}; + +SeleneScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const +{ + return fcmp_rust::helios_point_to_selene_scalar(point); +}; + +HeliosScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const +{ + return fcmp_rust::selene_point_to_helios_scalar(point); +}; +} //namespace fcmp diff --git a/src/fcmp/fcmp.h b/src/fcmp/fcmp.h new file mode 100644 index 00000000000..f56a013e7b1 --- /dev/null +++ b/src/fcmp/fcmp.h @@ -0,0 +1,921 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "crypto/crypto.h" +#include "fcmp_rust/cxx.h" +#include "fcmp_rust/fcmp_rust.h" +#include "misc_log_ex.h" +#include "string_tools.h" + +#include + +#include + +namespace fcmp +{ + using RustEd25519Point = std::array; + + // Need to forward declare Scalar types for point_to_cycle_scalar below + using SeleneScalar = rust::Box; + using HeliosScalar = rust::Box; + + static struct Helios final + { + using Generators = rust::Box; + using Scalar = HeliosScalar; + using Point = rust::Box; + using Chunk = rust::Slice; + + // TODO: static constants + const Generators GENERATORS = fcmp_rust::random_helios_generators(); + const Point HASH_INIT_POINT = fcmp_rust::random_helios_hash_init_point(); + + // TODO: use correct value + static const std::size_t WIDTH = 5; + + Point hash_grow( + const Generators &generators, + const Point &existing_hash, + const std::size_t offset, + const Chunk &prior_children, + const Chunk &new_children) const + { + return fcmp_rust::hash_grow_helios( + generators, + existing_hash, + offset, + prior_children, + new_children); + } + + SeleneScalar point_to_cycle_scalar(const Point &point) const; + + Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_helios_scalar(scalar); } + Point clone(const Point &point) const { return fcmp_rust::clone_helios_point(point); } + + Scalar zero_scalar() const { return fcmp_rust::helios_zero_scalar(); } + + std::array to_bytes(const Scalar &scalar) 
const + { return fcmp_rust::helios_scalar_to_bytes(scalar); } + std::array to_bytes(const Point &point) const + { return fcmp_rust::helios_point_to_bytes(point); } + + std::string to_string(const Scalar &scalar) const + { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } + std::string to_string(const Point &point) const + { return epee::string_tools::pod_to_hex(to_bytes(point)); } + } HELIOS; + + static struct Selene final + { + using Generators = rust::Box; + using Scalar = SeleneScalar; + using Point = rust::Box; + using Chunk = rust::Slice; + + // TODO: static constants + const Generators GENERATORS = fcmp_rust::random_selene_generators(); + const Point HASH_INIT_POINT = fcmp_rust::random_selene_hash_init_point(); + + // TODO: use correct value + static const std::size_t WIDTH = 5; + + Point hash_grow( + const Generators &generators, + const Point &existing_hash, + const std::size_t offset, + const Chunk &prior_children, + const Chunk &new_children) const + { + return fcmp_rust::hash_grow_selene( + generators, + existing_hash, + offset, + prior_children, + new_children); + }; + + HeliosScalar point_to_cycle_scalar(const Point &point) const; + + Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_selene_scalar(scalar); } + Point clone(const Point &point) const { return fcmp_rust::clone_selene_point(point); } + + Scalar zero_scalar() const { return fcmp_rust::selene_zero_scalar(); } + + std::array to_bytes(const Scalar &scalar) const + { return fcmp_rust::selene_scalar_to_bytes(scalar); } + std::array to_bytes(const Point &point) const + { return fcmp_rust::selene_point_to_bytes(point); } + + std::string to_string(const Scalar &scalar) const + { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } + std::string to_string(const Point &point) const + { return epee::string_tools::pod_to_hex(to_bytes(point)); } + } SELENE; + + // TODO: cleanly separate everything below into another file. 
This current file should strictly be for the rust interface + + // TODO: template all the curve things + + // TODO: Curve class + // TODO: CurveTree class instantiated with the curves and widths + + // TODO: template + struct LeafTuple final + { + Selene::Scalar O_x; + Selene::Scalar I_x; + Selene::Scalar C_x; + }; + static const std::size_t LEAF_TUPLE_SIZE = 3; + static const std::size_t LEAF_LAYER_CHUNK_SIZE = LEAF_TUPLE_SIZE * SELENE.WIDTH; + + // Tree structure + struct Leaves final + { + // Starting index in the leaf layer + std::size_t start_idx; + // Contiguous leaves in a tree that start at the start_idx + std::vector tuples; + }; + + // A layer of contiguous hashes starting from a specific start_idx in the tree + template + struct LayerExtension final + { + std::size_t start_idx; + std::vector hashes; + }; + + // A struct useful to extend an existing tree, layers alternate between C1 and C2 + template + struct TreeExtension final + { + Leaves leaves; + std::vector> c1_layer_extensions; + std::vector> c2_layer_extensions; + }; + + // Useful data from the last chunk in a layer + template + struct LastChunkData final + { + // The total number of children % child layer chunk size + /*TODO: const*/ std::size_t child_offset; + // The last child in the chunk (and therefore the last child in the child layer) + /*TODO: const*/ typename C::Scalar last_child; + // The hash of the last chunk of child scalars + /*TODO: const*/ typename C::Point last_parent; + // Total number of children in the child layer + /*TODO: const*/ std::size_t child_layer_size; + // Total number of hashes in the parent layer + /*TODO: const*/ std::size_t parent_layer_size; + }; + + template + struct LastChunks final + { + std::vector> c1_last_chunks; + std::vector> c2_last_chunks; + }; + + template + using Layer = std::vector; + + // A complete tree, useful for testing (can't fit the whole tree in memory otherwise) + // TODO: move this to just the testing + template + struct Tree final + { + 
std::vector leaves; + std::vector> c1_layers; + std::vector> c2_layers; + }; + + LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C); + std::vector flatten_leaves(const std::vector &leaves); + + // TODO: move into its own fcmp_crypto file + template + static void extend_scalars_from_cycle_points(const C_POINTS &curve, + const std::vector &points, + std::vector &scalars_out) + { + scalars_out.reserve(scalars_out.size() + points.size()); + + for (const auto &point : points) + { + // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ + typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); + scalars_out.push_back(std::move(scalar)); + } + } + + template + LastChunkData get_last_leaf_chunk(const C2 &c2, + const std::vector &leaves, + const std::vector &parent_layer) + { + CHECK_AND_ASSERT_THROW_MES(!leaves.empty(), "empty leaf layer"); + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty leaf parent layer"); + + const std::size_t child_offset = (leaves.size() * LEAF_TUPLE_SIZE) % LEAF_LAYER_CHUNK_SIZE; + + const typename C2::Scalar &last_child = leaves.back().C_x; + const typename C2::Point &last_parent = parent_layer.back(); + + return LastChunkData{ + .child_offset = child_offset, + .last_child = c2.clone(last_child), + .last_parent = c2.clone(last_parent), + .child_layer_size = leaves.size() * LEAF_TUPLE_SIZE, + .parent_layer_size = parent_layer.size() + }; + } + + template + LastChunkData get_last_child_layer_chunk(const C_CHILD &c_child, + const C_PARENT &c_parent, + const std::vector &child_layer, + const std::vector &parent_layer) + { + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "empty child layer"); + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty parent layer"); + + const std::size_t child_offset = child_layer.size() % c_parent.WIDTH; + + const typename C_CHILD::Point &last_child_point = child_layer.back(); + const typename C_PARENT::Scalar 
&last_child = c_child.point_to_cycle_scalar(last_child_point); + + const typename C_PARENT::Point &last_parent = parent_layer.back(); + + return LastChunkData{ + .child_offset = child_offset, + .last_child = c_parent.clone(last_child), + .last_parent = c_parent.clone(last_parent), + .child_layer_size = child_layer.size(), + .parent_layer_size = parent_layer.size() + }; + } + + // TODO: implement in the db, never want the entire tree in memory + template + LastChunks get_last_chunks(const C1 &c1, + const C2 &c2, + const Tree &tree) + { + // const bool valid = validate_tree(tree, C1, C2); + // CHECK_AND_ASSERT_THROW_MES(valid, "invalid tree"); + + const auto &leaves = tree.leaves; + const auto &c1_layers = tree.c1_layers; + const auto &c2_layers = tree.c2_layers; + + LastChunks last_chunks; + last_chunks.c1_last_chunks.reserve(c1_layers.size()); + last_chunks.c2_last_chunks.reserve(c2_layers.size()); + + // First push the last leaf chunk data into c2 chunks + CHECK_AND_ASSERT_THROW_MES(!c2_layers.empty(), "empty curve 2 layers"); + auto last_leaf_chunk = get_last_leaf_chunk(c2, + leaves, + c2_layers[0]); + last_chunks.c2_last_chunks.push_back(std::move(last_leaf_chunk)); + + // Next parents will be c1 + bool parent_is_c1 = true; + + // Since we started with c2, the number of c2 layers should be == c1_layers.size() || (c1_layers.size() + 1) + const std::size_t num_layers = c2_layers.size(); + CHECK_AND_ASSERT_THROW_MES(num_layers == c1_layers.size() || num_layers == (c1_layers.size() + 1), + "unexpected number of curve layers"); + + // If there are no c1 layers, we're done + if (c1_layers.empty()) + return last_chunks; + + // Then get last chunks up until the root + for (std::size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) + { + CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > layer_idx, "missing c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > layer_idx, "missing c2 layer"); + + // TODO: template the below if statement into another function + if 
(parent_is_c1) + { + const Layer &child_layer = c2_layers[layer_idx]; + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); + + const Layer &parent_layer = c1_layers[layer_idx]; + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); + + auto last_parent_chunk = get_last_child_layer_chunk(c2, + c1, + child_layer, + parent_layer); + + last_chunks.c1_last_chunks.push_back(std::move(last_parent_chunk)); + } + else + { + const Layer &child_layer = c1_layers[layer_idx]; + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); + + const Layer &parent_layer = c2_layers[layer_idx]; + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); + + auto last_parent_chunk = get_last_child_layer_chunk(c1, + c2, + child_layer, + parent_layer); + + last_chunks.c2_last_chunks.push_back(std::move(last_parent_chunk)); + } + + // Alternate curves every iteration + parent_is_c1 = !parent_is_c1; + } + + return last_chunks; + } + + template + static void extend_zeroes(const C &curve, + const std::size_t num_zeroes, + std::vector &zeroes_inout) + { + zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); + + for (std::size_t i = 0; i < num_zeroes; ++i) + zeroes_inout.emplace_back(curve.zero_scalar()); + } + + template + static typename C::Point get_new_parent(const C &curve, + const typename C::Chunk &new_children) + { + // New parent means no prior children, fill priors with 0 + std::vector prior_children; + extend_zeroes(curve, new_children.size(), prior_children); + + return curve.hash_grow( + curve.GENERATORS, + curve.HASH_INIT_POINT, + 0,/*offset*/ + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); + } + + template + static typename C::Point get_first_leaf_parent(const C &curve, + const typename C::Chunk &new_children, + const LastChunkData *last_chunk_ptr) + { + // If no last chunk exists, or if the last chunk is already full, then we can get a new parent + if 
(last_chunk_ptr == nullptr || last_chunk_ptr->child_offset == 0) + return get_new_parent(curve, new_children); + + // There won't be any existing children when growing the leaf layer, fill priors with 0 + std::vector prior_children; + extend_zeroes(curve, new_children.size(), prior_children); + + return curve.hash_grow( + curve.GENERATORS, + last_chunk_ptr->last_parent, + last_chunk_ptr->child_offset, + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); + } + + template + static typename C::Point get_first_non_leaf_parent(const C &curve, + const typename C::Chunk &new_children, + const bool child_layer_last_hash_updated, + const LastChunkData *last_chunk_ptr) + { + // If no last chunk exists, we can get a new parent + if (last_chunk_ptr == nullptr) + return get_new_parent(curve, new_children); + + std::vector prior_children; + std::size_t offset = last_chunk_ptr->child_offset; + + if (child_layer_last_hash_updated) + { + // If the last chunk has updated children in it, then we need to get the delta to the old children, and + // subtract the offset by 1 since we're updating the prior last hash + prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); + offset = offset > 0 ? 
(offset - 1) : (curve.WIDTH - 1); + + // Extend prior children by zeroes for any additional new children, since they must be new + if (new_children.size() > 1) + extend_zeroes(curve, new_children.size() - 1, prior_children); + } + else if (offset > 0) + { + // If we're updating the parent hash and no children were updated, then we're just adding new children + // to the existing last chunk and can fill priors with 0 + extend_zeroes(curve, new_children.size(), prior_children); + } + else + { + // If the last chunk is already full and isn't updated in any way, then we just get a new parent + return get_new_parent(curve, new_children); + } + + return curve.hash_grow( + curve.GENERATORS, + last_chunk_ptr->last_parent, + offset, + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); + } + + template + void hash_layer(const C_CHILD &c_child, + const C_PARENT &c_parent, + const LastChunkData *last_child_chunk_ptr, + const LastChunkData *last_parent_chunk_ptr, + const LayerExtension &children, + LayerExtension &parents_out) + { + parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); + + CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); + + const std::size_t max_chunk_size = c_parent.WIDTH; + std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->child_offset; + + // TODO: work through all edge cases, then try to simplify the approach to avoid them + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // If the child layer had its existing last hash updated, then we need to update the existing last parent + // hash in this layer as well + bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) + ? false + : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); + + if (offset == 0 && child_layer_last_hash_updated) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent + CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); + if (child_layer_last_hash_updated) + offset = offset > 0 ? (offset - 1) : (max_chunk_size - 1); + + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + std::vector child_scalars; + if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) + { + MDEBUG("Here I have captured what I want to capture... 
children.start_idx: " << children.start_idx + << " , children.hashes.size(): " << children.hashes.size() << " , max_chunk_size: " << max_chunk_size); + + // We should be updating the existing root, there shouldn't be a last parent chunk + CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + + // If the children don't already include the existing root at start_idx 0 (they would if the existing + // root was updated in the child layer), then we need to add it to the first chunk to be hashed + if (children.start_idx > 0) + child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + } + + // Convert child points to scalars + extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); + + // See how many children we need to fill up the existing last chunk + std::size_t chunk_size = std::min(child_scalars.size(), max_chunk_size - offset); + MDEBUG("Starting chunk_size: " << chunk_size << " , child_scalars.size(): " << child_scalars.size() << " , offset: " << offset); + + // Hash chunks of child scalars to create the parent hashes + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < child_scalars.size()) + { + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + + for (const auto &c : chunk) + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " : " << c_parent.to_string(c)); + + // Hash the chunk of children + typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 + ? 
get_first_non_leaf_parent(c_parent, chunk, child_layer_last_hash_updated, last_parent_chunk_ptr) + : get_new_parent(c_parent, chunk); + + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash)); + + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + // Prepare for next loop if there should be one + if (chunk_start_idx == child_scalars.size()) + break; + + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); + chunk_size = std::min(max_chunk_size, child_scalars.size() - chunk_start_idx); + } + } + + template + void hash_leaf_layer(const C2 &c2, + const LastChunkData *last_chunk_ptr, + const Leaves &leaves, + LayerExtension &parents_out) + { + parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); + + if (leaves.tuples.empty()) + return; + + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] + const std::vector children = fcmp::flatten_leaves(leaves.tuples); + + const std::size_t max_chunk_size = LEAF_LAYER_CHUNK_SIZE; + const std::size_t offset = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->child_offset; + + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // See how many new children are needed to fill up the existing last chunk + CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); + std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); + + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < children.size()) + { + const auto chunk_start = children.data() + chunk_start_idx; + const typename C2::Chunk chunk{chunk_start, chunk_size}; + + for (const auto &c : chunk) + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " : " << c2.to_string(c)); + + // Hash the chunk of children + typename C2::Point chunk_hash = chunk_start_idx == 0 + ? 
get_first_leaf_parent(c2, chunk, last_chunk_ptr) + : get_new_parent(c2, chunk); + + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c2.to_string(chunk_hash) << " , chunk_size: " << chunk_size); + + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + // Prepare for next loop if there should be one + if (chunk_start_idx == children.size()) + break; + + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < children.size(), "unexpected chunk start idx"); + chunk_size = std::min(max_chunk_size, children.size() - chunk_start_idx); + } + } + + template + TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, + const Leaves &new_leaves, + const C1 &c1, + const C2 &c2) + { + TreeExtension tree_extension; + + if (new_leaves.tuples.empty()) + return tree_extension; + + const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; + const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; + + // Set the leaf start idx + tree_extension.leaves.start_idx = c2_last_chunks.empty() + ? 0 + : c2_last_chunks[0].child_layer_size; + + // Copy the leaves + // TODO: don't copy here + tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); + for (const auto &leaf : new_leaves.tuples) + { + tree_extension.leaves.tuples.emplace_back(LeafTuple{ + .O_x = SELENE.clone(leaf.O_x), + .I_x = SELENE.clone(leaf.I_x), + .C_x = SELENE.clone(leaf.C_x) + }); + } + + auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; + + // Hash the leaf layer + LayerExtension parents; + hash_leaf_layer(c2, + c2_last_chunks.empty() ? 
nullptr : &c2_last_chunks[0], + new_leaves, + parents); + + c2_layer_extensions_out.emplace_back(std::move(parents)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + // Alternate between hashing c2 children, c1 children, c2, c1, ... + bool parent_is_c1 = true; + + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; + // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) + while (true) + { + if (parent_is_c1) + { + CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + + LayerExtension c1_layer_extension; + fcmp::hash_layer(c2, + c1, + (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], + (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], + c2_layer_extensions_out[c2_last_idx], + c1_layer_extension); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + + // Check if we just added the root + if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c2_last_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + + LayerExtension c2_layer_extension; + fcmp::hash_layer(c1, + c2, + (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], + (c2_last_chunks.size() <= c2_last_idx) ? 
nullptr : &c2_last_chunks[c2_last_idx], + c1_layer_extensions_out[c1_last_idx], + c2_layer_extension); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c1_last_idx; + } + + parent_is_c1 = !parent_is_c1; + } + } + + // TODO: this is only useful for testsing, can't fit entire tree in memory + template + void extend_tree(const TreeExtension &tree_extension, + const C1 &c1, + const C2 &c2, + Tree &tree_inout) + { + // Add the leaves + CHECK_AND_ASSERT_THROW_MES((tree_inout.leaves.size() * LEAF_TUPLE_SIZE) == tree_extension.leaves.start_idx, + "unexpected leaf start idx"); + + tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); + for (const auto &leaf : tree_extension.leaves.tuples) + { + tree_inout.leaves.emplace_back(LeafTuple{ + .O_x = c2.clone(leaf.O_x), + .I_x = c2.clone(leaf.I_x), + .C_x = c2.clone(leaf.C_x) + }); + } + + // Add the layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); + const LayerExtension &c2_ext = c2_extensions[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c2_idx <= tree_inout.c2_layers.size(), "missing c2 layer"); + if (tree_inout.c2_layers.size() == c2_idx) + tree_inout.c2_layers.emplace_back(Layer{}); + + auto &c2_inout = tree_inout.c2_layers[c2_idx]; + + const bool started_after_tip = (c2_inout.size() 
== c2_ext.start_idx); + const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start"); + + // We updated the last hash + if (started_at_tip) + c2_inout.back() = c2.clone(c2_ext.hashes.front()); + + for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i) + c2_inout.emplace_back(c2.clone(c2_ext.hashes[i])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); + const fcmp::LayerExtension &c1_ext = c1_extensions[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c1_idx <= tree_inout.c1_layers.size(), "missing c1 layer"); + if (tree_inout.c1_layers.size() == c1_idx) + tree_inout.c1_layers.emplace_back(Layer{}); + + auto &c1_inout = tree_inout.c1_layers[c1_idx]; + + const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); + const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start"); + + // We updated the last hash + if (started_at_tip) + c1_inout.back() = c1.clone(c1_ext.hashes.front()); + + for (std::size_t i = started_at_tip ? 
1 : 0; i < c1_ext.hashes.size(); ++i) + c1_inout.emplace_back(c1.clone(c1_ext.hashes[i])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } + + // existing tree should be valid + // TODO: only do this in debug build + // assert(validate_tree(existing_tree_inout, c1, c2)); + } + + template + bool validate_layer(const C_PARENT &c_parent, + const C_CHILD &c_child, + const Layer &parents, + const Layer &children) + { + // Get scalar representation of children + std::vector child_scalars; + extend_scalars_from_cycle_points(c_child, children, child_scalars); + + const std::size_t max_chunk_size = c_parent.WIDTH; + + // Hash chunk of children scalars, then see if the hash matches up to respective parent + std::size_t chunk_start_idx = 0; + for (std::size_t i = 0; i < parents.size(); ++i) + { + CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); + const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); + CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); + + const typename C_PARENT::Point &parent = parents[i]; + + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + + const typename C_PARENT::Point chunk_hash = get_new_parent(c_parent, chunk); + + const auto actual_bytes = c_parent.to_bytes(parent); + const auto expected_bytes = c_parent.to_bytes(chunk_hash); + CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); + + chunk_start_idx += chunk_size; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); + + return true; + } + + template + bool validate_tree(const Tree &tree, const C1 &c1, const C2 &c2) + { + const auto &leaves = tree.leaves; + const auto &c1_layers = tree.c1_layers; + const auto &c2_layers = tree.c2_layers; + + CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must 
have at least 1 leaf in tree"); + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); + CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + false, "unexpected mismatch of c2 and c1 layers"); + + // Verify root has 1 member in it + const bool c2_is_root = c2_layers.size() > c1_layers.size(); + CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, + "root must have 1 member in it"); + + // Iterate from root down to layer above leaves, and check hashes match up correctly + bool parent_is_c2 = c2_is_root; + std::size_t c2_idx = c2_layers.size() - 1; + std::size_t c1_idx = c1_layers.empty() ? 0 : (c1_layers.size() - 1); + for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) + { + if (parent_is_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + + const Layer &parents = c2_layers[c2_idx]; + const Layer &children = c1_layers[c1_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); + + const bool valid = validate_layer(c2, c1, parents, children); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + + --c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + + const Layer &parents = c1_layers[c1_idx]; + const Layer &children = c2_layers[c2_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); + + const bool valid = validate_layer(c1, c2, 
parents, children); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + + --c1_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + // // Now validate leaves + // return validate_leaves(c2, layers[0], leaves); + return true; + } +} diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt new file mode 100644 index 00000000000..e14564be023 --- /dev/null +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -0,0 +1,70 @@ +# Copyright (c) 2016-2024, The Monero Project +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of +# conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be +# used to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +if (CMAKE_BUILD_TYPE STREQUAL "Debug") + set(CARGO_CMD cargo build) + set(TARGET_DIR "debug") +else () + set(CARGO_CMD cargo build --release) + set(TARGET_DIR "release") +endif () + +set(FCMP_RUST_CXX "${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust.cc") +set(FCMP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_rust") +set(FCMP_RUST_HEADER "${FCMP_RUST_HEADER_DIR}/fcmp_rust.h") +set(CXX_HEADER "${FCMP_RUST_HEADER_DIR}/cxx.h") + +# Removing OUTPUT files makes sure custom command runs every time +file(REMOVE_RECURSE "${FCMP_RUST_CXX}") +file(REMOVE_RECURSE "${FCMP_RUST_HEADER_DIR}") +file(MAKE_DIRECTORY "${FCMP_RUST_HEADER_DIR}") + +add_custom_command( + COMMENT "Building rust fcmp lib" + OUTPUT ${FCMP_RUST_CXX} ${FCMP_RUST_HEADER} ${CXX_HEADER} + COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/cxxbridge/fcmp_rust/src/lib.rs.cc ${FCMP_RUST_CXX} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/cxxbridge/fcmp_rust/src/lib.rs.h ${FCMP_RUST_HEADER} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/cxxbridge/rust/cxx.h ${CXX_HEADER} + COMMAND echo "Finished copying fcmp rust targets" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + VERBATIM +) + +add_custom_target(rust_cxx ALL DEPENDS ${CXX_HEADER}) + +set(fcmp_rust_sources ${FCMP_RUST_CXX}) + +monero_find_all_headers(fcmp_rust_headers "${FCMP_RUST_HEADER_DIR}") + +monero_add_library(fcmp_rust + ${fcmp_rust_sources} + ${fcmp_rust_headers}) + 
+set(FCMP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_DIR}/libfcmp_rust.a") +target_link_libraries(fcmp_rust dl ${FCMP_RUST_LIB}) diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp/fcmp_rust/Cargo.toml new file mode 100644 index 00000000000..d5052aac72f --- /dev/null +++ b/src/fcmp/fcmp_rust/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "fcmp_rust" +version = "0.0.0" +edition = "2021" + +[lib] +name = "fcmp_rust" +crate-type = ["staticlib"] + +[dependencies] +cxx = "1.0" +full-chain-membership-proofs = { path = "../../../external/fcmp-plus-plus/crypto/fcmps" } +ciphersuite = { path = "../../../external/fcmp-plus-plus/crypto/ciphersuite", features = ["helioselene", "ed25519"] } + +ec-divisors = { path = "../../../external/fcmp-plus-plus/crypto/divisors", features = ["ed25519"] } +rand_core = { version = "0.6", features = ["getrandom"] } +transcript = { package = "flexible-transcript", path = "../../../external/fcmp-plus-plus/crypto/transcript", features = ["recommended"] } +generalized-bulletproofs = { path = "../../../external/fcmp-plus-plus/crypto/generalized-bulletproofs", features = ["tests"] } + +[build-dependencies] +cxx-build = "1.0" \ No newline at end of file diff --git a/src/fcmp/fcmp_rust/build.rs b/src/fcmp/fcmp_rust/build.rs new file mode 100644 index 00000000000..0b216f6d3cf --- /dev/null +++ b/src/fcmp/fcmp_rust/build.rs @@ -0,0 +1,5 @@ +fn main() { + let _ = cxx_build::bridge("src/lib.rs"); + + println!("cargo:rerun-if-changed=src/lib.rs"); +} \ No newline at end of file diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs new file mode 100644 index 00000000000..c2efffa6a10 --- /dev/null +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -0,0 +1,226 @@ +use rand_core::OsRng; + +use std::io; + +use full_chain_membership_proofs::tree::hash_grow; + +use transcript::RecommendedTranscript; + +use ciphersuite::{group::{Group, GroupEncoding, ff::{PrimeField, Field}}, Ciphersuite, Ed25519, Selene, Helios}; + +use ec_divisors::DivisorCurve; + 
+use generalized_bulletproofs::Generators; + +// TODO: lint +#[cxx::bridge] +mod ffi { + // Rust types and signatures exposed to C++. + #[namespace = "fcmp_rust"] + extern "Rust" { + // TODO: Separate Helios and Selene namespaces + type HeliosGenerators; + type HeliosPoint; + type HeliosScalar; + + type SeleneGenerators; + type SelenePoint; + type SeleneScalar; + + fn random_helios_generators() -> Box; + fn random_helios_hash_init_point() -> Box; + + fn random_selene_generators() -> Box; + fn random_selene_hash_init_point() -> Box; + + fn clone_helios_scalar(helios_scalar: &Box) -> Box; + fn clone_selene_scalar(selene_scalar: &Box) -> Box; + fn clone_helios_point(helios_point: &Box) -> Box; + fn clone_selene_point(selene_point: &Box) -> Box; + + fn helios_scalar_to_bytes(helios_scalar: &Box) -> [u8; 32]; + fn selene_scalar_to_bytes(selene_scalar: &Box) -> [u8; 32]; + fn helios_point_to_bytes(helios_point: &Box) -> [u8; 32]; + fn selene_point_to_bytes(selene_point: &Box) -> [u8; 32]; + + fn ed25519_point_to_selene_scalar(ed25519_point: &[u8; 32]) -> Box; + fn selene_point_to_helios_scalar(selene_point: &Box) -> Box; + fn helios_point_to_selene_scalar(helios_point: &Box) -> Box; + + fn helios_zero_scalar() -> Box; + fn selene_zero_scalar() -> Box; + + pub fn hash_grow_helios( + helios_generators: &Box, + existing_hash: &Box, + offset: usize, + prior_children: &[Box], + new_children: &[Box] + ) -> Result>; + + pub fn hash_grow_selene( + selene_generators: &Box, + existing_hash: &Box, + offset: usize, + prior_children: &[Box], + new_children: &[Box] + ) -> Result>; + } +} + +// TODO: cleaner const usage of generators +// TODO: try to get closer to underlying types +// TODO: maybe don't do both tuple and Box? 
Just make these all boxes +pub struct HeliosGenerators(Generators); +pub struct HeliosPoint(::G); +pub struct HeliosScalar(::F); + +pub struct SeleneGenerators(Generators); +pub struct SelenePoint(::G); +pub struct SeleneScalar(::F); + +#[allow(non_snake_case)] +pub fn random_helios_generators() -> Box { + let helios_generators = generalized_bulletproofs::tests::generators::(512); + Box::new(HeliosGenerators(helios_generators)) +} + +#[allow(non_snake_case)] +pub fn random_selene_generators() -> Box { + let selene_generators = generalized_bulletproofs::tests::generators::(512); + Box::new(SeleneGenerators(selene_generators)) +} + +#[allow(non_snake_case)] +pub fn random_helios_hash_init_point() -> Box { + let helios_hash_init_point = ::G::random(&mut OsRng); + dbg!(&helios_hash_init_point); + Box::new(HeliosPoint(helios_hash_init_point)) +} + +#[allow(non_snake_case)] +pub fn random_selene_hash_init_point() -> Box { + let selene_hash_init_point = ::G::random(&mut OsRng); + dbg!(&selene_hash_init_point); + Box::new(SelenePoint(selene_hash_init_point)) +} + +// TODO: should be able to use generics +// TODO: shorter names +pub fn clone_helios_scalar(helios_scalar: &Box) -> Box { + Box::new(HeliosScalar(helios_scalar.0)) +} + +pub fn clone_selene_scalar(selene_scalar: &Box) -> Box { + Box::new(SeleneScalar(selene_scalar.0)) +} + +pub fn clone_helios_point(helios_point: &Box) -> Box { + Box::new(HeliosPoint(helios_point.0)) +} + +pub fn clone_selene_point(selene_point: &Box) -> Box { + Box::new(SelenePoint(selene_point.0)) +} + +// TODO: generics +pub fn helios_scalar_to_bytes(helios_scalar: &Box) -> [u8; 32] { + helios_scalar.0.to_repr() +} + +pub fn selene_scalar_to_bytes(selene_scalar: &Box) -> [u8; 32] { + selene_scalar.0.to_repr() +} + +pub fn helios_point_to_bytes(helios_point: &Box) -> [u8; 32] { + helios_point.0.to_bytes() +} + +pub fn selene_point_to_bytes(selene_point: &Box) -> [u8; 32] { + selene_point.0.to_bytes() +} + +// Get the x coordinate of the ed25519 
point +// TODO: use generics for below logic +pub fn ed25519_point_to_selene_scalar(ed25519_point: &[u8; 32]) -> Box { + // TODO: unwrap or else error + let ed25519_point = ::read_G(&mut ed25519_point.as_slice()).unwrap(); + + let xy_coords = ::G::to_xy(ed25519_point); + let x: ::F = xy_coords.0; + Box::new(SeleneScalar(x)) +} + +// TODO: use generics for below logic +pub fn selene_point_to_helios_scalar(selene_point: &Box) -> Box { + let xy_coords = ::G::to_xy(selene_point.0); + let x: ::F = xy_coords.0; + Box::new(HeliosScalar(x)) +} + +// TODO: use generics for below logic +pub fn helios_point_to_selene_scalar(helios_point: &Box) -> Box { + let xy_coords = ::G::to_xy(helios_point.0); + let x: ::F = xy_coords.0; + Box::new(SeleneScalar(x)) +} + +pub fn helios_zero_scalar() -> Box { + Box::new(HeliosScalar(::F::ZERO)) +} + +pub fn selene_zero_scalar() -> Box { + Box::new(SeleneScalar(::F::ZERO)) +} + +// TODO: use generics for curves +pub fn hash_grow_helios( + helios_generators: &Box, + existing_hash: &Box, + offset: usize, + prior_children: &[Box], + new_children: &[Box] +) -> Result, io::Error> { + let prior_children = prior_children.iter().map(|c| c.0).collect::>(); + let new_children = new_children.iter().map(|c| c.0).collect::>(); + + let hash = hash_grow( + &helios_generators.0, + existing_hash.0, + offset, + &prior_children, + &new_children + ); + + if let Some(hash) = hash { + Ok(Box::new(HeliosPoint(hash))) + } else { + Err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) + } +} + +// TODO: use generics for curves +pub fn hash_grow_selene( + selene_generators: &Box, + existing_hash: &Box, + offset: usize, + prior_children: &[Box], + new_children: &[Box] +) -> Result, io::Error> { + let prior_children = prior_children.iter().map(|c| c.0).collect::>(); + let new_children = new_children.iter().map(|c| c.0).collect::>(); + + let hash = hash_grow( + &selene_generators.0, + existing_hash.0, + offset, + &prior_children, + &new_children + ); + + if 
let Some(hash) = hash { + Ok(Box::new(SelenePoint(hash))) + } else { + Err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) + } +} diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index 8659b0ed07a..cdc188e1620 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -51,6 +51,7 @@ set(unit_tests_sources epee_serialization.cpp epee_utils.cpp expect.cpp + fcmp_tree.cpp json_serialization.cpp get_xtype_from_string.cpp hashchain.cpp @@ -113,11 +114,13 @@ monero_add_minimal_executable(unit_tests target_link_libraries(unit_tests PRIVATE ringct + crypto cryptonote_protocol cryptonote_core daemon_messages daemon_rpc_server blockchain_db + fcmp lmdb_lib rpc net diff --git a/tests/unit_tests/fcmp_tree.cpp b/tests/unit_tests/fcmp_tree.cpp new file mode 100644 index 00000000000..2d4ddbe1ca6 --- /dev/null +++ b/tests/unit_tests/fcmp_tree.cpp @@ -0,0 +1,304 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "gtest/gtest.h" + +#include "fcmp/fcmp.h" +#include "misc_log_ex.h" + +#include + +static const fcmp::Leaves generate_leaves(const std::size_t num_leaves) +{ + std::vector tuples; + tuples.reserve(num_leaves); + + for (std::size_t i = 0; i < num_leaves; ++i) + { + // Generate random output tuple + crypto::secret_key o,c; + crypto::public_key O,C; + crypto::generate_keys(O, o, o, false); + crypto::generate_keys(C, c, c, false); + + tuples.emplace_back(fcmp::output_to_leaf_tuple(O, C)); + } + + return fcmp::Leaves{ + .start_idx = 0, + .tuples = std::move(tuples) + }; +} + +static void log_tree_extension(const fcmp::TreeExtension &tree_extension) +{ + const auto &c1_extensions = tree_extension.c1_layer_extensions; + const auto &c2_extensions = tree_extension.c2_layer_extensions; + + MDEBUG("Tree extension has " << tree_extension.leaves.tuples.size() << " leaves, " + << c1_extensions.size() << " helios layers, " << c2_extensions.size() << " selene layers"); + + MDEBUG("Leaf start idx: " << tree_extension.leaves.start_idx); + for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) + { + const auto &leaf = tree_extension.leaves.tuples[i]; + + const auto O_x = fcmp::SELENE.to_string(leaf.O_x); + const auto I_x = fcmp::SELENE.to_string(leaf.I_x); + const auto C_x = fcmp::SELENE.to_string(leaf.C_x); + + MDEBUG("Leaf idx " << ((i*fcmp::LEAF_TUPLE_SIZE) + tree_extension.leaves.start_idx) << " : { O_x: " << O_x << " , I_x: 
" << I_x << " , C_x: " << C_x << " }"); + } + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (c1_extensions.size() + c2_extensions.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer"); + + const fcmp::LayerExtension &c2_layer = c2_extensions[c2_idx]; + MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); + + for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) + MDEBUG("Hash idx: " << (j + c2_layer.start_idx) << " , hash: " << fcmp::SELENE.to_string(c2_layer.hashes[j])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer"); + + const fcmp::LayerExtension &c1_layer = c1_extensions[c1_idx]; + MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); + + for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) + MDEBUG("Hash idx: " << (j + c1_layer.start_idx) << " , hash: " << fcmp::HELIOS.to_string(c1_layer.hashes[j])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + +static void log_tree(const fcmp::Tree &tree) +{ + MDEBUG("Tree has " << tree.leaves.size() << " leaves, " + << tree.c1_layers.size() << " helios layers, " << tree.c2_layers.size() << " selene layers"); + + for (std::size_t i = 0; i < tree.leaves.size(); ++i) + { + const auto &leaf = tree.leaves[i]; + + const auto O_x = fcmp::SELENE.to_string(leaf.O_x); + const auto I_x = fcmp::SELENE.to_string(leaf.I_x); + const auto C_x = fcmp::SELENE.to_string(leaf.C_x); + + MDEBUG("Leaf idx " << i << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); + } + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (tree.c1_layers.size() + tree.c2_layers.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < tree.c2_layers.size(), "unexpected c2 layer"); + + const fcmp::Layer &c2_layer = tree.c2_layers[c2_idx]; + MDEBUG("Selene 
layer size: " << c2_layer.size() << " , tree layer: " << i); + + for (std::size_t j = 0; j < c2_layer.size(); ++j) + MDEBUG("Hash idx: " << j << " , hash: " << fcmp::SELENE.to_string(c2_layer[j])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < tree.c1_layers.size(), "unexpected c1 layer"); + + const fcmp::Layer &c1_layer = tree.c1_layers[c1_idx]; + MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); + + for (std::size_t j = 0; j < c1_layer.size(); ++j) + MDEBUG("Hash idx: " << j << " , hash: " << fcmp::HELIOS.to_string(c1_layer[j])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + +static void log_last_chunks(const fcmp::LastChunks &last_chunks) +{ + const auto &c1_last_chunks = last_chunks.c1_last_chunks; + const auto &c2_last_chunks = last_chunks.c2_last_chunks; + + MDEBUG("Total of " << c1_last_chunks.size() << " Helios last chunks and " + << c2_last_chunks.size() << " Selene last chunks"); + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (c1_last_chunks.size() + c2_last_chunks.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_chunks.size(), "unexpected c2 layer"); + + const fcmp::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; + + MDEBUG("child_offset: " << last_chunk.child_offset + << " , last_child: " << fcmp::SELENE.to_string(last_chunk.last_child) + << " , last_parent: " << fcmp::SELENE.to_string(last_chunk.last_parent) + << " , child_layer_size: " << last_chunk.child_layer_size + << " , parent_layer_size: " << last_chunk.parent_layer_size); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_chunks.size(), "unexpected c1 layer"); + + const fcmp::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; + + MDEBUG("child_offset: " << last_chunk.child_offset + << " , last_child: " << fcmp::HELIOS.to_string(last_chunk.last_child) + << " , last_parent: " << fcmp::HELIOS.to_string(last_chunk.last_parent) + 
<< " , child_layer_size: " << last_chunk.child_layer_size + << " , parent_layer_size: " << last_chunk.parent_layer_size); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + +TEST(fcmp_tree, grow_tree) +{ + // TODO: 1 .. std::pow(fcmp::SELENE.WIDTH, 5)+2 + const std::vector N_LEAVES{ + 1, + 2, + 3, + fcmp::SELENE.WIDTH - 1, + fcmp::SELENE.WIDTH, + fcmp::SELENE.WIDTH + 1, + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2) - 1, + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2), + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2) + 1, + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 3) + // (std::size_t)std::pow(fcmp::SELENE.WIDTH, 4), + // (std::size_t)std::pow(fcmp::SELENE.WIDTH, 5) + }; + + for (const auto &init_leaves : N_LEAVES) + { + for (const auto &ext_leaves : N_LEAVES) + { + MDEBUG("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); + + fcmp::Tree global_tree; + + // TODO: use a class that's initialized with the curve cycle and don't need to call templated functions with curve instances every time + + // Initially extend global tree by `init_leaves` + { + MDEBUG("Adding " << init_leaves << " leaves to tree"); + + const auto tree_extension = fcmp::get_tree_extension( + fcmp::LastChunks{}, + generate_leaves(init_leaves), + fcmp::HELIOS, + fcmp::SELENE); + + log_tree_extension(tree_extension); + + fcmp::extend_tree( + tree_extension, + fcmp::HELIOS, + fcmp::SELENE, + global_tree); + + log_tree(global_tree); + + const bool validated = fcmp::validate_tree( + global_tree, + fcmp::HELIOS, + fcmp::SELENE); + + ASSERT_TRUE(validated); + + MDEBUG("Successfully added initial " << init_leaves << " leaves to tree"); + } + + // Then extend the global tree again by `ext_leaves` + { + MDEBUG("Extending tree by " << ext_leaves << " leaves"); + + const fcmp::LastChunks &last_chunks = fcmp::get_last_chunks( + fcmp::HELIOS, + fcmp::SELENE, + global_tree); + + log_last_chunks(last_chunks); + + const auto tree_extension = fcmp::get_tree_extension( + 
last_chunks, + generate_leaves(ext_leaves), + fcmp::HELIOS, + fcmp::SELENE); + + log_tree_extension(tree_extension); + + fcmp::extend_tree( + tree_extension, + fcmp::HELIOS, + fcmp::SELENE, + global_tree); + + log_tree(global_tree); + + const bool validated = fcmp::validate_tree( + global_tree, + fcmp::HELIOS, + fcmp::SELENE); + + ASSERT_TRUE(validated); + + MDEBUG("Successfully extended by " << ext_leaves << " leaves"); + } + } + } +} From 1ba876bcc20f512bf1a735b344c383737667e3c2 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 17 May 2024 00:22:50 -0700 Subject: [PATCH 002/127] remove whitespaces --- src/fcmp/fcmp.h | 12 ++++++------ tests/unit_tests/fcmp_tree.cpp | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/fcmp/fcmp.h b/src/fcmp/fcmp.h index f56a013e7b1..4d678077dd0 100644 --- a/src/fcmp/fcmp.h +++ b/src/fcmp/fcmp.h @@ -162,7 +162,7 @@ namespace fcmp { // Starting index in the leaf layer std::size_t start_idx; - // Contiguous leaves in a tree that start at the start_idx + // Contiguous leaves in a tree that start at the start_idx std::vector tuples; }; @@ -179,7 +179,7 @@ namespace fcmp struct TreeExtension final { Leaves leaves; - std::vector> c1_layer_extensions; + std::vector> c1_layer_extensions; std::vector> c2_layer_extensions; }; @@ -313,7 +313,7 @@ namespace fcmp bool parent_is_c1 = true; // Since we started with c2, the number of c2 layers should be == c1_layers.size() || (c1_layers.size() + 1) - const std::size_t num_layers = c2_layers.size(); + const std::size_t num_layers = c2_layers.size(); CHECK_AND_ASSERT_THROW_MES(num_layers == c1_layers.size() || num_layers == (c1_layers.size() + 1), "unexpected number of curve layers"); @@ -405,7 +405,7 @@ namespace fcmp // There won't be any existing children when growing the leaf layer, fill priors with 0 std::vector prior_children; - extend_zeroes(curve, new_children.size(), prior_children); + extend_zeroes(curve, new_children.size(), prior_children); return 
curve.hash_grow( curve.GENERATORS, @@ -764,7 +764,7 @@ namespace fcmp if (tree_inout.c2_layers.size() == c2_idx) tree_inout.c2_layers.emplace_back(Layer{}); - auto &c2_inout = tree_inout.c2_layers[c2_idx]; + auto &c2_inout = tree_inout.c2_layers[c2_idx]; const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx); const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); @@ -790,7 +790,7 @@ namespace fcmp if (tree_inout.c1_layers.size() == c1_idx) tree_inout.c1_layers.emplace_back(Layer{}); - auto &c1_inout = tree_inout.c1_layers[c1_idx]; + auto &c1_inout = tree_inout.c1_layers[c1_idx]; const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); diff --git a/tests/unit_tests/fcmp_tree.cpp b/tests/unit_tests/fcmp_tree.cpp index 2d4ddbe1ca6..22809e1a94d 100644 --- a/tests/unit_tests/fcmp_tree.cpp +++ b/tests/unit_tests/fcmp_tree.cpp @@ -84,7 +84,7 @@ static void log_tree_extension(const fcmp::TreeExtension &c2_layer = c2_extensions[c2_idx]; + const fcmp::LayerExtension &c2_layer = c2_extensions[c2_idx]; MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) @@ -96,7 +96,7 @@ static void log_tree_extension(const fcmp::TreeExtension &c1_layer = c1_extensions[c1_idx]; + const fcmp::LayerExtension &c1_layer = c1_extensions[c1_idx]; MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) @@ -134,7 +134,7 @@ static void log_tree(const fcmp::Tree &tree) { CHECK_AND_ASSERT_THROW_MES(c2_idx < tree.c2_layers.size(), "unexpected c2 layer"); - const fcmp::Layer &c2_layer = tree.c2_layers[c2_idx]; + const fcmp::Layer &c2_layer = tree.c2_layers[c2_idx]; MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) @@ -146,7 +146,7 @@ static void log_tree(const fcmp::Tree &tree) 
{ CHECK_AND_ASSERT_THROW_MES(c1_idx < tree.c1_layers.size(), "unexpected c1 layer"); - const fcmp::Layer &c1_layer = tree.c1_layers[c1_idx]; + const fcmp::Layer &c1_layer = tree.c1_layers[c1_idx]; MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) @@ -248,7 +248,7 @@ TEST(fcmp_tree, grow_tree) fcmp::extend_tree( tree_extension, fcmp::HELIOS, - fcmp::SELENE, + fcmp::SELENE, global_tree); log_tree(global_tree); @@ -285,7 +285,7 @@ TEST(fcmp_tree, grow_tree) fcmp::extend_tree( tree_extension, fcmp::HELIOS, - fcmp::SELENE, + fcmp::SELENE, global_tree); log_tree(global_tree); From 9e6b93b94d8b2fe3c731859198977a8aef34ac83 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 17 May 2024 16:26:43 -0700 Subject: [PATCH 003/127] test validates lowest layer in tree --- src/fcmp/fcmp.h | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/src/fcmp/fcmp.h b/src/fcmp/fcmp.h index 4d678077dd0..a2b8bd1defb 100644 --- a/src/fcmp/fcmp.h +++ b/src/fcmp/fcmp.h @@ -810,22 +810,15 @@ namespace fcmp } // existing tree should be valid - // TODO: only do this in debug build // assert(validate_tree(existing_tree_inout, c1, c2)); } - template + template bool validate_layer(const C_PARENT &c_parent, - const C_CHILD &c_child, const Layer &parents, - const Layer &children) + const std::vector &child_scalars, + const std::size_t max_chunk_size) { - // Get scalar representation of children - std::vector child_scalars; - extend_scalars_from_cycle_points(c_child, children, child_scalars); - - const std::size_t max_chunk_size = c_parent.WIDTH; - // Hash chunk of children scalars, then see if the hash matches up to respective parent std::size_t chunk_start_idx = 0; for (std::size_t i = 0; i < parents.size(); ++i) @@ -887,7 +880,10 @@ namespace fcmp CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); CHECK_AND_ASSERT_MES(!children.empty(), 
false, "no children at c1_idx " + std::to_string(c1_idx)); - const bool valid = validate_layer(c2, c1, parents, children); + std::vector child_scalars; + extend_scalars_from_cycle_points(c1, children, child_scalars); + + const bool valid = validate_layer(c2, parents, child_scalars, c2.WIDTH); CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); @@ -904,7 +900,10 @@ namespace fcmp CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); - const bool valid = validate_layer(c1, c2, parents, children); + std::vector child_scalars; + extend_scalars_from_cycle_points(c2, children, child_scalars); + + const bool valid = validate_layer(c1, parents, child_scalars, c1.WIDTH); CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); @@ -914,8 +913,7 @@ namespace fcmp parent_is_c2 = !parent_is_c2; } - // // Now validate leaves - // return validate_leaves(c2, layers[0], leaves); - return true; + // Now validate leaves + return validate_layer(c2, c2_layers[0], flatten_leaves(leaves), LEAF_LAYER_CHUNK_SIZE); } } From 33ad50b1763914d1a8d6c28ad60e7edcc9a27a7f Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 17 May 2024 17:19:44 -0700 Subject: [PATCH 004/127] fix c1 c2 layer indexing issue in test helper get_last_chunk --- src/fcmp/fcmp.h | 33 +++++++++++++++++++-------------- tests/unit_tests/fcmp_tree.cpp | 8 ++++---- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/fcmp/fcmp.h b/src/fcmp/fcmp.h index a2b8bd1defb..4cd9613a99e 100644 --- a/src/fcmp/fcmp.h +++ b/src/fcmp/fcmp.h @@ -312,9 +312,8 @@ namespace fcmp // Next parents will be c1 bool parent_is_c1 = true; - // Since we started with c2, the number of c2 layers should be == c1_layers.size() || (c1_layers.size() + 1) - const std::size_t num_layers = c2_layers.size(); - 
CHECK_AND_ASSERT_THROW_MES(num_layers == c1_layers.size() || num_layers == (c1_layers.size() + 1), + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), "unexpected number of curve layers"); // If there are no c1 layers, we're done @@ -322,18 +321,20 @@ namespace fcmp return last_chunks; // Then get last chunks up until the root - for (std::size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < c2_layers.size(); ++i) { - CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > layer_idx, "missing c1 layer"); - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > layer_idx, "missing c2 layer"); + CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); // TODO: template the below if statement into another function if (parent_is_c1) { - const Layer &child_layer = c2_layers[layer_idx]; + const Layer &child_layer = c2_layers[c2_idx]; CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - const Layer &parent_layer = c1_layers[layer_idx]; + const Layer &parent_layer = c1_layers[c1_idx]; CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); auto last_parent_chunk = get_last_child_layer_chunk(c2, @@ -342,13 +343,15 @@ namespace fcmp parent_layer); last_chunks.c1_last_chunks.push_back(std::move(last_parent_chunk)); + + ++c2_idx; } else { - const Layer &child_layer = c1_layers[layer_idx]; + const Layer &child_layer = c1_layers[c1_idx]; CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - const Layer &parent_layer = c2_layers[layer_idx]; + const Layer &parent_layer = c2_layers[c2_idx]; CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); auto last_parent_chunk = 
get_last_child_layer_chunk(c1, @@ -357,6 +360,8 @@ namespace fcmp parent_layer); last_chunks.c2_last_chunks.push_back(std::move(last_parent_chunk)); + + ++c1_idx; } // Alternate curves every iteration @@ -486,8 +491,8 @@ namespace fcmp --parents_out.start_idx; } - // If the child layer had its existing last hash updated, then we need to update the existing last parent - // hash in this layer as well + // If the child layer had its existing last hash updated, then we'll need to use the last hash's prior + // version in order to update the existing last parent hash in this layer bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) ? false : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); @@ -499,6 +504,8 @@ namespace fcmp } // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent + // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk + // will start from there and may need 1 more to fill CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); if (child_layer_last_hash_updated) offset = offset > 0 ? (offset - 1) : (max_chunk_size - 1); @@ -508,8 +515,6 @@ namespace fcmp std::vector child_scalars; if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) { - MDEBUG("Here I have captured what I want to capture... 
children.start_idx: " << children.start_idx - << " , children.hashes.size(): " << children.hashes.size() << " , max_chunk_size: " << max_chunk_size); // We should be updating the existing root, there shouldn't be a last parent chunk CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); diff --git a/tests/unit_tests/fcmp_tree.cpp b/tests/unit_tests/fcmp_tree.cpp index 22809e1a94d..4892eb6fd51 100644 --- a/tests/unit_tests/fcmp_tree.cpp +++ b/tests/unit_tests/fcmp_tree.cpp @@ -218,9 +218,9 @@ TEST(fcmp_tree, grow_tree) (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2) - 1, (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2), (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2) + 1, - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 3) - // (std::size_t)std::pow(fcmp::SELENE.WIDTH, 4), - // (std::size_t)std::pow(fcmp::SELENE.WIDTH, 5) + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 3), + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 4), + (std::size_t)std::pow(fcmp::SELENE.WIDTH, 5) }; for (const auto &init_leaves : N_LEAVES) @@ -267,7 +267,7 @@ TEST(fcmp_tree, grow_tree) { MDEBUG("Extending tree by " << ext_leaves << " leaves"); - const fcmp::LastChunks &last_chunks = fcmp::get_last_chunks( + const auto last_chunks = fcmp::get_last_chunks( fcmp::HELIOS, fcmp::SELENE, global_tree); From c05bd80ee5467acf3aae431431b3143b1120305d Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 17 May 2024 18:51:18 -0700 Subject: [PATCH 005/127] actual indexing fix, tests now passing --- src/fcmp/fcmp.h | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/fcmp/fcmp.h b/src/fcmp/fcmp.h index 4cd9613a99e..5093e71ac77 100644 --- a/src/fcmp/fcmp.h +++ b/src/fcmp/fcmp.h @@ -298,24 +298,28 @@ namespace fcmp const auto &c1_layers = tree.c1_layers; const auto &c2_layers = tree.c2_layers; + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == 
c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + "unexpected number of curve layers"); + LastChunks last_chunks; - last_chunks.c1_last_chunks.reserve(c1_layers.size()); - last_chunks.c2_last_chunks.reserve(c2_layers.size()); + + auto &c1_last_chunks_out = last_chunks.c1_last_chunks; + auto &c2_last_chunks_out = last_chunks.c2_last_chunks; + + c1_last_chunks_out.reserve(c1_layers.size()); + c2_last_chunks_out.reserve(c2_layers.size()); // First push the last leaf chunk data into c2 chunks CHECK_AND_ASSERT_THROW_MES(!c2_layers.empty(), "empty curve 2 layers"); auto last_leaf_chunk = get_last_leaf_chunk(c2, leaves, c2_layers[0]); - last_chunks.c2_last_chunks.push_back(std::move(last_leaf_chunk)); + c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); // Next parents will be c1 bool parent_is_c1 = true; - // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), - "unexpected number of curve layers"); - // If there are no c1 layers, we're done if (c1_layers.empty()) return last_chunks; @@ -323,7 +327,7 @@ namespace fcmp // Then get last chunks up until the root std::size_t c1_idx = 0; std::size_t c2_idx = 0; - for (std::size_t i = 0; i < c2_layers.size(); ++i) + while (c1_last_chunks_out.size() < c1_layers.size() || c2_last_chunks_out.size() < c2_layers.size()) { CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); @@ -342,7 +346,7 @@ namespace fcmp child_layer, parent_layer); - last_chunks.c1_last_chunks.push_back(std::move(last_parent_chunk)); + c1_last_chunks_out.push_back(std::move(last_parent_chunk)); ++c2_idx; } @@ -359,7 +363,7 @@ namespace fcmp child_layer, parent_layer); - last_chunks.c2_last_chunks.push_back(std::move(last_parent_chunk)); + c2_last_chunks_out.push_back(std::move(last_parent_chunk)); 
++c1_idx; } @@ -368,6 +372,9 @@ namespace fcmp parent_is_c1 = !parent_is_c1; } + CHECK_AND_ASSERT_THROW_MES(c1_last_chunks_out.size() == c1_layers.size(), "unexepcted c1 last chunks"); + CHECK_AND_ASSERT_THROW_MES(c2_last_chunks_out.size() == c2_layers.size(), "unexepcted c2 last chunks"); + return last_chunks; } @@ -515,7 +522,6 @@ namespace fcmp std::vector child_scalars; if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) { - // We should be updating the existing root, there shouldn't be a last parent chunk CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); From ad8872a76ba1127c7ace6a53cf6764f7b4a95f56 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 20 May 2024 15:24:27 -0700 Subject: [PATCH 006/127] Cleaner file organization --- src/fcmp/CMakeLists.txt | 3 +- src/fcmp/curve_trees.cpp | 66 ++ src/fcmp/curve_trees.h | 839 ++++++++++++++++ src/fcmp/fcmp.h | 930 ------------------ src/fcmp/{fcmp.cpp => tower_cycle_types.cpp} | 68 +- src/fcmp/tower_cycle_types.h | 164 +++ tests/unit_tests/CMakeLists.txt | 2 +- .../{fcmp_tree.cpp => curve_trees.cpp} | 130 +-- 8 files changed, 1164 insertions(+), 1038 deletions(-) create mode 100644 src/fcmp/curve_trees.cpp create mode 100644 src/fcmp/curve_trees.h delete mode 100644 src/fcmp/fcmp.h rename src/fcmp/{fcmp.cpp => tower_cycle_types.cpp} (58%) create mode 100644 src/fcmp/tower_cycle_types.h rename tests/unit_tests/{fcmp_tree.cpp => curve_trees.cpp} (58%) diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index d242361a8c2..2c4543e192e 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -27,7 +27,8 @@ # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
set(fcmp_sources - fcmp.cpp) + curve_trees.cpp + tower_cycle_types.cpp) monero_find_all_headers(fcmp_headers "${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp new file mode 100644 index 00000000000..3e58f484d43 --- /dev/null +++ b/src/fcmp/curve_trees.cpp @@ -0,0 +1,66 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "curve_trees.h" + +namespace fcmp +{ +namespace curve_trees +{ + +LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) +{ + crypto::ec_point I; + crypto::derive_key_image_generator(O, I); + + return LeafTuple{ + .O_x = tower_cycle::selene::SELENE.ed_25519_point_to_scalar(O), + .I_x = tower_cycle::selene::SELENE.ed_25519_point_to_scalar(I), + .C_x = tower_cycle::selene::SELENE.ed_25519_point_to_scalar(C) + }; +} + +// TODO: move into curves tree file +std::vector flatten_leaves(const std::vector &leaves) +{ + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (const auto &l : leaves) + { + // TODO: implement without cloning + flattened_leaves.emplace_back(tower_cycle::selene::SELENE.clone(l.O_x)); + flattened_leaves.emplace_back(tower_cycle::selene::SELENE.clone(l.I_x)); + flattened_leaves.emplace_back(tower_cycle::selene::SELENE.clone(l.C_x)); + } + + return flattened_leaves; +}; + +} //namespace curve_trees +} //namespace fcmp diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h new file mode 100644 index 00000000000..49a7bafd8cb --- /dev/null +++ b/src/fcmp/curve_trees.h @@ -0,0 +1,839 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "crypto/crypto.h" +#include "misc_log_ex.h" +#include "tower_cycle_types.h" + +#include + +namespace fcmp +{ +namespace curve_trees +{ + + +// TODO: template all the curve things +// TODO: CurveTree class instantiated with the curves and widths +// TODO: move "TEST" functions + +// TODO: template +struct LeafTuple final +{ + tower_cycle::selene::Selene::Scalar O_x; + tower_cycle::selene::Selene::Scalar I_x; + tower_cycle::selene::Selene::Scalar C_x; +}; + +static const std::size_t LEAF_TUPLE_SIZE = 3; + +// TODO: make this a const class member that's set on initialization +static const std::size_t LEAF_LAYER_CHUNK_WIDTH = LEAF_TUPLE_SIZE * tower_cycle::selene::SELENE.WIDTH; + +// Tree structure +struct Leaves final +{ + // Starting index in the leaf layer + std::size_t start_idx; + // Contiguous leaves in a tree that start at the start_idx + std::vector tuples; +}; + +// A layer of contiguous hashes starting from a specific start_idx in the tree +template +struct 
LayerExtension final +{ + std::size_t start_idx; + std::vector hashes; +}; + +// A struct useful to extend an existing tree, layers alternate between C1 and C2 +template +struct TreeExtension final +{ + Leaves leaves; + std::vector> c1_layer_extensions; + std::vector> c2_layer_extensions; +}; + +// Useful data from the last chunk in a layer +template +struct LastChunkData final +{ + // The total number of children % child layer chunk width + /*TODO: const*/ std::size_t child_offset; + // The last child in the chunk (and therefore the last child in the child layer) + /*TODO: const*/ typename C::Scalar last_child; + // The hash of the last chunk of child scalars + /*TODO: const*/ typename C::Point last_parent; + // Total number of children in the child layer + /*TODO: const*/ std::size_t child_layer_size; + // Total number of hashes in the parent layer + /*TODO: const*/ std::size_t parent_layer_size; +}; + +template +struct LastChunks final +{ + std::vector> c1_last_chunks; + std::vector> c2_last_chunks; +}; + +// TODO: template +LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C); +std::vector flatten_leaves(const std::vector &leaves); + +template +static void extend_scalars_from_cycle_points(const C_POINTS &curve, + const std::vector &points, + std::vector &scalars_out) +{ + scalars_out.reserve(scalars_out.size() + points.size()); + + for (const auto &point : points) + { + // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ + typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); + scalars_out.push_back(std::move(scalar)); + } +} + +// TODO: move to tower_cycle_types +template +static void extend_zeroes(const C &curve, + const std::size_t num_zeroes, + std::vector &zeroes_inout) +{ + zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); + + for (std::size_t i = 0; i < num_zeroes; ++i) + zeroes_inout.emplace_back(curve.zero_scalar()); +} + +template +static typename 
C::Point get_new_parent(const C &curve, + const typename C::Chunk &new_children) +{ + // New parent means no prior children, fill priors with 0 + std::vector prior_children; + extend_zeroes(curve, new_children.size(), prior_children); + + return curve.hash_grow( + curve.GENERATORS, + curve.HASH_INIT_POINT, + 0,/*offset*/ + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); +} + +template +static typename C::Point get_first_leaf_parent(const C &curve, + const typename C::Chunk &new_children, + const LastChunkData *last_chunk_ptr) +{ + // If no last chunk exists, or if the last chunk is already full, then we can get a new parent + if (last_chunk_ptr == nullptr || last_chunk_ptr->child_offset == 0) + return get_new_parent(curve, new_children); + + // There won't be any existing children when growing the leaf layer, fill priors with 0 + std::vector prior_children; + extend_zeroes(curve, new_children.size(), prior_children); + + return curve.hash_grow( + curve.GENERATORS, + last_chunk_ptr->last_parent, + last_chunk_ptr->child_offset, + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); +} + +template +static typename C::Point get_first_non_leaf_parent(const C &curve, + const typename C::Chunk &new_children, + const bool child_layer_last_hash_updated, + const LastChunkData *last_chunk_ptr) +{ + // If no last chunk exists, we can get a new parent + if (last_chunk_ptr == nullptr) + return get_new_parent(curve, new_children); + + std::vector prior_children; + std::size_t offset = last_chunk_ptr->child_offset; + + if (child_layer_last_hash_updated) + { + // If the last chunk has updated children in it, then we need to get the delta to the old children, and + // subtract the offset by 1 since we're updating the prior last hash + prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); + offset = offset > 0 ? 
(offset - 1) : (curve.WIDTH - 1); + + // Extend prior children by zeroes for any additional new children, since they must be new + if (new_children.size() > 1) + extend_zeroes(curve, new_children.size() - 1, prior_children); + } + else if (offset > 0) + { + // If we're updating the parent hash and no children were updated, then we're just adding new children + // to the existing last chunk and can fill priors with 0 + extend_zeroes(curve, new_children.size(), prior_children); + } + else + { + // If the last chunk is already full and isn't updated in any way, then we just get a new parent + return get_new_parent(curve, new_children); + } + + return curve.hash_grow( + curve.GENERATORS, + last_chunk_ptr->last_parent, + offset, + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); +} + +// TODO: look into consolidating hash_layer and hash_leaf_layer into 1 function +template +void hash_layer(const C_CHILD &c_child, + const C_PARENT &c_parent, + const LastChunkData *last_child_chunk_ptr, + const LastChunkData *last_parent_chunk_ptr, + const LayerExtension &children, + LayerExtension &parents_out) +{ + parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); + + CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); + + const std::size_t max_chunk_size = c_parent.WIDTH; + std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->child_offset; + + // TODO: try to simplify the approach to avoid edge cases + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // If the child layer had its existing last hash updated, then we'll need to use the last hash's prior + // version in order to update the existing last parent hash in this layer + bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) + ? false + : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); + + if (offset == 0 && child_layer_last_hash_updated) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent + // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk + // will start from there and may need 1 more to fill + CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); + if (child_layer_last_hash_updated) + offset = offset > 0 ? 
(offset - 1) : (max_chunk_size - 1); + + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + std::vector child_scalars; + if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) + { + // We should be updating the existing root, there shouldn't be a last parent chunk + CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + + // If the children don't already include the existing root at start_idx 0 (they would if the existing + // root was updated in the child layer), then we need to add it to the first chunk to be hashed + if (children.start_idx > 0) + child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + } + + // Convert child points to scalars + extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); + + // See how many children we need to fill up the existing last chunk + std::size_t chunk_size = std::min(child_scalars.size(), max_chunk_size - offset); + MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() + << " , offset: " << offset); + + // Hash chunks of child scalars to create the parent hashes + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < child_scalars.size()) + { + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + + for (const auto &c : chunk) + MDEBUG("Hashing " << c_parent.to_string(c)); + + // Hash the chunk of children + typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 + ? 
get_first_non_leaf_parent(c_parent, chunk, child_layer_last_hash_updated, last_parent_chunk_ptr) + : get_new_parent(c_parent, chunk); + + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + // Prepare for next loop if there should be one + if (chunk_start_idx == child_scalars.size()) + break; + + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); + chunk_size = std::min(max_chunk_size, child_scalars.size() - chunk_start_idx); + } +} + +template +void hash_leaf_layer(const C2 &c2, + const LastChunkData *last_chunk_ptr, + const Leaves &leaves, + LayerExtension &parents_out) +{ + parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); + + if (leaves.tuples.empty()) + return; + + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] + const std::vector children = flatten_leaves(leaves.tuples); + + const std::size_t max_chunk_size = LEAF_LAYER_CHUNK_WIDTH; + const std::size_t offset = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->child_offset; + + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // See how many new children are needed to fill up the existing last chunk + CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); + std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); + + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < children.size()) + { + const auto chunk_start = children.data() + chunk_start_idx; + const typename C2::Chunk chunk{chunk_start, chunk_size}; + + for (const auto &c : chunk) + MDEBUG("Hashing " << c2.to_string(c)); + + // Hash the chunk of children + typename C2::Point chunk_hash = chunk_start_idx == 0 + ? get_first_leaf_parent(c2, chunk, last_chunk_ptr) + : get_new_parent(c2, chunk); + + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c2.to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + // Prepare for next loop if there should be one + if (chunk_start_idx == children.size()) + break; + + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < children.size(), "unexpected chunk start idx"); + chunk_size = std::min(max_chunk_size, children.size() - chunk_start_idx); + } +} + +template +TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, + const Leaves &new_leaves, + const C1 &c1, + const C2 &c2) +{ + TreeExtension tree_extension; + + if (new_leaves.tuples.empty()) + return tree_extension; + + const auto &c1_last_chunks = 
existing_last_chunks.c1_last_chunks; + const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; + + // Set the leaf start idx + tree_extension.leaves.start_idx = c2_last_chunks.empty() + ? 0 + : c2_last_chunks[0].child_layer_size; + + // Copy the leaves + // TODO: don't copy here + tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); + for (const auto &leaf : new_leaves.tuples) + { + tree_extension.leaves.tuples.emplace_back(LeafTuple{ + .O_x = c2.clone(leaf.O_x), + .I_x = c2.clone(leaf.I_x), + .C_x = c2.clone(leaf.C_x) + }); + } + + auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; + + // Hash the leaf layer + LayerExtension parents; + hash_leaf_layer(c2, + c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], + new_leaves, + parents); + + c2_layer_extensions_out.emplace_back(std::move(parents)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + // Alternate between hashing c2 children, c1 children, c2, c1, ... + bool parent_is_c1 = true; + + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; + // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) + while (true) + { + if (parent_is_c1) + { + CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + + LayerExtension c1_layer_extension; + hash_layer(c2, + c1, + (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], + (c1_last_chunks.size() <= c1_last_idx) ? 
nullptr : &c1_last_chunks[c1_last_idx], + c2_layer_extensions_out[c2_last_idx], + c1_layer_extension); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + + // Check if we just added the root + if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c2_last_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + + LayerExtension c2_layer_extension; + hash_layer(c1, + c2, + (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], + (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], + c1_layer_extensions_out[c1_last_idx], + c2_layer_extension); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c1_last_idx; + } + + parent_is_c1 = !parent_is_c1; + } +} + +// TEST +template +using Layer = std::vector; + +// TEST +// A complete tree, useful for testing (can't fit the whole tree in memory otherwise) +template +struct Tree final +{ + std::vector leaves; + std::vector> c1_layers; + std::vector> c2_layers; +}; + +// TEST +template +LastChunkData get_last_leaf_chunk(const C2 &c2, + const std::vector &leaves, + const std::vector &parent_layer) +{ + CHECK_AND_ASSERT_THROW_MES(!leaves.empty(), "empty leaf layer"); + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty leaf parent layer"); + + const std::size_t child_offset = (leaves.size() * LEAF_TUPLE_SIZE) % LEAF_LAYER_CHUNK_WIDTH; + + const typename C2::Scalar &last_child = leaves.back().C_x; + const typename C2::Point &last_parent = parent_layer.back(); + + return LastChunkData{ + .child_offset = child_offset, + .last_child = c2.clone(last_child), + .last_parent = c2.clone(last_parent), + .child_layer_size = 
leaves.size() * LEAF_TUPLE_SIZE, + .parent_layer_size = parent_layer.size() + }; +} + +// TEST +template +LastChunkData get_last_child_layer_chunk(const C_CHILD &c_child, + const C_PARENT &c_parent, + const std::vector &child_layer, + const std::vector &parent_layer) +{ + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "empty child layer"); + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty parent layer"); + + const std::size_t child_offset = child_layer.size() % c_parent.WIDTH; + + const typename C_CHILD::Point &last_child_point = child_layer.back(); + const typename C_PARENT::Scalar &last_child = c_child.point_to_cycle_scalar(last_child_point); + + const typename C_PARENT::Point &last_parent = parent_layer.back(); + + return LastChunkData{ + .child_offset = child_offset, + .last_child = c_parent.clone(last_child), + .last_parent = c_parent.clone(last_parent), + .child_layer_size = child_layer.size(), + .parent_layer_size = parent_layer.size() + }; +} + +// TODO: implement in the db, never want the entire tree in memory +// TEST +template +LastChunks get_last_chunks(const C1 &c1, + const C2 &c2, + const Tree &tree) +{ + const auto &leaves = tree.leaves; + const auto &c1_layers = tree.c1_layers; + const auto &c2_layers = tree.c2_layers; + + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + "unexpected number of curve layers"); + + LastChunks last_chunks; + + auto &c1_last_chunks_out = last_chunks.c1_last_chunks; + auto &c2_last_chunks_out = last_chunks.c2_last_chunks; + + c1_last_chunks_out.reserve(c1_layers.size()); + c2_last_chunks_out.reserve(c2_layers.size()); + + // First push the last leaf chunk data into c2 chunks + CHECK_AND_ASSERT_THROW_MES(!c2_layers.empty(), "empty curve 2 layers"); + auto last_leaf_chunk = get_last_leaf_chunk(c2, + leaves, + c2_layers[0]); + 
c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); + + // Next parents will be c1 + bool parent_is_c1 = true; + + // If there are no c1 layers, we're done + if (c1_layers.empty()) + return last_chunks; + + // Then get last chunks up until the root + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + while (c1_last_chunks_out.size() < c1_layers.size() || c2_last_chunks_out.size() < c2_layers.size()) + { + CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); + + // TODO: template the below if statement into another function + if (parent_is_c1) + { + const Layer &child_layer = c2_layers[c2_idx]; + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); + + const Layer &parent_layer = c1_layers[c1_idx]; + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); + + auto last_parent_chunk = get_last_child_layer_chunk(c2, + c1, + child_layer, + parent_layer); + + c1_last_chunks_out.push_back(std::move(last_parent_chunk)); + + ++c2_idx; + } + else + { + const Layer &child_layer = c1_layers[c1_idx]; + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); + + const Layer &parent_layer = c2_layers[c2_idx]; + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); + + auto last_parent_chunk = get_last_child_layer_chunk(c1, + c2, + child_layer, + parent_layer); + + c2_last_chunks_out.push_back(std::move(last_parent_chunk)); + + ++c1_idx; + } + + // Alternate curves every iteration + parent_is_c1 = !parent_is_c1; + } + + CHECK_AND_ASSERT_THROW_MES(c1_last_chunks_out.size() == c1_layers.size(), "unexepected c1 last chunks"); + CHECK_AND_ASSERT_THROW_MES(c2_last_chunks_out.size() == c2_layers.size(), "unexepected c2 last chunks"); + + return last_chunks; +} + +// TODO: this is only useful for testing, since can't fit entire tree in memory +// TEST +template +void extend_tree(const TreeExtension 
&tree_extension, + const C1 &c1, + const C2 &c2, + Tree &tree_inout) +{ + // Add the leaves + CHECK_AND_ASSERT_THROW_MES((tree_inout.leaves.size() * LEAF_TUPLE_SIZE) == tree_extension.leaves.start_idx, + "unexpected leaf start idx"); + + tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); + for (const auto &leaf : tree_extension.leaves.tuples) + { + tree_inout.leaves.emplace_back(LeafTuple{ + .O_x = c2.clone(leaf.O_x), + .I_x = c2.clone(leaf.I_x), + .C_x = c2.clone(leaf.C_x) + }); + } + + // Add the layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); + const LayerExtension &c2_ext = c2_extensions[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c2_idx <= tree_inout.c2_layers.size(), "missing c2 layer"); + if (tree_inout.c2_layers.size() == c2_idx) + tree_inout.c2_layers.emplace_back(Layer{}); + + auto &c2_inout = tree_inout.c2_layers[c2_idx]; + + const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx); + const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start"); + + // We updated the last hash + if (started_at_tip) + c2_inout.back() = c2.clone(c2_ext.hashes.front()); + + for (std::size_t i = started_at_tip ? 
1 : 0; i < c2_ext.hashes.size(); ++i) + c2_inout.emplace_back(c2.clone(c2_ext.hashes[i])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); + const LayerExtension &c1_ext = c1_extensions[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c1_idx <= tree_inout.c1_layers.size(), "missing c1 layer"); + if (tree_inout.c1_layers.size() == c1_idx) + tree_inout.c1_layers.emplace_back(Layer{}); + + auto &c1_inout = tree_inout.c1_layers[c1_idx]; + + const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); + const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start"); + + // We updated the last hash + if (started_at_tip) + c1_inout.back() = c1.clone(c1_ext.hashes.front()); + + for (std::size_t i = started_at_tip ? 1 : 0; i < c1_ext.hashes.size(); ++i) + c1_inout.emplace_back(c1.clone(c1_ext.hashes[i])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + +// TEST +template +bool validate_layer(const C_PARENT &c_parent, + const Layer &parents, + const std::vector &child_scalars, + const std::size_t max_chunk_size) +{ + // Hash chunk of children scalars, then see if the hash matches up to respective parent + std::size_t chunk_start_idx = 0; + for (std::size_t i = 0; i < parents.size(); ++i) + { + CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); + const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); + CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); + + const typename C_PARENT::Point &parent = parents[i]; + + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + + const typename C_PARENT::Point 
chunk_hash = get_new_parent(c_parent, chunk); + + const auto actual_bytes = c_parent.to_bytes(parent); + const auto expected_bytes = c_parent.to_bytes(chunk_hash); + CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); + + chunk_start_idx += chunk_size; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); + + return true; +} + +// TEST +template +bool validate_tree(const Tree &tree, const C1 &c1, const C2 &c2) +{ + const auto &leaves = tree.leaves; + const auto &c1_layers = tree.c1_layers; + const auto &c2_layers = tree.c2_layers; + + CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree"); + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); + CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + false, "unexpected mismatch of c2 and c1 layers"); + + // Verify root has 1 member in it + const bool c2_is_root = c2_layers.size() > c1_layers.size(); + CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, + "root must have 1 member in it"); + + // Iterate from root down to layer above leaves, and check hashes match up correctly + bool parent_is_c2 = c2_is_root; + std::size_t c2_idx = c2_layers.size() - 1; + std::size_t c1_idx = c1_layers.empty() ? 
0 : (c1_layers.size() - 1); + for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) + { + // TODO: implement templated function for below if statement + if (parent_is_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + + const Layer &parents = c2_layers[c2_idx]; + const Layer &children = c1_layers[c1_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); + + std::vector child_scalars; + extend_scalars_from_cycle_points(c1, children, child_scalars); + + const bool valid = validate_layer(c2, parents, child_scalars, c2.WIDTH); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + + --c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + + const Layer &parents = c1_layers[c1_idx]; + const Layer &children = c2_layers[c2_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); + + std::vector child_scalars; + extend_scalars_from_cycle_points(c2, children, child_scalars); + + const bool valid = validate_layer(c1, parents, child_scalars, c1.WIDTH); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + + --c1_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + // Now validate leaves + return validate_layer(c2, c2_layers[0], flatten_leaves(leaves), LEAF_LAYER_CHUNK_WIDTH); +} + +} //namespace curve_trees +} //namespace fcmp diff --git a/src/fcmp/fcmp.h b/src/fcmp/fcmp.h deleted file mode 100644 index 5093e71ac77..00000000000 --- 
a/src/fcmp/fcmp.h +++ /dev/null @@ -1,930 +0,0 @@ -// Copyright (c) 2024, The Monero Project -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without modification, are -// permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of -// conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list -// of conditions and the following disclaimer in the documentation and/or other -// materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be -// used to endorse or promote products derived from this software without specific -// prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#pragma once - -#include "crypto/crypto.h" -#include "fcmp_rust/cxx.h" -#include "fcmp_rust/fcmp_rust.h" -#include "misc_log_ex.h" -#include "string_tools.h" - -#include - -#include - -namespace fcmp -{ - using RustEd25519Point = std::array; - - // Need to forward declare Scalar types for point_to_cycle_scalar below - using SeleneScalar = rust::Box; - using HeliosScalar = rust::Box; - - static struct Helios final - { - using Generators = rust::Box; - using Scalar = HeliosScalar; - using Point = rust::Box; - using Chunk = rust::Slice; - - // TODO: static constants - const Generators GENERATORS = fcmp_rust::random_helios_generators(); - const Point HASH_INIT_POINT = fcmp_rust::random_helios_hash_init_point(); - - // TODO: use correct value - static const std::size_t WIDTH = 5; - - Point hash_grow( - const Generators &generators, - const Point &existing_hash, - const std::size_t offset, - const Chunk &prior_children, - const Chunk &new_children) const - { - return fcmp_rust::hash_grow_helios( - generators, - existing_hash, - offset, - prior_children, - new_children); - } - - SeleneScalar point_to_cycle_scalar(const Point &point) const; - - Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_helios_scalar(scalar); } - Point clone(const Point &point) const { return fcmp_rust::clone_helios_point(point); } - - Scalar zero_scalar() const { return fcmp_rust::helios_zero_scalar(); } - - std::array to_bytes(const Scalar &scalar) const - { return fcmp_rust::helios_scalar_to_bytes(scalar); } - std::array to_bytes(const Point &point) const - { return fcmp_rust::helios_point_to_bytes(point); } - - std::string to_string(const Scalar &scalar) const - { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } - std::string to_string(const Point &point) const - { return epee::string_tools::pod_to_hex(to_bytes(point)); } - } HELIOS; - - static struct Selene final - { - using Generators = rust::Box; - using Scalar = SeleneScalar; - using Point = rust::Box; - using 
Chunk = rust::Slice; - - // TODO: static constants - const Generators GENERATORS = fcmp_rust::random_selene_generators(); - const Point HASH_INIT_POINT = fcmp_rust::random_selene_hash_init_point(); - - // TODO: use correct value - static const std::size_t WIDTH = 5; - - Point hash_grow( - const Generators &generators, - const Point &existing_hash, - const std::size_t offset, - const Chunk &prior_children, - const Chunk &new_children) const - { - return fcmp_rust::hash_grow_selene( - generators, - existing_hash, - offset, - prior_children, - new_children); - }; - - HeliosScalar point_to_cycle_scalar(const Point &point) const; - - Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_selene_scalar(scalar); } - Point clone(const Point &point) const { return fcmp_rust::clone_selene_point(point); } - - Scalar zero_scalar() const { return fcmp_rust::selene_zero_scalar(); } - - std::array to_bytes(const Scalar &scalar) const - { return fcmp_rust::selene_scalar_to_bytes(scalar); } - std::array to_bytes(const Point &point) const - { return fcmp_rust::selene_point_to_bytes(point); } - - std::string to_string(const Scalar &scalar) const - { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } - std::string to_string(const Point &point) const - { return epee::string_tools::pod_to_hex(to_bytes(point)); } - } SELENE; - - // TODO: cleanly separate everything below into another file. 
This current file should strictly be for the rust interface - - // TODO: template all the curve things - - // TODO: Curve class - // TODO: CurveTree class instantiated with the curves and widths - - // TODO: template - struct LeafTuple final - { - Selene::Scalar O_x; - Selene::Scalar I_x; - Selene::Scalar C_x; - }; - static const std::size_t LEAF_TUPLE_SIZE = 3; - static const std::size_t LEAF_LAYER_CHUNK_SIZE = LEAF_TUPLE_SIZE * SELENE.WIDTH; - - // Tree structure - struct Leaves final - { - // Starting index in the leaf layer - std::size_t start_idx; - // Contiguous leaves in a tree that start at the start_idx - std::vector tuples; - }; - - // A layer of contiguous hashes starting from a specific start_idx in the tree - template - struct LayerExtension final - { - std::size_t start_idx; - std::vector hashes; - }; - - // A struct useful to extend an existing tree, layers alternate between C1 and C2 - template - struct TreeExtension final - { - Leaves leaves; - std::vector> c1_layer_extensions; - std::vector> c2_layer_extensions; - }; - - // Useful data from the last chunk in a layer - template - struct LastChunkData final - { - // The total number of children % child layer chunk size - /*TODO: const*/ std::size_t child_offset; - // The last child in the chunk (and therefore the last child in the child layer) - /*TODO: const*/ typename C::Scalar last_child; - // The hash of the last chunk of child scalars - /*TODO: const*/ typename C::Point last_parent; - // Total number of children in the child layer - /*TODO: const*/ std::size_t child_layer_size; - // Total number of hashes in the parent layer - /*TODO: const*/ std::size_t parent_layer_size; - }; - - template - struct LastChunks final - { - std::vector> c1_last_chunks; - std::vector> c2_last_chunks; - }; - - template - using Layer = std::vector; - - // A complete tree, useful for testing (can't fit the whole tree in memory otherwise) - // TODO: move this to just the testing - template - struct Tree final - { - 
std::vector leaves; - std::vector> c1_layers; - std::vector> c2_layers; - }; - - LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C); - std::vector flatten_leaves(const std::vector &leaves); - - // TODO: move into its own fcmp_crypto file - template - static void extend_scalars_from_cycle_points(const C_POINTS &curve, - const std::vector &points, - std::vector &scalars_out) - { - scalars_out.reserve(scalars_out.size() + points.size()); - - for (const auto &point : points) - { - // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ - typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); - scalars_out.push_back(std::move(scalar)); - } - } - - template - LastChunkData get_last_leaf_chunk(const C2 &c2, - const std::vector &leaves, - const std::vector &parent_layer) - { - CHECK_AND_ASSERT_THROW_MES(!leaves.empty(), "empty leaf layer"); - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty leaf parent layer"); - - const std::size_t child_offset = (leaves.size() * LEAF_TUPLE_SIZE) % LEAF_LAYER_CHUNK_SIZE; - - const typename C2::Scalar &last_child = leaves.back().C_x; - const typename C2::Point &last_parent = parent_layer.back(); - - return LastChunkData{ - .child_offset = child_offset, - .last_child = c2.clone(last_child), - .last_parent = c2.clone(last_parent), - .child_layer_size = leaves.size() * LEAF_TUPLE_SIZE, - .parent_layer_size = parent_layer.size() - }; - } - - template - LastChunkData get_last_child_layer_chunk(const C_CHILD &c_child, - const C_PARENT &c_parent, - const std::vector &child_layer, - const std::vector &parent_layer) - { - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "empty child layer"); - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty parent layer"); - - const std::size_t child_offset = child_layer.size() % c_parent.WIDTH; - - const typename C_CHILD::Point &last_child_point = child_layer.back(); - const typename C_PARENT::Scalar 
&last_child = c_child.point_to_cycle_scalar(last_child_point); - - const typename C_PARENT::Point &last_parent = parent_layer.back(); - - return LastChunkData{ - .child_offset = child_offset, - .last_child = c_parent.clone(last_child), - .last_parent = c_parent.clone(last_parent), - .child_layer_size = child_layer.size(), - .parent_layer_size = parent_layer.size() - }; - } - - // TODO: implement in the db, never want the entire tree in memory - template - LastChunks get_last_chunks(const C1 &c1, - const C2 &c2, - const Tree &tree) - { - // const bool valid = validate_tree(tree, C1, C2); - // CHECK_AND_ASSERT_THROW_MES(valid, "invalid tree"); - - const auto &leaves = tree.leaves; - const auto &c1_layers = tree.c1_layers; - const auto &c2_layers = tree.c2_layers; - - // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), - "unexpected number of curve layers"); - - LastChunks last_chunks; - - auto &c1_last_chunks_out = last_chunks.c1_last_chunks; - auto &c2_last_chunks_out = last_chunks.c2_last_chunks; - - c1_last_chunks_out.reserve(c1_layers.size()); - c2_last_chunks_out.reserve(c2_layers.size()); - - // First push the last leaf chunk data into c2 chunks - CHECK_AND_ASSERT_THROW_MES(!c2_layers.empty(), "empty curve 2 layers"); - auto last_leaf_chunk = get_last_leaf_chunk(c2, - leaves, - c2_layers[0]); - c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); - - // Next parents will be c1 - bool parent_is_c1 = true; - - // If there are no c1 layers, we're done - if (c1_layers.empty()) - return last_chunks; - - // Then get last chunks up until the root - std::size_t c1_idx = 0; - std::size_t c2_idx = 0; - while (c1_last_chunks_out.size() < c1_layers.size() || c2_last_chunks_out.size() < c2_layers.size()) - { - CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); - 
CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); - - // TODO: template the below if statement into another function - if (parent_is_c1) - { - const Layer &child_layer = c2_layers[c2_idx]; - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - - const Layer &parent_layer = c1_layers[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); - - auto last_parent_chunk = get_last_child_layer_chunk(c2, - c1, - child_layer, - parent_layer); - - c1_last_chunks_out.push_back(std::move(last_parent_chunk)); - - ++c2_idx; - } - else - { - const Layer &child_layer = c1_layers[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - - const Layer &parent_layer = c2_layers[c2_idx]; - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); - - auto last_parent_chunk = get_last_child_layer_chunk(c1, - c2, - child_layer, - parent_layer); - - c2_last_chunks_out.push_back(std::move(last_parent_chunk)); - - ++c1_idx; - } - - // Alternate curves every iteration - parent_is_c1 = !parent_is_c1; - } - - CHECK_AND_ASSERT_THROW_MES(c1_last_chunks_out.size() == c1_layers.size(), "unexepcted c1 last chunks"); - CHECK_AND_ASSERT_THROW_MES(c2_last_chunks_out.size() == c2_layers.size(), "unexepcted c2 last chunks"); - - return last_chunks; - } - - template - static void extend_zeroes(const C &curve, - const std::size_t num_zeroes, - std::vector &zeroes_inout) - { - zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); - - for (std::size_t i = 0; i < num_zeroes; ++i) - zeroes_inout.emplace_back(curve.zero_scalar()); - } - - template - static typename C::Point get_new_parent(const C &curve, - const typename C::Chunk &new_children) - { - // New parent means no prior children, fill priors with 0 - std::vector prior_children; - extend_zeroes(curve, new_children.size(), prior_children); - - return curve.hash_grow( - curve.GENERATORS, - curve.HASH_INIT_POINT, - 0,/*offset*/ 
- typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); - } - - template - static typename C::Point get_first_leaf_parent(const C &curve, - const typename C::Chunk &new_children, - const LastChunkData *last_chunk_ptr) - { - // If no last chunk exists, or if the last chunk is already full, then we can get a new parent - if (last_chunk_ptr == nullptr || last_chunk_ptr->child_offset == 0) - return get_new_parent(curve, new_children); - - // There won't be any existing children when growing the leaf layer, fill priors with 0 - std::vector prior_children; - extend_zeroes(curve, new_children.size(), prior_children); - - return curve.hash_grow( - curve.GENERATORS, - last_chunk_ptr->last_parent, - last_chunk_ptr->child_offset, - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); - } - - template - static typename C::Point get_first_non_leaf_parent(const C &curve, - const typename C::Chunk &new_children, - const bool child_layer_last_hash_updated, - const LastChunkData *last_chunk_ptr) - { - // If no last chunk exists, we can get a new parent - if (last_chunk_ptr == nullptr) - return get_new_parent(curve, new_children); - - std::vector prior_children; - std::size_t offset = last_chunk_ptr->child_offset; - - if (child_layer_last_hash_updated) - { - // If the last chunk has updated children in it, then we need to get the delta to the old children, and - // subtract the offset by 1 since we're updating the prior last hash - prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); - offset = offset > 0 ? 
(offset - 1) : (curve.WIDTH - 1); - - // Extend prior children by zeroes for any additional new children, since they must be new - if (new_children.size() > 1) - extend_zeroes(curve, new_children.size() - 1, prior_children); - } - else if (offset > 0) - { - // If we're updating the parent hash and no children were updated, then we're just adding new children - // to the existing last chunk and can fill priors with 0 - extend_zeroes(curve, new_children.size(), prior_children); - } - else - { - // If the last chunk is already full and isn't updated in any way, then we just get a new parent - return get_new_parent(curve, new_children); - } - - return curve.hash_grow( - curve.GENERATORS, - last_chunk_ptr->last_parent, - offset, - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); - } - - template - void hash_layer(const C_CHILD &c_child, - const C_PARENT &c_parent, - const LastChunkData *last_child_chunk_ptr, - const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children, - LayerExtension &parents_out) - { - parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size; - parents_out.hashes.clear(); - - CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); - - const std::size_t max_chunk_size = c_parent.WIDTH; - std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->child_offset; - - // TODO: work through all edge cases, then try to simplify the approach to avoid them - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } - - // If the child layer had its existing last hash updated, then we'll need to use the last hash's prior - // version in order to update the existing last parent hash in this layer - bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) - ? false - : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); - - if (offset == 0 && child_layer_last_hash_updated) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } - - // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent - // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk - // will start from there and may need 1 more to fill - CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); - if (child_layer_last_hash_updated) - offset = offset > 0 ? 
(offset - 1) : (max_chunk_size - 1); - - // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when - // hashing the *existing* root layer - std::vector child_scalars; - if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) - { - // We should be updating the existing root, there shouldn't be a last parent chunk - CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); - - // If the children don't already include the existing root at start_idx 0 (they would if the existing - // root was updated in the child layer), then we need to add it to the first chunk to be hashed - if (children.start_idx > 0) - child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); - } - - // Convert child points to scalars - extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); - - // See how many children we need to fill up the existing last chunk - std::size_t chunk_size = std::min(child_scalars.size(), max_chunk_size - offset); - MDEBUG("Starting chunk_size: " << chunk_size << " , child_scalars.size(): " << child_scalars.size() << " , offset: " << offset); - - // Hash chunks of child scalars to create the parent hashes - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < child_scalars.size()) - { - const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; - - for (const auto &c : chunk) - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " : " << c_parent.to_string(c)); - - // Hash the chunk of children - typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 - ? 
get_first_non_leaf_parent(c_parent, chunk, child_layer_last_hash_updated, last_parent_chunk_ptr) - : get_new_parent(c_parent, chunk); - - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash)); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); - - // Advance to the next chunk - chunk_start_idx += chunk_size; - - // Prepare for next loop if there should be one - if (chunk_start_idx == child_scalars.size()) - break; - - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); - chunk_size = std::min(max_chunk_size, child_scalars.size() - chunk_start_idx); - } - } - - template - void hash_leaf_layer(const C2 &c2, - const LastChunkData *last_chunk_ptr, - const Leaves &leaves, - LayerExtension &parents_out) - { - parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; - parents_out.hashes.clear(); - - if (leaves.tuples.empty()) - return; - - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] - const std::vector children = fcmp::flatten_leaves(leaves.tuples); - - const std::size_t max_chunk_size = LEAF_LAYER_CHUNK_SIZE; - const std::size_t offset = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->child_offset; - - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } - - // See how many new children are needed to fill up the existing last chunk - CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); - std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); - - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < children.size()) - { - const auto chunk_start = children.data() + chunk_start_idx; - const typename C2::Chunk chunk{chunk_start, chunk_size}; - - for (const auto &c : chunk) - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " : " << c2.to_string(c)); - - // Hash the chunk of children - typename C2::Point chunk_hash = chunk_start_idx == 0 - ? 
get_first_leaf_parent(c2, chunk, last_chunk_ptr) - : get_new_parent(c2, chunk); - - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c2.to_string(chunk_hash) << " , chunk_size: " << chunk_size); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); - - // Advance to the next chunk - chunk_start_idx += chunk_size; - - // Prepare for next loop if there should be one - if (chunk_start_idx == children.size()) - break; - - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < children.size(), "unexpected chunk start idx"); - chunk_size = std::min(max_chunk_size, children.size() - chunk_start_idx); - } - } - - template - TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, - const Leaves &new_leaves, - const C1 &c1, - const C2 &c2) - { - TreeExtension tree_extension; - - if (new_leaves.tuples.empty()) - return tree_extension; - - const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; - const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; - - // Set the leaf start idx - tree_extension.leaves.start_idx = c2_last_chunks.empty() - ? 0 - : c2_last_chunks[0].child_layer_size; - - // Copy the leaves - // TODO: don't copy here - tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); - for (const auto &leaf : new_leaves.tuples) - { - tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = SELENE.clone(leaf.O_x), - .I_x = SELENE.clone(leaf.I_x), - .C_x = SELENE.clone(leaf.C_x) - }); - } - - auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; - auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; - - // Hash the leaf layer - LayerExtension parents; - hash_leaf_layer(c2, - c2_last_chunks.empty() ? 
nullptr : &c2_last_chunks[0], - new_leaves, - parents); - - c2_layer_extensions_out.emplace_back(std::move(parents)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - // Alternate between hashing c2 children, c1 children, c2, c1, ... - bool parent_is_c1 = true; - - std::size_t c1_last_idx = 0; - std::size_t c2_last_idx = 0; - // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) - while (true) - { - if (parent_is_c1) - { - CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); - - LayerExtension c1_layer_extension; - fcmp::hash_layer(c2, - c1, - (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], - (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], - c2_layer_extensions_out[c2_last_idx], - c1_layer_extension); - - c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); - - // Check if we just added the root - if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c2_last_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); - - LayerExtension c2_layer_extension; - fcmp::hash_layer(c1, - c2, - (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], - (c2_last_chunks.size() <= c2_last_idx) ? 
nullptr : &c2_last_chunks[c2_last_idx], - c1_layer_extensions_out[c1_last_idx], - c2_layer_extension); - - c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c1_last_idx; - } - - parent_is_c1 = !parent_is_c1; - } - } - - // TODO: this is only useful for testsing, can't fit entire tree in memory - template - void extend_tree(const TreeExtension &tree_extension, - const C1 &c1, - const C2 &c2, - Tree &tree_inout) - { - // Add the leaves - CHECK_AND_ASSERT_THROW_MES((tree_inout.leaves.size() * LEAF_TUPLE_SIZE) == tree_extension.leaves.start_idx, - "unexpected leaf start idx"); - - tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); - for (const auto &leaf : tree_extension.leaves.tuples) - { - tree_inout.leaves.emplace_back(LeafTuple{ - .O_x = c2.clone(leaf.O_x), - .I_x = c2.clone(leaf.I_x), - .C_x = c2.clone(leaf.C_x) - }); - } - - // Add the layers - const auto &c2_extensions = tree_extension.c2_layer_extensions; - const auto &c1_extensions = tree_extension.c1_layer_extensions; - CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); - - bool use_c2 = true; - std::size_t c2_idx = 0; - std::size_t c1_idx = 0; - for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) - { - if (use_c2) - { - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); - const LayerExtension &c2_ext = c2_extensions[c2_idx]; - - CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); - - CHECK_AND_ASSERT_THROW_MES(c2_idx <= tree_inout.c2_layers.size(), "missing c2 layer"); - if (tree_inout.c2_layers.size() == c2_idx) - tree_inout.c2_layers.emplace_back(Layer{}); - - auto &c2_inout = tree_inout.c2_layers[c2_idx]; - - const bool started_after_tip = (c2_inout.size() 
== c2_ext.start_idx); - const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); - CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start"); - - // We updated the last hash - if (started_at_tip) - c2_inout.back() = c2.clone(c2_ext.hashes.front()); - - for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i) - c2_inout.emplace_back(c2.clone(c2_ext.hashes[i])); - - ++c2_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); - const fcmp::LayerExtension &c1_ext = c1_extensions[c1_idx]; - - CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); - - CHECK_AND_ASSERT_THROW_MES(c1_idx <= tree_inout.c1_layers.size(), "missing c1 layer"); - if (tree_inout.c1_layers.size() == c1_idx) - tree_inout.c1_layers.emplace_back(Layer{}); - - auto &c1_inout = tree_inout.c1_layers[c1_idx]; - - const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); - const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); - CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start"); - - // We updated the last hash - if (started_at_tip) - c1_inout.back() = c1.clone(c1_ext.hashes.front()); - - for (std::size_t i = started_at_tip ? 
1 : 0; i < c1_ext.hashes.size(); ++i) - c1_inout.emplace_back(c1.clone(c1_ext.hashes[i])); - - ++c1_idx; - } - - use_c2 = !use_c2; - } - - // existing tree should be valid - // assert(validate_tree(existing_tree_inout, c1, c2)); - } - - template - bool validate_layer(const C_PARENT &c_parent, - const Layer &parents, - const std::vector &child_scalars, - const std::size_t max_chunk_size) - { - // Hash chunk of children scalars, then see if the hash matches up to respective parent - std::size_t chunk_start_idx = 0; - for (std::size_t i = 0; i < parents.size(); ++i) - { - CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); - const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); - CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); - - const typename C_PARENT::Point &parent = parents[i]; - - const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; - - const typename C_PARENT::Point chunk_hash = get_new_parent(c_parent, chunk); - - const auto actual_bytes = c_parent.to_bytes(parent); - const auto expected_bytes = c_parent.to_bytes(chunk_hash); - CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); - - chunk_start_idx += chunk_size; - } - - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); - - return true; - } - - template - bool validate_tree(const Tree &tree, const C1 &c1, const C2 &c2) - { - const auto &leaves = tree.leaves; - const auto &c1_layers = tree.c1_layers; - const auto &c2_layers = tree.c2_layers; - - CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree"); - CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); - CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == 
(c1_layers.size() + 1), - false, "unexpected mismatch of c2 and c1 layers"); - - // Verify root has 1 member in it - const bool c2_is_root = c2_layers.size() > c1_layers.size(); - CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, - "root must have 1 member in it"); - - // Iterate from root down to layer above leaves, and check hashes match up correctly - bool parent_is_c2 = c2_is_root; - std::size_t c2_idx = c2_layers.size() - 1; - std::size_t c1_idx = c1_layers.empty() ? 0 : (c1_layers.size() - 1); - for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) - { - if (parent_is_c2) - { - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); - - const Layer &parents = c2_layers[c2_idx]; - const Layer &children = c1_layers[c1_idx]; - - CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); - CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); - - std::vector child_scalars; - extend_scalars_from_cycle_points(c1, children, child_scalars); - - const bool valid = validate_layer(c2, parents, child_scalars, c2.WIDTH); - - CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); - - --c2_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); - - const Layer &parents = c1_layers[c1_idx]; - const Layer &children = c2_layers[c2_idx]; - - CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); - CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); - - std::vector child_scalars; - extend_scalars_from_cycle_points(c2, children, child_scalars); - - const bool valid = validate_layer(c1, 
parents, child_scalars, c1.WIDTH); - - CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); - - --c1_idx; - } - - parent_is_c2 = !parent_is_c2; - } - - // Now validate leaves - return validate_layer(c2, c2_layers[0], flatten_leaves(leaves), LEAF_LAYER_CHUNK_SIZE); - } -} diff --git a/src/fcmp/fcmp.cpp b/src/fcmp/tower_cycle_types.cpp similarity index 58% rename from src/fcmp/fcmp.cpp rename to src/fcmp/tower_cycle_types.cpp index de3c72ba730..4069e1beda7 100644 --- a/src/fcmp/fcmp.cpp +++ b/src/fcmp/tower_cycle_types.cpp @@ -26,61 +26,45 @@ // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "fcmp.h" -#include "misc_log_ex.h" +#include "tower_cycle_types.h" namespace fcmp { - -// TODO: move into its own fcmp_crypto file -static SeleneScalar ed_25519_point_to_selene_scalar(const crypto::ec_point &point) -{ - static_assert(sizeof(fcmp::RustEd25519Point) == sizeof(crypto::ec_point), - "expected same size ed25519 point to rust representation"); - - // TODO: implement reading just the x coordinate of ed25519 point in C/C++ - fcmp::RustEd25519Point rust_point; - memcpy(&rust_point, &point, sizeof(fcmp::RustEd25519Point)); - return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); -}; - -// TODO: move into its own fcmp_crypto file -LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) +namespace tower_cycle { - crypto::ec_point I; - crypto::derive_key_image_generator(O, I); - return LeafTuple{ - .O_x = ed_25519_point_to_selene_scalar(O), - .I_x = ed_25519_point_to_selene_scalar(I), - .C_x = ed_25519_point_to_selene_scalar(C) - }; -} - -// TODO: move into its own fcmp_crypto file -std::vector flatten_leaves(const std::vector &leaves) +namespace helios { - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - - for (const auto &l : 
leaves) - { - // TODO: implement without cloning - flattened_leaves.emplace_back(fcmp_rust::clone_selene_scalar(l.O_x)); - flattened_leaves.emplace_back(fcmp_rust::clone_selene_scalar(l.I_x)); - flattened_leaves.emplace_back(fcmp_rust::clone_selene_scalar(l.C_x)); - } - - return flattened_leaves; -}; - +//---------------------------------------------------------------------------------------------------------------------- SeleneScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const { return fcmp_rust::helios_point_to_selene_scalar(point); }; +//---------------------------------------------------------------------------------------------------------------------- +} //namespace helios +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +namespace selene +{ +//---------------------------------------------------------------------------------------------------------------------- +SeleneScalar Selene::ed_25519_point_to_scalar(const crypto::ec_point &point) +{ + static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), + "expected same size ed25519 point to rust representation"); + // TODO: implement reading just the x coordinate of ed25519 point in C/C++ + fcmp::tower_cycle::RustEd25519Point rust_point; + memcpy(&rust_point, &point, sizeof(fcmp::tower_cycle::RustEd25519Point)); + return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); +}; +//---------------------------------------------------------------------------------------------------------------------- HeliosScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const { return fcmp_rust::selene_point_to_helios_scalar(point); }; +//---------------------------------------------------------------------------------------------------------------------- +} //namespace selene 
+//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +} //namespace curves } //namespace fcmp diff --git a/src/fcmp/tower_cycle_types.h b/src/fcmp/tower_cycle_types.h new file mode 100644 index 00000000000..3660aeb166e --- /dev/null +++ b/src/fcmp/tower_cycle_types.h @@ -0,0 +1,164 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "crypto/crypto.h" +#include "fcmp_rust/cxx.h" +#include "fcmp_rust/fcmp_rust.h" +#include "string_tools.h" + +#include + +namespace fcmp +{ +namespace tower_cycle +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +using RustEd25519Point = std::array; + +// Need to forward declare Scalar types for point_to_cycle_scalar below +using SeleneScalar = rust::Box; +using HeliosScalar = rust::Box; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +namespace helios +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// TODO: Curve classes that inherit from a parent +static struct Helios final +{ + using Generators = rust::Box; + using Scalar = HeliosScalar; + using Point = rust::Box; + using Chunk = rust::Slice; + + // TODO: static constants + const Generators GENERATORS = fcmp_rust::random_helios_generators(); + const Point HASH_INIT_POINT = 
fcmp_rust::random_helios_hash_init_point(); + + // TODO: use correct value + static const std::size_t WIDTH = 5; + + // Helios point x-coordinates are Selene scalars + SeleneScalar point_to_cycle_scalar(const Point &point) const; + + Point hash_grow( + const Generators &generators, + const Point &existing_hash, + const std::size_t offset, + const Chunk &prior_children, + const Chunk &new_children) const + { + return fcmp_rust::hash_grow_helios( + generators, + existing_hash, + offset, + prior_children, + new_children); + } + + Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_helios_scalar(scalar); } + Point clone(const Point &point) const { return fcmp_rust::clone_helios_point(point); } + + Scalar zero_scalar() const { return fcmp_rust::helios_zero_scalar(); } + + std::array to_bytes(const Scalar &scalar) const + { return fcmp_rust::helios_scalar_to_bytes(scalar); } + std::array to_bytes(const Point &point) const + { return fcmp_rust::helios_point_to_bytes(point); } + + std::string to_string(const Scalar &scalar) const + { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } + std::string to_string(const Point &point) const + { return epee::string_tools::pod_to_hex(to_bytes(point)); } +} HELIOS; +}//namespace helios +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +namespace selene +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +static struct Selene final +{ + using Generators = rust::Box; + using Scalar = SeleneScalar; + using Point = rust::Box; + using Chunk = rust::Slice; + + // TODO: static constants + const Generators GENERATORS = 
fcmp_rust::random_selene_generators(); + const Point HASH_INIT_POINT = fcmp_rust::random_selene_hash_init_point(); + + // TODO: use correct value + static const std::size_t WIDTH = 5; + + // Ed25519 point x-coordinates are Selene scalars + SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); + + // Selene point x-coordinates are Helios scalars + HeliosScalar point_to_cycle_scalar(const Point &point) const; + + Point hash_grow( + const Generators &generators, + const Point &existing_hash, + const std::size_t offset, + const Chunk &prior_children, + const Chunk &new_children) const + { + return fcmp_rust::hash_grow_selene( + generators, + existing_hash, + offset, + prior_children, + new_children); + }; + + Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_selene_scalar(scalar); } + Point clone(const Point &point) const { return fcmp_rust::clone_selene_point(point); } + + Scalar zero_scalar() const { return fcmp_rust::selene_zero_scalar(); } + + std::array to_bytes(const Scalar &scalar) const + { return fcmp_rust::selene_scalar_to_bytes(scalar); } + std::array to_bytes(const Point &point) const + { return fcmp_rust::selene_point_to_bytes(point); } + + std::string to_string(const Scalar &scalar) const + { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } + std::string to_string(const Point &point) const + { return epee::string_tools::pod_to_hex(to_bytes(point)); } +} SELENE; +}// namespace selene +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +}//namespace curves +}//namespace fcmp diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index cdc188e1620..67ce66a1666 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -41,6 +41,7 @@ set(unit_tests_sources chacha.cpp 
checkpoints.cpp command_line.cpp + curve_trees.cpp crypto.cpp decompose_amount_into_digits.cpp device.cpp @@ -51,7 +52,6 @@ set(unit_tests_sources epee_serialization.cpp epee_utils.cpp expect.cpp - fcmp_tree.cpp json_serialization.cpp get_xtype_from_string.cpp hashchain.cpp diff --git a/tests/unit_tests/fcmp_tree.cpp b/tests/unit_tests/curve_trees.cpp similarity index 58% rename from tests/unit_tests/fcmp_tree.cpp rename to tests/unit_tests/curve_trees.cpp index 4892eb6fd51..07a98c787ca 100644 --- a/tests/unit_tests/fcmp_tree.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -28,14 +28,15 @@ #include "gtest/gtest.h" -#include "fcmp/fcmp.h" +#include "fcmp/curve_trees.h" +#include "fcmp/tower_cycle_types.h" #include "misc_log_ex.h" #include -static const fcmp::Leaves generate_leaves(const std::size_t num_leaves) +static const fcmp::curve_trees::Leaves generate_leaves(const std::size_t num_leaves) { - std::vector tuples; + std::vector tuples; tuples.reserve(num_leaves); for (std::size_t i = 0; i < num_leaves; ++i) @@ -46,16 +47,16 @@ static const fcmp::Leaves generate_leaves(const std::size_t num_leaves) crypto::generate_keys(O, o, o, false); crypto::generate_keys(C, c, c, false); - tuples.emplace_back(fcmp::output_to_leaf_tuple(O, C)); + tuples.emplace_back(fcmp::curve_trees::output_to_leaf_tuple(O, C)); } - return fcmp::Leaves{ + return fcmp::curve_trees::Leaves{ .start_idx = 0, .tuples = std::move(tuples) }; } -static void log_tree_extension(const fcmp::TreeExtension &tree_extension) +static void log_tree_extension(const fcmp::curve_trees::TreeExtension &tree_extension) { const auto &c1_extensions = tree_extension.c1_layer_extensions; const auto &c2_extensions = tree_extension.c2_layer_extensions; @@ -68,11 +69,12 @@ static void log_tree_extension(const fcmp::TreeExtension &c2_layer = c2_extensions[c2_idx]; + const fcmp::curve_trees::LayerExtension &c2_layer = c2_extensions[c2_idx]; MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); for (std::size_t j 
= 0; j < c2_layer.hashes.size(); ++j) - MDEBUG("Hash idx: " << (j + c2_layer.start_idx) << " , hash: " << fcmp::SELENE.to_string(c2_layer.hashes[j])); + MDEBUG("Hash idx: " << (j + c2_layer.start_idx) << " , hash: " + << fcmp::tower_cycle::selene::SELENE.to_string(c2_layer.hashes[j])); ++c2_idx; } @@ -96,11 +99,12 @@ static void log_tree_extension(const fcmp::TreeExtension &c1_layer = c1_extensions[c1_idx]; + const fcmp::curve_trees::LayerExtension &c1_layer = c1_extensions[c1_idx]; MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) - MDEBUG("Hash idx: " << (j + c1_layer.start_idx) << " , hash: " << fcmp::HELIOS.to_string(c1_layer.hashes[j])); + MDEBUG("Hash idx: " << (j + c1_layer.start_idx) << " , hash: " + << fcmp::tower_cycle::helios::HELIOS.to_string(c1_layer.hashes[j])); ++c1_idx; } @@ -109,7 +113,7 @@ static void log_tree_extension(const fcmp::TreeExtension &tree) +static void log_tree(const fcmp::curve_trees::Tree &tree) { MDEBUG("Tree has " << tree.leaves.size() << " leaves, " << tree.c1_layers.size() << " helios layers, " << tree.c2_layers.size() << " selene layers"); @@ -118,9 +122,9 @@ static void log_tree(const fcmp::Tree &tree) { const auto &leaf = tree.leaves[i]; - const auto O_x = fcmp::SELENE.to_string(leaf.O_x); - const auto I_x = fcmp::SELENE.to_string(leaf.I_x); - const auto C_x = fcmp::SELENE.to_string(leaf.C_x); + const auto O_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.O_x); + const auto I_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.I_x); + const auto C_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.C_x); MDEBUG("Leaf idx " << i << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); } @@ -134,11 +138,11 @@ static void log_tree(const fcmp::Tree &tree) { CHECK_AND_ASSERT_THROW_MES(c2_idx < tree.c2_layers.size(), "unexpected c2 layer"); - const fcmp::Layer &c2_layer = tree.c2_layers[c2_idx]; + const fcmp::curve_trees::Layer 
&c2_layer = tree.c2_layers[c2_idx]; MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) - MDEBUG("Hash idx: " << j << " , hash: " << fcmp::SELENE.to_string(c2_layer[j])); + MDEBUG("Hash idx: " << j << " , hash: " << fcmp::tower_cycle::selene::SELENE.to_string(c2_layer[j])); ++c2_idx; } @@ -146,11 +150,11 @@ static void log_tree(const fcmp::Tree &tree) { CHECK_AND_ASSERT_THROW_MES(c1_idx < tree.c1_layers.size(), "unexpected c1 layer"); - const fcmp::Layer &c1_layer = tree.c1_layers[c1_idx]; + const fcmp::curve_trees::Layer &c1_layer = tree.c1_layers[c1_idx]; MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) - MDEBUG("Hash idx: " << j << " , hash: " << fcmp::HELIOS.to_string(c1_layer[j])); + MDEBUG("Hash idx: " << j << " , hash: " << fcmp::tower_cycle::helios::HELIOS.to_string(c1_layer[j])); ++c1_idx; } @@ -159,7 +163,7 @@ static void log_tree(const fcmp::Tree &tree) } } -static void log_last_chunks(const fcmp::LastChunks &last_chunks) +static void log_last_chunks(const fcmp::curve_trees::LastChunks &last_chunks) { const auto &c1_last_chunks = last_chunks.c1_last_chunks; const auto &c2_last_chunks = last_chunks.c2_last_chunks; @@ -176,11 +180,11 @@ static void log_last_chunks(const fcmp::LastChunks & { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_chunks.size(), "unexpected c2 layer"); - const fcmp::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; + const fcmp::curve_trees::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << fcmp::SELENE.to_string(last_chunk.last_child) - << " , last_parent: " << fcmp::SELENE.to_string(last_chunk.last_parent) + << " , last_child: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_child) + << " , last_parent: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_parent) << " , 
child_layer_size: " << last_chunk.child_layer_size << " , parent_layer_size: " << last_chunk.parent_layer_size); @@ -190,11 +194,11 @@ static void log_last_chunks(const fcmp::LastChunks & { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_chunks.size(), "unexpected c1 layer"); - const fcmp::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; + const fcmp::curve_trees::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << fcmp::HELIOS.to_string(last_chunk.last_child) - << " , last_parent: " << fcmp::HELIOS.to_string(last_chunk.last_parent) + << " , last_child: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_child) + << " , last_parent: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_parent) << " , child_layer_size: " << last_chunk.child_layer_size << " , parent_layer_size: " << last_chunk.parent_layer_size); @@ -207,29 +211,27 @@ static void log_last_chunks(const fcmp::LastChunks & TEST(fcmp_tree, grow_tree) { - // TODO: 1 .. 
std::pow(fcmp::SELENE.WIDTH, 5)+2 const std::vector N_LEAVES{ 1, 2, 3, - fcmp::SELENE.WIDTH - 1, - fcmp::SELENE.WIDTH, - fcmp::SELENE.WIDTH + 1, - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2) - 1, - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2), - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 2) + 1, - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 3), - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 4), - (std::size_t)std::pow(fcmp::SELENE.WIDTH, 5) + fcmp::tower_cycle::selene::SELENE.WIDTH - 1, + fcmp::tower_cycle::selene::SELENE.WIDTH, + fcmp::tower_cycle::selene::SELENE.WIDTH + 1, + (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 2) - 1, + (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 2), + (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 2) + 1, + (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 3), + (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 4) }; - for (const auto &init_leaves : N_LEAVES) + for (const std::size_t init_leaves : N_LEAVES) { - for (const auto &ext_leaves : N_LEAVES) + for (const std::size_t ext_leaves : N_LEAVES) { MDEBUG("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); - fcmp::Tree global_tree; + fcmp::curve_trees::Tree global_tree; // TODO: use a class that's initialized with the curve cycle and don't need to call templated functions with curve instances every time @@ -237,26 +239,26 @@ TEST(fcmp_tree, grow_tree) { MDEBUG("Adding " << init_leaves << " leaves to tree"); - const auto tree_extension = fcmp::get_tree_extension( - fcmp::LastChunks{}, + const auto tree_extension = fcmp::curve_trees::get_tree_extension( + fcmp::curve_trees::LastChunks{}, generate_leaves(init_leaves), - fcmp::HELIOS, - fcmp::SELENE); + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE); log_tree_extension(tree_extension); - fcmp::extend_tree( + fcmp::curve_trees::extend_tree( tree_extension, - fcmp::HELIOS, - fcmp::SELENE, + 
fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE, global_tree); log_tree(global_tree); - const bool validated = fcmp::validate_tree( + const bool validated = fcmp::curve_trees::validate_tree( global_tree, - fcmp::HELIOS, - fcmp::SELENE); + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE); ASSERT_TRUE(validated); @@ -267,33 +269,33 @@ TEST(fcmp_tree, grow_tree) { MDEBUG("Extending tree by " << ext_leaves << " leaves"); - const auto last_chunks = fcmp::get_last_chunks( - fcmp::HELIOS, - fcmp::SELENE, + const auto last_chunks = fcmp::curve_trees::get_last_chunks( + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE, global_tree); log_last_chunks(last_chunks); - const auto tree_extension = fcmp::get_tree_extension( + const auto tree_extension = fcmp::curve_trees::get_tree_extension( last_chunks, generate_leaves(ext_leaves), - fcmp::HELIOS, - fcmp::SELENE); + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE); log_tree_extension(tree_extension); - fcmp::extend_tree( + fcmp::curve_trees::extend_tree( tree_extension, - fcmp::HELIOS, - fcmp::SELENE, + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE, global_tree); log_tree(global_tree); - const bool validated = fcmp::validate_tree( + const bool validated = fcmp::curve_trees::validate_tree( global_tree, - fcmp::HELIOS, - fcmp::SELENE); + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE); ASSERT_TRUE(validated); From 5103a94ee9a26c7a1fba9c789cac88a5fe4f7e59 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 20 May 2024 17:40:57 -0700 Subject: [PATCH 007/127] template all curve_trees types & funcs, rename tower_cycle_types to tower_cycle --- src/fcmp/CMakeLists.txt | 3 +- src/fcmp/curve_trees.cpp | 66 --------------- src/fcmp/curve_trees.h | 84 ++++++++++++++----- ...{tower_cycle_types.cpp => tower_cycle.cpp} | 4 +- .../{tower_cycle_types.h => tower_cycle.h} | 2 +- tests/unit_tests/curve_trees.cpp 
| 19 +++-- 6 files changed, 78 insertions(+), 100 deletions(-) delete mode 100644 src/fcmp/curve_trees.cpp rename src/fcmp/{tower_cycle_types.cpp => tower_cycle.cpp} (98%) rename src/fcmp/{tower_cycle_types.h => tower_cycle.h} (99%) diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index 2c4543e192e..8af1b1f6bef 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -27,8 +27,7 @@ # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. set(fcmp_sources - curve_trees.cpp - tower_cycle_types.cpp) + tower_cycle.cpp) monero_find_all_headers(fcmp_headers "${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp deleted file mode 100644 index 3e58f484d43..00000000000 --- a/src/fcmp/curve_trees.cpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2024, The Monero Project -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without modification, are -// permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of -// conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list -// of conditions and the following disclaimer in the documentation and/or other -// materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be -// used to endorse or promote products derived from this software without specific -// prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL -// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "curve_trees.h" - -namespace fcmp -{ -namespace curve_trees -{ - -LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) -{ - crypto::ec_point I; - crypto::derive_key_image_generator(O, I); - - return LeafTuple{ - .O_x = tower_cycle::selene::SELENE.ed_25519_point_to_scalar(O), - .I_x = tower_cycle::selene::SELENE.ed_25519_point_to_scalar(I), - .C_x = tower_cycle::selene::SELENE.ed_25519_point_to_scalar(C) - }; -} - -// TODO: move into curves tree file -std::vector flatten_leaves(const std::vector &leaves) -{ - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - - for (const auto &l : leaves) - { - // TODO: implement without cloning - flattened_leaves.emplace_back(tower_cycle::selene::SELENE.clone(l.O_x)); - flattened_leaves.emplace_back(tower_cycle::selene::SELENE.clone(l.I_x)); - flattened_leaves.emplace_back(tower_cycle::selene::SELENE.clone(l.C_x)); - } - - return flattened_leaves; -}; - -} //namespace curve_trees -} //namespace fcmp diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 49a7bafd8cb..5190aa5dc08 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -30,7 +30,7 @@ #include "crypto/crypto.h" #include "misc_log_ex.h" -#include "tower_cycle_types.h" +#include "tower_cycle.h" #include @@ -40,16 +40,16 @@ namespace curve_trees { -// TODO: template all the curve things // TODO: CurveTree class instantiated with 
the curves and widths // TODO: move "TEST" functions -// TODO: template +// TODO: make part of CurveTrees class +template struct LeafTuple final { - tower_cycle::selene::Selene::Scalar O_x; - tower_cycle::selene::Selene::Scalar I_x; - tower_cycle::selene::Selene::Scalar C_x; + typename C::Scalar O_x; + typename C::Scalar I_x; + typename C::Scalar C_x; }; static const std::size_t LEAF_TUPLE_SIZE = 3; @@ -57,15 +57,18 @@ static const std::size_t LEAF_TUPLE_SIZE = 3; // TODO: make this a const class member that's set on initialization static const std::size_t LEAF_LAYER_CHUNK_WIDTH = LEAF_TUPLE_SIZE * tower_cycle::selene::SELENE.WIDTH; +// TODO: make part of CurveTrees class // Tree structure +template struct Leaves final { // Starting index in the leaf layer - std::size_t start_idx; + std::size_t start_idx; // Contiguous leaves in a tree that start at the start_idx - std::vector tuples; + std::vector> tuples; }; +// TODO: make part of CurveTrees class // A layer of contiguous hashes starting from a specific start_idx in the tree template struct LayerExtension final @@ -74,15 +77,17 @@ struct LayerExtension final std::vector hashes; }; +// TODO: make part of CurveTrees class // A struct useful to extend an existing tree, layers alternate between C1 and C2 template struct TreeExtension final { - Leaves leaves; + Leaves leaves; std::vector> c1_layer_extensions; std::vector> c2_layer_extensions; }; +// TODO: make part of CurveTrees class // Useful data from the last chunk in a layer template struct LastChunkData final @@ -99,6 +104,7 @@ struct LastChunkData final /*TODO: const*/ std::size_t parent_layer_size; }; +// TODO: make part of CurveTrees class template struct LastChunks final { @@ -106,9 +112,40 @@ struct LastChunks final std::vector> c2_last_chunks; }; -// TODO: template -LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C); -std::vector flatten_leaves(const std::vector &leaves); +// TODO: make part of CurveTrees class 
+template +LeafTuple output_to_leaf_tuple(const C2 &curve, + const crypto::public_key &O, + const crypto::public_key &C) +{ + crypto::ec_point I; + crypto::derive_key_image_generator(O, I); + + return LeafTuple{ + .O_x = curve.ed_25519_point_to_scalar(O), + .I_x = curve.ed_25519_point_to_scalar(I), + .C_x = curve.ed_25519_point_to_scalar(C) + }; +}; + +// TODO: make part of CurveTrees class +template +std::vector flatten_leaves(const C &curve, + const std::vector> &leaves) +{ + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (const auto &l : leaves) + { + // TODO: implement without cloning + flattened_leaves.emplace_back(curve.clone(l.O_x)); + flattened_leaves.emplace_back(curve.clone(l.I_x)); + flattened_leaves.emplace_back(curve.clone(l.C_x)); + } + + return flattened_leaves; +}; template static void extend_scalars_from_cycle_points(const C_POINTS &curve, @@ -125,7 +162,7 @@ static void extend_scalars_from_cycle_points(const C_POINTS &curve, } } -// TODO: move to tower_cycle_types +// TODO: move to tower_cycle template static void extend_zeroes(const C &curve, const std::size_t num_zeroes, @@ -154,6 +191,7 @@ static typename C::Point get_new_parent(const C &curve, ); } +// TODO: make part of CurveTrees class template static typename C::Point get_first_leaf_parent(const C &curve, const typename C::Chunk &new_children, @@ -176,6 +214,7 @@ static typename C::Point get_first_leaf_parent(const C &curve, ); } +// TODO: make part of CurveTrees class template static typename C::Point get_first_non_leaf_parent(const C &curve, const typename C::Chunk &new_children, @@ -222,6 +261,7 @@ static typename C::Point get_first_non_leaf_parent(const C &curve, } // TODO: look into consolidating hash_layer and hash_leaf_layer into 1 function +// TODO: make part of CurveTrees class template void hash_layer(const C_CHILD &c_child, const C_PARENT &c_parent, @@ -322,10 +362,11 @@ void hash_layer(const C_CHILD &c_child, } } +// TODO: make 
part of CurveTrees class template void hash_leaf_layer(const C2 &c2, const LastChunkData *last_chunk_ptr, - const Leaves &leaves, + const Leaves &leaves, LayerExtension &parents_out) { parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; @@ -335,7 +376,7 @@ void hash_leaf_layer(const C2 &c2, return; // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] - const std::vector children = flatten_leaves(leaves.tuples); + const std::vector children = flatten_leaves(c2, leaves.tuples); const std::size_t max_chunk_size = LEAF_LAYER_CHUNK_WIDTH; const std::size_t offset = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->child_offset; @@ -385,9 +426,10 @@ void hash_leaf_layer(const C2 &c2, } } +// TODO: make part of CurveTrees class template TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, - const Leaves &new_leaves, + const Leaves &new_leaves, const C1 &c1, const C2 &c2) { @@ -409,7 +451,7 @@ TreeExtension get_tree_extension(const LastChunks &existing_last tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); for (const auto &leaf : new_leaves.tuples) { - tree_extension.leaves.tuples.emplace_back(LeafTuple{ + tree_extension.leaves.tuples.emplace_back(LeafTuple{ .O_x = c2.clone(leaf.O_x), .I_x = c2.clone(leaf.I_x), .C_x = c2.clone(leaf.C_x) @@ -494,7 +536,7 @@ using Layer = std::vector; template struct Tree final { - std::vector leaves; + std::vector> leaves; std::vector> c1_layers; std::vector> c2_layers; }; @@ -502,7 +544,7 @@ struct Tree final // TEST template LastChunkData get_last_leaf_chunk(const C2 &c2, - const std::vector &leaves, + const std::vector> &leaves, const std::vector &parent_layer) { CHECK_AND_ASSERT_THROW_MES(!leaves.empty(), "empty leaf layer"); @@ -654,7 +696,7 @@ void extend_tree(const TreeExtension &tree_extension, tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); for (const auto 
&leaf : tree_extension.leaves.tuples) { - tree_inout.leaves.emplace_back(LeafTuple{ + tree_inout.leaves.emplace_back(LeafTuple{ .O_x = c2.clone(leaf.O_x), .I_x = c2.clone(leaf.I_x), .C_x = c2.clone(leaf.C_x) @@ -832,7 +874,7 @@ bool validate_tree(const Tree &tree, const C1 &c1, const C2 &c2) } // Now validate leaves - return validate_layer(c2, c2_layers[0], flatten_leaves(leaves), LEAF_LAYER_CHUNK_WIDTH); + return validate_layer(c2, c2_layers[0], flatten_leaves(c2, leaves), LEAF_LAYER_CHUNK_WIDTH); } } //namespace curve_trees diff --git a/src/fcmp/tower_cycle_types.cpp b/src/fcmp/tower_cycle.cpp similarity index 98% rename from src/fcmp/tower_cycle_types.cpp rename to src/fcmp/tower_cycle.cpp index 4069e1beda7..e9012566780 100644 --- a/src/fcmp/tower_cycle_types.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -26,7 +26,7 @@ // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "tower_cycle_types.h" +#include "tower_cycle.h" namespace fcmp { @@ -47,7 +47,7 @@ SeleneScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const namespace selene { //---------------------------------------------------------------------------------------------------------------------- -SeleneScalar Selene::ed_25519_point_to_scalar(const crypto::ec_point &point) +SeleneScalar Selene::ed_25519_point_to_scalar(const crypto::ec_point &point) const { static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), "expected same size ed25519 point to rust representation"); diff --git a/src/fcmp/tower_cycle_types.h b/src/fcmp/tower_cycle.h similarity index 99% rename from src/fcmp/tower_cycle_types.h rename to src/fcmp/tower_cycle.h index 3660aeb166e..36177f07245 100644 --- a/src/fcmp/tower_cycle_types.h +++ b/src/fcmp/tower_cycle.h @@ -122,7 +122,7 @@ static struct Selene final static const std::size_t WIDTH = 5; // Ed25519 point x-coordinates are Selene scalars - 
SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); + SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) const; // Selene point x-coordinates are Helios scalars HeliosScalar point_to_cycle_scalar(const Point &point) const; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 07a98c787ca..a8e7a44596c 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -29,14 +29,15 @@ #include "gtest/gtest.h" #include "fcmp/curve_trees.h" -#include "fcmp/tower_cycle_types.h" +#include "fcmp/tower_cycle.h" #include "misc_log_ex.h" #include -static const fcmp::curve_trees::Leaves generate_leaves(const std::size_t num_leaves) +template +static const fcmp::curve_trees::Leaves generate_leaves(const C2 &curve, const std::size_t num_leaves) { - std::vector tuples; + std::vector> tuples; tuples.reserve(num_leaves); for (std::size_t i = 0; i < num_leaves; ++i) @@ -47,10 +48,12 @@ static const fcmp::curve_trees::Leaves generate_leaves(const std::size_t num_lea crypto::generate_keys(O, o, o, false); crypto::generate_keys(C, c, c, false); - tuples.emplace_back(fcmp::curve_trees::output_to_leaf_tuple(O, C)); + auto leaf_tuple = fcmp::curve_trees::output_to_leaf_tuple(curve, O, C); + + tuples.emplace_back(std::move(leaf_tuple)); } - return fcmp::curve_trees::Leaves{ + return fcmp::curve_trees::Leaves{ .start_idx = 0, .tuples = std::move(tuples) }; @@ -209,7 +212,7 @@ static void log_last_chunks(const fcmp::curve_trees::LastChunks N_LEAVES{ 1, @@ -241,7 +244,7 @@ TEST(fcmp_tree, grow_tree) const auto tree_extension = fcmp::curve_trees::get_tree_extension( fcmp::curve_trees::LastChunks{}, - generate_leaves(init_leaves), + generate_leaves(fcmp::tower_cycle::selene::SELENE, init_leaves), fcmp::tower_cycle::helios::HELIOS, fcmp::tower_cycle::selene::SELENE); @@ -278,7 +281,7 @@ TEST(fcmp_tree, grow_tree) const auto tree_extension = fcmp::curve_trees::get_tree_extension( last_chunks, - 
generate_leaves(ext_leaves), + generate_leaves(fcmp::tower_cycle::selene::SELENE, ext_leaves), fcmp::tower_cycle::helios::HELIOS, fcmp::tower_cycle::selene::SELENE); From d9390c7b0804d5ffd59eb84b1525e8e433d77166 Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 21 May 2024 14:22:54 -0700 Subject: [PATCH 008/127] Implement CurveTrees & CurveTreesUnitTest classes to simplify callers --- src/fcmp/curve_trees.h | 1135 ++++++++++-------------------- src/fcmp/tower_cycle.h | 33 +- tests/unit_tests/curve_trees.cpp | 577 +++++++++++---- tests/unit_tests/curve_trees.h | 80 +++ 4 files changed, 944 insertions(+), 881 deletions(-) create mode 100644 tests/unit_tests/curve_trees.h diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 5190aa5dc08..a841c41e33b 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -34,848 +34,485 @@ #include -namespace fcmp -{ -namespace curve_trees -{ - - -// TODO: CurveTree class instantiated with the curves and widths -// TODO: move "TEST" functions - -// TODO: make part of CurveTrees class -template -struct LeafTuple final -{ - typename C::Scalar O_x; - typename C::Scalar I_x; - typename C::Scalar C_x; -}; - -static const std::size_t LEAF_TUPLE_SIZE = 3; +// forward declarations +class CurveTreesUnitTest; -// TODO: make this a const class member that's set on initialization -static const std::size_t LEAF_LAYER_CHUNK_WIDTH = LEAF_TUPLE_SIZE * tower_cycle::selene::SELENE.WIDTH; - -// TODO: make part of CurveTrees class -// Tree structure -template -struct Leaves final -{ - // Starting index in the leaf layer - std::size_t start_idx; - // Contiguous leaves in a tree that start at the start_idx - std::vector> tuples; -}; - -// TODO: make part of CurveTrees class -// A layer of contiguous hashes starting from a specific start_idx in the tree -template -struct LayerExtension final -{ - std::size_t start_idx; - std::vector hashes; -}; - -// TODO: make part of CurveTrees class -// A struct useful to extend an existing 
tree, layers alternate between C1 and C2 -template -struct TreeExtension final -{ - Leaves leaves; - std::vector> c1_layer_extensions; - std::vector> c2_layer_extensions; -}; - -// TODO: make part of CurveTrees class -// Useful data from the last chunk in a layer -template -struct LastChunkData final +namespace fcmp { - // The total number of children % child layer chunk width - /*TODO: const*/ std::size_t child_offset; - // The last child in the chunk (and therefore the last child in the child layer) - /*TODO: const*/ typename C::Scalar last_child; - // The hash of the last chunk of child scalars - /*TODO: const*/ typename C::Point last_parent; - // Total number of children in the child layer - /*TODO: const*/ std::size_t child_layer_size; - // Total number of hashes in the parent layer - /*TODO: const*/ std::size_t parent_layer_size; -}; -// TODO: make part of CurveTrees class +// TODO: longer descriptions template -struct LastChunks final -{ - std::vector> c1_last_chunks; - std::vector> c2_last_chunks; -}; - -// TODO: make part of CurveTrees class -template -LeafTuple output_to_leaf_tuple(const C2 &curve, - const crypto::public_key &O, - const crypto::public_key &C) +class CurveTrees { - crypto::ec_point I; - crypto::derive_key_image_generator(O, I); - - return LeafTuple{ - .O_x = curve.ed_25519_point_to_scalar(O), - .I_x = curve.ed_25519_point_to_scalar(I), - .C_x = curve.ed_25519_point_to_scalar(C) + friend class ::CurveTreesUnitTest; +public: + CurveTrees(const C1 &c1, const C2 &c2, const std::size_t c1_width, const std::size_t c2_width): + m_c1{c1}, + m_c2{c2}, + m_c1_width{c1_width}, + m_c2_width{c2_width}, + m_leaf_layer_chunk_width{LEAF_TUPLE_SIZE * c2_width} + { + assert(c1_width > 0); + assert(c2_width > 0); }; -}; - -// TODO: make part of CurveTrees class -template -std::vector flatten_leaves(const C &curve, - const std::vector> &leaves) -{ - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - for (const auto &l 
: leaves) +//member structs +public: + // Tuple that composes a single leaf in the tree + struct LeafTuple final { - // TODO: implement without cloning - flattened_leaves.emplace_back(curve.clone(l.O_x)); - flattened_leaves.emplace_back(curve.clone(l.I_x)); - flattened_leaves.emplace_back(curve.clone(l.C_x)); - } - - return flattened_leaves; -}; - -template -static void extend_scalars_from_cycle_points(const C_POINTS &curve, - const std::vector &points, - std::vector &scalars_out) -{ - scalars_out.reserve(scalars_out.size() + points.size()); + // Output ed25519 point x-coordinate + typename C2::Scalar O_x; + // Key image generator x-coordinate + typename C2::Scalar I_x; + // Commitment x-coordinate + typename C2::Scalar C_x; + }; + static const std::size_t LEAF_TUPLE_SIZE = 3; + static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); - for (const auto &point : points) + // Leaves in the tree + struct Leaves final { - // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ - typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); - scalars_out.push_back(std::move(scalar)); - } -} - -// TODO: move to tower_cycle -template -static void extend_zeroes(const C &curve, - const std::size_t num_zeroes, - std::vector &zeroes_inout) -{ - zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); - - for (std::size_t i = 0; i < num_zeroes; ++i) - zeroes_inout.emplace_back(curve.zero_scalar()); -} - -template -static typename C::Point get_new_parent(const C &curve, - const typename C::Chunk &new_children) -{ - // New parent means no prior children, fill priors with 0 - std::vector prior_children; - extend_zeroes(curve, new_children.size(), prior_children); - - return curve.hash_grow( - curve.GENERATORS, - curve.HASH_INIT_POINT, - 0,/*offset*/ - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); -} - -// TODO: make part of CurveTrees class 
-template -static typename C::Point get_first_leaf_parent(const C &curve, - const typename C::Chunk &new_children, - const LastChunkData *last_chunk_ptr) -{ - // If no last chunk exists, or if the last chunk is already full, then we can get a new parent - if (last_chunk_ptr == nullptr || last_chunk_ptr->child_offset == 0) - return get_new_parent(curve, new_children); - - // There won't be any existing children when growing the leaf layer, fill priors with 0 - std::vector prior_children; - extend_zeroes(curve, new_children.size(), prior_children); - - return curve.hash_grow( - curve.GENERATORS, - last_chunk_ptr->last_parent, - last_chunk_ptr->child_offset, - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); -} - -// TODO: make part of CurveTrees class -template -static typename C::Point get_first_non_leaf_parent(const C &curve, - const typename C::Chunk &new_children, - const bool child_layer_last_hash_updated, - const LastChunkData *last_chunk_ptr) -{ - // If no last chunk exists, we can get a new parent - if (last_chunk_ptr == nullptr) - return get_new_parent(curve, new_children); - - std::vector prior_children; - std::size_t offset = last_chunk_ptr->child_offset; + // Starting index in the leaf layer + std::size_t start_idx; + // Contiguous leaves in a tree that start at the start_idx + std::vector tuples; + }; - if (child_layer_last_hash_updated) + // A layer of contiguous hashes starting from a specific start_idx in the tree + template + struct LayerExtension final { - // If the last chunk has updated children in it, then we need to get the delta to the old children, and - // subtract the offset by 1 since we're updating the prior last hash - prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); - offset = offset > 0 ? 
(offset - 1) : (curve.WIDTH - 1); - - // Extend prior children by zeroes for any additional new children, since they must be new - if (new_children.size() > 1) - extend_zeroes(curve, new_children.size() - 1, prior_children); - } - else if (offset > 0) - { - // If we're updating the parent hash and no children were updated, then we're just adding new children - // to the existing last chunk and can fill priors with 0 - extend_zeroes(curve, new_children.size(), prior_children); - } - else - { - // If the last chunk is already full and isn't updated in any way, then we just get a new parent - return get_new_parent(curve, new_children); - } - - return curve.hash_grow( - curve.GENERATORS, - last_chunk_ptr->last_parent, - offset, - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); -} - -// TODO: look into consolidating hash_layer and hash_leaf_layer into 1 function -// TODO: make part of CurveTrees class -template -void hash_layer(const C_CHILD &c_child, - const C_PARENT &c_parent, - const LastChunkData *last_child_chunk_ptr, - const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children, - LayerExtension &parents_out) -{ - parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size; - parents_out.hashes.clear(); - - CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); - - const std::size_t max_chunk_size = c_parent.WIDTH; - std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->child_offset; + std::size_t start_idx; + std::vector hashes; + }; - // TODO: try to simplify the approach to avoid edge cases - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0) + // A struct useful to extend an existing tree + // - layers alternate between C1 and C2 + // - c2_layer_extensions[0] is first layer after leaves, then c1_layer_extensions[0], c2_layer_extensions[1], etc + struct TreeExtension final { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } + Leaves leaves; + std::vector> c1_layer_extensions; + std::vector> c2_layer_extensions; + }; - // If the child layer had its existing last hash updated, then we'll need to use the last hash's prior - // version in order to update the existing last parent hash in this layer - bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) - ? 
false - : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); + // Useful data from the last chunk in a layer + template + struct LastChunkData final + { + // The total number of children % child layer chunk width + /*TODO: const*/ std::size_t child_offset; + // The last child in the chunk (and therefore the last child in the child layer) + /*TODO: const*/ typename C::Scalar last_child; + // The hash of the last chunk of child scalars + /*TODO: const*/ typename C::Point last_parent; + // Total number of children in the child layer + /*TODO: const*/ std::size_t child_layer_size; + // Total number of hashes in the parent layer + /*TODO: const*/ std::size_t parent_layer_size; + }; - if (offset == 0 && child_layer_last_hash_updated) + // Last chunk data from each layer in the tree + // - layers alternate between C1 and C2 + // - c2_last_chunks[0] is first layer after leaves, then c1_last_chunks[0], then c2_last_chunks[1], etc + struct LastChunks final { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } + std::vector> c1_last_chunks; + std::vector> c2_last_chunks; + }; - // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent - // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk - // will start from there and may need 1 more to fill - CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); - if (child_layer_last_hash_updated) - offset = offset > 0 ? 
(offset - 1) : (max_chunk_size - 1); - - // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when - // hashing the *existing* root layer - std::vector child_scalars; - if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) +//member functions +public: + // TODO: move impl into cpp + LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const { - // We should be updating the existing root, there shouldn't be a last parent chunk - CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + crypto::ec_point I; + crypto::derive_key_image_generator(O, I); + + return LeafTuple{ + .O_x = m_c2.ed_25519_point_to_scalar(O), + .I_x = m_c2.ed_25519_point_to_scalar(I), + .C_x = m_c2.ed_25519_point_to_scalar(C) + }; + }; - // If the children don't already include the existing root at start_idx 0 (they would if the existing - // root was updated in the child layer), then we need to add it to the first chunk to be hashed - if (children.start_idx > 0) - child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); - } + // TODO: move impl into cpp + std::vector flatten_leaves(const std::vector &leaves) const + { + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - // Convert child points to scalars - extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); + for (const auto &l : leaves) + { + // TODO: implement without cloning + flattened_leaves.emplace_back(m_c2.clone(l.O_x)); + flattened_leaves.emplace_back(m_c2.clone(l.I_x)); + flattened_leaves.emplace_back(m_c2.clone(l.C_x)); + } - // See how many children we need to fill up the existing last chunk - std::size_t chunk_size = std::min(child_scalars.size(), max_chunk_size - offset); - MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() - << 
" , offset: " << offset); + return flattened_leaves; + }; - // Hash chunks of child scalars to create the parent hashes - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < child_scalars.size()) + // TODO: move impl into cpp + TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, const Leaves &new_leaves) { - const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; - - for (const auto &c : chunk) - MDEBUG("Hashing " << c_parent.to_string(c)); + TreeExtension tree_extension; - // Hash the chunk of children - typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 - ? get_first_non_leaf_parent(c_parent, chunk, child_layer_last_hash_updated, last_parent_chunk_ptr) - : get_new_parent(c_parent, chunk); + if (new_leaves.tuples.empty()) + return tree_extension; - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash) - << " , chunk_size: " << chunk_size); + const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; + const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); + // Set the leaf start idx + tree_extension.leaves.start_idx = c2_last_chunks.empty() + ? 
0 + : c2_last_chunks[0].child_layer_size; - // Advance to the next chunk - chunk_start_idx += chunk_size; + // Copy the leaves + // TODO: don't copy here + tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); + for (const auto &leaf : new_leaves.tuples) + { + tree_extension.leaves.tuples.emplace_back(LeafTuple{ + .O_x = m_c2.clone(leaf.O_x), + .I_x = m_c2.clone(leaf.I_x), + .C_x = m_c2.clone(leaf.C_x) + }); + } - // Prepare for next loop if there should be one - if (chunk_start_idx == child_scalars.size()) - break; + auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); - chunk_size = std::min(max_chunk_size, child_scalars.size() - chunk_start_idx); - } -} - -// TODO: make part of CurveTrees class -template -void hash_leaf_layer(const C2 &c2, - const LastChunkData *last_chunk_ptr, - const Leaves &leaves, - LayerExtension &parents_out) -{ - parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; - parents_out.hashes.clear(); + // Hash the leaf layer + LayerExtension parents; + this->hash_leaf_layer(c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], + new_leaves, + parents); - if (leaves.tuples.empty()) - return; + c2_layer_extensions_out.emplace_back(std::move(parents)); - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] - const std::vector children = flatten_leaves(c2, leaves.tuples); + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; - const std::size_t max_chunk_size = LEAF_LAYER_CHUNK_WIDTH; - const std::size_t offset = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->child_offset; + // Alternate between hashing c2 children, c1 children, c2, c1, ... + bool parent_is_c1 = true; - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; + // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) + while (true) + { + if (parent_is_c1) + { + CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + + LayerExtension c1_layer_extension; + this->hash_layer(m_c2, + m_c1, + (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], + (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], + c2_layer_extensions_out[c2_last_idx], + m_c1_width, + c1_layer_extension); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + + // Check if we just added the root + if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c2_last_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + + LayerExtension c2_layer_extension; + this->hash_layer(m_c1, + m_c2, + (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], + (c2_last_chunks.size() <= c2_last_idx) ? 
nullptr : &c2_last_chunks[c2_last_idx], + c1_layer_extensions_out[c1_last_idx], + m_c2_width, + c2_layer_extension); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c1_last_idx; + } + + parent_is_c1 = !parent_is_c1; + } } - // See how many new children are needed to fill up the existing last chunk - CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); - std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); - - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < children.size()) +private: + // TODO: move impl into cpp + template + typename C::Point get_new_parent(const C &curve, + const typename C::Chunk &new_children) const { - const auto chunk_start = children.data() + chunk_start_idx; - const typename C2::Chunk chunk{chunk_start, chunk_size}; - - for (const auto &c : chunk) - MDEBUG("Hashing " << c2.to_string(c)); - - // Hash the chunk of children - typename C2::Point chunk_hash = chunk_start_idx == 0 - ? 
get_first_leaf_parent(c2, chunk, last_chunk_ptr) - : get_new_parent(c2, chunk); - - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c2.to_string(chunk_hash) - << " , chunk_size: " << chunk_size); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); - - // Advance to the next chunk - chunk_start_idx += chunk_size; - - // Prepare for next loop if there should be one - if (chunk_start_idx == children.size()) - break; - - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < children.size(), "unexpected chunk start idx"); - chunk_size = std::min(max_chunk_size, children.size() - chunk_start_idx); + // New parent means no prior children, fill priors with 0 + std::vector prior_children; + fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + + return curve.hash_grow( + curve.GENERATORS, + curve.HASH_INIT_POINT, + 0,/*offset*/ + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); } -} -// TODO: make part of CurveTrees class -template -TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, - const Leaves &new_leaves, - const C1 &c1, - const C2 &c2) -{ - TreeExtension tree_extension; - - if (new_leaves.tuples.empty()) - return tree_extension; - - const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; - const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; - - // Set the leaf start idx - tree_extension.leaves.start_idx = c2_last_chunks.empty() - ? 
0 - : c2_last_chunks[0].child_layer_size; - - // Copy the leaves - // TODO: don't copy here - tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); - for (const auto &leaf : new_leaves.tuples) + // TODO: move impl into cpp + typename C2::Point get_first_leaf_parent(const typename C2::Chunk &new_children, + const LastChunkData *last_chunk_ptr) const { - tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = c2.clone(leaf.O_x), - .I_x = c2.clone(leaf.I_x), - .C_x = c2.clone(leaf.C_x) - }); + // If no last chunk exists, or if the last chunk is already full, then we can get a new parent + if (last_chunk_ptr == nullptr || last_chunk_ptr->child_offset == 0) + return get_new_parent(m_c2, new_children); + + // There won't be any existing children when growing the leaf layer, fill priors with 0 + std::vector prior_children; + fcmp::tower_cycle::extend_zeroes(m_c2, new_children.size(), prior_children); + + return m_c2.hash_grow( + m_c2.GENERATORS, + last_chunk_ptr->last_parent, + last_chunk_ptr->child_offset, + typename C2::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); } - auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; - auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; - - // Hash the leaf layer - LayerExtension parents; - hash_leaf_layer(c2, - c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], - new_leaves, - parents); - - c2_layer_extensions_out.emplace_back(std::move(parents)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - // Alternate between hashing c2 children, c1 children, c2, c1, ... 
- bool parent_is_c1 = true; - - std::size_t c1_last_idx = 0; - std::size_t c2_last_idx = 0; - // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) - while (true) + // TODO: move impl into cpp + template + typename C::Point get_first_non_leaf_parent(const C &curve, + const typename C::Chunk &new_children, + const std::size_t chunk_width, + const bool child_layer_last_hash_updated, + const LastChunkData *last_chunk_ptr) const { - if (parent_is_c1) - { - CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); - - LayerExtension c1_layer_extension; - hash_layer(c2, - c1, - (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], - (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], - c2_layer_extensions_out[c2_last_idx], - c1_layer_extension); + // If no last chunk exists, we can get a new parent + if (last_chunk_ptr == nullptr) + return get_new_parent(curve, new_children); - c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + std::vector prior_children; + std::size_t offset = last_chunk_ptr->child_offset; - // Check if we just added the root - if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c2_last_idx; - } - else + if (child_layer_last_hash_updated) { - CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); - - LayerExtension c2_layer_extension; - hash_layer(c1, - c2, - (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], - (c2_last_chunks.size() <= c2_last_idx) ? 
nullptr : &c2_last_chunks[c2_last_idx], - c1_layer_extensions_out[c1_last_idx], - c2_layer_extension); - - c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c1_last_idx; + // If the last chunk has updated children in it, then we need to get the delta to the old children, and + // subtract the offset by 1 since we're updating the prior last hash + prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); + offset = offset > 0 ? (offset - 1) : (chunk_width - 1); + + // Extend prior children by zeroes for any additional new children, since they must be new + if (new_children.size() > 1) + fcmp::tower_cycle::extend_zeroes(curve, new_children.size() - 1, prior_children); } - - parent_is_c1 = !parent_is_c1; - } -} - -// TEST -template -using Layer = std::vector; - -// TEST -// A complete tree, useful for testing (can't fit the whole tree in memory otherwise) -template -struct Tree final -{ - std::vector> leaves; - std::vector> c1_layers; - std::vector> c2_layers; -}; - -// TEST -template -LastChunkData get_last_leaf_chunk(const C2 &c2, - const std::vector> &leaves, - const std::vector &parent_layer) -{ - CHECK_AND_ASSERT_THROW_MES(!leaves.empty(), "empty leaf layer"); - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty leaf parent layer"); - - const std::size_t child_offset = (leaves.size() * LEAF_TUPLE_SIZE) % LEAF_LAYER_CHUNK_WIDTH; - - const typename C2::Scalar &last_child = leaves.back().C_x; - const typename C2::Point &last_parent = parent_layer.back(); - - return LastChunkData{ - .child_offset = child_offset, - .last_child = c2.clone(last_child), - .last_parent = c2.clone(last_parent), - .child_layer_size = leaves.size() * LEAF_TUPLE_SIZE, - .parent_layer_size = parent_layer.size() - }; -} - -// TEST -template -LastChunkData 
get_last_child_layer_chunk(const C_CHILD &c_child, - const C_PARENT &c_parent, - const std::vector &child_layer, - const std::vector &parent_layer) -{ - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "empty child layer"); - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "empty parent layer"); - - const std::size_t child_offset = child_layer.size() % c_parent.WIDTH; - - const typename C_CHILD::Point &last_child_point = child_layer.back(); - const typename C_PARENT::Scalar &last_child = c_child.point_to_cycle_scalar(last_child_point); - - const typename C_PARENT::Point &last_parent = parent_layer.back(); - - return LastChunkData{ - .child_offset = child_offset, - .last_child = c_parent.clone(last_child), - .last_parent = c_parent.clone(last_parent), - .child_layer_size = child_layer.size(), - .parent_layer_size = parent_layer.size() - }; -} - -// TODO: implement in the db, never want the entire tree in memory -// TEST -template -LastChunks get_last_chunks(const C1 &c1, - const C2 &c2, - const Tree &tree) -{ - const auto &leaves = tree.leaves; - const auto &c1_layers = tree.c1_layers; - const auto &c2_layers = tree.c2_layers; - - // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), - "unexpected number of curve layers"); - - LastChunks last_chunks; - - auto &c1_last_chunks_out = last_chunks.c1_last_chunks; - auto &c2_last_chunks_out = last_chunks.c2_last_chunks; - - c1_last_chunks_out.reserve(c1_layers.size()); - c2_last_chunks_out.reserve(c2_layers.size()); - - // First push the last leaf chunk data into c2 chunks - CHECK_AND_ASSERT_THROW_MES(!c2_layers.empty(), "empty curve 2 layers"); - auto last_leaf_chunk = get_last_leaf_chunk(c2, - leaves, - c2_layers[0]); - c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); - - // Next parents will be c1 - bool parent_is_c1 = true; - - // If there are no c1 layers, we're 
done - if (c1_layers.empty()) - return last_chunks; - - // Then get last chunks up until the root - std::size_t c1_idx = 0; - std::size_t c2_idx = 0; - while (c1_last_chunks_out.size() < c1_layers.size() || c2_last_chunks_out.size() < c2_layers.size()) - { - CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); - - // TODO: template the below if statement into another function - if (parent_is_c1) + else if (offset > 0) { - const Layer &child_layer = c2_layers[c2_idx]; - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - - const Layer &parent_layer = c1_layers[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); - - auto last_parent_chunk = get_last_child_layer_chunk(c2, - c1, - child_layer, - parent_layer); - - c1_last_chunks_out.push_back(std::move(last_parent_chunk)); - - ++c2_idx; + // If we're updating the parent hash and no children were updated, then we're just adding new children + // to the existing last chunk and can fill priors with 0 + fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); } else { - const Layer &child_layer = c1_layers[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - - const Layer &parent_layer = c2_layers[c2_idx]; - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); - - auto last_parent_chunk = get_last_child_layer_chunk(c1, - c2, - child_layer, - parent_layer); - - c2_last_chunks_out.push_back(std::move(last_parent_chunk)); - - ++c1_idx; + // If the last chunk is already full and isn't updated in any way, then we just get a new parent + return get_new_parent(curve, new_children); } - // Alternate curves every iteration - parent_is_c1 = !parent_is_c1; + return curve.hash_grow( + curve.GENERATORS, + last_chunk_ptr->last_parent, + offset, + typename C::Chunk{prior_children.data(), 
prior_children.size()}, + new_children + ); } - CHECK_AND_ASSERT_THROW_MES(c1_last_chunks_out.size() == c1_layers.size(), "unexepected c1 last chunks"); - CHECK_AND_ASSERT_THROW_MES(c2_last_chunks_out.size() == c2_layers.size(), "unexepected c2 last chunks"); - - return last_chunks; -} - -// TODO: this is only useful for testsing, since can't fit entire tree in memory -// TEST -template -void extend_tree(const TreeExtension &tree_extension, - const C1 &c1, - const C2 &c2, - Tree &tree_inout) -{ - // Add the leaves - CHECK_AND_ASSERT_THROW_MES((tree_inout.leaves.size() * LEAF_TUPLE_SIZE) == tree_extension.leaves.start_idx, - "unexpected leaf start idx"); - - tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); - for (const auto &leaf : tree_extension.leaves.tuples) + // TODO: look into consolidating hash_layer and hash_leaf_layer into 1 function + // TODO: move impl into cpp + template + void hash_layer(const C_CHILD &c_child, + const C_PARENT &c_parent, + const LastChunkData *last_child_chunk_ptr, + const LastChunkData *last_parent_chunk_ptr, + const LayerExtension &children, + const std::size_t chunk_width, + LayerExtension &parents_out) { - tree_inout.leaves.emplace_back(LeafTuple{ - .O_x = c2.clone(leaf.O_x), - .I_x = c2.clone(leaf.I_x), - .C_x = c2.clone(leaf.C_x) - }); - } + parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); - // Add the layers - const auto &c2_extensions = tree_extension.c2_layer_extensions; - const auto &c1_extensions = tree_extension.c1_layer_extensions; - CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); - bool use_c2 = true; - std::size_t c2_idx = 0; - std::size_t c1_idx = 0; - for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) - { - if (use_c2) + std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset; + + // TODO: try to simplify the approach to avoid edge cases + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0) { - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); - const LayerExtension &c2_ext = c2_extensions[c2_idx]; + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } - CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); + // If the child layer had its existing last hash updated, then we'll need to use the last hash's prior + // version in order to update the existing last parent hash in this layer + bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) + ? 
false + : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); - CHECK_AND_ASSERT_THROW_MES(c2_idx <= tree_inout.c2_layers.size(), "missing c2 layer"); - if (tree_inout.c2_layers.size() == c2_idx) - tree_inout.c2_layers.emplace_back(Layer{}); + if (offset == 0 && child_layer_last_hash_updated) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } - auto &c2_inout = tree_inout.c2_layers[c2_idx]; + // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent + // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk + // will start from there and may need 1 more to fill + CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset"); + if (child_layer_last_hash_updated) + offset = offset > 0 ? (offset - 1) : (chunk_width - 1); + + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + std::vector child_scalars; + if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) + { + // We should be updating the existing root, there shouldn't be a last parent chunk + CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); - const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx); - const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); - CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start"); + // If the children don't already include the existing root at start_idx 0 (they would if the existing + // root was updated in the child layer), then we need to add it to the first chunk to be hashed + if (children.start_idx > 0) + child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + } - // We updated the last hash - if (started_at_tip) - 
c2_inout.back() = c2.clone(c2_ext.hashes.front()); + // Convert child points to scalars + tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); - for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i) - c2_inout.emplace_back(c2.clone(c2_ext.hashes[i])); + // See how many children we need to fill up the existing last chunk + std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset); + MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() + << " , offset: " << offset); - ++c2_idx; - } - else + // Hash chunks of child scalars to create the parent hashes + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < child_scalars.size()) { - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); - const LayerExtension &c1_ext = c1_extensions[c1_idx]; + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; - CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); + for (const auto &c : chunk) + MDEBUG("Hashing " << c_parent.to_string(c)); - CHECK_AND_ASSERT_THROW_MES(c1_idx <= tree_inout.c1_layers.size(), "missing c1 layer"); - if (tree_inout.c1_layers.size() == c1_idx) - tree_inout.c1_layers.emplace_back(Layer{}); + // Hash the chunk of children + typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 + ? 
get_first_non_leaf_parent(c_parent, chunk, chunk_width, child_layer_last_hash_updated, last_parent_chunk_ptr) + : get_new_parent(c_parent, chunk); - auto &c1_inout = tree_inout.c1_layers[c1_idx]; + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash) + << " , chunk_size: " << chunk_size); - const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); - const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); - CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start"); + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); - // We updated the last hash - if (started_at_tip) - c1_inout.back() = c1.clone(c1_ext.hashes.front()); + // Advance to the next chunk + chunk_start_idx += chunk_size; - for (std::size_t i = started_at_tip ? 1 : 0; i < c1_ext.hashes.size(); ++i) - c1_inout.emplace_back(c1.clone(c1_ext.hashes[i])); + // Prepare for next loop if there should be one + if (chunk_start_idx == child_scalars.size()) + break; - ++c1_idx; + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); + chunk_size = std::min(chunk_width, child_scalars.size() - chunk_start_idx); } - - use_c2 = !use_c2; } -} - -// TEST -template -bool validate_layer(const C_PARENT &c_parent, - const Layer &parents, - const std::vector &child_scalars, - const std::size_t max_chunk_size) -{ - // Hash chunk of children scalars, then see if the hash matches up to respective parent - std::size_t chunk_start_idx = 0; - for (std::size_t i = 0; i < parents.size(); ++i) - { - CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); - const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); - CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk 
size too large"); - - const typename C_PARENT::Point &parent = parents[i]; - - const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; - - const typename C_PARENT::Point chunk_hash = get_new_parent(c_parent, chunk); - - const auto actual_bytes = c_parent.to_bytes(parent); - const auto expected_bytes = c_parent.to_bytes(chunk_hash); - CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); - chunk_start_idx += chunk_size; - } - - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); - - return true; -} - -// TEST -template -bool validate_tree(const Tree &tree, const C1 &c1, const C2 &c2) -{ - const auto &leaves = tree.leaves; - const auto &c1_layers = tree.c1_layers; - const auto &c2_layers = tree.c2_layers; - - CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree"); - CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); - CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), - false, "unexpected mismatch of c2 and c1 layers"); - - // Verify root has 1 member in it - const bool c2_is_root = c2_layers.size() > c1_layers.size(); - CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, - "root must have 1 member in it"); - - // Iterate from root down to layer above leaves, and check hashes match up correctly - bool parent_is_c2 = c2_is_root; - std::size_t c2_idx = c2_layers.size() - 1; - std::size_t c1_idx = c1_layers.empty() ? 
0 : (c1_layers.size() - 1); - for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) + // TODO: move impl into cpp + void hash_leaf_layer(const LastChunkData *last_chunk_ptr, + const Leaves &leaves, + LayerExtension &parents_out) { - // TODO: implement templated function for below if statement - if (parent_is_c2) - { - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); - const Layer &parents = c2_layers[c2_idx]; - const Layer &children = c1_layers[c1_idx]; + if (leaves.tuples.empty()) + return; - CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); - CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] + const std::vector children = flatten_leaves(leaves.tuples); - std::vector child_scalars; - extend_scalars_from_cycle_points(c1, children, child_scalars); + const std::size_t max_chunk_size = m_leaf_layer_chunk_width; + const std::size_t offset = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->child_offset; - const bool valid = validate_layer(c2, parents, child_scalars, c2.WIDTH); + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } - CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + // See how many new children are needed to fill up the existing last chunk + CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); + std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); - --c2_idx; - } - else + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < children.size()) { - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + const auto chunk_start = children.data() + chunk_start_idx; + const typename C2::Chunk chunk{chunk_start, chunk_size}; - const Layer &parents = c1_layers[c1_idx]; - const Layer &children = c2_layers[c2_idx]; + for (const auto &c : chunk) + MDEBUG("Hashing " << m_c2.to_string(c)); - CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); - CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); + // Hash the chunk of children + typename C2::Point chunk_hash = chunk_start_idx == 0 + ? 
get_first_leaf_parent(chunk, last_chunk_ptr) + : get_new_parent(m_c2, chunk); - std::vector child_scalars; - extend_scalars_from_cycle_points(c2, children, child_scalars); + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << m_c2.to_string(chunk_hash) + << " , chunk_size: " << chunk_size); - const bool valid = validate_layer(c1, parents, child_scalars, c1.WIDTH); + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); - CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + // Advance to the next chunk + chunk_start_idx += chunk_size; - --c1_idx; - } + // Prepare for next loop if there should be one + if (chunk_start_idx == children.size()) + break; - parent_is_c2 = !parent_is_c2; + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < children.size(), "unexpected chunk start idx"); + chunk_size = std::min(max_chunk_size, children.size() - chunk_start_idx); + } } - // Now validate leaves - return validate_layer(c2, c2_layers[0], flatten_leaves(c2, leaves), LEAF_LAYER_CHUNK_WIDTH); -} +private: + const C1 &m_c1; + const C2 &m_c2; + + const std::size_t m_c1_width; + const std::size_t m_c2_width; + + const std::size_t m_leaf_layer_chunk_width; +}; -} //namespace curve_trees } //namespace fcmp diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 36177f07245..2d9e7729701 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -64,9 +64,6 @@ static struct Helios final const Generators GENERATORS = fcmp_rust::random_helios_generators(); const Point HASH_INIT_POINT = fcmp_rust::random_helios_hash_init_point(); - // TODO: use correct value - static const std::size_t WIDTH = 5; - // Helios point x-coordinates are Selene scalars SeleneScalar point_to_cycle_scalar(const Point &point) const; @@ -118,9 +115,6 @@ static struct Selene final const Generators GENERATORS = 
fcmp_rust::random_selene_generators(); const Point HASH_INIT_POINT = fcmp_rust::random_selene_hash_init_point(); - // TODO: use correct value - static const std::size_t WIDTH = 5; - // Ed25519 point x-coordinates are Selene scalars SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) const; @@ -160,5 +154,32 @@ static struct Selene final }// namespace selene //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- +template +static void extend_zeroes(const C &curve, + const std::size_t num_zeroes, + std::vector &zeroes_inout) +{ + zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); + + for (std::size_t i = 0; i < num_zeroes; ++i) + zeroes_inout.emplace_back(curve.zero_scalar()); +} +//---------------------------------------------------------------------------------------------------------------------- +template +static void extend_scalars_from_cycle_points(const C_POINTS &curve, + const std::vector &points, + std::vector &scalars_out) +{ + scalars_out.reserve(scalars_out.size() + points.size()); + + for (const auto &point : points) + { + // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ + typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); + scalars_out.push_back(std::move(scalar)); + } +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- }//namespace curves }//namespace fcmp diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index a8e7a44596c..b40df6d33c9 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -28,38 +28,393 @@ #include 
"gtest/gtest.h" -#include "fcmp/curve_trees.h" -#include "fcmp/tower_cycle.h" -#include "misc_log_ex.h" +#include "curve_trees.h" #include -template -static const fcmp::curve_trees::Leaves generate_leaves(const C2 &curve, const std::size_t num_leaves) +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTreesUnitTest helpers +//---------------------------------------------------------------------------------------------------------------------- +template +static CurveTreesV1::LastChunkData get_last_child_layer_chunk(const C &curve, + const std::size_t child_layer_size, + const std::size_t parent_layer_size, + const std::size_t chunk_width, + const typename C::Scalar &last_child, + const typename C::Point &last_parent) { - std::vector> tuples; - tuples.reserve(num_leaves); + CHECK_AND_ASSERT_THROW_MES(child_layer_size > 0, "empty child layer"); + CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer"); - for (std::size_t i = 0; i < num_leaves; ++i) + const std::size_t child_offset = child_layer_size % chunk_width; + + return CurveTreesV1::LastChunkData{ + .child_offset = child_offset, + .last_child = curve.clone(last_child), + .last_parent = curve.clone(last_parent), + .child_layer_size = child_layer_size, + .parent_layer_size = parent_layer_size + }; +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTreesUnitTest implementations +//---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUnitTest::Tree &tree) +{ + 
const auto &leaves = tree.leaves; + const auto &c1_layers = tree.c1_layers; + const auto &c2_layers = tree.c2_layers; + + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + "unexpected number of curve layers"); + + CurveTreesV1::LastChunks last_chunks; + + if (c2_layers.empty()) + return last_chunks; + + auto &c1_last_chunks_out = last_chunks.c1_last_chunks; + auto &c2_last_chunks_out = last_chunks.c2_last_chunks; + + c1_last_chunks_out.reserve(c1_layers.size()); + c2_last_chunks_out.reserve(c2_layers.size()); + + // First push the last leaf chunk data into c2 chunks + auto last_leaf_chunk = get_last_child_layer_chunk(m_curve_trees.m_c2, + /*child_layer_size */ leaves.size() * CurveTreesV1::LEAF_TUPLE_SIZE, + /*parent_layer_size*/ c2_layers[0].size(), + /*chunk_width */ m_curve_trees.m_leaf_layer_chunk_width, + /*last_child */ leaves.back().C_x, + /*last_parent */ c2_layers[0].back()); + + c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); + + // If there are no c1 layers, we're done + if (c1_layers.empty()) + return last_chunks; + + // Next parents will be c1 + bool parent_is_c1 = true; + + // Then get last chunks up until the root + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + while (c1_last_chunks_out.size() < c1_layers.size() || c2_last_chunks_out.size() < c2_layers.size()) { - // Generate random output tuple - crypto::secret_key o,c; - crypto::public_key O,C; - crypto::generate_keys(O, o, o, false); - crypto::generate_keys(C, c, c, false); + CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); - auto leaf_tuple = fcmp::curve_trees::output_to_leaf_tuple(curve, O, C); + // TODO: template the below if statement into another function + if (parent_is_c1) + { + const Layer &child_layer = c2_layers[c2_idx]; + 
CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - tuples.emplace_back(std::move(leaf_tuple)); + const Layer &parent_layer = c1_layers[c1_idx]; + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); + + const auto &last_child = m_curve_trees.m_c2.point_to_cycle_scalar(child_layer.back()); + + auto last_parent_chunk = get_last_child_layer_chunk(m_curve_trees.m_c1, + child_layer.size(), + parent_layer.size(), + m_curve_trees.m_c1_width, + last_child, + parent_layer.back()); + + c1_last_chunks_out.push_back(std::move(last_parent_chunk)); + + ++c2_idx; + } + else + { + const Layer &child_layer = c1_layers[c1_idx]; + CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); + + const Layer &parent_layer = c2_layers[c2_idx]; + CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); + + const auto &last_child = m_curve_trees.m_c1.point_to_cycle_scalar(child_layer.back()); + + auto last_parent_chunk = get_last_child_layer_chunk(m_curve_trees.m_c2, + child_layer.size(), + parent_layer.size(), + m_curve_trees.m_c2_width, + last_child, + parent_layer.back()); + + c2_last_chunks_out.push_back(std::move(last_parent_chunk)); + + ++c1_idx; + } + + // Alternate curves every iteration + parent_is_c1 = !parent_is_c1; } - return fcmp::curve_trees::Leaves{ - .start_idx = 0, - .tuples = std::move(tuples) - }; + CHECK_AND_ASSERT_THROW_MES(c1_last_chunks_out.size() == c1_layers.size(), "unexpected c1 last chunks"); + CHECK_AND_ASSERT_THROW_MES(c2_last_chunks_out.size() == c2_layers.size(), "unexpected c2 last chunks"); + + return last_chunks; +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_extension, + CurveTreesUnitTest::Tree &tree_inout) +{ + // Add the leaves + const std::size_t init_num_leaves = tree_inout.leaves.size() * 
m_curve_trees.LEAF_TUPLE_SIZE; + CHECK_AND_ASSERT_THROW_MES(init_num_leaves == tree_extension.leaves.start_idx, + "unexpected leaf start idx"); + + tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); + for (const auto &leaf : tree_extension.leaves.tuples) + { + tree_inout.leaves.emplace_back(CurveTreesV1::LeafTuple{ + .O_x = m_curve_trees.m_c2.clone(leaf.O_x), + .I_x = m_curve_trees.m_c2.clone(leaf.I_x), + .C_x = m_curve_trees.m_c2.clone(leaf.C_x) + }); + } + + // Add the layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + // TODO: template below if statement + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); + const CurveTreesV1::LayerExtension &c2_ext = c2_extensions[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c2_idx <= tree_inout.c2_layers.size(), "missing c2 layer"); + if (tree_inout.c2_layers.size() == c2_idx) + tree_inout.c2_layers.emplace_back(Layer{}); + + auto &c2_inout = tree_inout.c2_layers[c2_idx]; + + const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx); + const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start"); + + // We updated the last hash + if (started_at_tip) + c2_inout.back() = m_curve_trees.m_c2.clone(c2_ext.hashes.front()); + + for (std::size_t i = started_at_tip ? 
1 : 0; i < c2_ext.hashes.size(); ++i) + c2_inout.emplace_back(m_curve_trees.m_c2.clone(c2_ext.hashes[i])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); + const CurveTreesV1::LayerExtension &c1_ext = c1_extensions[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c1_idx <= tree_inout.c1_layers.size(), "missing c1 layer"); + if (tree_inout.c1_layers.size() == c1_idx) + tree_inout.c1_layers.emplace_back(Layer{}); + + auto &c1_inout = tree_inout.c1_layers[c1_idx]; + + const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); + const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start"); + + // We updated the last hash + if (started_at_tip) + c1_inout.back() = m_curve_trees.m_c1.clone(c1_ext.hashes.front()); + + for (std::size_t i = started_at_tip ? 
1 : 0; i < c1_ext.hashes.size(); ++i) + c1_inout.emplace_back(m_curve_trees.m_c1.clone(c1_ext.hashes[i])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +template +bool CurveTreesUnitTest::validate_layer(const C_PARENT &c_parent, + const CurveTreesUnitTest::Layer &parents, + const std::vector &child_scalars, + const std::size_t max_chunk_size) +{ + // Hash chunk of children scalars, then see if the hash matches up to respective parent + std::size_t chunk_start_idx = 0; + for (std::size_t i = 0; i < parents.size(); ++i) + { + CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); + const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); + CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); + + const typename C_PARENT::Point &parent = parents[i]; + + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + + const typename C_PARENT::Point chunk_hash = m_curve_trees.get_new_parent(c_parent, chunk); + + const auto actual_bytes = c_parent.to_bytes(parent); + const auto expected_bytes = c_parent.to_bytes(chunk_hash); + CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); + + chunk_start_idx += chunk_size; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) +{ + const auto &leaves = tree.leaves; + const auto &c1_layers = tree.c1_layers; + const auto &c2_layers = tree.c2_layers; + + CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at 
least 1 leaf in tree"); + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); + CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + false, "unexpected mismatch of c2 and c1 layers"); + + // Verify root has 1 member in it + const bool c2_is_root = c2_layers.size() > c1_layers.size(); + CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, + "root must have 1 member in it"); + + // Iterate from root down to layer above leaves, and check hashes match up correctly + bool parent_is_c2 = c2_is_root; + std::size_t c2_idx = c2_layers.size() - 1; + std::size_t c1_idx = c1_layers.empty() ? 0 : (c1_layers.size() - 1); + for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) + { + // TODO: implement templated function for below if statement + if (parent_is_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + + const Layer &parents = c2_layers[c2_idx]; + const Layer &children = c1_layers[c1_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); + + std::vector child_scalars; + fcmp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c1, + children, + child_scalars); + + const bool valid = this->validate_layer(m_curve_trees.m_c2, + parents, + child_scalars, + m_curve_trees.m_c2_width); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + + --c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + + const Layer &parents = c1_layers[c1_idx]; + const Layer &children = c2_layers[c2_idx]; + 
+ CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); + + std::vector child_scalars; + fcmp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c2, + children, + child_scalars); + + const bool valid = this->validate_layer( + m_curve_trees.m_c1, + parents, + child_scalars, + m_curve_trees.m_c1_width); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + + --c1_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + // Now validate leaves + return this->validate_layer(m_curve_trees.m_c2, + c2_layers[0], + m_curve_trees.flatten_leaves(leaves), + m_curve_trees.m_leaf_layer_chunk_width); } +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Logging helpers +//---------------------------------------------------------------------------------------------------------------------- +static void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) +{ + const auto &c1_last_chunks = last_chunks.c1_last_chunks; + const auto &c2_last_chunks = last_chunks.c2_last_chunks; + + MDEBUG("Total of " << c1_last_chunks.size() << " Helios last chunks and " + << c2_last_chunks.size() << " Selene last chunks"); + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (c1_last_chunks.size() + c2_last_chunks.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_chunks.size(), "unexpected c2 layer"); + + const CurveTreesV1::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; + + MDEBUG("child_offset: " << last_chunk.child_offset + << " , last_child: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_child) + << " 
, last_parent: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_parent) + << " , child_layer_size: " << last_chunk.child_layer_size + << " , parent_layer_size: " << last_chunk.parent_layer_size); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_chunks.size(), "unexpected c1 layer"); + + const CurveTreesV1::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; + + MDEBUG("child_offset: " << last_chunk.child_offset + << " , last_child: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_child) + << " , last_parent: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_parent) + << " , child_layer_size: " << last_chunk.child_layer_size + << " , parent_layer_size: " << last_chunk.parent_layer_size); + + ++c1_idx; + } -static void log_tree_extension(const fcmp::curve_trees::TreeExtension &tree_extension) + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +static void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) { const auto &c1_extensions = tree_extension.c1_layer_extensions; const auto &c2_extensions = tree_extension.c2_layer_extensions; @@ -76,7 +431,7 @@ static void log_tree_extension(const fcmp::curve_trees::TreeExtension &c2_layer = c2_extensions[c2_idx]; + const CurveTreesV1::LayerExtension &c2_layer = c2_extensions[c2_idx]; MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) @@ -102,7 +457,7 @@ static void log_tree_extension(const fcmp::curve_trees::TreeExtension &c1_layer = c1_extensions[c1_idx]; + const CurveTreesV1::LayerExtension &c1_layer = c1_extensions[c1_idx]; MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) @@ -115,8 +470,8 @@ static void log_tree_extension(const fcmp::curve_trees::TreeExtension &tree) 
+//---------------------------------------------------------------------------------------------------------------------- +static void log_tree(const CurveTreesUnitTest::Tree &tree) { MDEBUG("Tree has " << tree.leaves.size() << " leaves, " << tree.c1_layers.size() << " helios layers, " << tree.c2_layers.size() << " selene layers"); @@ -141,7 +496,7 @@ static void log_tree(const fcmp::curve_trees::Tree &c2_layer = tree.c2_layers[c2_idx]; + const CurveTreesUnitTest::Layer &c2_layer = tree.c2_layers[c2_idx]; MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) @@ -153,7 +508,7 @@ static void log_tree(const fcmp::curve_trees::Tree &c1_layer = tree.c1_layers[c1_idx]; + const CurveTreesUnitTest::Layer &c1_layer = tree.c1_layers[c1_idx]; MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) @@ -165,67 +520,82 @@ static void log_tree(const fcmp::curve_trees::Tree &last_chunks) +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test helpers +//---------------------------------------------------------------------------------------------------------------------- +static const CurveTreesV1::Leaves generate_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves) { - const auto &c1_last_chunks = last_chunks.c1_last_chunks; - const auto &c2_last_chunks = last_chunks.c2_last_chunks; - - MDEBUG("Total of " << c1_last_chunks.size() << " Helios last chunks and " - << c2_last_chunks.size() << " Selene last chunks"); + std::vector tuples; + tuples.reserve(num_leaves); - bool use_c2 = true; - std::size_t c1_idx = 0; - std::size_t c2_idx = 0; - for (std::size_t i = 0; i < (c1_last_chunks.size() + c2_last_chunks.size()); ++i) + for 
(std::size_t i = 0; i < num_leaves; ++i) { - if (use_c2) - { - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_chunks.size(), "unexpected c2 layer"); - - const fcmp::curve_trees::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; + // Generate random output tuple + crypto::secret_key o,c; + crypto::public_key O,C; + crypto::generate_keys(O, o, o, false); + crypto::generate_keys(C, c, c, false); - MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_child) - << " , last_parent: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_parent) - << " , child_layer_size: " << last_chunk.child_layer_size - << " , parent_layer_size: " << last_chunk.parent_layer_size); + auto leaf_tuple = curve_trees.output_to_leaf_tuple(O, C); - ++c2_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_chunks.size(), "unexpected c1 layer"); + tuples.emplace_back(std::move(leaf_tuple)); + } - const fcmp::curve_trees::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; + return CurveTreesV1::Leaves{ + .start_idx = 0, + .tuples = std::move(tuples) + }; +} +//---------------------------------------------------------------------------------------------------------------------- +static void grow_tree_test(CurveTreesV1 &curve_trees, + CurveTreesUnitTest &curve_trees_accessor, + const std::size_t num_leaves, + CurveTreesUnitTest::Tree &tree_inout) +{ + const auto last_chunks = curve_trees_accessor.get_last_chunks(tree_inout); + log_last_chunks(last_chunks); - MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_child) - << " , last_parent: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_parent) - << " , child_layer_size: " << last_chunk.child_layer_size - << " , parent_layer_size: " << last_chunk.parent_layer_size); + const auto tree_extension = curve_trees.get_tree_extension( + last_chunks, + 
generate_leaves(curve_trees, num_leaves)); + log_tree_extension(tree_extension); - ++c1_idx; - } + curve_trees_accessor.extend_tree(tree_extension, tree_inout); + log_tree(tree_inout); - use_c2 = !use_c2; - } + ASSERT_TRUE(curve_trees_accessor.validate_tree(tree_inout)); } - +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test +//---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, grow_tree) { + // TODO: test varying widths + const std::size_t HELIOS_CHUNK_WIDTH = 5; + const std::size_t SELENE_CHUNK_WIDTH = 5; + + auto curve_trees = CurveTreesV1( + fcmp::tower_cycle::helios::HELIOS, + fcmp::tower_cycle::selene::SELENE, + HELIOS_CHUNK_WIDTH, + SELENE_CHUNK_WIDTH); + + CurveTreesUnitTest curve_trees_accesor{curve_trees}; + const std::vector N_LEAVES{ 1, 2, 3, - fcmp::tower_cycle::selene::SELENE.WIDTH - 1, - fcmp::tower_cycle::selene::SELENE.WIDTH, - fcmp::tower_cycle::selene::SELENE.WIDTH + 1, - (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 2) - 1, - (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 2), - (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 2) + 1, - (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 3), - (std::size_t)std::pow(fcmp::tower_cycle::selene::SELENE.WIDTH, 4) + SELENE_CHUNK_WIDTH - 1, + SELENE_CHUNK_WIDTH, + SELENE_CHUNK_WIDTH + 1, + (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 2) - 1, + (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 2), + (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 2) + 1, + (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 3), + (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 4) }; for (const std::size_t init_leaves : N_LEAVES) @@ -234,74 +604,29 @@ TEST(curve_trees, grow_tree) { MDEBUG("Adding " << init_leaves 
<< " leaves to tree, then extending by " << ext_leaves << " leaves"); - fcmp::curve_trees::Tree global_tree; + CurveTreesUnitTest::Tree global_tree; - // TODO: use a class that's initialized with the curve cycle and don't need to call templated functions with curve instances every time - - // Initially extend global tree by `init_leaves` + // Initialize global tree with `init_leaves` { MDEBUG("Adding " << init_leaves << " leaves to tree"); - const auto tree_extension = fcmp::curve_trees::get_tree_extension( - fcmp::curve_trees::LastChunks{}, - generate_leaves(fcmp::tower_cycle::selene::SELENE, init_leaves), - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE); - - log_tree_extension(tree_extension); - - fcmp::curve_trees::extend_tree( - tree_extension, - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE, + grow_tree_test(curve_trees, + curve_trees_accesor, + init_leaves, global_tree); - log_tree(global_tree); - - const bool validated = fcmp::curve_trees::validate_tree( - global_tree, - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE); - - ASSERT_TRUE(validated); - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree"); } - // Then extend the global tree again by `ext_leaves` + // Then extend the global tree by `ext_leaves` { MDEBUG("Extending tree by " << ext_leaves << " leaves"); - const auto last_chunks = fcmp::curve_trees::get_last_chunks( - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE, - global_tree); - - log_last_chunks(last_chunks); - - const auto tree_extension = fcmp::curve_trees::get_tree_extension( - last_chunks, - generate_leaves(fcmp::tower_cycle::selene::SELENE, ext_leaves), - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE); - - log_tree_extension(tree_extension); - - fcmp::curve_trees::extend_tree( - tree_extension, - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE, + grow_tree_test(curve_trees, + 
curve_trees_accesor, + ext_leaves, global_tree); - log_tree(global_tree); - - const bool validated = fcmp::curve_trees::validate_tree( - global_tree, - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE); - - ASSERT_TRUE(validated); - MDEBUG("Successfully extended by " << ext_leaves << " leaves"); } } diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h new file mode 100644 index 00000000000..f8456e4ab3d --- /dev/null +++ b/tests/unit_tests/curve_trees.h @@ -0,0 +1,80 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "fcmp/curve_trees.h" +#include "fcmp/tower_cycle.h" +#include "misc_log_ex.h" + +using Helios = fcmp::tower_cycle::helios::Helios; +using Selene = fcmp::tower_cycle::selene::Selene; + +// TODO: make this the instantiation in curve_trees.h/.cpp +using CurveTreesV1 = fcmp::CurveTrees; + +class CurveTreesUnitTest +{ +public: + CurveTreesUnitTest(CurveTreesV1 &curve_trees): m_curve_trees(curve_trees) {}; + +//member structs +public: + template + using Layer = std::vector; + + // A complete tree, useful for testing (don't want to keep the whole tree in memory during normal operation) + struct Tree final + { + std::vector leaves; + std::vector> c1_layers; + std::vector> c2_layers; + }; + +//public member functions +public: + // Read the in-memory tree and get data from last chunks from each layer + CurveTreesV1::LastChunks get_last_chunks(const Tree &tree); + + // Use the tree extension to extend the in-memory tree + void extend_tree(const CurveTreesV1::TreeExtension &tree_extension, Tree &tree_inout); + + // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer + bool validate_tree(const Tree &tree); + +//private member functions +private: + template + bool validate_layer(const C_PARENT &c_parent, + const Layer &parents, + const std::vector &child_scalars, + const std::size_t max_chunk_size); + +private: + CurveTreesV1 &m_curve_trees; +}; From 
29e0fe759e462c7289edfba89a814942834f1845 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 22 May 2024 01:31:27 -0700 Subject: [PATCH 009/127] Add Curve class, and Helios & Selene classes that derive from Curve --- src/fcmp/curve_trees.h | 12 +- src/fcmp/tower_cycle.cpp | 138 ++++++++++++++++++--- src/fcmp/tower_cycle.h | 201 +++++++++++++++++++------------ tests/unit_tests/curve_trees.cpp | 67 +++++++---- tests/unit_tests/curve_trees.h | 9 +- 5 files changed, 300 insertions(+), 127 deletions(-) diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index a841c41e33b..7bcdb139e96 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -133,9 +133,9 @@ class CurveTrees crypto::derive_key_image_generator(O, I); return LeafTuple{ - .O_x = m_c2.ed_25519_point_to_scalar(O), - .I_x = m_c2.ed_25519_point_to_scalar(I), - .C_x = m_c2.ed_25519_point_to_scalar(C) + .O_x = fcmp::tower_cycle::ed_25519_point_to_scalar(O), + .I_x = fcmp::tower_cycle::ed_25519_point_to_scalar(I), + .C_x = fcmp::tower_cycle::ed_25519_point_to_scalar(C) }; }; @@ -265,8 +265,7 @@ class CurveTrees fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); return curve.hash_grow( - curve.GENERATORS, - curve.HASH_INIT_POINT, + curve.m_hash_init_point, 0,/*offset*/ typename C::Chunk{prior_children.data(), prior_children.size()}, new_children @@ -286,7 +285,6 @@ class CurveTrees fcmp::tower_cycle::extend_zeroes(m_c2, new_children.size(), prior_children); return m_c2.hash_grow( - m_c2.GENERATORS, last_chunk_ptr->last_parent, last_chunk_ptr->child_offset, typename C2::Chunk{prior_children.data(), prior_children.size()}, @@ -333,7 +331,6 @@ class CurveTrees } return curve.hash_grow( - curve.GENERATORS, last_chunk_ptr->last_parent, offset, typename C::Chunk{prior_children.data(), prior_children.size()}, @@ -505,6 +502,7 @@ class CurveTrees } } +//member variables private: const C1 &m_c1; const C2 &m_c2; diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp 
index e9012566780..a44bc5f1510 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -26,28 +26,125 @@ // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#include "string_tools.h" #include "tower_cycle.h" namespace fcmp { namespace tower_cycle { - -namespace helios -{ //---------------------------------------------------------------------------------------------------------------------- -SeleneScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const +//---------------------------------------------------------------------------------------------------------------------- +Helios::CycleScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const { return fcmp_rust::helios_point_to_selene_scalar(point); -}; +} //---------------------------------------------------------------------------------------------------------------------- -} //namespace helios +Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const +{ + return fcmp_rust::selene_point_to_helios_scalar(point); +} //---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_grow( + const Helios::Point &existing_hash, + const std::size_t offset, + const Helios::Chunk &prior_children, + const Helios::Chunk &new_children) const +{ + return fcmp_rust::hash_grow_helios( + m_generators, + existing_hash, + offset, + prior_children, + new_children); +} //---------------------------------------------------------------------------------------------------------------------- -namespace selene +Selene::Point Selene::hash_grow( + const Selene::Point &existing_hash, + const std::size_t offset, + const Selene::Chunk &prior_children, + const Selene::Chunk &new_children) const { + return fcmp_rust::hash_grow_selene( + m_generators, + existing_hash, + offset, 
+ prior_children, + new_children); +} //---------------------------------------------------------------------------------------------------------------------- -SeleneScalar Selene::ed_25519_point_to_scalar(const crypto::ec_point &point) const +Helios::Scalar Helios::clone(const Helios::Scalar &scalar) const +{ + return fcmp_rust::clone_helios_scalar(scalar); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Scalar Selene::clone(const Selene::Scalar &scalar) const +{ + return fcmp_rust::clone_selene_scalar(scalar); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::clone(const Helios::Point &point) const +{ + return fcmp_rust::clone_helios_point(point); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::clone(const Selene::Point &point) const +{ + return fcmp_rust::clone_selene_point(point); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Scalar Helios::zero_scalar() const +{ + return fcmp_rust::helios_zero_scalar(); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Scalar Selene::zero_scalar() const +{ + return fcmp_rust::selene_zero_scalar(); +} +//---------------------------------------------------------------------------------------------------------------------- +std::array Helios::to_bytes(const Helios::Scalar &scalar) const +{ + return fcmp_rust::helios_scalar_to_bytes(scalar); +} +//---------------------------------------------------------------------------------------------------------------------- +std::array Selene::to_bytes(const Selene::Scalar &scalar) const +{ + return fcmp_rust::selene_scalar_to_bytes(scalar); 
+} +//---------------------------------------------------------------------------------------------------------------------- +std::array Helios::to_bytes(const Helios::Point &point) const +{ + return fcmp_rust::helios_point_to_bytes(point); +} +//---------------------------------------------------------------------------------------------------------------------- +std::array Selene::to_bytes(const Selene::Point &point) const +{ + return fcmp_rust::selene_point_to_bytes(point); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Helios::to_string(const typename Helios::Scalar &scalar) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(scalar)); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Selene::to_string(const typename Selene::Scalar &scalar) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(scalar)); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Helios::to_string(const typename Helios::Point &point) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(point)); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Selene::to_string(const typename Selene::Point &point) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(point)); +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) { static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), "expected same size ed25519 point to rust representation"); 
@@ -56,15 +153,28 @@ SeleneScalar Selene::ed_25519_point_to_scalar(const crypto::ec_point &point) con fcmp::tower_cycle::RustEd25519Point rust_point; memcpy(&rust_point, &point, sizeof(fcmp::tower_cycle::RustEd25519Point)); return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); -}; +} //---------------------------------------------------------------------------------------------------------------------- -HeliosScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const +Helios::Generators random_helios_generators() { - return fcmp_rust::selene_point_to_helios_scalar(point); -}; + return fcmp_rust::random_helios_generators(); +} //---------------------------------------------------------------------------------------------------------------------- -} //namespace selene +Selene::Generators random_selene_generators() +{ + return fcmp_rust::random_selene_generators(); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point random_helios_hash_init_point() +{ + return fcmp_rust::random_helios_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point random_selene_hash_init_point() +{ + return fcmp_rust::random_selene_hash_init_point(); +} //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -} //namespace curves +} //namespace tower_cycle } //namespace fcmp diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 2d9e7729701..b871b6f9862 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -31,7 +31,6 @@ #include "crypto/crypto.h" #include "fcmp_rust/cxx.h" #include "fcmp_rust/fcmp_rust.h" -#include "string_tools.h" #include @@ -41,119 +40,162 @@ namespace 
tower_cycle { //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- +// Rust types +//---------------------------------------------------------------------------------------------------------------------- using RustEd25519Point = std::array; // Need to forward declare Scalar types for point_to_cycle_scalar below using SeleneScalar = rust::Box; using HeliosScalar = rust::Box; //---------------------------------------------------------------------------------------------------------------------- +struct HeliosT final +{ + using Generators = rust::Box; + using Scalar = HeliosScalar; + using Point = rust::Box; + using Chunk = rust::Slice; + using CycleScalar = SeleneScalar; +}; //---------------------------------------------------------------------------------------------------------------------- -namespace helios +struct SeleneT final { + using Generators = rust::Box; + using Scalar = SeleneScalar; + using Point = rust::Box; + using Chunk = rust::Slice; + using CycleScalar = HeliosScalar; +}; +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- +// Parent curve class that curves in a curve cycle must implement //---------------------------------------------------------------------------------------------------------------------- -// TODO: Curve classes that inherit from a parent -static struct Helios final +template +class Curve { - using Generators = rust::Box; - using Scalar = HeliosScalar; - using Point = rust::Box; - using Chunk = rust::Slice; +//constructor +public: + Curve(const typename C::Generators &generators, const typename C::Point &hash_init_point): + m_generators{generators}, + 
m_hash_init_point{hash_init_point} + {}; + +//member functions +public: + // Read the x-coordinate from this curve's point to get this curve's cycle scalar + virtual typename C::CycleScalar point_to_cycle_scalar(const typename C::Point &point) const = 0; + + virtual typename C::Point hash_grow( + const typename C::Point &existing_hash, + const std::size_t offset, + const typename C::Chunk &prior_children, + const typename C::Chunk &new_children) const = 0; + + virtual typename C::Scalar clone(const typename C::Scalar &scalar) const = 0; + virtual typename C::Point clone(const typename C::Point &point) const = 0; + + virtual typename C::Scalar zero_scalar() const = 0; - // TODO: static constants - const Generators GENERATORS = fcmp_rust::random_helios_generators(); - const Point HASH_INIT_POINT = fcmp_rust::random_helios_hash_init_point(); + virtual std::array to_bytes(const typename C::Scalar &scalar) const = 0; + virtual std::array to_bytes(const typename C::Point &point) const = 0; - // Helios point x-coordinates are Selene scalars - SeleneScalar point_to_cycle_scalar(const Point &point) const; + virtual std::string to_string(const typename C::Scalar &scalar) const = 0; + virtual std::string to_string(const typename C::Point &point) const = 0; + +//member variables +public: + // TODO: make these static constants + const typename C::Generators &m_generators; + const typename C::Point &m_hash_init_point; +}; +//---------------------------------------------------------------------------------------------------------------------- +class Helios final : public Curve +{ +//typedefs +public: + using Generators = HeliosT::Generators; + using Scalar = HeliosT::Scalar; + using Point = HeliosT::Point; + using Chunk = HeliosT::Chunk; + using CycleScalar = HeliosT::CycleScalar; + +//constructor +public: + Helios(const Generators &generators, const Point &hash_init_point) + : Curve(generators, hash_init_point) + {}; + +//member functions +public: + CycleScalar 
point_to_cycle_scalar(const Point &point) const override; Point hash_grow( - const Generators &generators, const Point &existing_hash, const std::size_t offset, const Chunk &prior_children, - const Chunk &new_children) const - { - return fcmp_rust::hash_grow_helios( - generators, - existing_hash, - offset, - prior_children, - new_children); - } + const Chunk &new_children) const override; - Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_helios_scalar(scalar); } - Point clone(const Point &point) const { return fcmp_rust::clone_helios_point(point); } + Scalar clone(const Scalar &scalar) const override; + Point clone(const Point &point) const override; - Scalar zero_scalar() const { return fcmp_rust::helios_zero_scalar(); } + Scalar zero_scalar() const override; - std::array to_bytes(const Scalar &scalar) const - { return fcmp_rust::helios_scalar_to_bytes(scalar); } - std::array to_bytes(const Point &point) const - { return fcmp_rust::helios_point_to_bytes(point); } + std::array to_bytes(const Scalar &scalar) const override; + std::array to_bytes(const Point &point) const override; - std::string to_string(const Scalar &scalar) const - { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } - std::string to_string(const Point &point) const - { return epee::string_tools::pod_to_hex(to_bytes(point)); } -} HELIOS; -}//namespace helios + std::string to_string(const Scalar &scalar) const override; + std::string to_string(const Point &point) const override; +}; //---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- -namespace selene -{ -//---------------------------------------------------------------------------------------------------------------------- 
-//---------------------------------------------------------------------------------------------------------------------- -static struct Selene final +class Selene final : public Curve { - using Generators = rust::Box; - using Scalar = SeleneScalar; - using Point = rust::Box; - using Chunk = rust::Slice; - - // TODO: static constants - const Generators GENERATORS = fcmp_rust::random_selene_generators(); - const Point HASH_INIT_POINT = fcmp_rust::random_selene_hash_init_point(); - - // Ed25519 point x-coordinates are Selene scalars - SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) const; - - // Selene point x-coordinates are Helios scalars - HeliosScalar point_to_cycle_scalar(const Point &point) const; +//typedefs +public: + using Generators = SeleneT::Generators; + using Scalar = SeleneT::Scalar; + using Point = SeleneT::Point; + using Chunk = SeleneT::Chunk; + using CycleScalar = SeleneT::CycleScalar; + +//constructor +public: + Selene(const Generators &generators, const Point &hash_init_point) + : Curve(generators, hash_init_point) + {}; + +//member functions +public: + CycleScalar point_to_cycle_scalar(const Point &point) const override; Point hash_grow( - const Generators &generators, const Point &existing_hash, const std::size_t offset, const Chunk &prior_children, - const Chunk &new_children) const - { - return fcmp_rust::hash_grow_selene( - generators, - existing_hash, - offset, - prior_children, - new_children); - }; + const Chunk &new_children) const override; - Scalar clone(const Scalar &scalar) const { return fcmp_rust::clone_selene_scalar(scalar); } - Point clone(const Point &point) const { return fcmp_rust::clone_selene_point(point); } + Scalar clone(const Scalar &scalar) const override; + Point clone(const Point &point) const override; - Scalar zero_scalar() const { return fcmp_rust::selene_zero_scalar(); } + Scalar zero_scalar() const override; - std::array to_bytes(const Scalar &scalar) const - { return 
fcmp_rust::selene_scalar_to_bytes(scalar); } - std::array to_bytes(const Point &point) const - { return fcmp_rust::selene_point_to_bytes(point); } + std::array to_bytes(const Scalar &scalar) const override; + std::array to_bytes(const Point &point) const override; - std::string to_string(const Scalar &scalar) const - { return epee::string_tools::pod_to_hex(to_bytes(scalar)); } - std::string to_string(const Point &point) const - { return epee::string_tools::pod_to_hex(to_bytes(point)); } -} SELENE; -}// namespace selene + std::string to_string(const Scalar &scalar) const override; + std::string to_string(const Point &point) const override; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Ed25519 point x-coordinates are Selene scalars +SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); +//---------------------------------------------------------------------------------------------------------------------- +// TODO: use static constants and get rid of the below functions +Helios::Generators random_helios_generators(); +Selene::Generators random_selene_generators(); +Helios::Point random_helios_hash_init_point(); +Selene::Point random_selene_hash_init_point(); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- +// TODO: implement in cpp file template static void extend_zeroes(const C &curve, const std::size_t num_zeroes, @@ -165,6 +207,7 @@ static void extend_zeroes(const C &curve, zeroes_inout.emplace_back(curve.zero_scalar()); } //---------------------------------------------------------------------------------------------------------------------- +// TODO: move 
impl into cpp template static void extend_scalars_from_cycle_points(const C_POINTS &curve, const std::vector &points, diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index b40df6d33c9..8038c56e67d 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -365,10 +365,9 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) m_curve_trees.m_leaf_layer_chunk_width); } //---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- // Logging helpers //---------------------------------------------------------------------------------------------------------------------- -static void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) +void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) { const auto &c1_last_chunks = last_chunks.c1_last_chunks; const auto &c2_last_chunks = last_chunks.c2_last_chunks; @@ -388,8 +387,8 @@ static void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) const CurveTreesV1::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_child) - << " , last_parent: " << fcmp::tower_cycle::selene::SELENE.to_string(last_chunk.last_parent) + << " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child) + << " , last_parent: " << m_curve_trees.m_c2.to_string(last_chunk.last_parent) << " , child_layer_size: " << last_chunk.child_layer_size << " , parent_layer_size: " << last_chunk.parent_layer_size); @@ -402,8 +401,8 @@ static void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) const CurveTreesV1::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; MDEBUG("child_offset: " << 
last_chunk.child_offset - << " , last_child: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_child) - << " , last_parent: " << fcmp::tower_cycle::helios::HELIOS.to_string(last_chunk.last_parent) + << " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child) + << " , last_parent: " << m_curve_trees.m_c1.to_string(last_chunk.last_parent) << " , child_layer_size: " << last_chunk.child_layer_size << " , parent_layer_size: " << last_chunk.parent_layer_size); @@ -414,7 +413,7 @@ static void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) } } //---------------------------------------------------------------------------------------------------------------------- -static void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) +void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) { const auto &c1_extensions = tree_extension.c1_layer_extensions; const auto &c2_extensions = tree_extension.c2_layer_extensions; @@ -427,9 +426,9 @@ static void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension { const auto &leaf = tree_extension.leaves.tuples[i]; - const auto O_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.O_x); - const auto I_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.I_x); - const auto C_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.C_x); + const auto O_x = m_curve_trees.m_c2.to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2.to_string(leaf.C_x); MDEBUG("Leaf idx " << ((i*CurveTreesV1::LEAF_TUPLE_SIZE) + tree_extension.leaves.start_idx) << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); @@ -449,7 +448,7 @@ static void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) MDEBUG("Hash idx: " << (j + c2_layer.start_idx) << " , hash: " - << 
fcmp::tower_cycle::selene::SELENE.to_string(c2_layer.hashes[j])); + << m_curve_trees.m_c2.to_string(c2_layer.hashes[j])); ++c2_idx; } @@ -462,7 +461,7 @@ static void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) MDEBUG("Hash idx: " << (j + c1_layer.start_idx) << " , hash: " - << fcmp::tower_cycle::helios::HELIOS.to_string(c1_layer.hashes[j])); + << m_curve_trees.m_c1.to_string(c1_layer.hashes[j])); ++c1_idx; } @@ -471,7 +470,7 @@ static void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension } } //---------------------------------------------------------------------------------------------------------------------- -static void log_tree(const CurveTreesUnitTest::Tree &tree) +void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) { MDEBUG("Tree has " << tree.leaves.size() << " leaves, " << tree.c1_layers.size() << " helios layers, " << tree.c2_layers.size() << " selene layers"); @@ -480,9 +479,9 @@ static void log_tree(const CurveTreesUnitTest::Tree &tree) { const auto &leaf = tree.leaves[i]; - const auto O_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.O_x); - const auto I_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.I_x); - const auto C_x = fcmp::tower_cycle::selene::SELENE.to_string(leaf.C_x); + const auto O_x = m_curve_trees.m_c2.to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2.to_string(leaf.C_x); MDEBUG("Leaf idx " << i << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); } @@ -500,7 +499,7 @@ static void log_tree(const CurveTreesUnitTest::Tree &tree) MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) - MDEBUG("Hash idx: " << j << " , hash: " << fcmp::tower_cycle::selene::SELENE.to_string(c2_layer[j])); + MDEBUG("Hash idx: " << j << " , hash: " << 
m_curve_trees.m_c2.to_string(c2_layer[j])); ++c2_idx; } @@ -512,7 +511,7 @@ static void log_tree(const CurveTreesUnitTest::Tree &tree) MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) - MDEBUG("Hash idx: " << j << " , hash: " << fcmp::tower_cycle::helios::HELIOS.to_string(c1_layer[j])); + MDEBUG("Hash idx: " << j << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer[j])); ++c1_idx; } @@ -524,7 +523,7 @@ static void log_tree(const CurveTreesUnitTest::Tree &tree) //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const CurveTreesV1::Leaves generate_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves) +static const CurveTreesV1::Leaves generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves) { std::vector tuples; tuples.reserve(num_leaves); @@ -553,17 +552,25 @@ static void grow_tree_test(CurveTreesV1 &curve_trees, const std::size_t num_leaves, CurveTreesUnitTest::Tree &tree_inout) { + // Get the last chunk from each layer in the tree; empty if tree is empty const auto last_chunks = curve_trees_accessor.get_last_chunks(tree_inout); - log_last_chunks(last_chunks); + curve_trees_accessor.log_last_chunks(last_chunks); + + // Get a tree extension object to the existing tree using randomly generated leaves + // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves const auto tree_extension = curve_trees.get_tree_extension( last_chunks, - generate_leaves(curve_trees, num_leaves)); - log_tree_extension(tree_extension); + generate_random_leaves(curve_trees, num_leaves)); + curve_trees_accessor.log_tree_extension(tree_extension); + + // Use the tree extension to extend the existing tree 
curve_trees_accessor.extend_tree(tree_extension, tree_inout); - log_tree(tree_inout); + curve_trees_accessor.log_tree(tree_inout); + + // Validate tree structure and all hashes ASSERT_TRUE(curve_trees_accessor.validate_tree(tree_inout)); } //---------------------------------------------------------------------------------------------------------------------- @@ -572,13 +579,23 @@ static void grow_tree_test(CurveTreesV1 &curve_trees, //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, grow_tree) { + // TODO: use static constant generators and hash init points + const Helios::Generators HELIOS_GENERATORS = fcmp::tower_cycle::random_helios_generators(); + const Selene::Generators SELENE_GENERATORS = fcmp::tower_cycle::random_selene_generators(); + + const Helios::Point HELIOS_HASH_INIT_POINT = fcmp::tower_cycle::random_helios_hash_init_point(); + const Selene::Point SELENE_HASH_INIT_POINT = fcmp::tower_cycle::random_selene_hash_init_point(); + + Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); + Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); + // TODO: test varying widths const std::size_t HELIOS_CHUNK_WIDTH = 5; const std::size_t SELENE_CHUNK_WIDTH = 5; auto curve_trees = CurveTreesV1( - fcmp::tower_cycle::helios::HELIOS, - fcmp::tower_cycle::selene::SELENE, + helios, + selene, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index f8456e4ab3d..16527827a7a 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -32,8 +32,8 @@ #include "fcmp/tower_cycle.h" #include "misc_log_ex.h" -using Helios = fcmp::tower_cycle::helios::Helios; -using Selene = fcmp::tower_cycle::selene::Selene; +using Helios = fcmp::tower_cycle::Helios; +using Selene = fcmp::tower_cycle::Selene; // TODO: make this the instantiation in curve_trees.h/.cpp using CurveTreesV1 = fcmp::CurveTrees; @@ 
-67,6 +67,11 @@ class CurveTreesUnitTest // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer bool validate_tree(const Tree &tree); + // logging helpers + void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks); + void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); + void log_tree(const CurveTreesUnitTest::Tree &tree); + //private member functions private: template From 9e68475ebda7abf0e745636136ccd82e759a11bd Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 22 May 2024 15:35:26 -0700 Subject: [PATCH 010/127] Use widths from fcmp++ repo test & align tests with width --- src/fcmp/fcmp_rust/src/lib.rs | 12 +-- src/fcmp/tower_cycle.cpp | 8 +- src/fcmp/tower_cycle.h | 9 ++- tests/unit_tests/curve_trees.cpp | 129 +++++++++++++++++++------------ 4 files changed, 95 insertions(+), 63 deletions(-) diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index c2efffa6a10..8270e726c38 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -27,10 +27,10 @@ mod ffi { type SelenePoint; type SeleneScalar; - fn random_helios_generators() -> Box; + fn random_helios_generators(n: usize) -> Box; fn random_helios_hash_init_point() -> Box; - fn random_selene_generators() -> Box; + fn random_selene_generators(n: usize) -> Box; fn random_selene_hash_init_point() -> Box; fn clone_helios_scalar(helios_scalar: &Box) -> Box; @@ -80,14 +80,14 @@ pub struct SelenePoint(::G); pub struct SeleneScalar(::F); #[allow(non_snake_case)] -pub fn random_helios_generators() -> Box { - let helios_generators = generalized_bulletproofs::tests::generators::(512); +pub fn random_helios_generators(n: usize) -> Box { + let helios_generators = generalized_bulletproofs::tests::generators::(n); Box::new(HeliosGenerators(helios_generators)) } #[allow(non_snake_case)] -pub fn random_selene_generators() -> Box { - let selene_generators = 
generalized_bulletproofs::tests::generators::(512); +pub fn random_selene_generators(n: usize) -> Box { + let selene_generators = generalized_bulletproofs::tests::generators::(n); Box::new(SeleneGenerators(selene_generators)) } diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index a44bc5f1510..c954f7affc6 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -155,14 +155,14 @@ SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); } //---------------------------------------------------------------------------------------------------------------------- -Helios::Generators random_helios_generators() +Helios::Generators random_helios_generators(std::size_t n) { - return fcmp_rust::random_helios_generators(); + return fcmp_rust::random_helios_generators(n); } //---------------------------------------------------------------------------------------------------------------------- -Selene::Generators random_selene_generators() +Selene::Generators random_selene_generators(std::size_t n) { - return fcmp_rust::random_selene_generators(); + return fcmp_rust::random_selene_generators(n); } //---------------------------------------------------------------------------------------------------------------------- Helios::Point random_helios_hash_init_point() diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index b871b6f9862..dcd3092cd76 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -67,7 +67,7 @@ struct SeleneT final }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// Parent curve class that curves in a curve cycle must implement +// Parent curve class that curves in a cycle must implement 
//---------------------------------------------------------------------------------------------------------------------- template class Curve @@ -188,9 +188,10 @@ class Selene final : public Curve // Ed25519 point x-coordinates are Selene scalars SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); //---------------------------------------------------------------------------------------------------------------------- -// TODO: use static constants and get rid of the below functions -Helios::Generators random_helios_generators(); -Selene::Generators random_selene_generators(); +// TODO: use static constants and get rid of the below functions (WARNING: number of generators must be >= curve's +// width, and also need to account for selene leaf layer 3x) +Helios::Generators random_helios_generators(std::size_t n); +Selene::Generators random_selene_generators(std::size_t n); Helios::Point random_helios_hash_init_point(); Selene::Point random_selene_hash_init_point(); //---------------------------------------------------------------------------------------------------------------------- diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 8038c56e67d..582a587b7a3 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -30,8 +30,6 @@ #include "curve_trees.h" -#include - //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTreesUnitTest helpers @@ -472,7 +470,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t //---------------------------------------------------------------------------------------------------------------------- void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) { - MDEBUG("Tree has " << tree.leaves.size() << " leaves, " + 
LOG_PRINT_L1("Tree has " << tree.leaves.size() << " leaves, " << tree.c1_layers.size() << " helios layers, " << tree.c2_layers.size() << " selene layers"); for (std::size_t i = 0; i < tree.leaves.size(); ++i) @@ -547,7 +545,7 @@ static const CurveTreesV1::Leaves generate_random_leaves(const CurveTreesV1 &cur }; } //---------------------------------------------------------------------------------------------------------------------- -static void grow_tree_test(CurveTreesV1 &curve_trees, +static void grow_tree(CurveTreesV1 &curve_trees, CurveTreesUnitTest &curve_trees_accessor, const std::size_t num_leaves, CurveTreesUnitTest::Tree &tree_inout) @@ -574,78 +572,111 @@ static void grow_tree_test(CurveTreesV1 &curve_trees, ASSERT_TRUE(curve_trees_accessor.validate_tree(tree_inout)); } //---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- -// Test -//---------------------------------------------------------------------------------------------------------------------- -TEST(curve_trees, grow_tree) +static void grow_tree_test(Helios &helios, + Selene &selene, + const std::size_t helios_width, + const std::size_t selene_width) { - // TODO: use static constant generators and hash init points - const Helios::Generators HELIOS_GENERATORS = fcmp::tower_cycle::random_helios_generators(); - const Selene::Generators SELENE_GENERATORS = fcmp::tower_cycle::random_selene_generators(); - - const Helios::Point HELIOS_HASH_INIT_POINT = fcmp::tower_cycle::random_helios_hash_init_point(); - const Selene::Point SELENE_HASH_INIT_POINT = fcmp::tower_cycle::random_selene_hash_init_point(); - - Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); - Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); - - // TODO: test varying widths - const std::size_t HELIOS_CHUNK_WIDTH = 5; - const std::size_t 
SELENE_CHUNK_WIDTH = 5; + LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_width << ", selene chunk width " << selene_width); auto curve_trees = CurveTreesV1( helios, selene, - HELIOS_CHUNK_WIDTH, - SELENE_CHUNK_WIDTH); + helios_width, + selene_width); CurveTreesUnitTest curve_trees_accesor{curve_trees}; + CHECK_AND_ASSERT_THROW_MES(helios_width > 1, "helios width must be > 1"); + CHECK_AND_ASSERT_THROW_MES(selene_width > 1, "selene width must be > 1"); + + // Number of leaves for which x number of layers is required + const std::size_t NEED_1_LAYER = selene_width; + const std::size_t NEED_2_LAYERS = NEED_1_LAYER * helios_width; + const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * selene_width; + const std::vector N_LEAVES{ + // Basic tests 1, 2, - 3, - SELENE_CHUNK_WIDTH - 1, - SELENE_CHUNK_WIDTH, - SELENE_CHUNK_WIDTH + 1, - (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 2) - 1, - (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 2), - (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 2) + 1, - (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 3), - (std::size_t)std::pow(SELENE_CHUNK_WIDTH, 4) + + // Test with number of leaves {-1,0,+1} relative to chunk width boundaries + NEED_1_LAYER-1, + NEED_1_LAYER, + NEED_1_LAYER+1, + + NEED_2_LAYERS-1, + NEED_2_LAYERS, + NEED_2_LAYERS+1, + + NEED_3_LAYERS, }; for (const std::size_t init_leaves : N_LEAVES) { for (const std::size_t ext_leaves : N_LEAVES) { - MDEBUG("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); + // Tested reverse order already + if (ext_leaves < init_leaves) + continue; + + // Only test 3rd layer once because it's a huge test + if (init_leaves > 1 && ext_leaves == NEED_3_LAYERS) + continue; + + LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); CurveTreesUnitTest::Tree global_tree; // Initialize global tree with `init_leaves` - { - MDEBUG("Adding " << init_leaves << " leaves to tree"); + MDEBUG("Adding " << init_leaves 
<< " leaves to tree"); - grow_tree_test(curve_trees, - curve_trees_accesor, - init_leaves, - global_tree); + grow_tree(curve_trees, + curve_trees_accesor, + init_leaves, + global_tree); - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree"); - } + MDEBUG("Successfully added initial " << init_leaves << " leaves to tree"); // Then extend the global tree by `ext_leaves` - { - MDEBUG("Extending tree by " << ext_leaves << " leaves"); + MDEBUG("Extending tree by " << ext_leaves << " leaves"); - grow_tree_test(curve_trees, - curve_trees_accesor, - ext_leaves, - global_tree); + grow_tree(curve_trees, + curve_trees_accesor, + ext_leaves, + global_tree); - MDEBUG("Successfully extended by " << ext_leaves << " leaves"); - } + MDEBUG("Successfully extended by " << ext_leaves << " leaves"); } } } +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, grow_tree) +{ + // TODO: use static constant generators and hash init points + const std::size_t HELIOS_GENERATORS_LEN = 128; + const std::size_t SELENE_GENERATORS_LEN = 256; + + // https://github.com/kayabaNerve/fcmp-plus-plus/blob + // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 + const std::size_t HELIOS_CHUNK_WIDTH = 38; + const std::size_t SELENE_CHUNK_WIDTH = 18; + + CHECK_AND_ASSERT_THROW_MES(HELIOS_GENERATORS_LEN >= HELIOS_CHUNK_WIDTH, "helios generators < chunk width"); + CHECK_AND_ASSERT_THROW_MES(SELENE_GENERATORS_LEN >= (SELENE_CHUNK_WIDTH * CurveTreesV1::LEAF_TUPLE_SIZE), + "selene generators < max chunk width"); + + const Helios::Generators HELIOS_GENERATORS = fcmp::tower_cycle::random_helios_generators(HELIOS_GENERATORS_LEN); + 
const Selene::Generators SELENE_GENERATORS = fcmp::tower_cycle::random_selene_generators(SELENE_GENERATORS_LEN); + + const Helios::Point HELIOS_HASH_INIT_POINT = fcmp::tower_cycle::random_helios_hash_init_point(); + const Selene::Point SELENE_HASH_INIT_POINT = fcmp::tower_cycle::random_selene_hash_init_point(); + + Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); + Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); + + grow_tree_test(helios, selene, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); +} From 26009ba5f1675215206d0b55dd1a2df602101906 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 22 May 2024 16:21:12 -0700 Subject: [PATCH 011/127] slight simplification to CurveTrees::hash_layer --- src/fcmp/curve_trees.h | 52 +++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 7bcdb139e96..b873a7bd0a5 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -298,21 +298,19 @@ class CurveTrees const typename C::Chunk &new_children, const std::size_t chunk_width, const bool child_layer_last_hash_updated, - const LastChunkData *last_chunk_ptr) const + const LastChunkData *last_chunk_ptr, + const std::size_t offset) const { // If no last chunk exists, we can get a new parent if (last_chunk_ptr == nullptr) return get_new_parent(curve, new_children); std::vector prior_children; - std::size_t offset = last_chunk_ptr->child_offset; if (child_layer_last_hash_updated) { - // If the last chunk has updated children in it, then we need to get the delta to the old children, and - // subtract the offset by 1 since we're updating the prior last hash + // If the last chunk has updated children in it, then we need to get the delta to the old children prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); - offset = offset > 0 ? 
(offset - 1) : (chunk_width - 1); // Extend prior children by zeroes for any additional new children, since they must be new if (new_children.size() > 1) @@ -354,36 +352,29 @@ class CurveTrees CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); - std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset; - - // TODO: try to simplify the approach to avoid edge cases - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } - - // If the child layer had its existing last hash updated, then we'll need to use the last hash's prior - // version in order to update the existing last parent hash in this layer + // If the child layer had its existing last hash updated (if the new children start at the last element in + // the child layer), then we'll need to use the last hash's prior version in order to update the existing + // last parent hash in this layer bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) ? false : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); - if (offset == 0 && child_layer_last_hash_updated) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } + std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->child_offset; - // TODO: clean this up so I don't have to do it twice here and in get_first_non_leaf_parent // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk // will start from there and may need 1 more to fill CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset"); if (child_layer_last_hash_updated) offset = offset > 0 ? (offset - 1) : (chunk_width - 1); + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0 || child_layer_last_hash_updated) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when // hashing the *existing* root layer std::vector child_scalars; @@ -418,7 +409,12 @@ class CurveTrees // Hash the chunk of children typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 - ? get_first_non_leaf_parent(c_parent, chunk, chunk_width, child_layer_last_hash_updated, last_parent_chunk_ptr) + ? get_first_non_leaf_parent(c_parent, + chunk, + chunk_width, + child_layer_last_hash_updated, + last_parent_chunk_ptr, + offset) : get_new_parent(c_parent, chunk); MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash) @@ -451,9 +447,6 @@ class CurveTrees if (leaves.tuples.empty()) return; - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] - const std::vector children = flatten_leaves(leaves.tuples); - const std::size_t max_chunk_size = m_leaf_layer_chunk_width; const std::size_t offset = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->child_offset; @@ -465,6 +458,9 @@ class CurveTrees --parents_out.start_idx; } + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] + const std::vector children = flatten_leaves(leaves.tuples); + // See how many new children are needed to fill up the existing last chunk CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); From 4ade675939d16f76bf9ec9c4d2a3b47f21dec41d Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 22 May 2024 18:48:47 -0700 Subject: [PATCH 012/127] Consolidate hash_leaf_layer into hash_layer --- src/fcmp/curve_trees.h | 255 +++++++++++++------------------ tests/unit_tests/curve_trees.cpp | 11 +- 2 files changed, 114 insertions(+), 152 deletions(-) diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index b873a7bd0a5..02b691251b5 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -140,28 +140,12 @@ class CurveTrees }; // TODO: move impl into cpp - std::vector flatten_leaves(const std::vector &leaves) const - { - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - - for (const auto &l : leaves) - { - // TODO: implement without cloning - flattened_leaves.emplace_back(m_c2.clone(l.O_x)); - flattened_leaves.emplace_back(m_c2.clone(l.I_x)); - flattened_leaves.emplace_back(m_c2.clone(l.C_x)); - } - - return flattened_leaves; - }; - - // TODO: move impl into cpp - TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, const Leaves &new_leaves) + TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, + const std::vector &new_leaf_tuples) { TreeExtension tree_extension; - if (new_leaves.tuples.empty()) + if (new_leaf_tuples.empty()) return tree_extension; const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; @@ -174,8 +158,8 @@ class CurveTrees // Copy the leaves // 
TODO: don't copy here - tree_extension.leaves.tuples.reserve(new_leaves.tuples.size()); - for (const auto &leaf : new_leaves.tuples) + tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); + for (const auto &leaf : new_leaf_tuples) { tree_extension.leaves.tuples.emplace_back(LeafTuple{ .O_x = m_c2.clone(leaf.O_x), @@ -187,13 +171,19 @@ class CurveTrees auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] + const std::vector flattened_leaves = this->flatten_leaves(new_leaf_tuples); + // Hash the leaf layer - LayerExtension parents; - this->hash_leaf_layer(c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], - new_leaves, - parents); + LayerExtension leaf_parents; + this->hash_layer(m_c2, + c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], + flattened_leaves, + tree_extension.leaves.start_idx, + m_leaf_layer_chunk_width, + leaf_parents); - c2_layer_extensions_out.emplace_back(std::move(parents)); + c2_layer_extensions_out.emplace_back(std::move(leaf_parents)); // Check if we just added the root if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) @@ -207,16 +197,31 @@ class CurveTrees // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) while (true) { + const LastChunkData *c1_last_chunk_ptr = (c1_last_chunks.size() <= c1_last_idx) + ? nullptr + : &c1_last_chunks[c1_last_idx]; + + const LastChunkData *c2_last_chunk_ptr = (c2_last_chunks.size() <= c2_last_idx) + ? 
nullptr + : &c2_last_chunks[c2_last_idx]; + + // TODO: templated function if (parent_is_c1) { CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; + + const auto c1_child_scalars = this->next_child_scalars_from_children(m_c2, + c2_last_chunk_ptr, + c1_last_chunk_ptr, + c2_child_extension); + LayerExtension c1_layer_extension; - this->hash_layer(m_c2, - m_c1, - (c2_last_chunks.size() <= c2_last_idx) ? nullptr : &c2_last_chunks[c2_last_idx], - (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], - c2_layer_extensions_out[c2_last_idx], + this->hash_layer(m_c1, + c1_last_chunk_ptr, + c1_child_scalars, + c2_child_extension.start_idx, m_c1_width, c1_layer_extension); @@ -232,12 +237,18 @@ class CurveTrees { CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; + + const auto c2_child_scalars = this->next_child_scalars_from_children(m_c1, + c1_last_chunk_ptr, + c2_last_chunk_ptr, + c1_child_extension); + LayerExtension c2_layer_extension; - this->hash_layer(m_c1, - m_c2, - (c1_last_chunks.size() <= c1_last_idx) ? nullptr : &c1_last_chunks[c1_last_idx], - (c2_last_chunks.size() <= c2_last_idx) ? 
nullptr : &c2_last_chunks[c2_last_idx], - c1_layer_extensions_out[c1_last_idx], + this->hash_layer(m_c2, + c2_last_chunk_ptr, + c2_child_scalars, + c1_child_extension.start_idx, m_c2_width, c2_layer_extension); @@ -272,29 +283,9 @@ class CurveTrees ); } - // TODO: move impl into cpp - typename C2::Point get_first_leaf_parent(const typename C2::Chunk &new_children, - const LastChunkData *last_chunk_ptr) const - { - // If no last chunk exists, or if the last chunk is already full, then we can get a new parent - if (last_chunk_ptr == nullptr || last_chunk_ptr->child_offset == 0) - return get_new_parent(m_c2, new_children); - - // There won't be any existing children when growing the leaf layer, fill priors with 0 - std::vector prior_children; - fcmp::tower_cycle::extend_zeroes(m_c2, new_children.size(), prior_children); - - return m_c2.hash_grow( - last_chunk_ptr->last_parent, - last_chunk_ptr->child_offset, - typename C2::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); - } - // TODO: move impl into cpp template - typename C::Point get_first_non_leaf_parent(const C &curve, + typename C::Point get_first_parent(const C &curve, const typename C::Chunk &new_children, const std::size_t chunk_width, const bool child_layer_last_hash_updated, @@ -336,28 +327,60 @@ class CurveTrees ); } - // TODO: look into consolidating hash_layer and hash_leaf_layer into 1 function // TODO: move impl into cpp - template - void hash_layer(const C_CHILD &c_child, - const C_PARENT &c_parent, + template + std::vector next_child_scalars_from_children(const C_CHILD &c_child, const LastChunkData *last_child_chunk_ptr, const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children, + const LayerExtension &children) + { + std::vector child_scalars; + + // The existing root would have a size of 1 + const bool updating_root_layer = last_child_chunk_ptr != nullptr + && last_child_chunk_ptr->parent_layer_size == 1; + + // If we're creating a *new* root at the 
existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + if (updating_root_layer) + { + // We should be updating the existing root, there shouldn't be a last parent chunk + CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + + // If the children don't already include the existing root, then we need to include it to be hashed + // - the children would include the existing root already if the existing root was updated in the child + // layer (the start_idx would be 0) + if (children.start_idx > 0) + child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + } + + // Convert child points to scalars + tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); + + return child_scalars; + } + + // TODO: move impl into cpp + template + void hash_layer(const C &curve, + const LastChunkData *last_parent_chunk_ptr, + const std::vector &child_scalars, + const std::size_t children_start_idx, const std::size_t chunk_width, - LayerExtension &parents_out) + LayerExtension &parents_out) { parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->parent_layer_size; parents_out.hashes.clear(); - CHECK_AND_ASSERT_THROW_MES(!children.hashes.empty(), "empty children hashes"); + CHECK_AND_ASSERT_THROW_MES(!child_scalars.empty(), "empty child scalars"); - // If the child layer had its existing last hash updated (if the new children start at the last element in + // If the child layer had its existing last hash updated (if the new children include the last element in // the child layer), then we'll need to use the last hash's prior version in order to update the existing // last parent hash in this layer - bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) + // - Note: the leaf layer is strictly append-only, so this cannot be true for the leaf layer + const bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) ? false - : last_parent_chunk_ptr->child_layer_size == (children.start_idx + 1); + : last_parent_chunk_ptr->child_layer_size == (children_start_idx + 1); std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset; @@ -365,7 +388,10 @@ class CurveTrees // will start from there and may need 1 more to fill CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset"); if (child_layer_last_hash_updated) + { + MDEBUG("child_layer_last_hash_updated, updating offset: " << offset); offset = offset > 0 ? 
(offset - 1) : (chunk_width - 1); + } // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 // since we'll be updating the existing parent hash of the last chunk @@ -375,25 +401,9 @@ class CurveTrees --parents_out.start_idx; } - // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when - // hashing the *existing* root layer - std::vector child_scalars; - if (last_child_chunk_ptr != nullptr && last_child_chunk_ptr->parent_layer_size == 1) - { - // We should be updating the existing root, there shouldn't be a last parent chunk - CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); - - // If the children don't already include the existing root at start_idx 0 (they would if the existing - // root was updated in the child layer), then we need to add it to the first chunk to be hashed - if (children.start_idx > 0) - child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); - } - - // Convert child points to scalars - tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); - // See how many children we need to fill up the existing last chunk std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset); + MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() << " , offset: " << offset); @@ -402,22 +412,22 @@ class CurveTrees while (chunk_start_idx < child_scalars.size()) { const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + const typename C::Chunk chunk{chunk_start, chunk_size}; for (const auto &c : chunk) - MDEBUG("Hashing " << c_parent.to_string(c)); + MDEBUG("Hashing " << curve.to_string(c)); // Hash the chunk of children - typename C_PARENT::Point chunk_hash = chunk_start_idx == 0 - ? 
get_first_non_leaf_parent(c_parent, + typename C::Point chunk_hash = chunk_start_idx == 0 + ? get_first_parent(curve, chunk, chunk_width, child_layer_last_hash_updated, last_parent_chunk_ptr, offset) - : get_new_parent(c_parent, chunk); + : get_new_parent(curve, chunk); - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << c_parent.to_string(chunk_hash) + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) << " , chunk_size: " << chunk_size); // We've got our hash @@ -437,66 +447,21 @@ class CurveTrees } // TODO: move impl into cpp - void hash_leaf_layer(const LastChunkData *last_chunk_ptr, - const Leaves &leaves, - LayerExtension &parents_out) + std::vector flatten_leaves(const std::vector &leaves) const { - parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->parent_layer_size; - parents_out.hashes.clear(); - - if (leaves.tuples.empty()) - return; - - const std::size_t max_chunk_size = m_leaf_layer_chunk_width; - const std::size_t offset = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->child_offset; + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0) + for (const auto &l : leaves) { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; + // TODO: implement without cloning + flattened_leaves.emplace_back(m_c2.clone(l.O_x)); + flattened_leaves.emplace_back(m_c2.clone(l.I_x)); + flattened_leaves.emplace_back(m_c2.clone(l.C_x)); } - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar, scalar, scalar, scalar, scalar, scalar,...] 
- const std::vector children = flatten_leaves(leaves.tuples); - - // See how many new children are needed to fill up the existing last chunk - CHECK_AND_ASSERT_THROW_MES(max_chunk_size > offset, "unexpected offset"); - std::size_t chunk_size = std::min(children.size(), max_chunk_size - offset); - - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < children.size()) - { - const auto chunk_start = children.data() + chunk_start_idx; - const typename C2::Chunk chunk{chunk_start, chunk_size}; - - for (const auto &c : chunk) - MDEBUG("Hashing " << m_c2.to_string(c)); - - // Hash the chunk of children - typename C2::Point chunk_hash = chunk_start_idx == 0 - ? get_first_leaf_parent(chunk, last_chunk_ptr) - : get_new_parent(m_c2, chunk); - - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << m_c2.to_string(chunk_hash) - << " , chunk_size: " << chunk_size); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); - - // Advance to the next chunk - chunk_start_idx += chunk_size; - - // Prepare for next loop if there should be one - if (chunk_start_idx == children.size()) - break; - - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < children.size(), "unexpected chunk start idx"); - chunk_size = std::min(max_chunk_size, children.size() - chunk_start_idx); - } - } + return flattened_leaves; + }; //member variables private: diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 582a587b7a3..f0723690a2d 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -521,7 +521,8 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) //---------------------------------------------------------------------------------------------------------------------- // Test helpers 
//---------------------------------------------------------------------------------------------------------------------- -static const CurveTreesV1::Leaves generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves) +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, + const std::size_t num_leaves) { std::vector tuples; tuples.reserve(num_leaves); @@ -539,10 +540,7 @@ static const CurveTreesV1::Leaves generate_random_leaves(const CurveTreesV1 &cur tuples.emplace_back(std::move(leaf_tuple)); } - return CurveTreesV1::Leaves{ - .start_idx = 0, - .tuples = std::move(tuples) - }; + return tuples; } //---------------------------------------------------------------------------------------------------------------------- static void grow_tree(CurveTreesV1 &curve_trees, @@ -557,8 +555,7 @@ static void grow_tree(CurveTreesV1 &curve_trees, // Get a tree extension object to the existing tree using randomly generated leaves // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves - const auto tree_extension = curve_trees.get_tree_extension( - last_chunks, + const auto tree_extension = curve_trees.get_tree_extension(last_chunks, generate_random_leaves(curve_trees, num_leaves)); curve_trees_accessor.log_tree_extension(tree_extension); From 9ba00be519c28227a871a382f084fe5edd96624b Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 22 May 2024 20:24:39 -0700 Subject: [PATCH 013/127] Move curve_trees.h implementations into curve_trees.cpp file --- src/fcmp/CMakeLists.txt | 1 + src/fcmp/curve_trees.cpp | 440 +++++++++++++++++++++++++++++++++ src/fcmp/curve_trees.h | 331 ++----------------------- tests/unit_tests/curve_trees.h | 1 - 4 files changed, 460 insertions(+), 313 deletions(-) create mode 100644 src/fcmp/curve_trees.cpp diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index 8af1b1f6bef..4db451d6800 100644 --- a/src/fcmp/CMakeLists.txt +++ 
b/src/fcmp/CMakeLists.txt @@ -27,6 +27,7 @@ # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. set(fcmp_sources + curve_trees.cpp tower_cycle.cpp) monero_find_all_headers(fcmp_headers "${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp new file mode 100644 index 00000000000..0ad0c1aed13 --- /dev/null +++ b/src/fcmp/curve_trees.cpp @@ -0,0 +1,440 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "curve_trees.h" + + +namespace fcmp +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +using Helios = tower_cycle::Helios; +using Selene = tower_cycle::Selene; + +// Instantiate the tower cycle types +template class CurveTrees; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +template<> +CurveTrees::LeafTuple CurveTrees::output_to_leaf_tuple( + const crypto::public_key &O, + const crypto::public_key &C) const +{ + crypto::ec_point I; + crypto::derive_key_image_generator(O, I); + + return LeafTuple{ + .O_x = fcmp::tower_cycle::ed_25519_point_to_scalar(O), + .I_x = fcmp::tower_cycle::ed_25519_point_to_scalar(I), + .C_x = fcmp::tower_cycle::ed_25519_point_to_scalar(C) + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +template +typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( + const LastChunks &existing_last_chunks, + const std::vector &new_leaf_tuples) +{ + TreeExtension tree_extension; + + if (new_leaf_tuples.empty()) + return tree_extension; 
+ + const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; + const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; + + // Set the leaf start idx + tree_extension.leaves.start_idx = c2_last_chunks.empty() + ? 0 + : c2_last_chunks[0].child_layer_size; + + // Copy the leaves + // TODO: don't copy here + tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); + for (const auto &leaf : new_leaf_tuples) + { + tree_extension.leaves.tuples.emplace_back(LeafTuple{ + .O_x = m_c2.clone(leaf.O_x), + .I_x = m_c2.clone(leaf.I_x), + .C_x = m_c2.clone(leaf.C_x) + }); + } + + auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; + + const std::vector flattened_leaves = this->flatten_leaves(new_leaf_tuples); + + // Hash the leaf layer + LayerExtension leaf_parents; + this->hash_layer(m_c2, + c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], + flattened_leaves, + tree_extension.leaves.start_idx, + m_leaf_layer_chunk_width, + leaf_parents); + + c2_layer_extensions_out.emplace_back(std::move(leaf_parents)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + // Alternate between hashing c2 children, c1 children, c2, c1, ... + bool parent_is_c1 = true; + + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; + // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) + while (true) + { + const LastChunkData *c1_last_chunk_ptr = (c1_last_chunks.size() <= c1_last_idx) + ? nullptr + : &c1_last_chunks[c1_last_idx]; + + const LastChunkData *c2_last_chunk_ptr = (c2_last_chunks.size() <= c2_last_idx) + ? 
nullptr + : &c2_last_chunks[c2_last_idx]; + + // TODO: templated function + if (parent_is_c1) + { + CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + + const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; + + const auto c1_child_scalars = this->next_child_scalars_from_children(m_c2, + c2_last_chunk_ptr, + c1_last_chunk_ptr, + c2_child_extension); + + LayerExtension c1_layer_extension; + this->hash_layer(m_c1, + c1_last_chunk_ptr, + c1_child_scalars, + c2_child_extension.start_idx, + m_c1_width, + c1_layer_extension); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + + // Check if we just added the root + if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c2_last_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + + const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; + + const auto c2_child_scalars = this->next_child_scalars_from_children(m_c1, + c1_last_chunk_ptr, + c2_last_chunk_ptr, + c1_child_extension); + + LayerExtension c2_layer_extension; + this->hash_layer(m_c2, + c2_last_chunk_ptr, + c2_child_scalars, + c1_child_extension.start_idx, + m_c2_width, + c2_layer_extension); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c1_last_idx; + } + + parent_is_c1 = !parent_is_c1; + } +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Private member functions 
+//---------------------------------------------------------------------------------------------------------------------- +template +std::vector CurveTrees::flatten_leaves(const std::vector &leaves) const +{ + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (const auto &l : leaves) + { + // TODO: implement without cloning + flattened_leaves.emplace_back(m_c2.clone(l.O_x)); + flattened_leaves.emplace_back(m_c2.clone(l.I_x)); + flattened_leaves.emplace_back(m_c2.clone(l.C_x)); + } + + return flattened_leaves; +}; +//---------------------------------------------------------------------------------------------------------------------- +// Explicit instantiations +template Helios::Point CurveTrees::get_new_parent(const Helios &curve, + const Helios::Chunk &new_children) const; + +template Selene::Point CurveTrees::get_new_parent(const Selene &curve, + const Selene::Chunk &new_children) const; +//---------------------------------------------------------------------------------------------------------------------- +// Implementation +template +template +typename C::Point CurveTrees::get_new_parent(const C &curve, + const typename C::Chunk &new_children) const +{ + // New parent means no prior children, fill priors with 0 + std::vector prior_children; + fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + + return curve.hash_grow( + curve.m_hash_init_point, + 0,/*offset*/ + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Explicit instantiations +template Helios::Point CurveTrees::get_first_parent(const Helios &curve, + const Helios::Chunk &new_children, + const std::size_t chunk_width, + const bool 
child_layer_last_hash_updated, + const LastChunkData *last_chunk_ptr, + const std::size_t offset) const; + +template Selene::Point CurveTrees::get_first_parent(const Selene &curve, + const Selene::Chunk &new_children, + const std::size_t chunk_width, + const bool child_layer_last_hash_updated, + const LastChunkData *last_chunk_ptr, + const std::size_t offset) const; +//---------------------------------------------------------------------------------------------------------------------- +// Implementation +template +template +typename C::Point CurveTrees::get_first_parent(const C &curve, + const typename C::Chunk &new_children, + const std::size_t chunk_width, + const bool child_layer_last_hash_updated, + const LastChunkData *last_chunk_ptr, + const std::size_t offset) const +{ + // If no last chunk exists, we can get a new parent + if (last_chunk_ptr == nullptr) + return this->get_new_parent(curve, new_children); + + std::vector prior_children; + + if (child_layer_last_hash_updated) + { + // If the last chunk has updated children in it, then we need to get the delta to the old children + prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); + + // Extend prior children by zeroes for any additional new children, since they must be new + if (new_children.size() > 1) + fcmp::tower_cycle::extend_zeroes(curve, new_children.size() - 1, prior_children); + } + else if (offset > 0) + { + // If we're updating the parent hash and no children were updated, then we're just adding new children + // to the existing last chunk and can fill priors with 0 + fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + } + else + { + // If the last chunk is already full and isn't updated in any way, then we just get a new parent + return this->get_new_parent(curve, new_children); + } + + return curve.hash_grow( + last_chunk_ptr->last_parent, + offset, + typename C::Chunk{prior_children.data(), prior_children.size()}, + new_children + ); +}; 
+//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Explicit instantiations +template std::vector CurveTrees::next_child_scalars_from_children(const Selene &c_child, + const LastChunkData *last_child_chunk_ptr, + const LastChunkData *last_parent_chunk_ptr, + const LayerExtension &children); + +template std::vector CurveTrees::next_child_scalars_from_children(const Helios &c_child, + const LastChunkData *last_child_chunk_ptr, + const LastChunkData *last_parent_chunk_ptr, + const LayerExtension &children); +//---------------------------------------------------------------------------------------------------------------------- +// Implementation +template +template +std::vector CurveTrees::next_child_scalars_from_children(const C_CHILD &c_child, + const LastChunkData *last_child_chunk_ptr, + const LastChunkData *last_parent_chunk_ptr, + const LayerExtension &children) +{ + std::vector child_scalars; + + // The existing root would have a size of 1 + const bool updating_root_layer = last_child_chunk_ptr != nullptr + && last_child_chunk_ptr->parent_layer_size == 1; + + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + if (updating_root_layer) + { + // We should be updating the existing root, there shouldn't be a last parent chunk + CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + + // If the children don't already include the existing root, then we need to include it to be hashed + // - the children would include the existing root already if the existing root was updated in the child + // layer (the start_idx would be 0) + if (children.start_idx > 0) + 
child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + } + + // Convert child points to scalars + tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); + + return child_scalars; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Explicit instantiations +template void CurveTrees::hash_layer(const Helios &curve, + const LastChunkData *last_parent_chunk_ptr, + const std::vector &child_scalars, + const std::size_t children_start_idx, + const std::size_t chunk_width, + LayerExtension &parents_out); + +template void CurveTrees::hash_layer(const Selene &curve, + const LastChunkData *last_parent_chunk_ptr, + const std::vector &child_scalars, + const std::size_t children_start_idx, + const std::size_t chunk_width, + LayerExtension &parents_out); +//---------------------------------------------------------------------------------------------------------------------- +// Implementation +template +template +void CurveTrees::hash_layer(const C &curve, + const LastChunkData *last_parent_chunk_ptr, + const std::vector &child_scalars, + const std::size_t children_start_idx, + const std::size_t chunk_width, + LayerExtension &parents_out) +{ + parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 
0 : last_parent_chunk_ptr->parent_layer_size; + parents_out.hashes.clear(); + + CHECK_AND_ASSERT_THROW_MES(!child_scalars.empty(), "empty child scalars"); + + // If the child layer had its existing last hash updated (if the new children include the last element in + // the child layer), then we'll need to use the last hash's prior version in order to update the existing + // last parent hash in this layer + // - Note: the leaf layer is strictly append-only, so this cannot be true for the leaf layer + const bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) + ? false + : last_parent_chunk_ptr->child_layer_size == (children_start_idx + 1); + + std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset; + + // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk + // will start from there and may need 1 more to fill + CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset"); + if (child_layer_last_hash_updated) + { + MDEBUG("child_layer_last_hash_updated, updating offset: " << offset); + offset = offset > 0 ? 
(offset - 1) : (chunk_width - 1); + } + + // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 + // since we'll be updating the existing parent hash of the last chunk + if (offset > 0 || child_layer_last_hash_updated) + { + CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); + --parents_out.start_idx; + } + + // See how many children we need to fill up the existing last chunk + std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset); + + MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() + << " , offset: " << offset); + + // Hash chunks of child scalars to create the parent hashes + std::size_t chunk_start_idx = 0; + while (chunk_start_idx < child_scalars.size()) + { + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C::Chunk chunk{chunk_start, chunk_size}; + + for (const auto &c : chunk) + MDEBUG("Hashing " << curve.to_string(c)); + + // Hash the chunk of children + typename C::Point chunk_hash = chunk_start_idx == 0 + ? 
get_first_parent(curve, + chunk, + chunk_width, + child_layer_last_hash_updated, + last_parent_chunk_ptr, + offset) + : this->get_new_parent(curve, chunk); + + MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + // Prepare for next loop if there should be one + if (chunk_start_idx == child_scalars.size()) + break; + + // Fill a complete chunk, or add the remaining new children to the last chunk + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); + chunk_size = std::min(chunk_width, child_scalars.size() - chunk_start_idx); + } +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +} //namespace fcmp diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 02b691251b5..82274d23c6a 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -126,342 +126,49 @@ class CurveTrees //member functions public: - // TODO: move impl into cpp - LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const - { - crypto::ec_point I; - crypto::derive_key_image_generator(O, I); - - return LeafTuple{ - .O_x = fcmp::tower_cycle::ed_25519_point_to_scalar(O), - .I_x = fcmp::tower_cycle::ed_25519_point_to_scalar(I), - .C_x = fcmp::tower_cycle::ed_25519_point_to_scalar(C) - }; - }; + // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree + LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const; - // TODO: move impl into cpp + // Take in the existing last chunks of each layer in the tree, as 
well as new leaves to add to the tree, + // and return a tree extension struct that can be used to extend a global tree TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, - const std::vector &new_leaf_tuples) - { - TreeExtension tree_extension; - - if (new_leaf_tuples.empty()) - return tree_extension; - - const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; - const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; - - // Set the leaf start idx - tree_extension.leaves.start_idx = c2_last_chunks.empty() - ? 0 - : c2_last_chunks[0].child_layer_size; - - // Copy the leaves - // TODO: don't copy here - tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (const auto &leaf : new_leaf_tuples) - { - tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = m_c2.clone(leaf.O_x), - .I_x = m_c2.clone(leaf.I_x), - .C_x = m_c2.clone(leaf.C_x) - }); - } - - auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; - auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; - - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] - const std::vector flattened_leaves = this->flatten_leaves(new_leaf_tuples); - - // Hash the leaf layer - LayerExtension leaf_parents; - this->hash_layer(m_c2, - c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], - flattened_leaves, - tree_extension.leaves.start_idx, - m_leaf_layer_chunk_width, - leaf_parents); - - c2_layer_extensions_out.emplace_back(std::move(leaf_parents)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - // Alternate between hashing c2 children, c1 children, c2, c1, ... 
- bool parent_is_c1 = true; - - std::size_t c1_last_idx = 0; - std::size_t c2_last_idx = 0; - // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) - while (true) - { - const LastChunkData *c1_last_chunk_ptr = (c1_last_chunks.size() <= c1_last_idx) - ? nullptr - : &c1_last_chunks[c1_last_idx]; - - const LastChunkData *c2_last_chunk_ptr = (c2_last_chunks.size() <= c2_last_idx) - ? nullptr - : &c2_last_chunks[c2_last_idx]; - - // TODO: templated function - if (parent_is_c1) - { - CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); - - const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; - - const auto c1_child_scalars = this->next_child_scalars_from_children(m_c2, - c2_last_chunk_ptr, - c1_last_chunk_ptr, - c2_child_extension); - - LayerExtension c1_layer_extension; - this->hash_layer(m_c1, - c1_last_chunk_ptr, - c1_child_scalars, - c2_child_extension.start_idx, - m_c1_width, - c1_layer_extension); - - c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); - - // Check if we just added the root - if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c2_last_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); - - const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; - - const auto c2_child_scalars = this->next_child_scalars_from_children(m_c1, - c1_last_chunk_ptr, - c2_last_chunk_ptr, - c1_child_extension); - - LayerExtension c2_layer_extension; - this->hash_layer(m_c2, - c2_last_chunk_ptr, - c2_child_scalars, - c1_child_extension.start_idx, - m_c2_width, - c2_layer_extension); - - c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx 
== 0) - return tree_extension; - - ++c1_last_idx; - } - - parent_is_c1 = !parent_is_c1; - } - } + const std::vector &new_leaf_tuples); +//private member functions private: - // TODO: move impl into cpp - template - typename C::Point get_new_parent(const C &curve, - const typename C::Chunk &new_children) const - { - // New parent means no prior children, fill priors with 0 - std::vector prior_children; - fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] + std::vector flatten_leaves(const std::vector &leaves) const; - return curve.hash_grow( - curve.m_hash_init_point, - 0,/*offset*/ - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); - } + // TODO: make below functions static functions inside curve_trees.cpp + // Hash a chunk of new children + template + typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children) const; - // TODO: move impl into cpp + // Hash the first chunk of children being added to a layer template typename C::Point get_first_parent(const C &curve, const typename C::Chunk &new_children, const std::size_t chunk_width, const bool child_layer_last_hash_updated, const LastChunkData *last_chunk_ptr, - const std::size_t offset) const - { - // If no last chunk exists, we can get a new parent - if (last_chunk_ptr == nullptr) - return get_new_parent(curve, new_children); - - std::vector prior_children; - - if (child_layer_last_hash_updated) - { - // If the last chunk has updated children in it, then we need to get the delta to the old children - prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); - - // Extend prior children by zeroes for any additional new children, since they must be new - if (new_children.size() > 1) - fcmp::tower_cycle::extend_zeroes(curve, new_children.size() - 1, prior_children); - } - else if (offset > 0) - { - // If 
we're updating the parent hash and no children were updated, then we're just adding new children - // to the existing last chunk and can fill priors with 0 - fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); - } - else - { - // If the last chunk is already full and isn't updated in any way, then we just get a new parent - return get_new_parent(curve, new_children); - } + const std::size_t offset) const; - return curve.hash_grow( - last_chunk_ptr->last_parent, - offset, - typename C::Chunk{prior_children.data(), prior_children.size()}, - new_children - ); - } - - // TODO: move impl into cpp + // After hashing a layer of children points, convert those children x-coordinates into their respective cycle + // scalars, and prepare them to be hashed for the next layer template std::vector next_child_scalars_from_children(const C_CHILD &c_child, const LastChunkData *last_child_chunk_ptr, const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children) - { - std::vector child_scalars; - - // The existing root would have a size of 1 - const bool updating_root_layer = last_child_chunk_ptr != nullptr - && last_child_chunk_ptr->parent_layer_size == 1; - - // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when - // hashing the *existing* root layer - if (updating_root_layer) - { - // We should be updating the existing root, there shouldn't be a last parent chunk - CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + const LayerExtension &children); - // If the children don't already include the existing root, then we need to include it to be hashed - // - the children would include the existing root already if the existing root was updated in the child - // layer (the start_idx would be 0) - if (children.start_idx > 0) - child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); - } - - // Convert child 
points to scalars - tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); - - return child_scalars; - } - - // TODO: move impl into cpp + // Hash chunks of a layer of new children, outputting the next layer's parents template void hash_layer(const C &curve, const LastChunkData *last_parent_chunk_ptr, const std::vector &child_scalars, const std::size_t children_start_idx, const std::size_t chunk_width, - LayerExtension &parents_out) - { - parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size; - parents_out.hashes.clear(); - - CHECK_AND_ASSERT_THROW_MES(!child_scalars.empty(), "empty child scalars"); - - // If the child layer had its existing last hash updated (if the new children include the last element in - // the child layer), then we'll need to use the last hash's prior version in order to update the existing - // last parent hash in this layer - // - Note: the leaf layer is strictly append-only, so this cannot be true for the leaf layer - const bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) - ? false - : last_parent_chunk_ptr->child_layer_size == (children_start_idx + 1); - - std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset; - - // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk - // will start from there and may need 1 more to fill - CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset"); - if (child_layer_last_hash_updated) - { - MDEBUG("child_layer_last_hash_updated, updating offset: " << offset); - offset = offset > 0 ? 
(offset - 1) : (chunk_width - 1); - } - - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0 || child_layer_last_hash_updated) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } - - // See how many children we need to fill up the existing last chunk - std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset); - - MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() - << " , offset: " << offset); - - // Hash chunks of child scalars to create the parent hashes - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < child_scalars.size()) - { - const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C::Chunk chunk{chunk_start, chunk_size}; - - for (const auto &c : chunk) - MDEBUG("Hashing " << curve.to_string(c)); - - // Hash the chunk of children - typename C::Point chunk_hash = chunk_start_idx == 0 - ? 
get_first_parent(curve, - chunk, - chunk_width, - child_layer_last_hash_updated, - last_parent_chunk_ptr, - offset) - : get_new_parent(curve, chunk); - - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) - << " , chunk_size: " << chunk_size); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); - - // Advance to the next chunk - chunk_start_idx += chunk_size; - - // Prepare for next loop if there should be one - if (chunk_start_idx == child_scalars.size()) - break; - - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); - chunk_size = std::min(chunk_width, child_scalars.size() - chunk_start_idx); - } - } - - // TODO: move impl into cpp - std::vector flatten_leaves(const std::vector &leaves) const - { - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - - for (const auto &l : leaves) - { - // TODO: implement without cloning - flattened_leaves.emplace_back(m_c2.clone(l.O_x)); - flattened_leaves.emplace_back(m_c2.clone(l.I_x)); - flattened_leaves.emplace_back(m_c2.clone(l.C_x)); - } - - return flattened_leaves; - }; + LayerExtension &parents_out); //member variables private: diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 16527827a7a..b17b5a05f06 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -35,7 +35,6 @@ using Helios = fcmp::tower_cycle::Helios; using Selene = fcmp::tower_cycle::Selene; -// TODO: make this the instantiation in curve_trees.h/.cpp using CurveTreesV1 = fcmp::CurveTrees; class CurveTreesUnitTest From 5ad026975a60d16046c33fcc4f5be346a7ab96ff Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 23 May 2024 10:48:08 -0700 Subject: [PATCH 014/127] Cleaner template usage, moved static functions out of CurveTrees class --- src/fcmp/curve_trees.cpp | 
420 ++++++++++++++----------------- src/fcmp/curve_trees.h | 105 ++++---- src/fcmp/tower_cycle.cpp | 66 ++++- src/fcmp/tower_cycle.h | 40 +-- tests/unit_tests/curve_trees.cpp | 94 +++---- tests/unit_tests/curve_trees.h | 16 +- 6 files changed, 350 insertions(+), 391 deletions(-) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 0ad0c1aed13..3399d8146bf 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -31,192 +31,22 @@ namespace fcmp { +namespace curve_trees +{ //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -using Helios = tower_cycle::Helios; -using Selene = tower_cycle::Selene; - // Instantiate the tower cycle types template class CurveTrees; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -template<> -CurveTrees::LeafTuple CurveTrees::output_to_leaf_tuple( - const crypto::public_key &O, - const crypto::public_key &C) const -{ - crypto::ec_point I; - crypto::derive_key_image_generator(O, I); - - return LeafTuple{ - .O_x = fcmp::tower_cycle::ed_25519_point_to_scalar(O), - .I_x = fcmp::tower_cycle::ed_25519_point_to_scalar(I), - .C_x = fcmp::tower_cycle::ed_25519_point_to_scalar(C) - }; -}; +// Public helper functions //---------------------------------------------------------------------------------------------------------------------- -template -typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( - const LastChunks &existing_last_chunks, - const std::vector &new_leaf_tuples) -{ - TreeExtension tree_extension; - - if (new_leaf_tuples.empty()) - return tree_extension; - - const auto &c1_last_chunks = 
existing_last_chunks.c1_last_chunks; - const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; - - // Set the leaf start idx - tree_extension.leaves.start_idx = c2_last_chunks.empty() - ? 0 - : c2_last_chunks[0].child_layer_size; - - // Copy the leaves - // TODO: don't copy here - tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (const auto &leaf : new_leaf_tuples) - { - tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = m_c2.clone(leaf.O_x), - .I_x = m_c2.clone(leaf.I_x), - .C_x = m_c2.clone(leaf.C_x) - }); - } - - auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; - auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; - - const std::vector flattened_leaves = this->flatten_leaves(new_leaf_tuples); - - // Hash the leaf layer - LayerExtension leaf_parents; - this->hash_layer(m_c2, - c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], - flattened_leaves, - tree_extension.leaves.start_idx, - m_leaf_layer_chunk_width, - leaf_parents); - - c2_layer_extensions_out.emplace_back(std::move(leaf_parents)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - // Alternate between hashing c2 children, c1 children, c2, c1, ... - bool parent_is_c1 = true; - - std::size_t c1_last_idx = 0; - std::size_t c2_last_idx = 0; - // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) - while (true) - { - const LastChunkData *c1_last_chunk_ptr = (c1_last_chunks.size() <= c1_last_idx) - ? nullptr - : &c1_last_chunks[c1_last_idx]; - - const LastChunkData *c2_last_chunk_ptr = (c2_last_chunks.size() <= c2_last_idx) - ? 
nullptr - : &c2_last_chunks[c2_last_idx]; - - // TODO: templated function - if (parent_is_c1) - { - CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); - - const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; - - const auto c1_child_scalars = this->next_child_scalars_from_children(m_c2, - c2_last_chunk_ptr, - c1_last_chunk_ptr, - c2_child_extension); - - LayerExtension c1_layer_extension; - this->hash_layer(m_c1, - c1_last_chunk_ptr, - c1_child_scalars, - c2_child_extension.start_idx, - m_c1_width, - c1_layer_extension); - - c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); - - // Check if we just added the root - if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c2_last_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); - - const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; - - const auto c2_child_scalars = this->next_child_scalars_from_children(m_c1, - c1_last_chunk_ptr, - c2_last_chunk_ptr, - c1_child_extension); - - LayerExtension c2_layer_extension; - this->hash_layer(m_c2, - c2_last_chunk_ptr, - c2_child_scalars, - c1_child_extension.start_idx, - m_c2_width, - c2_layer_extension); - - c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c1_last_idx; - } - - parent_is_c1 = !parent_is_c1; - } -}; -//---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- -// Private member functions 
-//---------------------------------------------------------------------------------------------------------------------- -template -std::vector CurveTrees::flatten_leaves(const std::vector &leaves) const -{ - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - - for (const auto &l : leaves) - { - // TODO: implement without cloning - flattened_leaves.emplace_back(m_c2.clone(l.O_x)); - flattened_leaves.emplace_back(m_c2.clone(l.I_x)); - flattened_leaves.emplace_back(m_c2.clone(l.C_x)); - } - - return flattened_leaves; -}; -//---------------------------------------------------------------------------------------------------------------------- -// Explicit instantiations -template Helios::Point CurveTrees::get_new_parent(const Helios &curve, - const Helios::Chunk &new_children) const; - -template Selene::Point CurveTrees::get_new_parent(const Selene &curve, - const Selene::Chunk &new_children) const; -//---------------------------------------------------------------------------------------------------------------------- -// Implementation -template -template -typename C::Point CurveTrees::get_new_parent(const C &curve, - const typename C::Chunk &new_children) const +template +typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children) { // New parent means no prior children, fill priors with 0 std::vector prior_children; - fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); return curve.hash_grow( curve.m_hash_init_point, @@ -227,34 +57,20 @@ typename C::Point CurveTrees::get_new_parent(const C &curve, }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// Explicit instantiations -template Helios::Point 
CurveTrees::get_first_parent(const Helios &curve, - const Helios::Chunk &new_children, - const std::size_t chunk_width, - const bool child_layer_last_hash_updated, - const LastChunkData *last_chunk_ptr, - const std::size_t offset) const; - -template Selene::Point CurveTrees::get_first_parent(const Selene &curve, - const Selene::Chunk &new_children, - const std::size_t chunk_width, - const bool child_layer_last_hash_updated, - const LastChunkData *last_chunk_ptr, - const std::size_t offset) const; +// Static functions //---------------------------------------------------------------------------------------------------------------------- -// Implementation -template -template -typename C::Point CurveTrees::get_first_parent(const C &curve, +// Hash the first chunk of children being added to a layer +template +static typename C::Point get_first_parent(const C &curve, const typename C::Chunk &new_children, const std::size_t chunk_width, const bool child_layer_last_hash_updated, const LastChunkData *last_chunk_ptr, - const std::size_t offset) const + const std::size_t offset) { // If no last chunk exists, we can get a new parent if (last_chunk_ptr == nullptr) - return this->get_new_parent(curve, new_children); + return get_new_parent(curve, new_children); std::vector prior_children; @@ -265,18 +81,18 @@ typename C::Point CurveTrees::get_first_parent(const C &curve, // Extend prior children by zeroes for any additional new children, since they must be new if (new_children.size() > 1) - fcmp::tower_cycle::extend_zeroes(curve, new_children.size() - 1, prior_children); + tower_cycle::extend_zeroes(curve, new_children.size() - 1, prior_children); } else if (offset > 0) { // If we're updating the parent hash and no children were updated, then we're just adding new children // to the existing last chunk and can fill priors with 0 - fcmp::tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + tower_cycle::extend_zeroes(curve, new_children.size(), 
prior_children); } else { // If the last chunk is already full and isn't updated in any way, then we just get a new parent - return this->get_new_parent(curve, new_children); + return get_new_parent(curve, new_children); } return curve.hash_grow( @@ -287,22 +103,10 @@ typename C::Point CurveTrees::get_first_parent(const C &curve, ); }; //---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- -// Explicit instantiations -template std::vector CurveTrees::next_child_scalars_from_children(const Selene &c_child, - const LastChunkData *last_child_chunk_ptr, - const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children); - -template std::vector CurveTrees::next_child_scalars_from_children(const Helios &c_child, - const LastChunkData *last_child_chunk_ptr, - const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children); -//---------------------------------------------------------------------------------------------------------------------- -// Implementation -template +// After hashing a layer of children points, convert those children x-coordinates into their respective cycle +// scalars, and prepare them to be hashed for the next layer template -std::vector CurveTrees::next_child_scalars_from_children(const C_CHILD &c_child, +static std::vector next_child_scalars_from_children(const C_CHILD &c_child, const LastChunkData *last_child_chunk_ptr, const LastChunkData *last_parent_chunk_ptr, const LayerExtension &children) @@ -333,26 +137,9 @@ std::vector CurveTrees::next_child_scalars_fr return child_scalars; }; //---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- -// 
Explicit instantiations -template void CurveTrees::hash_layer(const Helios &curve, - const LastChunkData *last_parent_chunk_ptr, - const std::vector &child_scalars, - const std::size_t children_start_idx, - const std::size_t chunk_width, - LayerExtension &parents_out); - -template void CurveTrees::hash_layer(const Selene &curve, - const LastChunkData *last_parent_chunk_ptr, - const std::vector &child_scalars, - const std::size_t children_start_idx, - const std::size_t chunk_width, - LayerExtension &parents_out); -//---------------------------------------------------------------------------------------------------------------------- -// Implementation -template +// Hash chunks of a layer of new children, outputting the next layer's parents template -void CurveTrees::hash_layer(const C &curve, +static void hash_layer(const C &curve, const LastChunkData *last_parent_chunk_ptr, const std::vector &child_scalars, const std::size_t children_start_idx, @@ -415,7 +202,7 @@ void CurveTrees::hash_layer(const C &curve, child_layer_last_hash_updated, last_parent_chunk_ptr, offset) - : this->get_new_parent(curve, chunk); + : get_new_parent(curve, chunk); MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) << " , chunk_size: " << chunk_size); @@ -437,4 +224,169 @@ void CurveTrees::hash_layer(const C &curve, }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- +// CurveTrees public member functions +//---------------------------------------------------------------------------------------------------------------------- +template<> +CurveTrees::LeafTuple CurveTrees::output_to_leaf_tuple( + const crypto::public_key &O, + const crypto::public_key &C) const +{ + crypto::ec_point I; + crypto::derive_key_image_generator(O, I); + + return LeafTuple{ + .O_x 
= tower_cycle::ed_25519_point_to_scalar(O), + .I_x = tower_cycle::ed_25519_point_to_scalar(I), + .C_x = tower_cycle::ed_25519_point_to_scalar(C) + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +template +typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( + const LastChunks &existing_last_chunks, + const std::vector &new_leaf_tuples) +{ + TreeExtension tree_extension; + + if (new_leaf_tuples.empty()) + return tree_extension; + + const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; + const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; + + // Set the leaf start idx + tree_extension.leaves.start_idx = c2_last_chunks.empty() + ? 0 + : c2_last_chunks[0].child_layer_size; + + // Copy the leaves + // TODO: don't copy here + tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); + for (const auto &leaf : new_leaf_tuples) + { + tree_extension.leaves.tuples.emplace_back(LeafTuple{ + .O_x = m_c2.clone(leaf.O_x), + .I_x = m_c2.clone(leaf.I_x), + .C_x = m_c2.clone(leaf.C_x) + }); + } + + auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; + + const std::vector flattened_leaves = this->flatten_leaves(new_leaf_tuples); + + // Hash the leaf layer + LayerExtension leaf_parents; + hash_layer(m_c2, + c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], + flattened_leaves, + tree_extension.leaves.start_idx, + m_leaf_layer_chunk_width, + leaf_parents); + + c2_layer_extensions_out.emplace_back(std::move(leaf_parents)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + // Alternate between hashing c2 children, c1 children, c2, c1, ... 
+ bool parent_is_c1 = true; + + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; + // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) + while (true) + { + const LastChunkData *c1_last_chunk_ptr = (c1_last_chunks.size() <= c1_last_idx) + ? nullptr + : &c1_last_chunks[c1_last_idx]; + + const LastChunkData *c2_last_chunk_ptr = (c2_last_chunks.size() <= c2_last_idx) + ? nullptr + : &c2_last_chunks[c2_last_idx]; + + // TODO: templated function + if (parent_is_c1) + { + CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + + const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; + + const auto c1_child_scalars = next_child_scalars_from_children(m_c2, + c2_last_chunk_ptr, + c1_last_chunk_ptr, + c2_child_extension); + + LayerExtension c1_layer_extension; + hash_layer(m_c1, + c1_last_chunk_ptr, + c1_child_scalars, + c2_child_extension.start_idx, + m_c1_width, + c1_layer_extension); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + + // Check if we just added the root + if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) + return tree_extension; + + ++c2_last_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + + const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; + + const auto c2_child_scalars = next_child_scalars_from_children(m_c1, + c1_last_chunk_ptr, + c2_last_chunk_ptr, + c1_child_extension); + + LayerExtension c2_layer_extension; + hash_layer(m_c2, + c2_last_chunk_ptr, + c2_child_scalars, + c1_child_extension.start_idx, + m_c2_width, + c2_layer_extension); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + + // Check if we just added the root + if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) + return 
tree_extension; + + ++c1_last_idx; + } + + parent_is_c1 = !parent_is_c1; + } +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTrees private member functions +//---------------------------------------------------------------------------------------------------------------------- +template +std::vector CurveTrees::flatten_leaves(const std::vector &leaves) const +{ + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (const auto &l : leaves) + { + // TODO: implement without cloning + flattened_leaves.emplace_back(m_c2.clone(l.O_x)); + flattened_leaves.emplace_back(m_c2.clone(l.I_x)); + flattened_leaves.emplace_back(m_c2.clone(l.C_x)); + } + + return flattened_leaves; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +} //namespace curve_trees } //namespace fcmp diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 82274d23c6a..903a6cc538a 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -37,10 +37,45 @@ // forward declarations class CurveTreesUnitTest; + namespace fcmp { +namespace curve_trees +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Hash a chunk of new children +template +typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children); +//---------------------------------------------------------------------------------------------------------------------- +// A 
layer of contiguous hashes starting from a specific start_idx in the tree +template +struct LayerExtension final +{ + std::size_t start_idx; + std::vector hashes; +}; -// TODO: longer descriptions +// Useful data from the last chunk in a layer +template +struct LastChunkData final +{ + // The total number of children % child layer chunk width + const std::size_t child_offset; + // The last child in the chunk (and therefore the last child in the child layer) + /* TODO: const */ typename C::Scalar last_child; + // The hash of the last chunk of child scalars + /* TODO: const */ typename C::Point last_parent; + // Total number of children in the child layer + const std::size_t child_layer_size; + // Total number of hashes in the parent layer + const std::size_t parent_layer_size; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// This class is useful help update the curve trees tree without needing to keep the entire tree in memory +// - It requires instantiation with the C1 and C2 curve classes and widths, hardening the tree structure +// - It ties the C2 curve in the tree to the leaf layer template class CurveTrees { @@ -72,7 +107,7 @@ class CurveTrees static const std::size_t LEAF_TUPLE_SIZE = 3; static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); - // Leaves in the tree + // Contiguous leaves in the tree, starting a specified start_idx in the leaf layer struct Leaves final { // Starting index in the leaf layer @@ -81,14 +116,6 @@ class CurveTrees std::vector tuples; }; - // A layer of contiguous hashes starting from a specific start_idx in the tree - template - struct LayerExtension final - { - std::size_t start_idx; - std::vector hashes; - }; - // A struct useful to extend an existing tree // - layers 
alternate between C1 and C2 // - c2_layer_extensions[0] is first layer after leaves, then c1_layer_extensions[0], c2_layer_extensions[1], etc @@ -99,22 +126,6 @@ class CurveTrees std::vector> c2_layer_extensions; }; - // Useful data from the last chunk in a layer - template - struct LastChunkData final - { - // The total number of children % child layer chunk width - /*TODO: const*/ std::size_t child_offset; - // The last child in the chunk (and therefore the last child in the child layer) - /*TODO: const*/ typename C::Scalar last_child; - // The hash of the last chunk of child scalars - /*TODO: const*/ typename C::Point last_parent; - // Total number of children in the child layer - /*TODO: const*/ std::size_t child_layer_size; - // Total number of hashes in the parent layer - /*TODO: const*/ std::size_t parent_layer_size; - }; - // Last chunk data from each layer in the tree // - layers alternate between C1 and C2 // - c2_last_chunks[0] is first layer after leaves, then c1_last_chunks[0], then c2_last_chunks[1], etc @@ -139,46 +150,24 @@ class CurveTrees // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] 
std::vector flatten_leaves(const std::vector &leaves) const; - // TODO: make below functions static functions inside curve_trees.cpp - // Hash a chunk of new children - template - typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children) const; - - // Hash the first chunk of children being added to a layer - template - typename C::Point get_first_parent(const C &curve, - const typename C::Chunk &new_children, - const std::size_t chunk_width, - const bool child_layer_last_hash_updated, - const LastChunkData *last_chunk_ptr, - const std::size_t offset) const; - - // After hashing a layer of children points, convert those children x-coordinates into their respective cycle - // scalars, and prepare them to be hashed for the next layer - template - std::vector next_child_scalars_from_children(const C_CHILD &c_child, - const LastChunkData *last_child_chunk_ptr, - const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children); - - // Hash chunks of a layer of new children, outputting the next layer's parents - template - void hash_layer(const C &curve, - const LastChunkData *last_parent_chunk_ptr, - const std::vector &child_scalars, - const std::size_t children_start_idx, - const std::size_t chunk_width, - LayerExtension &parents_out); - //member variables private: + // The curves const C1 &m_c1; const C2 &m_c2; + // The chunk widths of the layers in the tree tied to each curve const std::size_t m_c1_width; const std::size_t m_c2_width; + // The leaf layer has a distinct chunk width than the other layers const std::size_t m_leaf_layer_chunk_width; }; - +//---------------------------------------------------------------------------------------------------------------------- +using Helios = tower_cycle::Helios; +using Selene = tower_cycle::Selene; +using CurveTreesV1 = CurveTrees; +//---------------------------------------------------------------------------------------------------------------------- 
+//---------------------------------------------------------------------------------------------------------------------- +} //namespace curve_trees } //namespace fcmp diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index c954f7affc6..cca657fe40f 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -144,16 +144,7 @@ std::string Selene::to_string(const typename Selene::Point &point) const } //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) -{ - static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), - "expected same size ed25519 point to rust representation"); - - // TODO: implement reading just the x coordinate of ed25519 point in C/C++ - fcmp::tower_cycle::RustEd25519Point rust_point; - memcpy(&rust_point, &point, sizeof(fcmp::tower_cycle::RustEd25519Point)); - return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); -} +// Exposed helper functions //---------------------------------------------------------------------------------------------------------------------- Helios::Generators random_helios_generators(std::size_t n) { @@ -175,6 +166,61 @@ Selene::Point random_selene_hash_init_point() return fcmp_rust::random_selene_hash_init_point(); } //---------------------------------------------------------------------------------------------------------------------- +SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) +{ + static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), + "expected same size ed25519 point to rust representation"); + + // TODO: implement reading just the x coordinate of ed25519 point in C/C++ + fcmp::tower_cycle::RustEd25519Point rust_point; + memcpy(&rust_point, &point, 
sizeof(fcmp::tower_cycle::RustEd25519Point)); + return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); +} +//---------------------------------------------------------------------------------------------------------------------- +template +void extend_zeroes(const C &curve, + const std::size_t num_zeroes, + std::vector &zeroes_inout) +{ + zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); + + for (std::size_t i = 0; i < num_zeroes; ++i) + zeroes_inout.emplace_back(curve.zero_scalar()); +} + +// Explicit instantiations +template void extend_zeroes(const Helios &curve, + const std::size_t num_zeroes, + std::vector &zeroes_inout); + +template void extend_zeroes(const Selene &curve, + const std::size_t num_zeroes, + std::vector &zeroes_inout); +//---------------------------------------------------------------------------------------------------------------------- +template +void extend_scalars_from_cycle_points(const C_POINTS &curve, + const std::vector &points, + std::vector &scalars_out) +{ + scalars_out.reserve(scalars_out.size() + points.size()); + + for (const auto &point : points) + { + // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ + typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); + scalars_out.push_back(std::move(scalar)); + } +} + +// Explicit instantiations +template void extend_scalars_from_cycle_points(const Helios &curve, + const std::vector &points, + std::vector &scalars_out); + +template void extend_scalars_from_cycle_points(const Selene &curve, + const std::vector &points, + std::vector &scalars_out); +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace tower_cycle } //namespace fcmp diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 
dcd3092cd76..bf08bd04feb 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -67,8 +67,7 @@ struct SeleneT final }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// Parent curve class that curves in a cycle must implement -//---------------------------------------------------------------------------------------------------------------------- +// Abstract parent curve class that curves in a cycle must implement template class Curve { @@ -185,9 +184,6 @@ class Selene final : public Curve }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// Ed25519 point x-coordinates are Selene scalars -SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); -//---------------------------------------------------------------------------------------------------------------------- // TODO: use static constants and get rid of the below functions (WARNING: number of generators must be >= curve's // width, and also need to account for selene leaf layer 3x) Helios::Generators random_helios_generators(std::size_t n); @@ -195,35 +191,19 @@ Selene::Generators random_selene_generators(std::size_t n); Helios::Point random_helios_hash_init_point(); Selene::Point random_selene_hash_init_point(); //---------------------------------------------------------------------------------------------------------------------- +// Ed25519 point x-coordinates are Selene scalars +SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); //---------------------------------------------------------------------------------------------------------------------- -// TODO: implement in cpp 
file -template -static void extend_zeroes(const C &curve, +template +void extend_zeroes(const C &curve, const std::size_t num_zeroes, - std::vector &zeroes_inout) -{ - zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); - - for (std::size_t i = 0; i < num_zeroes; ++i) - zeroes_inout.emplace_back(curve.zero_scalar()); -} + std::vector &zeroes_inout); //---------------------------------------------------------------------------------------------------------------------- -// TODO: move impl into cpp -template -static void extend_scalars_from_cycle_points(const C_POINTS &curve, +template +void extend_scalars_from_cycle_points(const C_POINTS &curve, const std::vector &points, - std::vector &scalars_out) -{ - scalars_out.reserve(scalars_out.size() + points.size()); - - for (const auto &point : points) - { - // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ - typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); - scalars_out.push_back(std::move(scalar)); - } -} + std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -}//namespace curves +}//namespace tower_cycle }//namespace fcmp diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index f0723690a2d..44f123b7a1d 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -35,7 +35,7 @@ // CurveTreesUnitTest helpers //---------------------------------------------------------------------------------------------------------------------- template -static CurveTreesV1::LastChunkData get_last_child_layer_chunk(const C &curve, +static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const C &curve, const std::size_t child_layer_size, const std::size_t parent_layer_size, const std::size_t 
chunk_width, @@ -47,7 +47,7 @@ static CurveTreesV1::LastChunkData get_last_child_layer_chunk(const C &curve, const std::size_t child_offset = child_layer_size % chunk_width; - return CurveTreesV1::LastChunkData{ + return fcmp::curve_trees::LastChunkData{ .child_offset = child_offset, .last_child = curve.clone(last_child), .last_parent = curve.clone(last_parent), @@ -56,6 +56,39 @@ static CurveTreesV1::LastChunkData get_last_child_layer_chunk(const C &curve, }; } //---------------------------------------------------------------------------------------------------------------------- +template +static bool validate_layer(const C_PARENT &c_parent, + const CurveTreesUnitTest::Layer &parents, + const std::vector &child_scalars, + const std::size_t max_chunk_size) +{ + // Hash chunk of children scalars, then see if the hash matches up to respective parent + std::size_t chunk_start_idx = 0; + for (std::size_t i = 0; i < parents.size(); ++i) + { + CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); + const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); + CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); + + const typename C_PARENT::Point &parent = parents[i]; + + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + + const typename C_PARENT::Point chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk); + + const auto actual_bytes = c_parent.to_bytes(parent); + const auto expected_bytes = c_parent.to_bytes(chunk_hash); + CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); + + chunk_start_idx += chunk_size; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); + + return true; +} 
+//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTreesUnitTest implementations //---------------------------------------------------------------------------------------------------------------------- @@ -191,7 +224,7 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext if (use_c2) { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); - const CurveTreesV1::LayerExtension &c2_ext = c2_extensions[c2_idx]; + const fcmp::curve_trees::LayerExtension &c2_ext = c2_extensions[c2_idx]; CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); @@ -217,7 +250,7 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext else { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); - const CurveTreesV1::LayerExtension &c1_ext = c1_extensions[c1_idx]; + const fcmp::curve_trees::LayerExtension &c1_ext = c1_extensions[c1_idx]; CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); @@ -245,39 +278,6 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext } } //---------------------------------------------------------------------------------------------------------------------- -template -bool CurveTreesUnitTest::validate_layer(const C_PARENT &c_parent, - const CurveTreesUnitTest::Layer &parents, - const std::vector &child_scalars, - const std::size_t max_chunk_size) -{ - // Hash chunk of children scalars, then see if the hash matches up to respective parent - std::size_t chunk_start_idx = 0; - for (std::size_t i = 0; i < parents.size(); ++i) - { - CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); - const std::size_t chunk_size = 
std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); - CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); - - const typename C_PARENT::Point &parent = parents[i]; - - const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; - - const typename C_PARENT::Point chunk_hash = m_curve_trees.get_new_parent(c_parent, chunk); - - const auto actual_bytes = c_parent.to_bytes(parent); - const auto expected_bytes = c_parent.to_bytes(chunk_hash); - CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); - - chunk_start_idx += chunk_size; - } - - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); - - return true; -} -//---------------------------------------------------------------------------------------------------------------------- bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) { const auto &leaves = tree.leaves; @@ -317,7 +317,7 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) children, child_scalars); - const bool valid = this->validate_layer(m_curve_trees.m_c2, + const bool valid = validate_layer(m_curve_trees.m_c2, parents, child_scalars, m_curve_trees.m_c2_width); @@ -342,7 +342,7 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) children, child_scalars); - const bool valid = this->validate_layer( + const bool valid = validate_layer( m_curve_trees.m_c1, parents, child_scalars, @@ -357,7 +357,7 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) } // Now validate leaves - return this->validate_layer(m_curve_trees.m_c2, + return validate_layer(m_curve_trees.m_c2, c2_layers[0], m_curve_trees.flatten_leaves(leaves), m_curve_trees.m_leaf_layer_chunk_width); @@ -382,7 +382,7 @@ void CurveTreesUnitTest::log_last_chunks(const 
CurveTreesV1::LastChunks &last_ch { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_chunks.size(), "unexpected c2 layer"); - const CurveTreesV1::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; + const fcmp::curve_trees::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; MDEBUG("child_offset: " << last_chunk.child_offset << " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child) @@ -396,7 +396,7 @@ void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_ch { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_chunks.size(), "unexpected c1 layer"); - const CurveTreesV1::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; + const fcmp::curve_trees::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; MDEBUG("child_offset: " << last_chunk.child_offset << " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child) @@ -441,7 +441,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer"); - const CurveTreesV1::LayerExtension &c2_layer = c2_extensions[c2_idx]; + const fcmp::curve_trees::LayerExtension &c2_layer = c2_extensions[c2_idx]; MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) @@ -454,7 +454,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer"); - const CurveTreesV1::LayerExtension &c1_layer = c1_extensions[c1_idx]; + const fcmp::curve_trees::LayerExtension &c1_layer = c1_extensions[c1_idx]; MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) @@ -582,7 +582,7 @@ static void grow_tree_test(Helios &helios, helios_width, selene_width); - CurveTreesUnitTest curve_trees_accesor{curve_trees}; + CurveTreesUnitTest curve_trees_accessor{curve_trees}; 
CHECK_AND_ASSERT_THROW_MES(helios_width > 1, "helios width must be > 1"); CHECK_AND_ASSERT_THROW_MES(selene_width > 1, "selene width must be > 1"); @@ -629,7 +629,7 @@ static void grow_tree_test(Helios &helios, MDEBUG("Adding " << init_leaves << " leaves to tree"); grow_tree(curve_trees, - curve_trees_accesor, + curve_trees_accessor, init_leaves, global_tree); @@ -639,7 +639,7 @@ static void grow_tree_test(Helios &helios, MDEBUG("Extending tree by " << ext_leaves << " leaves"); grow_tree(curve_trees, - curve_trees_accesor, + curve_trees_accessor, ext_leaves, global_tree); diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index b17b5a05f06..cd3d8f370af 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -32,11 +32,11 @@ #include "fcmp/tower_cycle.h" #include "misc_log_ex.h" -using Helios = fcmp::tower_cycle::Helios; -using Selene = fcmp::tower_cycle::Selene; - -using CurveTreesV1 = fcmp::CurveTrees; +using Helios = fcmp::curve_trees::Helios; +using Selene = fcmp::curve_trees::Selene; +using CurveTreesV1 = fcmp::curve_trees::CurveTreesV1; +// Helper class that can access the private members of the CurveTrees class class CurveTreesUnitTest { public: @@ -71,14 +71,6 @@ class CurveTreesUnitTest void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); void log_tree(const CurveTreesUnitTest::Tree &tree); -//private member functions -private: - template - bool validate_layer(const C_PARENT &c_parent, - const Layer &parents, - const std::vector &child_scalars, - const std::size_t max_chunk_size); - private: CurveTreesV1 &m_curve_trees; }; From e68ea2e054ea9048b60f2f79591863712b7aa2e0 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 23 May 2024 12:34:48 -0700 Subject: [PATCH 015/127] small cleanup --- src/fcmp/curve_trees.cpp | 9 ++++----- src/fcmp/tower_cycle.cpp | 40 ++++++++++++++++++++-------------------- src/fcmp/tower_cycle.h | 14 +++++++------- 3 files changed, 31 insertions(+), 32 
deletions(-) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 3399d8146bf..0fd0fb9134e 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -73,7 +73,6 @@ static typename C::Point get_first_parent(const C &curve, return get_new_parent(curve, new_children); std::vector prior_children; - if (child_layer_last_hash_updated) { // If the last chunk has updated children in it, then we need to get the delta to the old children @@ -299,18 +298,18 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) while (true) { - const LastChunkData *c1_last_chunk_ptr = (c1_last_chunks.size() <= c1_last_idx) + const LastChunkData *c1_last_chunk_ptr = (c1_last_idx >= c1_last_chunks.size()) ? nullptr : &c1_last_chunks[c1_last_idx]; - const LastChunkData *c2_last_chunk_ptr = (c2_last_chunks.size() <= c2_last_idx) + const LastChunkData *c2_last_chunk_ptr = (c2_last_idx >= c2_last_chunks.size()) ? 
nullptr : &c2_last_chunks[c2_last_idx]; // TODO: templated function if (parent_is_c1) { - CHECK_AND_ASSERT_THROW_MES(c2_layer_extensions_out.size() > c2_last_idx, "missing c2 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_last_idx < c2_layer_extensions_out.size(), "missing c2 layer"); const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; @@ -337,7 +336,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio } else { - CHECK_AND_ASSERT_THROW_MES(c1_layer_extensions_out.size() > c1_last_idx, "missing c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c1_last_idx < c1_layer_extensions_out.size(), "missing c1 layer"); const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index cca657fe40f..96fa5fe09a0 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -146,26 +146,6 @@ std::string Selene::to_string(const typename Selene::Point &point) const //---------------------------------------------------------------------------------------------------------------------- // Exposed helper functions //---------------------------------------------------------------------------------------------------------------------- -Helios::Generators random_helios_generators(std::size_t n) -{ - return fcmp_rust::random_helios_generators(n); -} -//---------------------------------------------------------------------------------------------------------------------- -Selene::Generators random_selene_generators(std::size_t n) -{ - return fcmp_rust::random_selene_generators(n); -} -//---------------------------------------------------------------------------------------------------------------------- -Helios::Point random_helios_hash_init_point() -{ - return fcmp_rust::random_helios_hash_init_point(); -} -//---------------------------------------------------------------------------------------------------------------------- -Selene::Point random_selene_hash_init_point() -{ - return 
fcmp_rust::random_selene_hash_init_point(); -} -//---------------------------------------------------------------------------------------------------------------------- SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) { static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), @@ -221,6 +201,26 @@ template void extend_scalars_from_cycle_points(const Selene &cur const std::vector &points, std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- +Helios::Generators random_helios_generators(std::size_t n) +{ + return fcmp_rust::random_helios_generators(n); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Generators random_selene_generators(std::size_t n) +{ + return fcmp_rust::random_selene_generators(n); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point random_helios_hash_init_point() +{ + return fcmp_rust::random_helios_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point random_selene_hash_init_point() +{ + return fcmp_rust::random_selene_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace tower_cycle } //namespace fcmp diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index bf08bd04feb..f63cac5ffe4 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -184,13 +184,6 @@ class Selene final : public Curve }; 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// TODO: use static constants and get rid of the below functions (WARNING: number of generators must be >= curve's -// width, and also need to account for selene leaf layer 3x) -Helios::Generators random_helios_generators(std::size_t n); -Selene::Generators random_selene_generators(std::size_t n); -Helios::Point random_helios_hash_init_point(); -Selene::Point random_selene_hash_init_point(); -//---------------------------------------------------------------------------------------------------------------------- // Ed25519 point x-coordinates are Selene scalars SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); //---------------------------------------------------------------------------------------------------------------------- @@ -204,6 +197,13 @@ void extend_scalars_from_cycle_points(const C_POINTS &curve, const std::vector &points, std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- +// TODO: use static constants and get rid of the below functions (WARNING: number of generators must be >= curve's +// width, and also need to account for selene leaf layer 3x) +Helios::Generators random_helios_generators(std::size_t n); +Selene::Generators random_selene_generators(std::size_t n); +Helios::Point random_helios_hash_init_point(); +Selene::Point random_selene_hash_init_point(); +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- }//namespace tower_cycle }//namespace fcmp From af9b74fcc7d0eddc57b6d92aec7c14aa67e4d1b0 Mon Sep 17 
00:00:00 2001 From: j-berman Date: Thu, 23 May 2024 23:56:23 -0700 Subject: [PATCH 016/127] start LMDB grow_tree --- src/blockchain_db/CMakeLists.txt | 1 + src/blockchain_db/blockchain_db.h | 4 + src/blockchain_db/lmdb/db_lmdb.cpp | 148 +++++++++++++++++++++++++++-- src/blockchain_db/lmdb/db_lmdb.h | 18 +++- src/blockchain_db/testdb.h | 2 + src/fcmp/curve_trees.cpp | 2 +- src/fcmp/curve_trees.h | 2 +- tests/unit_tests/blockchain_db.cpp | 34 +++++++ tests/unit_tests/curve_trees.cpp | 17 +--- tests/unit_tests/curve_trees.h | 18 ++++ 10 files changed, 217 insertions(+), 29 deletions(-) diff --git a/src/blockchain_db/CMakeLists.txt b/src/blockchain_db/CMakeLists.txt index e94705b221d..9c55cebaa5f 100644 --- a/src/blockchain_db/CMakeLists.txt +++ b/src/blockchain_db/CMakeLists.txt @@ -45,6 +45,7 @@ target_link_libraries(blockchain_db PUBLIC common cncrypto + fcmp ringct ${LMDB_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 3e953da30d0..b5224127e07 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -40,6 +40,7 @@ #include "cryptonote_basic/difficulty.h" #include "cryptonote_basic/hardfork.h" #include "cryptonote_protocol/enums.h" +#include "fcmp/curve_trees.h" /** \file * Cryptonote Blockchain Database Interface @@ -1764,6 +1765,9 @@ class BlockchainDB */ virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; + // TODO: description and make private + virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::vector &new_leaves) = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index af456b00dca..35d04147356 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -200,7 +200,7 @@ namespace * spent_keys input hash - * * leaves leaf_idx {O.x, I.x, C.x} - * branches layer_idx [{branch_idx, 
branch_hash}...] + * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * * txpool_meta txn hash txn metadata * txpool_blob txn hash txn blob @@ -213,7 +213,7 @@ namespace * attached as a prefix on the Data to serve as the DUPSORT key. * (DUPFIXED saves 8 bytes per record.) * - * The output_amounts and branches tables don't use a dummy key, but use DUPSORT + * The output_amounts and layers tables don't use a dummy key, but use DUPSORT */ const char* const LMDB_BLOCKS = "blocks"; const char* const LMDB_BLOCK_HEIGHTS = "block_heights"; @@ -233,7 +233,7 @@ const char* const LMDB_SPENT_KEYS = "spent_keys"; // Curve trees tree types const char* const LMDB_LEAVES = "leaves"; -const char* const LMDB_BRANCHES = "branches"; +const char* const LMDB_LAYERS = "layers"; const char* const LMDB_TXPOOL_META = "txpool_meta"; const char* const LMDB_TXPOOL_BLOB = "txpool_blob"; @@ -358,6 +358,11 @@ typedef struct outtx { uint64_t local_index; } outtx; +typedef struct layer_val { + uint64_t child_chunk_idx; + std::array child_chunk_hash; +} layer_val; + std::atomic mdb_txn_safe::num_active_txns{0}; std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT; @@ -1295,6 +1300,135 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } +template +void BlockchainLMDB::grow_layer(const std::vector> &layer_extensions, + const std::size_t ext_idx, + const std::size_t layer_idx, + const fcmp::curve_trees::LastChunkData *last_chunk_ptr) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(layers) + + CHECK_AND_ASSERT_THROW_MES(ext_idx < layer_extensions.size(), "unexpected layer extension"); + const auto &ext = layer_extensions[ext_idx]; + + CHECK_AND_ASSERT_THROW_MES(!ext.hashes.empty(), "empty layer extension"); + + // TODO: make sure last_chunk_ptr->parent_layer_size is correct + + // Check if we started at 0 if fresh layer, or if started 1 after the last element in the layer + const bool 
started_after_tip = (ext.start_idx == 0 && last_chunk_ptr == nullptr) + || (last_chunk_ptr != nullptr && ext.start_idx == last_chunk_ptr->parent_layer_size); + + // Check if we updated the last element in the layer + const bool started_at_tip = (last_chunk_ptr != nullptr + && (ext.start_idx + 1) == last_chunk_ptr->parent_layer_size); + + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected layer start"); + + MDB_val_copy k(layer_idx); + + if (started_at_tip) + { + // We updated the last hash, so update it + layer_val lv; + lv.child_chunk_idx = ext.start_idx; + lv.child_chunk_hash = std::array(); // ext.hashes.front(); // TODO + MDB_val_set(v, lv); + + // We expect to overwrite the existing hash + // TODO: make sure the hash already exists + int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); + } + + // Now add all the new hashes found in the extension + for (std::size_t i = started_at_tip ? 1 : 0; i < ext.hashes.size(); ++i) + { + layer_val lv; + lv.child_chunk_idx = i + ext.start_idx; + lv.child_chunk_hash = std::array(); // ext.hashes[i]; // TODO + MDB_val_set(v, lv); + + // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. + // Adding MDB_NODUPDATA I assume re-introduces a key comparison. 
Benchmark MDB_NODUPDATA here + // MDB_NODUPDATA makes sure key/data pair doesn't already exist + int result = mdb_cursor_put(m_cur_layers, &k, &v, MDB_APPENDDUP | MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add hash: ", result).c_str())); + } +} + +void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::vector &new_leaves) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(leaves) + + // TODO: read every layer's last chunks + const auto last_chunks = fcmp::curve_trees::CurveTreesV1::LastChunks{}; + + const auto tree_extension = curve_trees.get_tree_extension(last_chunks, new_leaves); + + // Insert the leaves + const auto &leaves = tree_extension.leaves; + for (std::size_t i = 0; i < leaves.tuples.size(); ++i) + { + MDB_val_copy k(i + leaves.start_idx); + MDB_val_set(v, leaves.tuples[i]); + + // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. + // Adding MDB_NOOVERWRITE I assume re-introduces a key comparison. Benchmark NOOVERWRITE here + // MDB_NOOVERWRITE makes sure key doesn't already exist + int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPENDDUP | MDB_NOOVERWRITE); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + } + + // Grow the layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + const std::size_t layer_idx = c2_idx + c1_idx; + + if (use_c2) + { + const auto *c2_last_chunk_ptr = (c2_idx >= last_chunks.c2_last_chunks.size()) + ? 
nullptr + : &last_chunks.c2_last_chunks[c2_idx]; + + this->grow_layer(c2_extensions, c2_idx, layer_idx, c2_last_chunk_ptr); + + ++c2_idx; + } + else + { + const auto *c1_last_chunk_ptr = (c1_idx >= last_chunks.c1_last_chunks.size()) + ? nullptr + : &last_chunks.c1_last_chunks[c1_idx]; + + this->grow_layer(c1_extensions, c1_idx, layer_idx, c1_last_chunk_ptr); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + BlockchainLMDB::~BlockchainLMDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1445,7 +1579,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); - lmdb_db_open(txn, LMDB_BRANCHES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_branches, "Failed to open db handle for m_branches"); + lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta"); lmdb_db_open(txn, LMDB_TXPOOL_BLOB, MDB_CREATE, m_txpool_blob, "Failed to open db handle for m_txpool_blob"); @@ -1467,7 +1601,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); mdb_set_dupsort(txn, m_leaves, compare_uint64); - mdb_set_dupsort(txn, m_branches, compare_uint64); + mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); mdb_set_dupsort(txn, m_block_info, compare_uint64); if (!(mdb_flags & MDB_RDONLY)) @@ -1647,8 +1781,8 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", 
result).c_str())); if (auto result = mdb_drop(txn, m_leaves, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); - if (auto result = mdb_drop(txn, m_branches, 0)) - throw0(DB_ERROR(lmdb_error("Failed to drop m_branches: ", result).c_str())); + if (auto result = mdb_drop(txn, m_layers, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_layers: ", result).c_str())); (void)mdb_drop(txn, m_hf_starting_heights, 0); // this one is dropped in new code if (auto result = mdb_drop(txn, m_hf_versions, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_versions: ", result).c_str())); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index c31250af2fa..688f4f998d2 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -65,7 +65,7 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; MDB_cursor *m_txc_leaves; - MDB_cursor *m_txc_branches; + MDB_cursor *m_txc_layers; MDB_cursor *m_txc_txpool_meta; MDB_cursor *m_txc_txpool_blob; @@ -91,7 +91,7 @@ typedef struct mdb_txn_cursors #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys #define m_cur_leaves m_cursors->m_txc_leaves -#define m_cur_branches m_cursors->m_txc_branches +#define m_cur_layers m_cursors->m_txc_layers #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta #define m_cur_txpool_blob m_cursors->m_txc_txpool_blob #define m_cur_alt_blocks m_cursors->m_txc_alt_blocks @@ -115,7 +115,7 @@ typedef struct mdb_rflags bool m_rf_tx_outputs; bool m_rf_spent_keys; bool m_rf_leaves; - bool m_rf_branches; + bool m_rf_layers; bool m_rf_txpool_meta; bool m_rf_txpool_blob; bool m_rf_alt_blocks; @@ -363,6 +363,10 @@ class BlockchainLMDB : public BlockchainDB static int compare_hash32(const MDB_val *a, const MDB_val *b); static int compare_string(const MDB_val *a, const MDB_val *b); + // make private + virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const 
std::vector &new_leaves); + private: void do_resize(uint64_t size_increase=0); @@ -406,6 +410,12 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_spent_key(const crypto::key_image& k_image); + template + void grow_layer(const std::vector> &layer_extensions, + const std::size_t c_idx, + const std::size_t layer_idx, + const fcmp::curve_trees::LastChunkData *last_chunk_data); + uint64_t num_outputs() const; // Hard fork @@ -471,7 +481,7 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; MDB_dbi m_leaves; - MDB_dbi m_branches; + MDB_dbi m_layers; MDB_dbi m_txpool_meta; MDB_dbi m_txpool_blob; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 308bdd4c24b..f05338e1f30 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,6 +116,8 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} + virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::vector &new_leaves) override {}; virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 0fd0fb9134e..6801475bcb5 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -243,7 +243,7 @@ CurveTrees::LeafTuple CurveTrees::output_to_leaf template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const LastChunks &existing_last_chunks, - const std::vector &new_leaf_tuples) + const std::vector &new_leaf_tuples) const { TreeExtension tree_extension; diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 
903a6cc538a..642ff92318b 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -143,7 +143,7 @@ class CurveTrees // Take in the existing last chunks of each layer in the tree, as well as new leaves to add to the tree, // and return a tree extension struct that can be used to extend a global tree TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, - const std::vector &new_leaf_tuples); + const std::vector &new_leaf_tuples) const; //private member functions private: diff --git a/tests/unit_tests/blockchain_db.cpp b/tests/unit_tests/blockchain_db.cpp index 66219322e9e..e5085bccb96 100644 --- a/tests/unit_tests/blockchain_db.cpp +++ b/tests/unit_tests/blockchain_db.cpp @@ -39,6 +39,7 @@ #include "blockchain_db/blockchain_db.h" #include "blockchain_db/lmdb/db_lmdb.h" #include "cryptonote_basic/cryptonote_format_utils.h" +#include "curve_trees.h" using namespace cryptonote; using epee::string_tools::pod_to_hex; @@ -341,4 +342,37 @@ TYPED_TEST(BlockchainDBTest, RetrieveBlockData) ASSERT_HASH_EQ(get_block_hash(this->m_blocks[1].first), hashes[1]); } +TYPED_TEST(BlockchainDBTest, GrowCurveTrees) +{ + boost::filesystem::path tempPath = boost::filesystem::temp_directory_path() / boost::filesystem::unique_path(); + std::string dirPath = tempPath.string(); + + this->set_prefix(dirPath); + + // make sure open does not throw + ASSERT_NO_THROW(this->m_db->open(dirPath)); + this->get_filenames(); + this->init_hard_fork(); + + db_wtxn_guard guard(this->m_db); + + CHECK_AND_ASSERT_THROW_MES(HELIOS_GENERATORS_LEN >= HELIOS_CHUNK_WIDTH, "helios generators < chunk width"); + CHECK_AND_ASSERT_THROW_MES(SELENE_GENERATORS_LEN >= (SELENE_CHUNK_WIDTH * CurveTreesV1::LEAF_TUPLE_SIZE), + "selene generators < max chunk width"); + + Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); + Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); + + auto curve_trees = CurveTreesV1( + helios, + selene, + HELIOS_CHUNK_WIDTH, + SELENE_CHUNK_WIDTH); + + // Grow 
tree by 1 leaf + ASSERT_NO_THROW(this->m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, 1))); + + // TODO: Validate the tree +} + } // anonymous namespace diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 44f123b7a1d..e2a588c44a3 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -521,7 +521,7 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, +const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves) { std::vector tuples; @@ -653,25 +653,10 @@ static void grow_tree_test(Helios &helios, //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, grow_tree) { - // TODO: use static constant generators and hash init points - const std::size_t HELIOS_GENERATORS_LEN = 128; - const std::size_t SELENE_GENERATORS_LEN = 256; - - // https://github.com/kayabaNerve/fcmp-plus-plus/blob - // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 - const std::size_t HELIOS_CHUNK_WIDTH = 38; - const std::size_t SELENE_CHUNK_WIDTH = 18; - CHECK_AND_ASSERT_THROW_MES(HELIOS_GENERATORS_LEN >= HELIOS_CHUNK_WIDTH, "helios generators < chunk width"); CHECK_AND_ASSERT_THROW_MES(SELENE_GENERATORS_LEN >= (SELENE_CHUNK_WIDTH * CurveTreesV1::LEAF_TUPLE_SIZE), "selene generators < max chunk width"); - const Helios::Generators HELIOS_GENERATORS = fcmp::tower_cycle::random_helios_generators(HELIOS_GENERATORS_LEN); - const Selene::Generators SELENE_GENERATORS = 
fcmp::tower_cycle::random_selene_generators(SELENE_GENERATORS_LEN); - - const Helios::Point HELIOS_HASH_INIT_POINT = fcmp::tower_cycle::random_helios_hash_init_point(); - const Selene::Point SELENE_HASH_INIT_POINT = fcmp::tower_cycle::random_selene_hash_init_point(); - Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index cd3d8f370af..0be03172dfe 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -74,3 +74,21 @@ class CurveTreesUnitTest private: CurveTreesV1 &m_curve_trees; }; + +const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, + const std::size_t num_leaves); + +// TODO: use static constant generators and hash init points +const std::size_t HELIOS_GENERATORS_LEN = 128; +const std::size_t SELENE_GENERATORS_LEN = 256; + +// https://github.com/kayabaNerve/fcmp-plus-plus/blob +// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 +const std::size_t HELIOS_CHUNK_WIDTH = 38; +const std::size_t SELENE_CHUNK_WIDTH = 18; + +const Helios::Generators HELIOS_GENERATORS = fcmp::tower_cycle::random_helios_generators(HELIOS_GENERATORS_LEN); +const Selene::Generators SELENE_GENERATORS = fcmp::tower_cycle::random_selene_generators(SELENE_GENERATORS_LEN); + +const Helios::Point HELIOS_HASH_INIT_POINT = fcmp::tower_cycle::random_helios_hash_init_point(); +const Selene::Point SELENE_HASH_INIT_POINT = fcmp::tower_cycle::random_selene_hash_init_point(); From af47a135eb9eab702344fbacb2706170864004d4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 May 2024 20:44:55 -0400 Subject: [PATCH 017/127] Remove cxx and expose scalars/points directly --- .gitignore | 2 - .gitmodules | 3 - src/fcmp/CMakeLists.txt | 4 +- src/fcmp/curve_trees.cpp | 39 +- src/fcmp/fcmp_rust/.gitignore | 4 + src/fcmp/fcmp_rust/CMakeLists.txt | 31 +- src/fcmp/fcmp_rust/Cargo.lock | 716 
++++++++++++++++++++++++++++++ src/fcmp/fcmp_rust/Cargo.toml | 20 +- src/fcmp/fcmp_rust/build.rs | 5 - src/fcmp/fcmp_rust/fcmp++.h | 144 ++++++ src/fcmp/fcmp_rust/src/lib.rs | 270 +++++------ src/fcmp/tower_cycle.cpp | 80 ++-- src/fcmp/tower_cycle.h | 40 +- tests/unit_tests/curve_trees.cpp | 30 +- 14 files changed, 1092 insertions(+), 296 deletions(-) create mode 100644 src/fcmp/fcmp_rust/.gitignore create mode 100644 src/fcmp/fcmp_rust/Cargo.lock delete mode 100644 src/fcmp/fcmp_rust/build.rs create mode 100644 src/fcmp/fcmp_rust/fcmp++.h diff --git a/.gitignore b/.gitignore index 2fc767cca2a..9f62575e5ab 100644 --- a/.gitignore +++ b/.gitignore @@ -120,5 +120,3 @@ nbproject __pycache__/ *.pyc *.log - -Cargo.lock diff --git a/.gitmodules b/.gitmodules index 95bee114a20..721cce3b4bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,6 +14,3 @@ path = external/supercop url = https://github.com/monero-project/supercop branch = monero -[submodule "external/fcmp-plus-plus"] - path = external/fcmp-plus-plus - url = https://github.com/kayabaNerve/fcmp-plus-plus.git diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index 4db451d6800..b7aca03fb82 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -36,7 +36,7 @@ add_subdirectory(fcmp_rust) monero_add_library_with_deps( NAME fcmp - DEPENDS rust_cxx + DEPENDS fcmp_rust SOURCES ${fcmp_sources} ${fcmp_headers}) @@ -46,5 +46,5 @@ target_link_libraries(fcmp crypto epee PRIVATE - fcmp_rust + ${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust/libfcmp_rust.a ${EXTRA_LIBRARIES}) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 6801475bcb5..f210022ef9c 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -1,21 +1,21 @@ // Copyright (c) 2024, The Monero Project -// +// // All rights reserved. -// +// // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: -// +// // 1. 
Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. -// +// // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. -// +// // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. -// +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL @@ -46,7 +46,7 @@ typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_ch { // New parent means no prior children, fill priors with 0 std::vector prior_children; - tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + tower_cycle::extend_zeroes(curve, new_children.len, prior_children); return curve.hash_grow( curve.m_hash_init_point, @@ -76,17 +76,17 @@ static typename C::Point get_first_parent(const C &curve, if (child_layer_last_hash_updated) { // If the last chunk has updated children in it, then we need to get the delta to the old children - prior_children.emplace_back(curve.clone(last_chunk_ptr->last_child)); + prior_children.emplace_back(last_chunk_ptr->last_child); // Extend prior children by zeroes for any additional new children, since they must be new - if (new_children.size() > 1) - tower_cycle::extend_zeroes(curve, new_children.size() - 1, prior_children); + if (new_children.len > 1) + tower_cycle::extend_zeroes(curve, new_children.len - 1, prior_children); } else if (offset > 0) { // If we're updating the parent hash and no children were updated, then we're just adding new children // to 
the existing last chunk and can fill priors with 0 - tower_cycle::extend_zeroes(curve, new_children.size(), prior_children); + tower_cycle::extend_zeroes(curve, new_children.len, prior_children); } else { @@ -190,8 +190,9 @@ static void hash_layer(const C &curve, const auto chunk_start = child_scalars.data() + chunk_start_idx; const typename C::Chunk chunk{chunk_start, chunk_size}; - for (const auto &c : chunk) - MDEBUG("Hashing " << curve.to_string(c)); + for (uint c = 0; c < chunk_size; ++c) { + MDEBUG("Hashing " << curve.to_string(chunk_start[c])); + } // Hash the chunk of children typename C::Point chunk_hash = chunk_start_idx == 0 @@ -264,9 +265,9 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio for (const auto &leaf : new_leaf_tuples) { tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = m_c2.clone(leaf.O_x), - .I_x = m_c2.clone(leaf.I_x), - .C_x = m_c2.clone(leaf.C_x) + .O_x = leaf.O_x, + .I_x = leaf.I_x, + .C_x = leaf.C_x }); } @@ -378,9 +379,9 @@ std::vector CurveTrees::flatten_leaves(const std::v for (const auto &l : leaves) { // TODO: implement without cloning - flattened_leaves.emplace_back(m_c2.clone(l.O_x)); - flattened_leaves.emplace_back(m_c2.clone(l.I_x)); - flattened_leaves.emplace_back(m_c2.clone(l.C_x)); + flattened_leaves.emplace_back(l.O_x); + flattened_leaves.emplace_back(l.I_x); + flattened_leaves.emplace_back(l.C_x); } return flattened_leaves; diff --git a/src/fcmp/fcmp_rust/.gitignore b/src/fcmp/fcmp_rust/.gitignore new file mode 100644 index 00000000000..5a07b8927f8 --- /dev/null +++ b/src/fcmp/fcmp_rust/.gitignore @@ -0,0 +1,4 @@ +# If a developer runs cargo build inside this sub-directory to only work with +# the Rust side of things, they'll create this target directory which shouldn't +# be committed +target diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index e14564be023..cb25b33783c 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt 
@@ -34,37 +34,28 @@ else () set(TARGET_DIR "release") endif () -set(FCMP_RUST_CXX "${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust.cc") set(FCMP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_rust") -set(FCMP_RUST_HEADER "${FCMP_RUST_HEADER_DIR}/fcmp_rust.h") -set(CXX_HEADER "${FCMP_RUST_HEADER_DIR}/cxx.h") +set(FCMP_RUST_HEADER "${FCMP_RUST_HEADER_DIR}/fcmp++.h") +set(FCMP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_rust.a") # Removing OUTPUT files makes sure custom command runs every time -file(REMOVE_RECURSE "${FCMP_RUST_CXX}") file(REMOVE_RECURSE "${FCMP_RUST_HEADER_DIR}") file(MAKE_DIRECTORY "${FCMP_RUST_HEADER_DIR}") +file(REMOVE "${FCMP_RUST_LIB}") + add_custom_command( COMMENT "Building rust fcmp lib" - OUTPUT ${FCMP_RUST_CXX} ${FCMP_RUST_HEADER} ${CXX_HEADER} + OUTPUT ${FCMP_RUST_HEADER} + OUTPUT ${FCMP_RUST_LIB} COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD} - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/cxxbridge/fcmp_rust/src/lib.rs.cc ${FCMP_RUST_CXX} - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/cxxbridge/fcmp_rust/src/lib.rs.h ${FCMP_RUST_HEADER} - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/cxxbridge/rust/cxx.h ${CXX_HEADER} + COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_RUST_HEADER} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_DIR}/libfcmp_rust.a ${FCMP_RUST_LIB} COMMAND echo "Finished copying fcmp rust targets" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} VERBATIM ) -add_custom_target(rust_cxx ALL DEPENDS ${CXX_HEADER}) - -set(fcmp_rust_sources ${FCMP_RUST_CXX}) - -monero_find_all_headers(fcmp_rust_headers "${FCMP_RUST_HEAfDER_DIR}") - -monero_add_library(fcmp_rust - ${fcmp_rust_sources} - ${fcmp_rust_headers}) - -set(FCMP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_DIR}/libfcmp_rust.a") -target_link_libraries(fcmp_rust dl ${FCMP_RUST_LIB}) +#monero_find_all_headers(fcmp_rust_headers "${FCMP_RUST_HEADER_DIR}") +add_custom_target(fcmp_rust DEPENDS ${FCMP_RUST_LIB}) +#target_link_libraries(fcmp ${FCMP_RUST_LIB}) diff --git 
a/src/fcmp/fcmp_rust/Cargo.lock b/src/fcmp/fcmp_rust/Cargo.lock new file mode 100644 index 00000000000..a8bd846a1e0 --- /dev/null +++ b/src/fcmp/fcmp_rust/Cargo.lock @@ -0,0 +1,716 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciphersuite" +version = "0.4.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "blake2", + "dalek-ff-group", + "digest", + "elliptic-curve", + "ff", + "flexible-transcript", + "group", + "helioselene", + "k256", + "minimal-ed448", + "p256", + "rand_core", + "sha2", + "sha3", + "std-shims", + "subtle", + "zeroize", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "git+https://github.com/kayabaNerve/crypto-bigint?branch=c-repr#78352771313f1e9b8e48abe5ce30d50d6bdd291d" +dependencies = [ + "generic-array 0.14.7", + "rand_core", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array 0.14.7", + "typenum", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "group", + "platforms", + "rand_core", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dalek-ff-group" +version = "0.4.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "crypto-bigint", + "curve25519-dalek", + "digest", + "ff", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "ec-divisors" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "dalek-ff-group", + "group", + "hex", + "rand_core", + "zeroize", +] + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "elliptic-curve", + "signature", + "spki", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "tap", + "zeroize", +] + +[[package]] +name = "fcmp_rust" +version = "0.0.0" +dependencies = [ + 
"ciphersuite", + "ec-divisors", + "flexible-transcript", + "full-chain-membership-proofs", + "generalized-bulletproofs", + "helioselene", + "rand_core", +] + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "bitvec", + "rand_core", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "flexible-transcript" +version = "0.3.2" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "blake2", + "digest", + "merlin", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "full-chain-membership-proofs" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "ciphersuite", + "ec-divisors", + "flexible-transcript", + "generalized-bulletproofs", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "generalized-bulletproofs" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "ciphersuite", + "flexible-transcript", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "generic-array" +version = 
"1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe739944a5406424e080edccb6add95685130b9f160d5407c639c7df0c5836b0" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "helioselene" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "crypto-bigint", + "dalek-ff-group", + "ec-divisors", + "ff", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "k256" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "libc" +version = "0.2.155" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core", + "zeroize", +] + +[[package]] +name = "minimal-ed448" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "crypto-bigint", + "ff", + "generic-array 1.0.0", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "multiexp" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "ff", + "group", + "rand_core", + "rustversion", + "std-shims", + "zeroize", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "platforms" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + +[[package]] +name = "proc-macro2" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "sha2" +version = "0.10.8" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "std-shims" +version = "0.1.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +dependencies = [ + "hashbrown", + "spin", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "2.0.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = 
"typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zerocopy" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + 
"proc-macro2", + "quote", + "syn", +] diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp/fcmp_rust/Cargo.toml index d5052aac72f..b6ac65d6c5b 100644 --- a/src/fcmp/fcmp_rust/Cargo.toml +++ b/src/fcmp/fcmp_rust/Cargo.toml @@ -8,14 +8,16 @@ name = "fcmp_rust" crate-type = ["staticlib"] [dependencies] -cxx = "1.0" -full-chain-membership-proofs = { path = "../../../external/fcmp-plus-plus/crypto/fcmps" } -ciphersuite = { path = "../../../external/fcmp-plus-plus/crypto/ciphersuite", features = ["helioselene", "ed25519"] } - -ec-divisors = { path = "../../../external/fcmp-plus-plus/crypto/divisors", features = ["ed25519"] } rand_core = { version = "0.6", features = ["getrandom"] } -transcript = { package = "flexible-transcript", path = "../../../external/fcmp-plus-plus/crypto/transcript", features = ["recommended"] } -generalized-bulletproofs = { path = "../../../external/fcmp-plus-plus/crypto/generalized-bulletproofs", features = ["tests"] } -[build-dependencies] -cxx-build = "1.0" \ No newline at end of file +transcript = { package = "flexible-transcript", git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["recommended"] } +helioselene = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } +ciphersuite = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519", "helioselene"] } + +generalized-bulletproofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["tests"] } + +ec-divisors = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519"] } +full-chain-membership-proofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +[patch.crates-io] +crypto-bigint = { git = "https://github.com/kayabaNerve/crypto-bigint", branch = "c-repr" } diff --git a/src/fcmp/fcmp_rust/build.rs b/src/fcmp/fcmp_rust/build.rs deleted file mode 100644 index 0b216f6d3cf..00000000000 --- a/src/fcmp/fcmp_rust/build.rs +++ /dev/null @@ -1,5 +0,0 @@ -fn main() { - let _ = cxx_build::bridge("src/lib.rs"); 
- - println!("cargo:rerun-if-changed=src/lib.rs"); -} \ No newline at end of file diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h new file mode 100644 index 00000000000..590f2000aff --- /dev/null +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -0,0 +1,144 @@ +namespace fcmp_rust { +#include +#include +#include +#include +#include + +// ----- deps C bindings ----- + +/// Inner integer type that the [`Limb`] newtype wraps. +// TODO: This is only valid for 64-bit platforms +using Word = uint64_t; + +/// Big integers are represented as an array of smaller CPU word-size integers +/// called "limbs". +using Limb = Word; + + +/// Stack-allocated big unsigned integer. +/// +/// Generic over the given number of `LIMBS` +/// +/// # Encoding support +/// This type supports many different types of encodings, either via the +/// [`Encoding`][`crate::Encoding`] trait or various `const fn` decoding and +/// encoding functions that can be used with [`Uint`] constants. +/// +/// Optional crate features for encoding (off-by-default): +/// - `generic-array`: enables [`ArrayEncoding`][`crate::ArrayEncoding`] trait which can be used to +/// [`Uint`] as `GenericArray` and a [`ArrayDecoding`][`crate::ArrayDecoding`] trait which +/// can be used to `GenericArray` as [`Uint`]. +/// - `rlp`: support for [Recursive Length Prefix (RLP)][RLP] encoding. +/// +/// [RLP]: https://eth.wiki/fundamentals/rlp +template +struct Uint { + /// Inner limb array. Stored from least significant to most significant. + Limb limbs[LIMBS]; +}; + + +/// A residue mod `MOD`, represented using `LIMBS` limbs. The modulus of this residue is constant, so it cannot be set at runtime. +/// Internally, the value is stored in Montgomery form (multiplied by MOD::R) until it is retrieved. +template +struct Residue { + Uint montgomery_form; +}; + + +/// A constant-time implementation of the Ed25519 field. +struct SeleneScalar { + Residue<4> _0; +}; + + +/// The field novel to Helios/Selene. 
+struct HeliosScalar { + Residue<4> _0; +}; + +struct HeliosPoint { + SeleneScalar x; + SeleneScalar y; + SeleneScalar z; +}; + +struct SelenePoint { + HeliosScalar x; + HeliosScalar y; + HeliosScalar z; +}; + +// ----- End deps C bindings ----- + +template +struct Box; + +struct HeliosGenerators { + void* generators; +}; + +struct SeleneGenerators { + void* generators; +}; + +template +struct CResult { + T value; + void* err; +}; + +template +struct Slice { + const T *buf; + uintptr_t len; +}; + +using HeliosScalarSlice = Slice; + +using SeleneScalarSlice = Slice; + +extern "C" { + +HeliosGenerators random_helios_generators(uintptr_t n); + +SeleneGenerators random_selene_generators(uintptr_t n); + +HeliosPoint random_helios_hash_init_point(); + +SelenePoint random_selene_hash_init_point(); + +uint8_t *helios_scalar_to_bytes(HeliosScalar helios_scalar); + +uint8_t *selene_scalar_to_bytes(SeleneScalar selene_scalar); + +uint8_t *helios_point_to_bytes(HeliosPoint helios_point); + +uint8_t *selene_point_to_bytes(SelenePoint selene_point); + +SeleneScalar ed25519_point_to_selene_scalar(const uint8_t *ed25519_point); + +HeliosScalar selene_point_to_helios_scalar(SelenePoint selene_point); + +SeleneScalar helios_point_to_selene_scalar(HeliosPoint helios_point); + +HeliosScalar helios_zero_scalar(); + +SeleneScalar selene_zero_scalar(); + +CResult hash_grow_helios(const HeliosGenerators *helios_generators, + HeliosPoint existing_hash, + uintptr_t offset, + HeliosScalarSlice prior_children, + HeliosScalarSlice new_children); + +CResult hash_grow_selene(const SeleneGenerators *selene_generators, + SelenePoint existing_hash, + uintptr_t offset, + SeleneScalarSlice prior_children, + SeleneScalarSlice new_children); + +} // extern "C" + +} diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 8270e726c38..9ae55f929db 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -1,226 +1,186 @@ -use rand_core::OsRng; - use 
std::io; -use full_chain_membership_proofs::tree::hash_grow; +use rand_core::OsRng; use transcript::RecommendedTranscript; - +use helioselene::{HeliosPoint, SelenePoint, HelioseleneField as HeliosScalar, Field25519 as SeleneScalar}; use ciphersuite::{group::{Group, GroupEncoding, ff::{PrimeField, Field}}, Ciphersuite, Ed25519, Selene, Helios}; -use ec_divisors::DivisorCurve; - use generalized_bulletproofs::Generators; -// TODO: lint -#[cxx::bridge] -mod ffi { - // Rust types and signatures exposed to C++. - #[namespace = "fcmp_rust"] - extern "Rust" { - // TODO: Separate Helios and Selene namespaces - type HeliosGenerators; - type HeliosPoint; - type HeliosScalar; - - type SeleneGenerators; - type SelenePoint; - type SeleneScalar; - - fn random_helios_generators(n: usize) -> Box; - fn random_helios_hash_init_point() -> Box; - - fn random_selene_generators(n: usize) -> Box; - fn random_selene_hash_init_point() -> Box; - - fn clone_helios_scalar(helios_scalar: &Box) -> Box; - fn clone_selene_scalar(selene_scalar: &Box) -> Box; - fn clone_helios_point(helios_point: &Box) -> Box; - fn clone_selene_point(selene_point: &Box) -> Box; - - fn helios_scalar_to_bytes(helios_scalar: &Box) -> [u8; 32]; - fn selene_scalar_to_bytes(selene_scalar: &Box) -> [u8; 32]; - fn helios_point_to_bytes(helios_point: &Box) -> [u8; 32]; - fn selene_point_to_bytes(selene_point: &Box) -> [u8; 32]; - - fn ed25519_point_to_selene_scalar(ed25519_point: &[u8; 32]) -> Box; - fn selene_point_to_helios_scalar(selene_point: &Box) -> Box; - fn helios_point_to_selene_scalar(helios_point: &Box) -> Box; - - fn helios_zero_scalar() -> Box; - fn selene_zero_scalar() -> Box; - - pub fn hash_grow_helios( - helios_generators: &Box, - existing_hash: &Box, - offset: usize, - prior_children: &[Box], - new_children: &[Box] - ) -> Result>; - - pub fn hash_grow_selene( - selene_generators: &Box, - existing_hash: &Box, - offset: usize, - prior_children: &[Box], - new_children: &[Box] - ) -> Result>; - } -} +use 
ec_divisors::DivisorCurve; +use full_chain_membership_proofs::tree::hash_grow; // TODO: cleaner const usage of generators // TODO: try to get closer to underlying types // TODO: maybe don't do both tuple and Box? Just make these all boxes -pub struct HeliosGenerators(Generators); -pub struct HeliosPoint(::G); -pub struct HeliosScalar(::F); - -pub struct SeleneGenerators(Generators); -pub struct SelenePoint(::G); -pub struct SeleneScalar(::F); - -#[allow(non_snake_case)] -pub fn random_helios_generators(n: usize) -> Box { - let helios_generators = generalized_bulletproofs::tests::generators::(n); - Box::new(HeliosGenerators(helios_generators)) -} - -#[allow(non_snake_case)] -pub fn random_selene_generators(n: usize) -> Box { - let selene_generators = generalized_bulletproofs::tests::generators::(n); - Box::new(SeleneGenerators(selene_generators)) +#[repr(C)] +pub struct HeliosGenerators { + generators: Box> } - -#[allow(non_snake_case)] -pub fn random_helios_hash_init_point() -> Box { - let helios_hash_init_point = ::G::random(&mut OsRng); - dbg!(&helios_hash_init_point); - Box::new(HeliosPoint(helios_hash_init_point)) +#[repr(C)] +pub struct SeleneGenerators { + generators: Box> } -#[allow(non_snake_case)] -pub fn random_selene_hash_init_point() -> Box { - let selene_hash_init_point = ::G::random(&mut OsRng); - dbg!(&selene_hash_init_point); - Box::new(SelenePoint(selene_hash_init_point)) +#[no_mangle] +pub extern "C" fn random_helios_generators(n: usize) -> HeliosGenerators { + let helios_generators = generalized_bulletproofs::tests::generators::(n); + HeliosGenerators { generators: Box::new(helios_generators) } } -// TODO: should be able to use generics -// TODO: shorter names -pub fn clone_helios_scalar(helios_scalar: &Box) -> Box { - Box::new(HeliosScalar(helios_scalar.0)) +#[no_mangle] +pub extern "C" fn random_selene_generators(n: usize) -> SeleneGenerators { + let selene_generators = generalized_bulletproofs::tests::generators::(n); + SeleneGenerators { 
generators: Box::new(selene_generators) } } -pub fn clone_selene_scalar(selene_scalar: &Box) -> Box { - Box::new(SeleneScalar(selene_scalar.0)) +#[no_mangle] +pub extern "C" fn random_helios_hash_init_point() -> HeliosPoint { + HeliosPoint::random(&mut OsRng) } -pub fn clone_helios_point(helios_point: &Box) -> Box { - Box::new(HeliosPoint(helios_point.0)) +#[no_mangle] +pub extern "C" fn random_selene_hash_init_point() -> SelenePoint { + SelenePoint::random(&mut OsRng) } -pub fn clone_selene_point(selene_point: &Box) -> Box { - Box::new(SelenePoint(selene_point.0)) +fn c_u8_32(bytes: [u8; 32]) -> *const u8 { + let arr_ptr = Box::into_raw(Box::new(bytes)); + arr_ptr as *const u8 } -// TODO: generics -pub fn helios_scalar_to_bytes(helios_scalar: &Box) -> [u8; 32] { - helios_scalar.0.to_repr() +#[no_mangle] +pub extern "C" fn helios_scalar_to_bytes(helios_scalar: HeliosScalar) -> *const u8 { + c_u8_32(helios_scalar.to_repr()) } -pub fn selene_scalar_to_bytes(selene_scalar: &Box) -> [u8; 32] { - selene_scalar.0.to_repr() +#[no_mangle] +pub extern "C" fn selene_scalar_to_bytes(selene_scalar: SeleneScalar) -> *const u8 { + c_u8_32(selene_scalar.to_repr()) } -pub fn helios_point_to_bytes(helios_point: &Box) -> [u8; 32] { - helios_point.0.to_bytes() +#[no_mangle] +pub extern "C" fn helios_point_to_bytes(helios_point: HeliosPoint) -> *const u8 { + c_u8_32(helios_point.to_bytes()) } -pub fn selene_point_to_bytes(selene_point: &Box) -> [u8; 32] { - selene_point.0.to_bytes() +#[no_mangle] +pub extern "C" fn selene_point_to_bytes(selene_point: SelenePoint) -> *const u8 { + c_u8_32(selene_point.to_bytes()) } // Get the x coordinate of the ed25519 point // TODO: use generics for below logic -pub fn ed25519_point_to_selene_scalar(ed25519_point: &[u8; 32]) -> Box { +#[no_mangle] +pub extern "C" fn ed25519_point_to_selene_scalar(ed25519_point: *const u8) -> SeleneScalar { // TODO: unwrap or else error - let ed25519_point = ::read_G(&mut ed25519_point.as_slice()).unwrap(); + let mut 
ed25519_point = unsafe { core::slice::from_raw_parts(ed25519_point, 32) }; + let ed25519_point = ::read_G(&mut ed25519_point).unwrap(); let xy_coords = ::G::to_xy(ed25519_point); - let x: ::F = xy_coords.0; - Box::new(SeleneScalar(x)) + let x: SeleneScalar = xy_coords.0; + x } // TODO: use generics for below logic -pub fn selene_point_to_helios_scalar(selene_point: &Box) -> Box { - let xy_coords = ::G::to_xy(selene_point.0); - let x: ::F = xy_coords.0; - Box::new(HeliosScalar(x)) +#[no_mangle] +pub extern "C" fn selene_point_to_helios_scalar(selene_point: SelenePoint) -> HeliosScalar { + let xy_coords = SelenePoint::to_xy(selene_point); + let x: HeliosScalar = xy_coords.0; + x } // TODO: use generics for below logic -pub fn helios_point_to_selene_scalar(helios_point: &Box) -> Box { - let xy_coords = ::G::to_xy(helios_point.0); - let x: ::F = xy_coords.0; - Box::new(SeleneScalar(x)) +#[no_mangle] +pub extern "C" fn helios_point_to_selene_scalar(helios_point: HeliosPoint) -> SeleneScalar { + let xy_coords = HeliosPoint::to_xy(helios_point); + let x: SeleneScalar = xy_coords.0; + x +} + +#[no_mangle] +pub extern "C" fn helios_zero_scalar() -> HeliosScalar { + HeliosScalar::ZERO +} + +#[no_mangle] +pub extern "C" fn selene_zero_scalar() -> SeleneScalar { + SeleneScalar::ZERO } -pub fn helios_zero_scalar() -> Box { - Box::new(HeliosScalar(::F::ZERO)) +#[repr(C)] +pub struct Slice { + buf: *const T, + len: usize, +} +pub type HeliosScalarSlice = Slice; +pub type SeleneScalarSlice = Slice; +impl<'a, T> Into<&'a [T]> for Slice { + fn into(self) -> &'a [T] { + unsafe { core::slice::from_raw_parts(self.buf, self.len) } + } } -pub fn selene_zero_scalar() -> Box { - Box::new(SeleneScalar(::F::ZERO)) +#[repr(C)] +pub struct CResult { + value: T, + err: *const E, +} +impl CResult { + fn ok(value: T) -> Self { + CResult { value, err: core::ptr::null() } + } + fn err(default: T, err: E) -> Self { + CResult { value: default, err: Box::into_raw(Box::new(err)) } + } } // TODO: use 
generics for curves -pub fn hash_grow_helios( - helios_generators: &Box, - existing_hash: &Box, +#[no_mangle] +pub extern "C" fn hash_grow_helios( + helios_generators: &HeliosGenerators, + existing_hash: HeliosPoint, offset: usize, - prior_children: &[Box], - new_children: &[Box] -) -> Result, io::Error> { - let prior_children = prior_children.iter().map(|c| c.0).collect::>(); - let new_children = new_children.iter().map(|c| c.0).collect::>(); - + prior_children: HeliosScalarSlice, + new_children: HeliosScalarSlice +) -> CResult { let hash = hash_grow( - &helios_generators.0, - existing_hash.0, + &helios_generators.generators, + existing_hash, offset, - &prior_children, - &new_children + prior_children.into(), + new_children.into(), ); if let Some(hash) = hash { - Ok(Box::new(HeliosPoint(hash))) + CResult::ok(hash) } else { - Err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) + CResult::err(HeliosPoint::identity(), io::Error::new(io::ErrorKind::Other, "failed to grow hash")) } } // TODO: use generics for curves -pub fn hash_grow_selene( - selene_generators: &Box, - existing_hash: &Box, +#[no_mangle] +pub extern "C" fn hash_grow_selene( + selene_generators: &SeleneGenerators, + existing_hash: SelenePoint, offset: usize, - prior_children: &[Box], - new_children: &[Box] -) -> Result, io::Error> { - let prior_children = prior_children.iter().map(|c| c.0).collect::>(); - let new_children = new_children.iter().map(|c| c.0).collect::>(); - + prior_children: SeleneScalarSlice, + new_children: SeleneScalarSlice +) -> CResult { let hash = hash_grow( - &selene_generators.0, - existing_hash.0, + &selene_generators.generators, + existing_hash, offset, - &prior_children, - &new_children + prior_children.into(), + new_children.into(), ); if let Some(hash) = hash { - Ok(Box::new(SelenePoint(hash))) + CResult::ok(hash) } else { - Err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) + CResult::err(SelenePoint::identity(), 
io::Error::new(io::ErrorKind::Other, "failed to grow hash")) } + } diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 96fa5fe09a0..8ad64b4be74 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -1,21 +1,21 @@ // Copyright (c) 2024, The Monero Project -// +// // All rights reserved. -// +// // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: -// +// // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. -// +// // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. -// +// // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. -// +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL @@ -51,12 +51,16 @@ Helios::Point Helios::hash_grow( const Helios::Chunk &prior_children, const Helios::Chunk &new_children) const { - return fcmp_rust::hash_grow_helios( - m_generators, + auto res = fcmp_rust::hash_grow_helios( + &m_generators, existing_hash, offset, prior_children, new_children); + if (res.err != 0) { + throw std::runtime_error("failed to hash grow"); + } + return res.value; } //---------------------------------------------------------------------------------------------------------------------- Selene::Point Selene::hash_grow( @@ -65,32 +69,16 @@ Selene::Point Selene::hash_grow( const Selene::Chunk &prior_children, const Selene::Chunk &new_children) const { - return fcmp_rust::hash_grow_selene( - m_generators, + auto res = fcmp_rust::hash_grow_selene( + &m_generators, existing_hash, offset, prior_children, new_children); -} -//---------------------------------------------------------------------------------------------------------------------- -Helios::Scalar Helios::clone(const Helios::Scalar &scalar) const -{ - return fcmp_rust::clone_helios_scalar(scalar); -} -//---------------------------------------------------------------------------------------------------------------------- -Selene::Scalar Selene::clone(const Selene::Scalar &scalar) const -{ - return fcmp_rust::clone_selene_scalar(scalar); -} -//---------------------------------------------------------------------------------------------------------------------- -Helios::Point Helios::clone(const Helios::Point &point) const -{ - return fcmp_rust::clone_helios_point(point); -} -//---------------------------------------------------------------------------------------------------------------------- -Selene::Point Selene::clone(const Selene::Point &point) const -{ - return fcmp_rust::clone_selene_point(point); + if (res.err != 0) { + throw std::runtime_error("failed to hash grow"); + } + return res.value; } 
//---------------------------------------------------------------------------------------------------------------------- Helios::Scalar Helios::zero_scalar() const @@ -105,22 +93,38 @@ Selene::Scalar Selene::zero_scalar() const //---------------------------------------------------------------------------------------------------------------------- std::array Helios::to_bytes(const Helios::Scalar &scalar) const { - return fcmp_rust::helios_scalar_to_bytes(scalar); + auto bytes = fcmp_rust::helios_scalar_to_bytes(scalar); + std::array res; + memcpy(&res, bytes, 32); + free(bytes); + return res; } //---------------------------------------------------------------------------------------------------------------------- std::array Selene::to_bytes(const Selene::Scalar &scalar) const { - return fcmp_rust::selene_scalar_to_bytes(scalar); + auto bytes = fcmp_rust::selene_scalar_to_bytes(scalar); + std::array res; + memcpy(&res, bytes, 32); + free(bytes); + return res; } //---------------------------------------------------------------------------------------------------------------------- std::array Helios::to_bytes(const Helios::Point &point) const { - return fcmp_rust::helios_point_to_bytes(point); + auto bytes = fcmp_rust::helios_point_to_bytes(point); + std::array res; + memcpy(&res, bytes, 32); + free(bytes); + return res; } //---------------------------------------------------------------------------------------------------------------------- std::array Selene::to_bytes(const Selene::Point &point) const { - return fcmp_rust::selene_point_to_bytes(point); + auto bytes = fcmp_rust::selene_point_to_bytes(point); + std::array res; + memcpy(&res, bytes, 32); + free(bytes); + return res; } //---------------------------------------------------------------------------------------------------------------------- std::string Helios::to_string(const typename Helios::Scalar &scalar) const @@ -148,13 +152,7 @@ std::string Selene::to_string(const typename Selene::Point &point) const 
//---------------------------------------------------------------------------------------------------------------------- SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) { - static_assert(sizeof(RustEd25519Point) == sizeof(crypto::ec_point), - "expected same size ed25519 point to rust representation"); - - // TODO: implement reading just the x coordinate of ed25519 point in C/C++ - fcmp::tower_cycle::RustEd25519Point rust_point; - memcpy(&rust_point, &point, sizeof(fcmp::tower_cycle::RustEd25519Point)); - return fcmp_rust::ed25519_point_to_selene_scalar(rust_point); + return fcmp_rust::ed25519_point_to_selene_scalar((uint8_t*) &point.data); } //---------------------------------------------------------------------------------------------------------------------- template diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index f63cac5ffe4..0f61bc08bb3 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -1,21 +1,21 @@ // Copyright (c) 2024, The Monero Project -// +// // All rights reserved. -// +// // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: -// +// // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. -// +// // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. -// +// // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. 
-// +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL @@ -29,8 +29,7 @@ #pragma once #include "crypto/crypto.h" -#include "fcmp_rust/cxx.h" -#include "fcmp_rust/fcmp_rust.h" +#include "fcmp_rust/fcmp++.h" #include @@ -45,24 +44,24 @@ namespace tower_cycle using RustEd25519Point = std::array; // Need to forward declare Scalar types for point_to_cycle_scalar below -using SeleneScalar = rust::Box; -using HeliosScalar = rust::Box; +using SeleneScalar = fcmp_rust::SeleneScalar; +using HeliosScalar = fcmp_rust::HeliosScalar; //---------------------------------------------------------------------------------------------------------------------- struct HeliosT final { - using Generators = rust::Box; + using Generators = fcmp_rust::HeliosGenerators; using Scalar = HeliosScalar; - using Point = rust::Box; - using Chunk = rust::Slice; + using Point = fcmp_rust::HeliosPoint; + using Chunk = fcmp_rust::HeliosScalarSlice; using CycleScalar = SeleneScalar; }; //---------------------------------------------------------------------------------------------------------------------- struct SeleneT final { - using Generators = rust::Box; + using Generators = fcmp_rust::SeleneGenerators; using Scalar = SeleneScalar; - using Point = rust::Box; - using Chunk = rust::Slice; + using Point = fcmp_rust::SelenePoint; + using Chunk = fcmp_rust::SeleneScalarSlice; using CycleScalar = HeliosScalar; }; //---------------------------------------------------------------------------------------------------------------------- @@ -89,9 +88,6 @@ class Curve const typename C::Chunk &prior_children, const typename C::Chunk &new_children) const = 0; - virtual typename C::Scalar clone(const typename C::Scalar &scalar) const = 0; - virtual typename C::Point clone(const typename C::Point 
&point) const = 0; - virtual typename C::Scalar zero_scalar() const = 0; virtual std::array to_bytes(const typename C::Scalar &scalar) const = 0; @@ -133,9 +129,6 @@ class Helios final : public Curve const Chunk &prior_children, const Chunk &new_children) const override; - Scalar clone(const Scalar &scalar) const override; - Point clone(const Point &point) const override; - Scalar zero_scalar() const override; std::array to_bytes(const Scalar &scalar) const override; @@ -171,9 +164,6 @@ class Selene final : public Curve const Chunk &prior_children, const Chunk &new_children) const override; - Scalar clone(const Scalar &scalar) const override; - Point clone(const Point &point) const override; - Scalar zero_scalar() const override; std::array to_bytes(const Scalar &scalar) const override; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index e2a588c44a3..d29e78249df 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -1,21 +1,21 @@ // Copyright (c) 2014, The Monero Project -// +// // All rights reserved. -// +// // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: -// +// // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. -// +// // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. -// +// // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. 
-// +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL @@ -49,8 +49,8 @@ static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const C &c return fcmp::curve_trees::LastChunkData{ .child_offset = child_offset, - .last_child = curve.clone(last_child), - .last_parent = curve.clone(last_parent), + .last_child = last_child, + .last_parent = last_parent, .child_layer_size = child_layer_size, .parent_layer_size = parent_layer_size }; @@ -204,9 +204,9 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext for (const auto &leaf : tree_extension.leaves.tuples) { tree_inout.leaves.emplace_back(CurveTreesV1::LeafTuple{ - .O_x = m_curve_trees.m_c2.clone(leaf.O_x), - .I_x = m_curve_trees.m_c2.clone(leaf.I_x), - .C_x = m_curve_trees.m_c2.clone(leaf.C_x) + .O_x = leaf.O_x, + .I_x = leaf.I_x, + .C_x = leaf.C_x }); } @@ -240,10 +240,10 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext // We updated the last hash if (started_at_tip) - c2_inout.back() = m_curve_trees.m_c2.clone(c2_ext.hashes.front()); + c2_inout.back() = c2_ext.hashes.front(); for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i) - c2_inout.emplace_back(m_curve_trees.m_c2.clone(c2_ext.hashes[i])); + c2_inout.emplace_back(c2_ext.hashes[i]); ++c2_idx; } @@ -266,10 +266,10 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext // We updated the last hash if (started_at_tip) - c1_inout.back() = m_curve_trees.m_c1.clone(c1_ext.hashes.front()); + c1_inout.back() = c1_ext.hashes.front(); for (std::size_t i = started_at_tip ? 
1 : 0; i < c1_ext.hashes.size(); ++i) - c1_inout.emplace_back(m_curve_trees.m_c1.clone(c1_ext.hashes[i])); + c1_inout.emplace_back(c1_ext.hashes[i]); ++c1_idx; } From 42f6ef273d7be01b3200a8328720a175255e312f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 May 2024 20:49:37 -0400 Subject: [PATCH 018/127] fmt, clippy --- src/fcmp/fcmp_rust/src/lib.rs | 201 +++++++++++++++++++--------------- 1 file changed, 110 insertions(+), 91 deletions(-) diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 9ae55f929db..8a6d465bd66 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -2,185 +2,204 @@ use std::io; use rand_core::OsRng; +use ciphersuite::{ + group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, + }, + Ciphersuite, Ed25519, Helios, Selene, +}; +use helioselene::{ + Field25519 as SeleneScalar, HeliosPoint, HelioseleneField as HeliosScalar, SelenePoint, +}; use transcript::RecommendedTranscript; -use helioselene::{HeliosPoint, SelenePoint, HelioseleneField as HeliosScalar, Field25519 as SeleneScalar}; -use ciphersuite::{group::{Group, GroupEncoding, ff::{PrimeField, Field}}, Ciphersuite, Ed25519, Selene, Helios}; use generalized_bulletproofs::Generators; use ec_divisors::DivisorCurve; use full_chain_membership_proofs::tree::hash_grow; -// TODO: cleaner const usage of generators -// TODO: try to get closer to underlying types -// TODO: maybe don't do both tuple and Box? 
Just make these all boxes +// TODO: Use a macro to de-duplicate some of of this code + #[repr(C)] pub struct HeliosGenerators { - generators: Box> + generators: Box>, } #[repr(C)] pub struct SeleneGenerators { - generators: Box> + generators: Box>, } #[no_mangle] pub extern "C" fn random_helios_generators(n: usize) -> HeliosGenerators { - let helios_generators = generalized_bulletproofs::tests::generators::(n); - HeliosGenerators { generators: Box::new(helios_generators) } + let helios_generators = generalized_bulletproofs::tests::generators::(n); + HeliosGenerators { + generators: Box::new(helios_generators), + } } #[no_mangle] pub extern "C" fn random_selene_generators(n: usize) -> SeleneGenerators { - let selene_generators = generalized_bulletproofs::tests::generators::(n); - SeleneGenerators { generators: Box::new(selene_generators) } + let selene_generators = generalized_bulletproofs::tests::generators::(n); + SeleneGenerators { + generators: Box::new(selene_generators), + } } #[no_mangle] pub extern "C" fn random_helios_hash_init_point() -> HeliosPoint { - HeliosPoint::random(&mut OsRng) + HeliosPoint::random(&mut OsRng) } #[no_mangle] pub extern "C" fn random_selene_hash_init_point() -> SelenePoint { - SelenePoint::random(&mut OsRng) + SelenePoint::random(&mut OsRng) } fn c_u8_32(bytes: [u8; 32]) -> *const u8 { - let arr_ptr = Box::into_raw(Box::new(bytes)); - arr_ptr as *const u8 + let arr_ptr = Box::into_raw(Box::new(bytes)); + arr_ptr as *const u8 } #[no_mangle] pub extern "C" fn helios_scalar_to_bytes(helios_scalar: HeliosScalar) -> *const u8 { - c_u8_32(helios_scalar.to_repr()) + c_u8_32(helios_scalar.to_repr()) } #[no_mangle] pub extern "C" fn selene_scalar_to_bytes(selene_scalar: SeleneScalar) -> *const u8 { - c_u8_32(selene_scalar.to_repr()) + c_u8_32(selene_scalar.to_repr()) } #[no_mangle] pub extern "C" fn helios_point_to_bytes(helios_point: HeliosPoint) -> *const u8 { - c_u8_32(helios_point.to_bytes()) + c_u8_32(helios_point.to_bytes()) } 
#[no_mangle] pub extern "C" fn selene_point_to_bytes(selene_point: SelenePoint) -> *const u8 { - c_u8_32(selene_point.to_bytes()) + c_u8_32(selene_point.to_bytes()) } // Get the x coordinate of the ed25519 point -// TODO: use generics for below logic +// TODO: Move this to C++ +#[allow(clippy::not_unsafe_ptr_arg_deref)] #[no_mangle] pub extern "C" fn ed25519_point_to_selene_scalar(ed25519_point: *const u8) -> SeleneScalar { - // TODO: unwrap or else error - let mut ed25519_point = unsafe { core::slice::from_raw_parts(ed25519_point, 32) }; - let ed25519_point = ::read_G(&mut ed25519_point).unwrap(); + let mut ed25519_point = unsafe { core::slice::from_raw_parts(ed25519_point, 32) }; + // TODO: If not moved to C++, at least return an error here (instead of unwrapping) + let ed25519_point = ::read_G(&mut ed25519_point).unwrap(); - let xy_coords = ::G::to_xy(ed25519_point); - let x: SeleneScalar = xy_coords.0; - x + let xy_coords = ::G::to_xy(ed25519_point); + let x: SeleneScalar = xy_coords.0; + x } -// TODO: use generics for below logic #[no_mangle] pub extern "C" fn selene_point_to_helios_scalar(selene_point: SelenePoint) -> HeliosScalar { - let xy_coords = SelenePoint::to_xy(selene_point); - let x: HeliosScalar = xy_coords.0; - x + let xy_coords = SelenePoint::to_xy(selene_point); + let x: HeliosScalar = xy_coords.0; + x } -// TODO: use generics for below logic #[no_mangle] pub extern "C" fn helios_point_to_selene_scalar(helios_point: HeliosPoint) -> SeleneScalar { - let xy_coords = HeliosPoint::to_xy(helios_point); - let x: SeleneScalar = xy_coords.0; - x + let xy_coords = HeliosPoint::to_xy(helios_point); + let x: SeleneScalar = xy_coords.0; + x } #[no_mangle] pub extern "C" fn helios_zero_scalar() -> HeliosScalar { - HeliosScalar::ZERO + HeliosScalar::ZERO } #[no_mangle] pub extern "C" fn selene_zero_scalar() -> SeleneScalar { - SeleneScalar::ZERO + SeleneScalar::ZERO } #[repr(C)] pub struct Slice { - buf: *const T, - len: usize, + buf: *const T, + len: usize, } 
pub type HeliosScalarSlice = Slice; pub type SeleneScalarSlice = Slice; -impl<'a, T> Into<&'a [T]> for Slice { - fn into(self) -> &'a [T] { - unsafe { core::slice::from_raw_parts(self.buf, self.len) } - } +impl<'a, T> From> for &'a [T] { + fn from(slice: Slice) -> Self { + unsafe { core::slice::from_raw_parts(slice.buf, slice.len) } + } } #[repr(C)] pub struct CResult { - value: T, - err: *const E, + value: T, + err: *const E, } impl CResult { - fn ok(value: T) -> Self { - CResult { value, err: core::ptr::null() } - } - fn err(default: T, err: E) -> Self { - CResult { value: default, err: Box::into_raw(Box::new(err)) } - } + fn ok(value: T) -> Self { + CResult { + value, + err: core::ptr::null(), + } + } + fn err(default: T, err: E) -> Self { + CResult { + value: default, + err: Box::into_raw(Box::new(err)), + } + } } -// TODO: use generics for curves #[no_mangle] pub extern "C" fn hash_grow_helios( - helios_generators: &HeliosGenerators, - existing_hash: HeliosPoint, - offset: usize, - prior_children: HeliosScalarSlice, - new_children: HeliosScalarSlice + helios_generators: &HeliosGenerators, + existing_hash: HeliosPoint, + offset: usize, + prior_children: HeliosScalarSlice, + new_children: HeliosScalarSlice, ) -> CResult { - let hash = hash_grow( - &helios_generators.generators, - existing_hash, - offset, - prior_children.into(), - new_children.into(), - ); - - if let Some(hash) = hash { - CResult::ok(hash) - } else { - CResult::err(HeliosPoint::identity(), io::Error::new(io::ErrorKind::Other, "failed to grow hash")) - } -} - -// TODO: use generics for curves + let hash = hash_grow( + &helios_generators.generators, + existing_hash, + offset, + prior_children.into(), + new_children.into(), + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + CResult::err( + HeliosPoint::identity(), + io::Error::new(io::ErrorKind::Other, "failed to grow hash"), + ) + } +} + #[no_mangle] pub extern "C" fn hash_grow_selene( - selene_generators: &SeleneGenerators, - 
existing_hash: SelenePoint, - offset: usize, - prior_children: SeleneScalarSlice, - new_children: SeleneScalarSlice + selene_generators: &SeleneGenerators, + existing_hash: SelenePoint, + offset: usize, + prior_children: SeleneScalarSlice, + new_children: SeleneScalarSlice, ) -> CResult { - let hash = hash_grow( - &selene_generators.generators, - existing_hash, - offset, - prior_children.into(), - new_children.into(), - ); - - if let Some(hash) = hash { - CResult::ok(hash) - } else { - CResult::err(SelenePoint::identity(), io::Error::new(io::ErrorKind::Other, "failed to grow hash")) - } - + let hash = hash_grow( + &selene_generators.generators, + existing_hash, + offset, + prior_children.into(), + new_children.into(), + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + CResult::err( + SelenePoint::identity(), + io::Error::new(io::ErrorKind::Other, "failed to grow hash"), + ) + } } From c792b215352ce66265ef522dac97306e4f309a42 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 May 2024 21:16:25 -0400 Subject: [PATCH 019/127] Use statics on the Rust side for generators --- src/fcmp/fcmp_rust/fcmp++.h | 26 +++------------ src/fcmp/fcmp_rust/src/lib.rs | 53 ++++++++++++++---------------- src/fcmp/tower_cycle.cpp | 22 ------------- src/fcmp/tower_cycle.h | 24 +++----------- tests/unit_tests/blockchain_db.cpp | 8 ++--- tests/unit_tests/curve_trees.cpp | 43 +++++++++--------------- tests/unit_tests/curve_trees.h | 10 ------ 7 files changed, 51 insertions(+), 135 deletions(-) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index 590f2000aff..6a7e61ff523 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -72,17 +72,6 @@ struct SelenePoint { // ----- End deps C bindings ----- -template -struct Box; - -struct HeliosGenerators { - void* generators; -}; - -struct SeleneGenerators { - void* generators; -}; - template struct CResult { T value; @@ -100,14 +89,9 @@ using HeliosScalarSlice = Slice; using 
SeleneScalarSlice = Slice; extern "C" { +HeliosPoint helios_hash_init_point(); -HeliosGenerators random_helios_generators(uintptr_t n); - -SeleneGenerators random_selene_generators(uintptr_t n); - -HeliosPoint random_helios_hash_init_point(); - -SelenePoint random_selene_hash_init_point(); +SelenePoint selene_hash_init_point(); uint8_t *helios_scalar_to_bytes(HeliosScalar helios_scalar); @@ -127,14 +111,12 @@ HeliosScalar helios_zero_scalar(); SeleneScalar selene_zero_scalar(); -CResult hash_grow_helios(const HeliosGenerators *helios_generators, - HeliosPoint existing_hash, +CResult hash_grow_helios(HeliosPoint existing_hash, uintptr_t offset, HeliosScalarSlice prior_children, HeliosScalarSlice new_children); -CResult hash_grow_selene(const SeleneGenerators *selene_generators, - SelenePoint existing_hash, +CResult hash_grow_selene(SelenePoint existing_hash, uintptr_t offset, SeleneScalarSlice prior_children, SeleneScalarSlice new_children); diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 8a6d465bd66..891cf91b91a 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -1,4 +1,4 @@ -use std::io; +use std::{io, sync::OnceLock}; use rand_core::OsRng; @@ -21,39 +21,36 @@ use full_chain_membership_proofs::tree::hash_grow; // TODO: Use a macro to de-duplicate some of of this code -#[repr(C)] -pub struct HeliosGenerators { - generators: Box>, -} -#[repr(C)] -pub struct SeleneGenerators { - generators: Box>, -} +pub const HELIOS_GENERATORS_LENGTH: usize = 128; +pub const SELENE_GENERATORS_LENGTH: usize = 256; -#[no_mangle] -pub extern "C" fn random_helios_generators(n: usize) -> HeliosGenerators { - let helios_generators = generalized_bulletproofs::tests::generators::(n); - HeliosGenerators { - generators: Box::new(helios_generators), - } +static HELIOS_GENERATORS: OnceLock> = OnceLock::new(); +static SELENE_GENERATORS: OnceLock> = OnceLock::new(); + +static HELIOS_HASH_INIT: OnceLock = OnceLock::new(); +static 
SELENE_HASH_INIT: OnceLock = OnceLock::new(); + +// TODO: Don't use random generators +fn helios_generators() -> &'static Generators { + HELIOS_GENERATORS.get_or_init(|| { + generalized_bulletproofs::tests::generators::(HELIOS_GENERATORS_LENGTH) + }) } -#[no_mangle] -pub extern "C" fn random_selene_generators(n: usize) -> SeleneGenerators { - let selene_generators = generalized_bulletproofs::tests::generators::(n); - SeleneGenerators { - generators: Box::new(selene_generators), - } +fn selene_generators() -> &'static Generators { + SELENE_GENERATORS.get_or_init(|| { + generalized_bulletproofs::tests::generators::(SELENE_GENERATORS_LENGTH) + }) } #[no_mangle] -pub extern "C" fn random_helios_hash_init_point() -> HeliosPoint { - HeliosPoint::random(&mut OsRng) +pub extern "C" fn helios_hash_init_point() -> HeliosPoint { + *HELIOS_HASH_INIT.get_or_init(|| HeliosPoint::random(&mut OsRng)) } #[no_mangle] -pub extern "C" fn random_selene_hash_init_point() -> SelenePoint { - SelenePoint::random(&mut OsRng) +pub extern "C" fn selene_hash_init_point() -> SelenePoint { + *SELENE_HASH_INIT.get_or_init(|| SelenePoint::random(&mut OsRng)) } fn c_u8_32(bytes: [u8; 32]) -> *const u8 { @@ -154,14 +151,13 @@ impl CResult { #[no_mangle] pub extern "C" fn hash_grow_helios( - helios_generators: &HeliosGenerators, existing_hash: HeliosPoint, offset: usize, prior_children: HeliosScalarSlice, new_children: HeliosScalarSlice, ) -> CResult { let hash = hash_grow( - &helios_generators.generators, + helios_generators(), existing_hash, offset, prior_children.into(), @@ -180,14 +176,13 @@ pub extern "C" fn hash_grow_helios( #[no_mangle] pub extern "C" fn hash_grow_selene( - selene_generators: &SeleneGenerators, existing_hash: SelenePoint, offset: usize, prior_children: SeleneScalarSlice, new_children: SeleneScalarSlice, ) -> CResult { let hash = hash_grow( - &selene_generators.generators, + selene_generators(), existing_hash, offset, prior_children.into(), diff --git a/src/fcmp/tower_cycle.cpp 
b/src/fcmp/tower_cycle.cpp index 8ad64b4be74..f31c811d9a5 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -52,7 +52,6 @@ Helios::Point Helios::hash_grow( const Helios::Chunk &new_children) const { auto res = fcmp_rust::hash_grow_helios( - &m_generators, existing_hash, offset, prior_children, @@ -70,7 +69,6 @@ Selene::Point Selene::hash_grow( const Selene::Chunk &new_children) const { auto res = fcmp_rust::hash_grow_selene( - &m_generators, existing_hash, offset, prior_children, @@ -199,26 +197,6 @@ template void extend_scalars_from_cycle_points(const Selene &cur const std::vector &points, std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- -Helios::Generators random_helios_generators(std::size_t n) -{ - return fcmp_rust::random_helios_generators(n); -} -//---------------------------------------------------------------------------------------------------------------------- -Selene::Generators random_selene_generators(std::size_t n) -{ - return fcmp_rust::random_selene_generators(n); -} -//---------------------------------------------------------------------------------------------------------------------- -Helios::Point random_helios_hash_init_point() -{ - return fcmp_rust::random_helios_hash_init_point(); -} -//---------------------------------------------------------------------------------------------------------------------- -Selene::Point random_selene_hash_init_point() -{ - return fcmp_rust::random_selene_hash_init_point(); -} -//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace tower_cycle } //namespace fcmp diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 0f61bc08bb3..ec67510a304 100644 --- a/src/fcmp/tower_cycle.h +++ 
b/src/fcmp/tower_cycle.h @@ -49,7 +49,6 @@ using HeliosScalar = fcmp_rust::HeliosScalar; //---------------------------------------------------------------------------------------------------------------------- struct HeliosT final { - using Generators = fcmp_rust::HeliosGenerators; using Scalar = HeliosScalar; using Point = fcmp_rust::HeliosPoint; using Chunk = fcmp_rust::HeliosScalarSlice; @@ -58,7 +57,6 @@ struct HeliosT final //---------------------------------------------------------------------------------------------------------------------- struct SeleneT final { - using Generators = fcmp_rust::SeleneGenerators; using Scalar = SeleneScalar; using Point = fcmp_rust::SelenePoint; using Chunk = fcmp_rust::SeleneScalarSlice; @@ -72,8 +70,7 @@ class Curve { //constructor public: - Curve(const typename C::Generators &generators, const typename C::Point &hash_init_point): - m_generators{generators}, + Curve(const typename C::Point &hash_init_point): m_hash_init_point{hash_init_point} {}; @@ -98,8 +95,6 @@ class Curve //member variables public: - // TODO: make these static constants - const typename C::Generators &m_generators; const typename C::Point &m_hash_init_point; }; //---------------------------------------------------------------------------------------------------------------------- @@ -107,7 +102,6 @@ class Helios final : public Curve { //typedefs public: - using Generators = HeliosT::Generators; using Scalar = HeliosT::Scalar; using Point = HeliosT::Point; using Chunk = HeliosT::Chunk; @@ -115,8 +109,8 @@ class Helios final : public Curve //constructor public: - Helios(const Generators &generators, const Point &hash_init_point) - : Curve(generators, hash_init_point) + Helios() + : Curve(fcmp_rust::helios_hash_init_point()) {}; //member functions @@ -142,7 +136,6 @@ class Selene final : public Curve { //typedefs public: - using Generators = SeleneT::Generators; using Scalar = SeleneT::Scalar; using Point = SeleneT::Point; using Chunk = SeleneT::Chunk; @@ 
-150,8 +143,8 @@ class Selene final : public Curve //constructor public: - Selene(const Generators &generators, const Point &hash_init_point) - : Curve(generators, hash_init_point) + Selene() + : Curve(fcmp_rust::selene_hash_init_point()) {}; //member functions @@ -187,13 +180,6 @@ void extend_scalars_from_cycle_points(const C_POINTS &curve, const std::vector &points, std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- -// TODO: use static constants and get rid of the below functions (WARNING: number of generators must be >= curve's -// width, and also need to account for selene leaf layer 3x) -Helios::Generators random_helios_generators(std::size_t n); -Selene::Generators random_selene_generators(std::size_t n); -Helios::Point random_helios_hash_init_point(); -Selene::Point random_selene_hash_init_point(); -//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- }//namespace tower_cycle }//namespace fcmp diff --git a/tests/unit_tests/blockchain_db.cpp b/tests/unit_tests/blockchain_db.cpp index e5085bccb96..6ab826bf0e0 100644 --- a/tests/unit_tests/blockchain_db.cpp +++ b/tests/unit_tests/blockchain_db.cpp @@ -356,12 +356,8 @@ TYPED_TEST(BlockchainDBTest, GrowCurveTrees) db_wtxn_guard guard(this->m_db); - CHECK_AND_ASSERT_THROW_MES(HELIOS_GENERATORS_LEN >= HELIOS_CHUNK_WIDTH, "helios generators < chunk width"); - CHECK_AND_ASSERT_THROW_MES(SELENE_GENERATORS_LEN >= (SELENE_CHUNK_WIDTH * CurveTreesV1::LEAF_TUPLE_SIZE), - "selene generators < max chunk width"); - - Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); - Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); + Helios helios; + Selene selene; auto curve_trees = CurveTreesV1( helios, diff --git 
a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index d29e78249df..311d673de5e 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -569,28 +569,32 @@ static void grow_tree(CurveTreesV1 &curve_trees, ASSERT_TRUE(curve_trees_accessor.validate_tree(tree_inout)); } //---------------------------------------------------------------------------------------------------------------------- -static void grow_tree_test(Helios &helios, - Selene &selene, - const std::size_t helios_width, - const std::size_t selene_width) +//---------------------------------------------------------------------------------------------------------------------- +// Test +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, grow_tree) { - LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_width << ", selene chunk width " << selene_width); + Helios helios; + Selene selene; + + LOG_PRINT_L1("Test grow tree with helios chunk width " << HELIOS_CHUNK_WIDTH + << ", selene chunk width " << SELENE_CHUNK_WIDTH); auto curve_trees = CurveTreesV1( helios, selene, - helios_width, - selene_width); + HELIOS_CHUNK_WIDTH, + SELENE_CHUNK_WIDTH); CurveTreesUnitTest curve_trees_accessor{curve_trees}; - CHECK_AND_ASSERT_THROW_MES(helios_width > 1, "helios width must be > 1"); - CHECK_AND_ASSERT_THROW_MES(selene_width > 1, "selene width must be > 1"); + CHECK_AND_ASSERT_THROW_MES(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1"); + CHECK_AND_ASSERT_THROW_MES(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1"); // Number of leaves for which x number of layers is required - const std::size_t NEED_1_LAYER = selene_width; - const std::size_t NEED_2_LAYERS = NEED_1_LAYER * helios_width; - const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * selene_width; + const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH; + const std::size_t NEED_2_LAYERS = NEED_1_LAYER * 
HELIOS_CHUNK_WIDTH; + const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH; const std::vector N_LEAVES{ // Basic tests @@ -647,18 +651,3 @@ static void grow_tree_test(Helios &helios, } } } -//---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- -// Test -//---------------------------------------------------------------------------------------------------------------------- -TEST(curve_trees, grow_tree) -{ - CHECK_AND_ASSERT_THROW_MES(HELIOS_GENERATORS_LEN >= HELIOS_CHUNK_WIDTH, "helios generators < chunk width"); - CHECK_AND_ASSERT_THROW_MES(SELENE_GENERATORS_LEN >= (SELENE_CHUNK_WIDTH * CurveTreesV1::LEAF_TUPLE_SIZE), - "selene generators < max chunk width"); - - Helios helios(HELIOS_GENERATORS, HELIOS_HASH_INIT_POINT); - Selene selene(SELENE_GENERATORS, SELENE_HASH_INIT_POINT); - - grow_tree_test(helios, selene, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); -} diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 0be03172dfe..a89032f18ad 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -78,17 +78,7 @@ class CurveTreesUnitTest const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves); -// TODO: use static constant generators and hash init points -const std::size_t HELIOS_GENERATORS_LEN = 128; -const std::size_t SELENE_GENERATORS_LEN = 256; - // https://github.com/kayabaNerve/fcmp-plus-plus/blob // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 const std::size_t HELIOS_CHUNK_WIDTH = 38; const std::size_t SELENE_CHUNK_WIDTH = 18; - -const Helios::Generators HELIOS_GENERATORS = fcmp::tower_cycle::random_helios_generators(HELIOS_GENERATORS_LEN); -const Selene::Generators SELENE_GENERATORS = 
fcmp::tower_cycle::random_selene_generators(SELENE_GENERATORS_LEN); - -const Helios::Point HELIOS_HASH_INIT_POINT = fcmp::tower_cycle::random_helios_hash_init_point(); -const Selene::Point SELENE_HASH_INIT_POINT = fcmp::tower_cycle::random_selene_hash_init_point(); From effa9eea0e2664589ee70733e5150ae69238ac69 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 May 2024 02:56:57 -0400 Subject: [PATCH 020/127] Only pass a single prior child --- src/fcmp/curve_trees.cpp | 24 +++++++++--------------- src/fcmp/fcmp_rust/Cargo.lock | 24 ++++++++++++------------ src/fcmp/fcmp_rust/fcmp++.h | 4 ++-- src/fcmp/fcmp_rust/src/lib.rs | 11 +++++------ src/fcmp/tower_cycle.cpp | 13 ++++++++----- src/fcmp/tower_cycle.h | 6 +++--- 6 files changed, 39 insertions(+), 43 deletions(-) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index f210022ef9c..b9be9fc7643 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -44,22 +44,20 @@ template class CurveTrees; template typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children) { - // New parent means no prior children, fill priors with 0 - std::vector prior_children; - tower_cycle::extend_zeroes(curve, new_children.len, prior_children); - return curve.hash_grow( curve.m_hash_init_point, 0,/*offset*/ - typename C::Chunk{prior_children.data(), prior_children.size()}, + curve.zero_scalar(), new_children ); }; +template Helios::Point get_new_parent(const Helios &curve, const typename Helios::Chunk &new_children); +template Selene::Point get_new_parent(const Selene &curve, const typename Selene::Chunk &new_children); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // Static functions 
//---------------------------------------------------------------------------------------------------------------------- -// Hash the first chunk of children being added to a layer +// Hash the first chunk of the children now being added to a layer template static typename C::Point get_first_parent(const C &curve, const typename C::Chunk &new_children, @@ -72,21 +70,17 @@ static typename C::Point get_first_parent(const C &curve, if (last_chunk_ptr == nullptr) return get_new_parent(curve, new_children); - std::vector prior_children; + typename C::Scalar first_child_after_offset = curve.zero_scalar(); + if (child_layer_last_hash_updated) { // If the last chunk has updated children in it, then we need to get the delta to the old children - prior_children.emplace_back(last_chunk_ptr->last_child); - - // Extend prior children by zeroes for any additional new children, since they must be new - if (new_children.len > 1) - tower_cycle::extend_zeroes(curve, new_children.len - 1, prior_children); + first_child_after_offset = last_chunk_ptr->last_child; } else if (offset > 0) { // If we're updating the parent hash and no children were updated, then we're just adding new children - // to the existing last chunk and can fill priors with 0 - tower_cycle::extend_zeroes(curve, new_children.len, prior_children); + // to the existing last chunk and can leave first_child_after_offset as zero } else { @@ -97,7 +91,7 @@ static typename C::Point get_first_parent(const C &curve, return curve.hash_grow( last_chunk_ptr->last_parent, offset, - typename C::Chunk{prior_children.data(), prior_children.size()}, + first_child_after_offset, new_children ); }; diff --git a/src/fcmp/fcmp_rust/Cargo.lock b/src/fcmp/fcmp_rust/Cargo.lock index a8bd846a1e0..fc27ca4b875 100644 --- a/src/fcmp/fcmp_rust/Cargo.lock +++ b/src/fcmp/fcmp_rust/Cargo.lock @@ -71,7 +71,7 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "ciphersuite" version = "0.4.1" -source = 
"git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "blake2", "dalek-ff-group", @@ -161,7 +161,7 @@ dependencies = [ [[package]] name = "dalek-ff-group" version = "0.4.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "crypto-bigint", "curve25519-dalek", @@ -198,7 +198,7 @@ dependencies = [ [[package]] name = "ec-divisors" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "dalek-ff-group", "group", @@ -272,7 +272,7 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flexible-transcript" version = "0.3.2" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "blake2", "digest", @@ -285,7 +285,7 @@ dependencies = [ [[package]] name = "full-chain-membership-proofs" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "ciphersuite", "ec-divisors", @@ -305,7 +305,7 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "generalized-bulletproofs" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = 
"git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "ciphersuite", "flexible-transcript", @@ -368,7 +368,7 @@ dependencies = [ [[package]] name = "helioselene" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "crypto-bigint", "dalek-ff-group", @@ -429,7 +429,7 @@ dependencies = [ [[package]] name = "minimal-ed448" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "crypto-bigint", "ff", @@ -444,7 +444,7 @@ dependencies = [ [[package]] name = "multiexp" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "ff", "group", @@ -613,7 +613,7 @@ dependencies = [ [[package]] name = "std-shims" version = "0.1.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a17500708f5c6d79ec9cc33d53c771149db152c3" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" dependencies = [ "hashbrown", "spin", @@ -627,9 +627,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "2.0.65" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index 
6a7e61ff523..695d9807bf5 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -113,12 +113,12 @@ SeleneScalar selene_zero_scalar(); CResult hash_grow_helios(HeliosPoint existing_hash, uintptr_t offset, - HeliosScalarSlice prior_children, + HeliosScalar first_child_after_offset, HeliosScalarSlice new_children); CResult hash_grow_selene(SelenePoint existing_hash, uintptr_t offset, - SeleneScalarSlice prior_children, + SeleneScalar first_child_after_offset, SeleneScalarSlice new_children); } // extern "C" diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 891cf91b91a..d126f06bbdb 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -79,12 +79,11 @@ pub extern "C" fn selene_point_to_bytes(selene_point: SelenePoint) -> *const u8 } // Get the x coordinate of the ed25519 point -// TODO: Move this to C++ #[allow(clippy::not_unsafe_ptr_arg_deref)] #[no_mangle] pub extern "C" fn ed25519_point_to_selene_scalar(ed25519_point: *const u8) -> SeleneScalar { let mut ed25519_point = unsafe { core::slice::from_raw_parts(ed25519_point, 32) }; - // TODO: If not moved to C++, at least return an error here (instead of unwrapping) + // TODO: Return an error here (instead of unwrapping) let ed25519_point = ::read_G(&mut ed25519_point).unwrap(); let xy_coords = ::G::to_xy(ed25519_point); @@ -153,14 +152,14 @@ impl CResult { pub extern "C" fn hash_grow_helios( existing_hash: HeliosPoint, offset: usize, - prior_children: HeliosScalarSlice, + first_child_after_offset: HeliosScalar, new_children: HeliosScalarSlice, ) -> CResult { let hash = hash_grow( helios_generators(), existing_hash, offset, - prior_children.into(), + first_child_after_offset, new_children.into(), ); @@ -178,14 +177,14 @@ pub extern "C" fn hash_grow_helios( pub extern "C" fn hash_grow_selene( existing_hash: SelenePoint, offset: usize, - prior_children: SeleneScalarSlice, + first_child_after_offset: SeleneScalar, new_children: 
SeleneScalarSlice, ) -> CResult { let hash = hash_grow( selene_generators(), existing_hash, offset, - prior_children.into(), + first_child_after_offset, new_children.into(), ); diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index f31c811d9a5..04119844c2d 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -48,13 +48,13 @@ Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) co Helios::Point Helios::hash_grow( const Helios::Point &existing_hash, const std::size_t offset, - const Helios::Chunk &prior_children, + const Helios::Scalar &first_child_after_offset, const Helios::Chunk &new_children) const { auto res = fcmp_rust::hash_grow_helios( existing_hash, offset, - prior_children, + first_child_after_offset, new_children); if (res.err != 0) { throw std::runtime_error("failed to hash grow"); @@ -65,13 +65,13 @@ Helios::Point Helios::hash_grow( Selene::Point Selene::hash_grow( const Selene::Point &existing_hash, const std::size_t offset, - const Selene::Chunk &prior_children, + const Selene::Scalar &first_child_after_offset, const Selene::Chunk &new_children) const { auto res = fcmp_rust::hash_grow_selene( existing_hash, offset, - prior_children, + first_child_after_offset, new_children); if (res.err != 0) { throw std::runtime_error("failed to hash grow"); @@ -150,6 +150,10 @@ std::string Selene::to_string(const typename Selene::Point &point) const //---------------------------------------------------------------------------------------------------------------------- SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) { + // If this function receives the ec_point, this is fine + // If this function can receive a decompressed point, it'd be notably faster + // to extract the Wei25519 x coordinate from the C side of things and then + // pass that return fcmp_rust::ed25519_point_to_selene_scalar((uint8_t*) &point.data); } 
//---------------------------------------------------------------------------------------------------------------------- @@ -182,7 +186,6 @@ void extend_scalars_from_cycle_points(const C_POINTS &curve, for (const auto &point : points) { - // TODO: implement reading just the x coordinate of points on curves in curve cycle in C/C++ typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); scalars_out.push_back(std::move(scalar)); } diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index ec67510a304..dfd9eee4bc2 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -82,7 +82,7 @@ class Curve virtual typename C::Point hash_grow( const typename C::Point &existing_hash, const std::size_t offset, - const typename C::Chunk &prior_children, + const typename C::Scalar &first_child_after_offset, const typename C::Chunk &new_children) const = 0; virtual typename C::Scalar zero_scalar() const = 0; @@ -120,7 +120,7 @@ class Helios final : public Curve Point hash_grow( const Point &existing_hash, const std::size_t offset, - const Chunk &prior_children, + const Scalar &first_child_after_offset, const Chunk &new_children) const override; Scalar zero_scalar() const override; @@ -154,7 +154,7 @@ class Selene final : public Curve Point hash_grow( const Point &existing_hash, const std::size_t offset, - const Chunk &prior_children, + const Scalar &first_child_after_offset, const Chunk &new_children) const override; Scalar zero_scalar() const override; From 517f5a301e5ee2e95abecc50bda71cf050d49c77 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 24 May 2024 01:36:34 -0700 Subject: [PATCH 021/127] don't want to expose generator lengths in ffi --- src/fcmp/fcmp_rust/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index d126f06bbdb..8b8f35d84cd 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -21,8 +21,8 @@ use 
full_chain_membership_proofs::tree::hash_grow; // TODO: Use a macro to de-duplicate some of of this code -pub const HELIOS_GENERATORS_LENGTH: usize = 128; -pub const SELENE_GENERATORS_LENGTH: usize = 256; +const HELIOS_GENERATORS_LENGTH: usize = 128; +const SELENE_GENERATORS_LENGTH: usize = 256; static HELIOS_GENERATORS: OnceLock> = OnceLock::new(); static SELENE_GENERATORS: OnceLock> = OnceLock::new(); From a1ee603132f4b2a45cdcf6eedfdc18091c4f42f4 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 24 May 2024 01:55:49 -0700 Subject: [PATCH 022/127] explicit type response to hash_grow 's --- src/fcmp/tower_cycle.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 04119844c2d..46ee8f1bb79 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -51,7 +51,7 @@ Helios::Point Helios::hash_grow( const Helios::Scalar &first_child_after_offset, const Helios::Chunk &new_children) const { - auto res = fcmp_rust::hash_grow_helios( + fcmp_rust::CResult res = fcmp_rust::hash_grow_helios( existing_hash, offset, first_child_after_offset, @@ -68,7 +68,7 @@ Selene::Point Selene::hash_grow( const Selene::Scalar &first_child_after_offset, const Selene::Chunk &new_children) const { - auto res = fcmp_rust::hash_grow_selene( + fcmp_rust::CResult res = fcmp_rust::hash_grow_selene( existing_hash, offset, first_child_after_offset, From 988c4eae402880027bae7aa60b79ed1a75e7b7ac Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 May 2024 19:24:38 -0400 Subject: [PATCH 023/127] Remove reference from m_hash_init_point Identified by kayabaNerve, patch suggested by j-berman. 
--- src/fcmp/tower_cycle.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index dfd9eee4bc2..9fdea38f350 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -70,7 +70,9 @@ class Curve { //constructor public: - Curve(const typename C::Point &hash_init_point): + // This doesn't have a reference as doing so delays initialization and borks + // it + Curve(const typename C::Point hash_init_point): m_hash_init_point{hash_init_point} {}; @@ -95,7 +97,7 @@ class Curve //member variables public: - const typename C::Point &m_hash_init_point; + const typename C::Point m_hash_init_point; }; //---------------------------------------------------------------------------------------------------------------------- class Helios final : public Curve From ab7c74136b30be53636f0a692bbd47e2a450bcce Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 24 May 2024 18:12:21 -0700 Subject: [PATCH 024/127] Simplify edge case handling in hash_layer - When retrieving last chunks, set next_start_child_chunk_index so can know the correct start index without needing to modify the offset - Other smaller cleanup --- src/blockchain_db/lmdb/db_lmdb.cpp | 36 ++++---- src/blockchain_db/lmdb/db_lmdb.h | 3 +- src/fcmp/curve_trees.cpp | 128 ++++++++++++---------------- src/fcmp/curve_trees.h | 38 ++++++--- src/fcmp/tower_cycle.cpp | 2 + src/fcmp/tower_cycle.h | 7 +- tests/unit_tests/curve_trees.cpp | 132 ++++++++++++++++------------- 7 files changed, 176 insertions(+), 170 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 35d04147356..1a4be122928 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1301,7 +1301,8 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } template -void BlockchainLMDB::grow_layer(const std::vector> &layer_extensions, +void BlockchainLMDB::grow_layer(const C &curve, + const 
std::vector> &layer_extensions, const std::size_t ext_idx, const std::size_t layer_idx, const fcmp::curve_trees::LastChunkData *last_chunk_ptr) @@ -1317,26 +1318,17 @@ void BlockchainLMDB::grow_layer(const std::vectorparent_layer_size is correct - - // Check if we started at 0 if fresh layer, or if started 1 after the last element in the layer - const bool started_after_tip = (ext.start_idx == 0 && last_chunk_ptr == nullptr) - || (last_chunk_ptr != nullptr && ext.start_idx == last_chunk_ptr->parent_layer_size); - - // Check if we updated the last element in the layer - const bool started_at_tip = (last_chunk_ptr != nullptr - && (ext.start_idx + 1) == last_chunk_ptr->parent_layer_size); - - CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected layer start"); + // TODO: make sure last_chunk_ptr->next_start_child_chunk_index lines up MDB_val_copy k(layer_idx); - if (started_at_tip) + const bool update_last_parent = last_chunk_ptr != nullptr && last_chunk_ptr->update_last_parent; + if (update_last_parent) { // We updated the last hash, so update it layer_val lv; lv.child_chunk_idx = ext.start_idx; - lv.child_chunk_hash = std::array(); // ext.hashes.front(); // TODO + lv.child_chunk_hash = curve.to_bytes(ext.hashes.front()); MDB_val_set(v, lv); // We expect to overwrite the existing hash @@ -1347,11 +1339,11 @@ void BlockchainLMDB::grow_layer(const std::vector(); // ext.hashes[i]; // TODO + lv.child_chunk_hash = curve.to_bytes(ext.hashes[i]); MDB_val_set(v, lv); // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. @@ -1410,7 +1402,11 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree ? 
nullptr : &last_chunks.c2_last_chunks[c2_idx]; - this->grow_layer(c2_extensions, c2_idx, layer_idx, c2_last_chunk_ptr); + this->grow_layer(curve_trees.m_c2, + c2_extensions, + c2_idx, + layer_idx, + c2_last_chunk_ptr); ++c2_idx; } @@ -1420,7 +1416,11 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree ? nullptr : &last_chunks.c1_last_chunks[c1_idx]; - this->grow_layer(c1_extensions, c1_idx, layer_idx, c1_last_chunk_ptr); + this->grow_layer(curve_trees.m_c1, + c1_extensions, + c1_idx, + layer_idx, + c1_last_chunk_ptr); ++c1_idx; } diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 688f4f998d2..01fbcdf88e3 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -411,7 +411,8 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_spent_key(const crypto::key_image& k_image); template - void grow_layer(const std::vector> &layer_extensions, + void grow_layer(const C &curve, + const std::vector> &layer_extensions, const std::size_t c_idx, const std::size_t layer_idx, const fcmp::curve_trees::LastChunkData *last_chunk_data); diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index b9be9fc7643..859e9dcf803 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -62,7 +62,6 @@ template static typename C::Point get_first_parent(const C &curve, const typename C::Chunk &new_children, const std::size_t chunk_width, - const bool child_layer_last_hash_updated, const LastChunkData *last_chunk_ptr, const std::size_t offset) { @@ -70,28 +69,31 @@ static typename C::Point get_first_parent(const C &curve, if (last_chunk_ptr == nullptr) return get_new_parent(curve, new_children); - typename C::Scalar first_child_after_offset = curve.zero_scalar(); - - if (child_layer_last_hash_updated) + typename C::Scalar prior_child_after_offset; + if (last_chunk_ptr->update_last_parent) { - // If the last chunk has updated children in it, then we need to get the 
delta to the old children - first_child_after_offset = last_chunk_ptr->last_child; + // If the last parent has an updated child in it, then we need to get the delta to the old child + prior_child_after_offset = last_chunk_ptr->last_child; } else if (offset > 0) { - // If we're updating the parent hash and no children were updated, then we're just adding new children - // to the existing last chunk and can leave first_child_after_offset as zero + // If we're not updating the last parent hash and offset is non-zero, then we must be adding new children + // to the existing last chunk. New children means no prior child after offset exists, use zero scalar + prior_child_after_offset = curve.zero_scalar(); } else { - // If the last chunk is already full and isn't updated in any way, then we just get a new parent + // If we're not updating the last parent and the last chunk is already full, we can get a new parent return get_new_parent(curve, new_children); } + MDEBUG("Updating existing hash: " << curve.to_string(last_chunk_ptr->last_parent) << " , offset: " << offset + << ", prior_child_after_offset: " << curve.to_string(prior_child_after_offset)); + return curve.hash_grow( last_chunk_ptr->last_parent, offset, - first_child_after_offset, + prior_child_after_offset, new_children ); }; @@ -99,77 +101,55 @@ static typename C::Point get_first_parent(const C &curve, // After hashing a layer of children points, convert those children x-coordinates into their respective cycle // scalars, and prepare them to be hashed for the next layer template -static std::vector next_child_scalars_from_children(const C_CHILD &c_child, +static std::size_t next_child_scalars_from_children(const C_CHILD &c_child, + const bool updating_root_layer, const LastChunkData *last_child_chunk_ptr, - const LastChunkData *last_parent_chunk_ptr, - const LayerExtension &children) + const LayerExtension &children, + std::vector &child_scalars_out) { - std::vector child_scalars; + child_scalars_out.clear(); + 
child_scalars_out.reserve(1 + children.hashes.size()); - // The existing root would have a size of 1 - const bool updating_root_layer = last_child_chunk_ptr != nullptr - && last_child_chunk_ptr->parent_layer_size == 1; + std::uint64_t next_child_start_index = children.start_idx; // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when // hashing the *existing* root layer if (updating_root_layer) { - // We should be updating the existing root, there shouldn't be a last parent chunk - CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root"); + CHECK_AND_ASSERT_THROW_MES(last_child_chunk_ptr != nullptr, "last child chunk does not exist at root"); // If the children don't already include the existing root, then we need to include it to be hashed // - the children would include the existing root already if the existing root was updated in the child // layer (the start_idx would be 0) - if (children.start_idx > 0) - child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + if (next_child_start_index > 0) + { + MDEBUG("Updating root layer and including the existing root in next children"); + child_scalars_out.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); + --next_child_start_index; + } } // Convert child points to scalars - tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars); + tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars_out); - return child_scalars; + return next_child_start_index; }; //---------------------------------------------------------------------------------------------------------------------- // Hash chunks of a layer of new children, outputting the next layer's parents template static void hash_layer(const C &curve, - const LastChunkData *last_parent_chunk_ptr, + const LastChunkData *last_chunk_ptr, const std::vector 
&child_scalars, - const std::size_t children_start_idx, + const std::size_t child_start_idx, const std::size_t chunk_width, LayerExtension &parents_out) { - parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size; + parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->next_start_child_chunk_index; parents_out.hashes.clear(); CHECK_AND_ASSERT_THROW_MES(!child_scalars.empty(), "empty child scalars"); - // If the child layer had its existing last hash updated (if the new children include the last element in - // the child layer), then we'll need to use the last hash's prior version in order to update the existing - // last parent hash in this layer - // - Note: the leaf layer is strictly append-only, so this cannot be true for the leaf layer - const bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr) - ? false - : last_parent_chunk_ptr->child_layer_size == (children_start_idx + 1); - - std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset; - - // The offset needs to be brought back because we're going to start with the prior hash, and so the chunk - // will start from there and may need 1 more to fill - CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset"); - if (child_layer_last_hash_updated) - { - MDEBUG("child_layer_last_hash_updated, updating offset: " << offset); - offset = offset > 0 ? 
(offset - 1) : (chunk_width - 1); - } - - // If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1 - // since we'll be updating the existing parent hash of the last chunk - if (offset > 0 || child_layer_last_hash_updated) - { - CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0"); - --parents_out.start_idx; - } + const std::size_t offset = child_start_idx % chunk_width; // See how many children we need to fill up the existing last chunk std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset); @@ -184,21 +164,19 @@ static void hash_layer(const C &curve, const auto chunk_start = child_scalars.data() + chunk_start_idx; const typename C::Chunk chunk{chunk_start, chunk_size}; - for (uint c = 0; c < chunk_size; ++c) { - MDEBUG("Hashing " << curve.to_string(chunk_start[c])); - } + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing child " << curve.to_string(chunk_start[i])); // Hash the chunk of children typename C::Point chunk_hash = chunk_start_idx == 0 ? get_first_parent(curve, chunk, chunk_width, - child_layer_last_hash_updated, - last_parent_chunk_ptr, + last_chunk_ptr, offset) : get_new_parent(curve, chunk); - MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) << " , chunk_size: " << chunk_size); // We've got our hash @@ -248,10 +226,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; - // Set the leaf start idx - tree_extension.leaves.start_idx = c2_last_chunks.empty() - ? 
0 - : c2_last_chunks[0].child_layer_size; + tree_extension.leaves.start_idx = existing_last_chunks.next_start_leaf_index; // Copy the leaves // TODO: don't copy here @@ -285,6 +260,8 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) return tree_extension; + const std::size_t next_root_layer_idx = c1_last_chunks.size() + c2_last_chunks.size(); + // Alternate between hashing c2 children, c1 children, c2, c1, ... bool parent_is_c1 = true; @@ -293,11 +270,14 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) while (true) { - const LastChunkData *c1_last_chunk_ptr = (c1_last_idx >= c1_last_chunks.size()) + const std::size_t updating_layer_idx = 1 + c1_last_idx + c2_last_idx; + const std::size_t updating_root_layer = updating_layer_idx == next_root_layer_idx; + + const auto *c1_last_chunk_ptr = (c1_last_idx >= c1_last_chunks.size()) ? nullptr : &c1_last_chunks[c1_last_idx]; - const LastChunkData *c2_last_chunk_ptr = (c2_last_idx >= c2_last_chunks.size()) + const auto *c2_last_chunk_ptr = (c2_last_idx >= c2_last_chunks.size()) ? 
nullptr : &c2_last_chunks[c2_last_idx]; @@ -308,16 +288,18 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; - const auto c1_child_scalars = next_child_scalars_from_children(m_c2, + std::vector c1_child_scalars; + const std::size_t next_child_start_idx = next_child_scalars_from_children(m_c2, + updating_root_layer, c2_last_chunk_ptr, - c1_last_chunk_ptr, - c2_child_extension); + c2_child_extension, + c1_child_scalars); LayerExtension c1_layer_extension; hash_layer(m_c1, c1_last_chunk_ptr, c1_child_scalars, - c2_child_extension.start_idx, + next_child_start_idx, m_c1_width, c1_layer_extension); @@ -335,16 +317,18 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx]; - const auto c2_child_scalars = next_child_scalars_from_children(m_c1, + std::vector c2_child_scalars; + const std::size_t next_child_start_idx = next_child_scalars_from_children(m_c1, + updating_root_layer, c1_last_chunk_ptr, - c2_last_chunk_ptr, - c1_child_extension); + c1_child_extension, + c2_child_scalars); LayerExtension c2_layer_extension; hash_layer(m_c2, c2_last_chunk_ptr, c2_child_scalars, - c1_child_extension.start_idx, + next_child_start_idx, m_c2_width, c2_layer_extension); diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 642ff92318b..349ff92ada6 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -60,16 +60,23 @@ struct LayerExtension final template struct LastChunkData final { - // The total number of children % child layer chunk width - const std::size_t child_offset; - // The last child in the chunk (and therefore the last child in the child layer) - /* TODO: const */ typename C::Scalar last_child; - // The hash of the last chunk of child scalars - /* TODO: const */ typename C::Point last_parent; - // Total number of children in the child layer - const std::size_t child_layer_size; - 
// Total number of hashes in the parent layer - const std::size_t parent_layer_size; + // The next starting index in the layer (referencing the "next" child chunk) + const std::size_t next_start_child_chunk_index; + // The existing hash of the last chunk of child scalars + // - Used to grow the existing last chunk in the layer + // - Only must be set if the existing last chunk isn't full + const typename C::Point last_parent; + // Whether or not the existing last parent in the layer needs to be updated + // - True if the last leaf layer chunk is not yet full + // - If true, next_start_child_chunk_index == existing layer size + // - If false, next_start_child_chunk_index == (existing layer size - 1), since updating existing last parent + const bool update_last_parent; + // The last child in the last chunk (and therefore the last child in the child layer) + // - Used to get the delta from the existing last child to the new last child + // - Only needs to be set if update_last_parent is true + // - Since the leaf layer is append-only, the layer above leaf layer does not actually need this value since the + // last leaf will never change (and therefore, we'll never need the delta to a prior leaf) + const typename C::Scalar last_child; }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- @@ -111,7 +118,7 @@ class CurveTrees struct Leaves final { // Starting index in the leaf layer - std::size_t start_idx; + std::size_t start_idx{0}; // Contiguous leaves in a tree that start at the start_idx std::vector tuples; }; @@ -131,6 +138,7 @@ class CurveTrees // - c2_last_chunks[0] is first layer after leaves, then c1_last_chunks[0], then c2_last_chunks[1], etc struct LastChunks final { + std::size_t next_start_leaf_index{0}; std::vector> c1_last_chunks; std::vector> c2_last_chunks; }; @@ 
-150,12 +158,14 @@ class CurveTrees // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] std::vector flatten_leaves(const std::vector &leaves) const; -//member variables -private: - // The curves +//public member variables +public: + // The curve interfaces const C1 &m_c1; const C2 &m_c2; +//member variables +private: // The chunk widths of the layers in the tree tied to each curve const std::size_t m_c1_width; const std::size_t m_c2_width; diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 46ee8f1bb79..bbac37f64d1 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -150,6 +150,8 @@ std::string Selene::to_string(const typename Selene::Point &point) const //---------------------------------------------------------------------------------------------------------------------- SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) { + static_assert(sizeof(SeleneScalar) == sizeof(point), "size of selene scalar != size of ed25519 point"); + // If this function receives the ec_point, this is fine // If this function can receive a decompressed point, it'd be notably faster // to extract the Wei25519 x coordinate from the C side of things and then diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 9fdea38f350..efb87b799f5 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -41,8 +41,6 @@ namespace tower_cycle //---------------------------------------------------------------------------------------------------------------------- // Rust types //---------------------------------------------------------------------------------------------------------------------- -using RustEd25519Point = std::array; - // Need to forward declare Scalar types for point_to_cycle_scalar below using SeleneScalar = fcmp_rust::SeleneScalar; using HeliosScalar = fcmp_rust::HeliosScalar; @@ -70,9 +68,7 @@ class Curve { //constructor public: - // This doesn't have 
a reference as doing so delays initialization and borks - // it - Curve(const typename C::Point hash_init_point): + Curve(const typename C::Point &hash_init_point): m_hash_init_point{hash_init_point} {}; @@ -97,6 +93,7 @@ class Curve //member variables public: + // kayabaNerve: this doesn't have a reference as doing so delays initialization and borks it const typename C::Point m_hash_init_point; }; //---------------------------------------------------------------------------------------------------------------------- diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 311d673de5e..9092ed68dcf 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -35,31 +35,31 @@ // CurveTreesUnitTest helpers //---------------------------------------------------------------------------------------------------------------------- template -static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const C &curve, - const std::size_t child_layer_size, +static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const bool update_last_parent, const std::size_t parent_layer_size, - const std::size_t chunk_width, - const typename C::Scalar &last_child, - const typename C::Point &last_parent) + const typename C::Point &last_parent, + const typename C::Scalar &last_child) { - CHECK_AND_ASSERT_THROW_MES(child_layer_size > 0, "empty child layer"); - CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer"); + if (update_last_parent) + CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer"); - const std::size_t child_offset = child_layer_size % chunk_width; + // If updating last parent, the next start will be the last parent's index, else we start at the tip + const std::size_t next_start_child_chunk_index = update_last_parent + ? 
(parent_layer_size - 1) + : parent_layer_size; return fcmp::curve_trees::LastChunkData{ - .child_offset = child_offset, - .last_child = last_child, - .last_parent = last_parent, - .child_layer_size = child_layer_size, - .parent_layer_size = parent_layer_size + .next_start_child_chunk_index = next_start_child_chunk_index, + .last_parent = last_parent, + .update_last_parent = update_last_parent, + .last_child = last_child }; } //---------------------------------------------------------------------------------------------------------------------- -template -static bool validate_layer(const C_PARENT &c_parent, - const CurveTreesUnitTest::Layer &parents, - const std::vector &child_scalars, +template +static bool validate_layer(const C &curve, + const CurveTreesUnitTest::Layer &parents, + const std::vector &child_scalars, const std::size_t max_chunk_size) { // Hash chunk of children scalars, then see if the hash matches up to respective parent @@ -70,15 +70,20 @@ static bool validate_layer(const C_PARENT &c_parent, const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); - const typename C_PARENT::Point &parent = parents[i]; + const typename C::Point &parent = parents[i]; const auto chunk_start = child_scalars.data() + chunk_start_idx; - const typename C_PARENT::Chunk chunk{chunk_start, chunk_size}; + const typename C::Chunk chunk{chunk_start, chunk_size}; + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing " << curve.to_string(chunk_start[i])); + + const typename C::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve, chunk); - const typename C_PARENT::Point chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk); + MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve.to_string(chunk_hash)); - const auto actual_bytes = 
c_parent.to_bytes(parent); - const auto expected_bytes = c_parent.to_bytes(chunk_hash); + const auto actual_bytes = curve.to_bytes(parent); + const auto expected_bytes = curve.to_bytes(chunk_hash); CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); chunk_start_idx += chunk_size; @@ -104,6 +109,10 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni CurveTreesV1::LastChunks last_chunks; + // Since leaf layer is append-only, we know the next start will be right after all existing leaf tuple + const std::size_t num_leaf_tuples = leaves.size() * CurveTreesV1::LEAF_TUPLE_SIZE; + last_chunks.next_start_leaf_index = num_leaf_tuples; + if (c2_layers.empty()) return last_chunks; @@ -114,12 +123,13 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni c2_last_chunks_out.reserve(c2_layers.size()); // First push the last leaf chunk data into c2 chunks - auto last_leaf_chunk = get_last_child_layer_chunk(m_curve_trees.m_c2, - /*child_layer_size */ leaves.size() * CurveTreesV1::LEAF_TUPLE_SIZE, - /*parent_layer_size*/ c2_layers[0].size(), - /*chunk_width */ m_curve_trees.m_leaf_layer_chunk_width, - /*last_child */ leaves.back().C_x, - /*last_parent */ c2_layers[0].back()); + const bool update_last_parent = (num_leaf_tuples % m_curve_trees.m_leaf_layer_chunk_width) > 0; + auto last_leaf_chunk = get_last_child_layer_chunk( + /*update_last_parent*/ update_last_parent, + /*parent_layer_size */ c2_layers[0].size(), + /*last_parent */ c2_layers[0].back(), + // Since the leaf layer is append-only, we'll never need access to the last child + /*last_child */ m_curve_trees.m_c2.zero_scalar()); c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); @@ -149,12 +159,10 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni const auto &last_child = m_curve_trees.m_c2.point_to_cycle_scalar(child_layer.back()); - auto last_parent_chunk = 
get_last_child_layer_chunk(m_curve_trees.m_c1, - child_layer.size(), + auto last_parent_chunk = get_last_child_layer_chunk(update_last_parent, parent_layer.size(), - m_curve_trees.m_c1_width, - last_child, - parent_layer.back()); + parent_layer.back(), + last_child); c1_last_chunks_out.push_back(std::move(last_parent_chunk)); @@ -170,12 +178,10 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni const auto &last_child = m_curve_trees.m_c1.point_to_cycle_scalar(child_layer.back()); - auto last_parent_chunk = get_last_child_layer_chunk(m_curve_trees.m_c2, - child_layer.size(), + auto last_parent_chunk = get_last_child_layer_chunk(update_last_parent, parent_layer.size(), - m_curve_trees.m_c2_width, - last_child, - parent_layer.back()); + parent_layer.back(), + last_child); c2_last_chunks_out.push_back(std::move(last_parent_chunk)); @@ -303,6 +309,8 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) // TODO: implement templated function for below if statement if (parent_is_c2) { + MDEBUG("Validating parent c2 layer " << c2_idx << " , child c1 layer " << c1_idx); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); @@ -328,6 +336,8 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) } else { + MDEBUG("Validating parent c1 layer " << c1_idx << " , child c2 layer " << c2_idx); + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); @@ -356,6 +366,8 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) parent_is_c2 = !parent_is_c2; } + MDEBUG("Validating leaves"); + // Now validate leaves return validate_layer(m_curve_trees.m_c2, c2_layers[0], @@ -384,11 +396,10 @@ void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_ch const 
fcmp::curve_trees::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; - MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child) - << " , last_parent: " << m_curve_trees.m_c2.to_string(last_chunk.last_parent) - << " , child_layer_size: " << last_chunk.child_layer_size - << " , parent_layer_size: " << last_chunk.parent_layer_size); + MDEBUG("next_start_child_chunk_index: " << last_chunk.next_start_child_chunk_index + << " , last_parent: " << m_curve_trees.m_c2.to_string(last_chunk.last_parent) + << " , update_last_parent: " << last_chunk.update_last_parent + << " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child)); ++c2_idx; } @@ -398,11 +409,10 @@ void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_ch const fcmp::curve_trees::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; - MDEBUG("child_offset: " << last_chunk.child_offset - << " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child) - << " , last_parent: " << m_curve_trees.m_c1.to_string(last_chunk.last_parent) - << " , child_layer_size: " << last_chunk.child_layer_size - << " , parent_layer_size: " << last_chunk.parent_layer_size); + MDEBUG("next_start_child_chunk_index: " << last_chunk.next_start_child_chunk_index + << " , last_parent: " << m_curve_trees.m_c1.to_string(last_chunk.last_parent) + << " , update_last_parent: " << last_chunk.update_last_parent + << " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child)); ++c1_idx; } @@ -445,7 +455,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) - MDEBUG("Hash idx: " << (j + c2_layer.start_idx) << " , hash: " + MDEBUG("Child chunk start idx: " << (j + c2_layer.start_idx) << " , hash: " << m_curve_trees.m_c2.to_string(c2_layer.hashes[j])); ++c2_idx; @@ 
-458,7 +468,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) - MDEBUG("Hash idx: " << (j + c1_layer.start_idx) << " , hash: " + MDEBUG("Child chunk start idx: " << (j + c1_layer.start_idx) << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer.hashes[j])); ++c1_idx; @@ -497,7 +507,7 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) - MDEBUG("Hash idx: " << j << " , hash: " << m_curve_trees.m_c2.to_string(c2_layer[j])); + MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c2.to_string(c2_layer[j])); ++c2_idx; } @@ -509,7 +519,7 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) - MDEBUG("Hash idx: " << j << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer[j])); + MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer[j])); ++c1_idx; } @@ -543,7 +553,7 @@ const std::vector generate_random_leaves(const CurveTre return tuples; } //---------------------------------------------------------------------------------------------------------------------- -static void grow_tree(CurveTreesV1 &curve_trees, +static bool grow_tree(CurveTreesV1 &curve_trees, CurveTreesUnitTest &curve_trees_accessor, const std::size_t num_leaves, CurveTreesUnitTest::Tree &tree_inout) @@ -566,7 +576,7 @@ static void grow_tree(CurveTreesV1 &curve_trees, curve_trees_accessor.log_tree(tree_inout); // Validate tree structure and all hashes - ASSERT_TRUE(curve_trees_accessor.validate_tree(tree_inout)); + return curve_trees_accessor.validate_tree(tree_inout); } 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- @@ -617,13 +627,11 @@ TEST(curve_trees, grow_tree) { for (const std::size_t ext_leaves : N_LEAVES) { - // Tested reverse order already - if (ext_leaves < init_leaves) - continue; - // Only test 3rd layer once because it's a huge test if (init_leaves > 1 && ext_leaves == NEED_3_LAYERS) continue; + if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS) + continue; LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); @@ -632,21 +640,25 @@ TEST(curve_trees, grow_tree) // Initialize global tree with `init_leaves` MDEBUG("Adding " << init_leaves << " leaves to tree"); - grow_tree(curve_trees, + bool res = grow_tree(curve_trees, curve_trees_accessor, init_leaves, global_tree); + ASSERT_TRUE(res); + MDEBUG("Successfully added initial " << init_leaves << " leaves to tree"); // Then extend the global tree by `ext_leaves` MDEBUG("Extending tree by " << ext_leaves << " leaves"); - grow_tree(curve_trees, + res = grow_tree(curve_trees, curve_trees_accessor, ext_leaves, global_tree); + ASSERT_TRUE(res); + MDEBUG("Successfully extended by " << ext_leaves << " leaves"); } } From 1ffde095c8c1b599ae2812050e95e369cc5a32d1 Mon Sep 17 00:00:00 2001 From: j-berman Date: Sat, 25 May 2024 12:37:32 -0700 Subject: [PATCH 025/127] Implement get_tree_last_chunks in db, can now extend tree in db --- src/blockchain_db/lmdb/db_lmdb.cpp | 260 ++++++++++++++++++++++------- src/blockchain_db/lmdb/db_lmdb.h | 3 + src/fcmp/curve_trees.h | 7 +- 3 files changed, 211 insertions(+), 59 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 1a4be122928..7a40c581383 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -358,10 
+358,11 @@ typedef struct outtx { uint64_t local_index; } outtx; -typedef struct layer_val { +template +struct layer_val { uint64_t child_chunk_idx; - std::array child_chunk_hash; -} layer_val; + typename C::Point child_chunk_hash; +}; std::atomic mdb_txn_safe::num_active_txns{0}; std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT; @@ -1300,6 +1301,90 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } +void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::vector &new_leaves) +{ + // TODO: block_wtxn_start like pop_block, then call BlockchainDB::grow_tree + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(leaves) + + // Read every layer's last chunk data + const auto last_chunks = this->get_tree_last_chunks(curve_trees); + + // Using the last chunk data and new leaves, get a struct we can use to extend the tree + const auto tree_extension = curve_trees.get_tree_extension(last_chunks, new_leaves); + + // Insert the leaves + // TODO: grow_leaves + const auto &leaves = tree_extension.leaves; + for (std::size_t i = 0; i < leaves.tuples.size(); ++i) + { + MDB_val_copy k(i + leaves.start_idx); + MDB_val_set(v, leaves.tuples[i]); + + // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. + // Adding MDB_NOOVERWRITE I assume re-introduces a key comparison. 
Benchmark NOOVERWRITE here + // MDB_NOOVERWRITE makes sure key doesn't already exist + int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPENDDUP | MDB_NOOVERWRITE); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + } + + // Grow the layers + // TODO: grow_layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + const std::size_t layer_idx = c2_idx + c1_idx; + + if (use_c2) + { + if (layer_idx % 2 != 0) + throw0(DB_ERROR(lmdb_error("Growing odd c2 layer, expected even layer idx for c2: ", layer_idx).c_str())); + + const auto *c2_last_chunk_ptr = (c2_idx >= last_chunks.c2_last_chunks.size()) + ? nullptr + : &last_chunks.c2_last_chunks[c2_idx]; + + this->grow_layer(curve_trees.m_c2, + c2_extensions, + c2_idx, + layer_idx, + c2_last_chunk_ptr); + + ++c2_idx; + } + else + { + if (layer_idx % 2 == 0) + throw0(DB_ERROR(lmdb_error("Growing even c1 layer, expected odd layer idx for c2: ", layer_idx).c_str())); + + const auto *c1_last_chunk_ptr = (c1_idx >= last_chunks.c1_last_chunks.size()) + ? 
nullptr + : &last_chunks.c1_last_chunks[c1_idx]; + + this->grow_layer(curve_trees.m_c1, + c1_extensions, + c1_idx, + layer_idx, + c1_last_chunk_ptr); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + template void BlockchainLMDB::grow_layer(const C &curve, const std::vector> &layer_extensions, @@ -1318,17 +1403,17 @@ void BlockchainLMDB::grow_layer(const C &curve, CHECK_AND_ASSERT_THROW_MES(!ext.hashes.empty(), "empty layer extension"); - // TODO: make sure last_chunk_ptr->next_start_child_chunk_index lines up + // TODO: make sure last_chunk_ptr->next_start_child_chunk_idx lines up - MDB_val_copy k(layer_idx); + MDB_val_copy k(layer_idx); const bool update_last_parent = last_chunk_ptr != nullptr && last_chunk_ptr->update_last_parent; if (update_last_parent) { // We updated the last hash, so update it - layer_val lv; + layer_val lv; lv.child_chunk_idx = ext.start_idx; - lv.child_chunk_hash = curve.to_bytes(ext.hashes.front()); + lv.child_chunk_hash = ext.hashes.front(); MDB_val_set(v, lv); // We expect to overwrite the existing hash @@ -1341,9 +1426,9 @@ void BlockchainLMDB::grow_layer(const C &curve, // Now add all the new hashes found in the extension for (std::size_t i = update_last_parent ? 1 : 0; i < ext.hashes.size(); ++i) { - layer_val lv; + layer_val lv; lv.child_chunk_idx = i + ext.start_idx; - lv.child_chunk_hash = curve.to_bytes(ext.hashes[i]); + lv.child_chunk_hash = ext.hashes[i]; MDB_val_set(v, lv); // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. 
@@ -1355,78 +1440,141 @@ void BlockchainLMDB::grow_layer(const C &curve, } } -void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) +template +static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const bool update_last_parent, + const std::size_t parent_layer_size, + const typename C::Point &last_parent, + const typename C::Scalar &last_child) +{ + if (update_last_parent) + CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer"); + + // If updating last parent, the next start will be the last parent's index, else we start at the tip + const std::size_t next_start_child_chunk_index = update_last_parent + ? (parent_layer_size - 1) + : parent_layer_size; + + return fcmp::curve_trees::LastChunkData{ + .next_start_child_chunk_index = next_start_child_chunk_index, + .last_parent = last_parent, + .update_last_parent = update_last_parent, + .last_child = last_child + }; +} + +fcmp::curve_trees::CurveTreesV1::LastChunks BlockchainLMDB::get_tree_last_chunks( + const fcmp::curve_trees::CurveTreesV1 &curve_trees) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); - mdb_txn_cursors *m_cursors = &m_wcursors; - CURSOR(leaves) + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + RCURSOR(layers) - // TODO: read every layer's last chunks - const auto last_chunks = fcmp::curve_trees::CurveTreesV1::LastChunks{}; + fcmp::curve_trees::CurveTreesV1::LastChunks last_chunks; - const auto tree_extension = curve_trees.get_tree_extension(last_chunks, new_leaves); + // Get the number of leaves in the tree + std::uint64_t num_leaf_tuples = 0; + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_LAST); + if (result == MDB_NOTFOUND) + num_leaf_tuples = 0; + else if (result == MDB_SUCCESS) + num_leaf_tuples = (1 + (*(const std::size_t*)k.mv_data)) * fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + else + throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", 
result).c_str())); + } + last_chunks.next_start_leaf_index = num_leaf_tuples; - // Insert the leaves - const auto &leaves = tree_extension.leaves; - for (std::size_t i = 0; i < leaves.tuples.size(); ++i) + MDEBUG(num_leaf_tuples << " total leaves in the tree"); + + // Now set the last chunk data from each layer + auto &c1_last_chunks_out = last_chunks.c1_last_chunks; + auto &c2_last_chunks_out = last_chunks.c2_last_chunks; + + // Check if we'll need to update the last parent in each layer + const bool update_last_parent = (num_leaf_tuples % curve_trees.m_leaf_layer_chunk_width) > 0; + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer + std::size_t layer_idx = 0; + while (1) { - MDB_val_copy k(i + leaves.start_idx); - MDB_val_set(v, leaves.tuples[i]); + MDB_val_copy k(layer_idx); + MDB_val v; - // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. - // Adding MDB_NOOVERWRITE I assume re-introduces a key comparison. Benchmark NOOVERWRITE here - // MDB_NOOVERWRITE makes sure key doesn't already exist - int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPENDDUP | MDB_NOOVERWRITE); + // Get the first record in a layer so we can then get the last record + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + break; if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - } + throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); - // Grow the layers - const auto &c2_extensions = tree_extension.c2_layer_extensions; - const auto &c1_extensions = tree_extension.c1_layer_extensions; - CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + // TODO: why can't I just use MDB_LAST_DUP once and get the last record? 
- bool use_c2 = true; - std::size_t c2_idx = 0; - std::size_t c1_idx = 0; - for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) - { - const std::size_t layer_idx = c2_idx + c1_idx; + // Get the last record in a layer + result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last record in layer: ", result).c_str())); + + // First push the last leaf chunk data into c2 chunks + if (layer_idx == 0) + { + const auto *lv = (layer_val *)v.mv_data; + MDEBUG("Selene, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); + auto last_leaf_chunk = get_last_child_layer_chunk( + /*update_last_parent*/ update_last_parent, + /*parent_layer_size */ lv->child_chunk_idx + 1, + /*last_parent */ lv->child_chunk_hash, + // Since the leaf layer is append-only, we'll never need access to the last child + /*last_child */ curve_trees.m_c2.zero_scalar()); + + c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); + + ++layer_idx; + continue; + } + + // Then push last chunk data from subsequent layers, alternating c1 -> c2 -> c1 -> ... + // TODO: template below if statement + const bool use_c2 = (layer_idx % 2) == 0; if (use_c2) { - const auto *c2_last_chunk_ptr = (c2_idx >= last_chunks.c2_last_chunks.size()) - ? 
nullptr - : &last_chunks.c2_last_chunks[c2_idx]; + const auto *lv = (layer_val *)v.mv_data; + MDEBUG("Selene, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - this->grow_layer(curve_trees.m_c2, - c2_extensions, - c2_idx, - layer_idx, - c2_last_chunk_ptr); + const auto &last_child = curve_trees.m_c1.point_to_cycle_scalar(c1_last_chunks_out.back().last_parent); - ++c2_idx; + auto last_parent_chunk = get_last_child_layer_chunk( + update_last_parent, + lv->child_chunk_idx + 1, + lv->child_chunk_hash, + last_child); + + c2_last_chunks_out.push_back(std::move(last_parent_chunk)); } else { - const auto *c1_last_chunk_ptr = (c1_idx >= last_chunks.c1_last_chunks.size()) - ? nullptr - : &last_chunks.c1_last_chunks[c1_idx]; + const auto *lv = (layer_val *)v.mv_data; + MDEBUG("Helios, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - this->grow_layer(curve_trees.m_c1, - c1_extensions, - c1_idx, - layer_idx, - c1_last_chunk_ptr); + const auto &last_child = curve_trees.m_c2.point_to_cycle_scalar(c2_last_chunks_out.back().last_parent); - ++c1_idx; + auto last_parent_chunk = get_last_child_layer_chunk( + update_last_parent, + lv->child_chunk_idx + 1, + lv->child_chunk_hash, + last_child); + + c1_last_chunks_out.push_back(std::move(last_parent_chunk)); } - use_c2 = !use_c2; + ++layer_idx; } + + return last_chunks; } BlockchainLMDB::~BlockchainLMDB() diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 01fbcdf88e3..9ddd3f2e4de 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -417,6 +417,9 @@ class BlockchainLMDB : public BlockchainDB const std::size_t layer_idx, const fcmp::curve_trees::LastChunkData *last_chunk_data); + fcmp::curve_trees::CurveTreesV1::LastChunks get_tree_last_chunks( + const fcmp::curve_trees::CurveTreesV1 &curve_trees) const; + uint64_t num_outputs() const; // Hard fork diff --git a/src/fcmp/curve_trees.h 
b/src/fcmp/curve_trees.h index 349ff92ada6..d40f7a916a9 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -164,14 +164,15 @@ class CurveTrees const C1 &m_c1; const C2 &m_c2; + // The leaf layer has a distinct chunk width than the other layers + // TODO: public function for update_last_parent, and make this private + const std::size_t m_leaf_layer_chunk_width; + //member variables private: // The chunk widths of the layers in the tree tied to each curve const std::size_t m_c1_width; const std::size_t m_c2_width; - - // The leaf layer has a distinct chunk width than the other layers - const std::size_t m_leaf_layer_chunk_width; }; //---------------------------------------------------------------------------------------------------------------------- using Helios = tower_cycle::Helios; From 6045357f6a0d0d576744953f88d78dcd1a831ac1 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 27 May 2024 12:25:37 -0700 Subject: [PATCH 026/127] implement db->audit_tree, and flesh out db test to init/extend tree --- src/blockchain_db/blockchain_db.h | 3 + src/blockchain_db/lmdb/db_lmdb.cpp | 237 ++++++++++++++++++++++++++++- src/blockchain_db/lmdb/db_lmdb.h | 10 ++ src/blockchain_db/testdb.h | 1 + src/fcmp/curve_trees.h | 4 - tests/unit_tests/blockchain_db.cpp | 82 ++++++++-- 6 files changed, 315 insertions(+), 22 deletions(-) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index b5224127e07..ef42ce41fe3 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1769,6 +1769,9 @@ class BlockchainDB virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves) = 0; + // TODO: description + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const = 0; + // // Hard fork related storage // diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 7a40c581383..4434781afe2 100644 --- 
a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -361,6 +361,7 @@ typedef struct outtx { template struct layer_val { uint64_t child_chunk_idx; + // TODO: use compressed 32 byte point; also need a from_bytes implemented on rust side typename C::Point child_chunk_hash; }; @@ -1345,11 +1346,13 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) { const std::size_t layer_idx = c2_idx + c1_idx; + MDEBUG("Growing layer " << layer_idx); if (use_c2) { if (layer_idx % 2 != 0) - throw0(DB_ERROR(lmdb_error("Growing odd c2 layer, expected even layer idx for c2: ", layer_idx).c_str())); + throw0(DB_ERROR(("Growing odd c2 layer, expected even layer idx for c1: " + + std::to_string(layer_idx)).c_str())); const auto *c2_last_chunk_ptr = (c2_idx >= last_chunks.c2_last_chunks.size()) ? nullptr @@ -1366,7 +1369,8 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree else { if (layer_idx % 2 == 0) - throw0(DB_ERROR(lmdb_error("Growing even c1 layer, expected odd layer idx for c2: ", layer_idx).c_str())); + throw0(DB_ERROR(("Growing even c1 layer, expected odd layer idx for c2: " + + std::to_string(layer_idx)).c_str())); const auto *c1_last_chunk_ptr = (c1_idx >= last_chunks.c1_last_chunks.size()) ? 
nullptr @@ -1488,7 +1492,7 @@ fcmp::curve_trees::CurveTreesV1::LastChunks BlockchainLMDB::get_tree_last_chunks } last_chunks.next_start_leaf_index = num_leaf_tuples; - MDEBUG(num_leaf_tuples << " total leaves in the tree"); + MDEBUG(num_leaf_tuples << " total leaf tuples in the tree"); // Now set the last chunk data from each layer auto &c1_last_chunks_out = last_chunks.c1_last_chunks; @@ -1574,9 +1578,236 @@ fcmp::curve_trees::CurveTreesV1::LastChunks BlockchainLMDB::get_tree_last_chunks ++layer_idx; } + TXN_POSTFIX_RDONLY(); + return last_chunks; } +bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + RCURSOR(layers) + + // Check chunks of leaves hash into first layer as expected + std::size_t layer_idx = 0; + std::size_t child_chunk_idx = 0; + MDB_cursor_op leaf_op = MDB_FIRST; + while (1) + { + // Get next leaf chunk + std::vector leaf_tuples_chunk; + leaf_tuples_chunk.reserve(curve_trees.m_c2_width); + + // Iterate until chunk is full or we get to the end of all leaves + while (1) + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); + leaf_op = MDB_NEXT; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + + const auto leaf = *(fcmp::curve_trees::CurveTreesV1::LeafTuple *)v.mv_data; + leaf_tuples_chunk.push_back(leaf); + + if (leaf_tuples_chunk.size() == curve_trees.m_c2_width) + break; + } + + // Get the actual leaf chunk hash from the db + MDB_val_copy k_parent(layer_idx); + MDB_val_set(v_parent, child_chunk_idx); + + MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx); + int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, MDB_GET_BOTH); + + // Check end condition: no more leaf tuples in the leaf layer + if (leaf_tuples_chunk.empty()) + { + // No more 
leaves, expect to be done with parent chunks as well + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected leaf chunk parent result found at child_chunk_idx " + + std::to_string(child_chunk_idx), result).c_str())); + + MDEBUG("Successfully audited leaf layer"); + break; + } + + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); + + // Get the expected leaf chunk hash + const auto leaves = curve_trees.flatten_leaves(leaf_tuples_chunk); + const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; + + // Hash the chunk of leaves + for (std::size_t i = 0; i < leaves.size(); ++i) + MDEBUG("Hashing " << curve_trees.m_c2.to_string(leaves[i])); + + const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve_trees.m_c2, chunk); + MDEBUG("chunk_hash " << curve_trees.m_c2.to_string(chunk_hash) << " (" << leaves.size() << " leaves)"); + + // Now compare to value from the db + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual leaf chunk hash " << curve_trees.m_c2.to_string(lv->child_chunk_hash)); + + const auto expected_bytes = curve_trees.m_c2.to_bytes(chunk_hash); + const auto actual_bytes = curve_trees.m_c2.to_bytes(lv->child_chunk_hash); + CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); + + ++child_chunk_idx; + } + + // Traverse up the tree auditing each layer until we've audited every layer in the tree + while (1) + { + // Alternate starting with c1 as parent (we already audited c2 leaf parents), then c2 as parent, then c1, etc. 
+ const bool parent_is_c1 = layer_idx % 2 == 0; + if (parent_is_c1) + { + if (this->audit_layer( + /*c_child*/ curve_trees.m_c2, + /*c_parent*/ curve_trees.m_c1, + layer_idx, + /*child_start_idx*/ 0, + /*child_chunk_idx*/ 0, + /*chunk_width*/ curve_trees.m_c1_width)) + { + break; + } + } + else + { + if (this->audit_layer( + /*c_child*/ curve_trees.m_c1, + /*c_parent*/ curve_trees.m_c2, + layer_idx, + /*child_start_idx*/ 0, + /*child_chunk_idx*/ 0, + /*chunk_width*/ curve_trees.m_c2_width)) + { + break; + } + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return true; +} + +template +bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, + const C_PARENT &c_parent, + const std::size_t layer_idx, + const std::size_t child_start_idx, + const std::size_t child_chunk_idx, + const std::size_t chunk_width) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + MDEBUG("Auditing layer " << layer_idx << " at child_start_idx " << child_start_idx + << " and child_chunk_idx " << child_chunk_idx); + + // Get next child chunk + std::vector child_chunk; + child_chunk.reserve(chunk_width); + + MDB_val_copy k_child(layer_idx); + MDB_val_set(v_child, child_start_idx); + MDB_cursor_op op_child = MDB_GET_BOTH; + while (1) + { + int result = mdb_cursor_get(m_cur_layers, &k_child, &v_child, op_child); + op_child = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); + + const auto *child = (layer_val*)v_child.mv_data; + child_chunk.push_back(child->child_chunk_hash); + + if (child_chunk.size() == chunk_width) + break; + } + + // Get the actual chunk hash from the db + const std::size_t parent_layer_idx = layer_idx + 1; + MDB_val_copy k_parent(parent_layer_idx); + MDB_val_set(v_parent, child_chunk_idx); + + // Check for end conditions + // End condition A (return false): finished auditing layer and ready to move up 
a layer + // End condition B (return true): finished auditing the tree, no more layers remaining + int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, MDB_GET_BOTH); + + // End condition A: check if finished auditing this layer + if (child_chunk.empty()) + { + // No more children, expect to be done auditing layer and ready to move up a layer + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx), result).c_str())); + + MDEBUG("Finished auditing layer " << layer_idx); + TXN_POSTFIX_RDONLY(); + return false; + } + + // End condition B: check if finished auditing the tree + if (child_chunk_idx == 0 && child_chunk.size() == 1 && result == MDB_NOTFOUND) + { + MDEBUG("Encountered root at layer_idx " << layer_idx); + TXN_POSTFIX_RDONLY(); + return true; + } + + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str())); + + // Get the expected chunk hash + std::vector child_scalars; + child_scalars.reserve(child_chunk.size()); + for (const auto &child : child_chunk) + child_scalars.emplace_back(c_child.point_to_cycle_scalar(child)); + const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; + + for (std::size_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Hashing " << c_parent.to_string(child_scalars[i])); + + const auto chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk); + MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " (" << child_scalars.size() << " children)"); + + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual chunk hash " << c_parent.to_string(lv->child_chunk_hash)); + + const auto actual_bytes = c_parent.to_bytes(lv->child_chunk_hash); + const auto expected_bytes = c_parent.to_bytes(chunk_hash); + if (actual_bytes != expected_bytes) + throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + 
std::to_string(child_chunk_idx)).c_str())); + + // TODO: use while (1) for iterative pattern, don't use recursion + return this->audit_layer(c_child, + c_parent, + layer_idx, + child_start_idx + child_chunk.size(), + child_chunk_idx + 1, + chunk_width); +} + BlockchainLMDB::~BlockchainLMDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 9ddd3f2e4de..cd43b2ca419 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -367,6 +367,8 @@ class BlockchainLMDB : public BlockchainDB virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves); + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const; + private: void do_resize(uint64_t size_increase=0); @@ -420,6 +422,14 @@ class BlockchainLMDB : public BlockchainDB fcmp::curve_trees::CurveTreesV1::LastChunks get_tree_last_chunks( const fcmp::curve_trees::CurveTreesV1 &curve_trees) const; + template + bool audit_layer(const C_CHILD &c_child, + const C_PARENT &c_parent, + const std::size_t layer_idx, + const std::size_t child_start_idx, + const std::size_t child_chunk_idx, + const std::size_t chunk_width) const; + uint64_t num_outputs() const; // Hard fork diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index f05338e1f30..618b102d161 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -118,6 +118,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void remove_spent_key(const crypto::key_image& k_image) override {} virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves) override {}; + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const override { return false; }; virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const 
uint64_t&, std::function) const override { return true; } diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index d40f7a916a9..1b8838cd767 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -153,8 +153,6 @@ class CurveTrees TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, const std::vector &new_leaf_tuples) const; -//private member functions -private: // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] std::vector flatten_leaves(const std::vector &leaves) const; @@ -168,8 +166,6 @@ class CurveTrees // TODO: public function for update_last_parent, and make this private const std::size_t m_leaf_layer_chunk_width; -//member variables -private: // The chunk widths of the layers in the tree tied to each curve const std::size_t m_c1_width; const std::size_t m_c2_width; diff --git a/tests/unit_tests/blockchain_db.cpp b/tests/unit_tests/blockchain_db.cpp index 6ab826bf0e0..bee4659b6b3 100644 --- a/tests/unit_tests/blockchain_db.cpp +++ b/tests/unit_tests/blockchain_db.cpp @@ -342,20 +342,9 @@ TYPED_TEST(BlockchainDBTest, RetrieveBlockData) ASSERT_HASH_EQ(get_block_hash(this->m_blocks[1].first), hashes[1]); } +// TODO: implement this in curve_trees.cpp, consider removing CurveTreesUnitTest class TYPED_TEST(BlockchainDBTest, GrowCurveTrees) { - boost::filesystem::path tempPath = boost::filesystem::temp_directory_path() / boost::filesystem::unique_path(); - std::string dirPath = tempPath.string(); - - this->set_prefix(dirPath); - - // make sure open does not throw - ASSERT_NO_THROW(this->m_db->open(dirPath)); - this->get_filenames(); - this->init_hard_fork(); - - db_wtxn_guard guard(this->m_db); - Helios helios; Selene selene; @@ -365,10 +354,73 @@ TYPED_TEST(BlockchainDBTest, GrowCurveTrees) HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); - // Grow tree by 1 leaf - ASSERT_NO_THROW(this->m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, 1))); + // Number of 
leaves for which x number of layers is required + const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH; + const std::size_t NEED_2_LAYERS = NEED_1_LAYER * HELIOS_CHUNK_WIDTH; + const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH; + + const std::vector N_LEAVES{ + // Basic tests + 1, + 2, + + // Test with number of leaves {-1,0,+1} relative to chunk width boundaries + NEED_1_LAYER-1, + NEED_1_LAYER, + NEED_1_LAYER+1, + + NEED_2_LAYERS-1, + NEED_2_LAYERS, + NEED_2_LAYERS+1, + + NEED_3_LAYERS, + }; + + for (const std::size_t init_leaves : N_LEAVES) + { + for (const std::size_t ext_leaves : N_LEAVES) + { + // Only test 3rd layer once because it's a huge test + if (init_leaves > 1 && ext_leaves == NEED_3_LAYERS) + continue; + if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS) + continue; + + boost::filesystem::path tempPath = boost::filesystem::temp_directory_path() / boost::filesystem::unique_path(); + std::string dirPath = tempPath.string(); + + this->set_prefix(dirPath); + + ASSERT_NO_THROW(this->m_db->open(dirPath)); + + this->get_filenames(); + auto hardfork = HardFork(*this->m_db, 1, 0); + this->m_db->set_hard_fork(&hardfork); + + { + db_wtxn_guard guard(this->m_db); - // TODO: Validate the tree + LOG_PRINT_L0("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); + + ASSERT_NO_THROW(this->m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves))); + ASSERT_TRUE(this->m_db->audit_tree(curve_trees)); + + MDEBUG("Successfully added initial " << init_leaves << " leaves to tree, extending tree by " + << ext_leaves << " leaves"); + + ASSERT_NO_THROW(this->m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, ext_leaves))); + ASSERT_TRUE(this->m_db->audit_tree(curve_trees)); + + MDEBUG("Successfully extended by " << ext_leaves << " leaves"); + } + + ASSERT_NO_THROW(this->m_db->close()); + this->remove_files(); + this->m_db = new BlockchainLMDB(); + // WARNING: this->m_hardfork is 
now referencing a freed m_db + // TODO: make a cleaner test in curve_trees.cpp + } + } } } // anonymous namespace From 17b1f421c0b5d98954c5a6721034c7d55c6814ff Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 27 May 2024 14:54:35 -0700 Subject: [PATCH 027/127] cleaner lmdb test structure for curve trees --- tests/unit_tests/blockchain_db.cpp | 82 ------------------------- tests/unit_tests/curve_trees.cpp | 95 +++++++++++++++++++++-------- tests/unit_tests/curve_trees.h | 16 ++--- tests/unit_tests/unit_tests_utils.h | 55 +++++++++++++++++ 4 files changed, 131 insertions(+), 117 deletions(-) diff --git a/tests/unit_tests/blockchain_db.cpp b/tests/unit_tests/blockchain_db.cpp index bee4659b6b3..66219322e9e 100644 --- a/tests/unit_tests/blockchain_db.cpp +++ b/tests/unit_tests/blockchain_db.cpp @@ -39,7 +39,6 @@ #include "blockchain_db/blockchain_db.h" #include "blockchain_db/lmdb/db_lmdb.h" #include "cryptonote_basic/cryptonote_format_utils.h" -#include "curve_trees.h" using namespace cryptonote; using epee::string_tools::pod_to_hex; @@ -342,85 +341,4 @@ TYPED_TEST(BlockchainDBTest, RetrieveBlockData) ASSERT_HASH_EQ(get_block_hash(this->m_blocks[1].first), hashes[1]); } -// TODO: implement this in curve_trees.cpp, consider removing CurveTreesUnitTest class -TYPED_TEST(BlockchainDBTest, GrowCurveTrees) -{ - Helios helios; - Selene selene; - - auto curve_trees = CurveTreesV1( - helios, - selene, - HELIOS_CHUNK_WIDTH, - SELENE_CHUNK_WIDTH); - - // Number of leaves for which x number of layers is required - const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH; - const std::size_t NEED_2_LAYERS = NEED_1_LAYER * HELIOS_CHUNK_WIDTH; - const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH; - - const std::vector N_LEAVES{ - // Basic tests - 1, - 2, - - // Test with number of leaves {-1,0,+1} relative to chunk width boundaries - NEED_1_LAYER-1, - NEED_1_LAYER, - NEED_1_LAYER+1, - - NEED_2_LAYERS-1, - NEED_2_LAYERS, - NEED_2_LAYERS+1, - - NEED_3_LAYERS, - }; - - 
for (const std::size_t init_leaves : N_LEAVES) - { - for (const std::size_t ext_leaves : N_LEAVES) - { - // Only test 3rd layer once because it's a huge test - if (init_leaves > 1 && ext_leaves == NEED_3_LAYERS) - continue; - if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS) - continue; - - boost::filesystem::path tempPath = boost::filesystem::temp_directory_path() / boost::filesystem::unique_path(); - std::string dirPath = tempPath.string(); - - this->set_prefix(dirPath); - - ASSERT_NO_THROW(this->m_db->open(dirPath)); - - this->get_filenames(); - auto hardfork = HardFork(*this->m_db, 1, 0); - this->m_db->set_hard_fork(&hardfork); - - { - db_wtxn_guard guard(this->m_db); - - LOG_PRINT_L0("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); - - ASSERT_NO_THROW(this->m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves))); - ASSERT_TRUE(this->m_db->audit_tree(curve_trees)); - - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree, extending tree by " - << ext_leaves << " leaves"); - - ASSERT_NO_THROW(this->m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, ext_leaves))); - ASSERT_TRUE(this->m_db->audit_tree(curve_trees)); - - MDEBUG("Successfully extended by " << ext_leaves << " leaves"); - } - - ASSERT_NO_THROW(this->m_db->close()); - this->remove_files(); - this->m_db = new BlockchainLMDB(); - // WARNING: this->m_hardfork is now referencing a freed m_db - // TODO: make a cleaner test in curve_trees.cpp - } - } -} - } // anonymous namespace diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 9092ed68dcf..b37895a1226 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -28,7 +28,10 @@ #include "gtest/gtest.h" +#include "cryptonote_basic/cryptonote_format_utils.h" #include "curve_trees.h" +#include "misc_log_ex.h" +#include "unit_tests_utils.h" 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- @@ -579,6 +582,68 @@ static bool grow_tree(CurveTreesV1 &curve_trees, return curve_trees_accessor.validate_tree(tree_inout); } //---------------------------------------------------------------------------------------------------------------------- +static bool grow_tree_in_memory(const std::size_t init_leaves, + const std::size_t ext_leaves, + CurveTreesV1 &curve_trees, + CurveTreesUnitTest &curve_trees_accessor) +{ + LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); + + CurveTreesUnitTest::Tree global_tree; + + // Initialize global tree with `init_leaves` + MDEBUG("Adding " << init_leaves << " leaves to tree"); + + bool res = grow_tree(curve_trees, + curve_trees_accessor, + init_leaves, + global_tree); + + CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); + + MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); + + // Then extend the global tree by `ext_leaves` + MDEBUG("Extending tree by " << ext_leaves << " leaves"); + + res = grow_tree(curve_trees, + curve_trees_accessor, + ext_leaves, + global_tree); + + CHECK_AND_ASSERT_MES(res, false, "failed to extend tree in memory"); + + MDEBUG("Successfully extended by " << ext_leaves << " leaves in memory"); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +static bool grow_tree_db(const std::size_t init_leaves, + const std::size_t ext_leaves, + CurveTreesV1 &curve_trees, + unit_test::BlockchainLMDBTest &test_db) +{ + INIT_BLOCKCHAIN_LMDB_TEST_DB(); + + { + cryptonote::db_wtxn_guard guard(test_db.m_db); + + LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then 
extending by " << ext_leaves << " leaves"); + + test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees), false, "failed to add initial leaves to db"); + + MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " + << ext_leaves << " leaves"); + + test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, ext_leaves)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees), false, "failed to extend tree in db"); + + MDEBUG("Successfully extended tree in db by " << ext_leaves << " leaves"); + } + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // Test //---------------------------------------------------------------------------------------------------------------------- @@ -597,6 +662,7 @@ TEST(curve_trees, grow_tree) SELENE_CHUNK_WIDTH); CurveTreesUnitTest curve_trees_accessor{curve_trees}; + unit_test::BlockchainLMDBTest test_db; CHECK_AND_ASSERT_THROW_MES(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1"); CHECK_AND_ASSERT_THROW_MES(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1"); @@ -633,33 +699,8 @@ TEST(curve_trees, grow_tree) if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS) continue; - LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); - - CurveTreesUnitTest::Tree global_tree; - - // Initialize global tree with `init_leaves` - MDEBUG("Adding " << init_leaves << " leaves to tree"); - - bool res = grow_tree(curve_trees, - curve_trees_accessor, - init_leaves, - global_tree); - - ASSERT_TRUE(res); - - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree"); - - // Then extend the global tree by `ext_leaves` - MDEBUG("Extending tree by 
" << ext_leaves << " leaves"); - - res = grow_tree(curve_trees, - curve_trees_accessor, - ext_leaves, - global_tree); - - ASSERT_TRUE(res); - - MDEBUG("Successfully extended by " << ext_leaves << " leaves"); + ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees, curve_trees_accessor)); + ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); } } } diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index a89032f18ad..6d838ec1d66 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -30,12 +30,19 @@ #include "fcmp/curve_trees.h" #include "fcmp/tower_cycle.h" -#include "misc_log_ex.h" using Helios = fcmp::curve_trees::Helios; using Selene = fcmp::curve_trees::Selene; using CurveTreesV1 = fcmp::curve_trees::CurveTreesV1; +const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, + const std::size_t num_leaves); + +// https://github.com/kayabaNerve/fcmp-plus-plus/blob +// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 +const std::size_t HELIOS_CHUNK_WIDTH = 38; +const std::size_t SELENE_CHUNK_WIDTH = 18; + // Helper class that can access the private members of the CurveTrees class class CurveTreesUnitTest { @@ -75,10 +82,3 @@ class CurveTreesUnitTest CurveTreesV1 &m_curve_trees; }; -const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, - const std::size_t num_leaves); - -// https://github.com/kayabaNerve/fcmp-plus-plus/blob -// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 -const std::size_t HELIOS_CHUNK_WIDTH = 38; -const std::size_t SELENE_CHUNK_WIDTH = 18; diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 65da7bf884d..13a3c4b58bc 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -30,6 +30,12 @@ #pragma once +#include "gtest/gtest.h" + +#include "blockchain_db/blockchain_db.h" +#include 
"blockchain_db/lmdb/db_lmdb.h" +#include "misc_log_ex.h" + #include #include @@ -64,8 +70,57 @@ namespace unit_test private: std::atomic m_counter; }; + + class BlockchainLMDBTest + { + public: + BlockchainLMDBTest() : m_temp_db_dir(boost::filesystem::temp_directory_path().string() + "/monero-lmdb-tests/") + {} + + ~BlockchainLMDBTest() + { + delete m_db; + remove_files(); + } + + void init_new_db() + { + CHECK_AND_ASSERT_THROW_MES(this->m_db == nullptr, "expected nullptr m_db"); + this->m_db = new cryptonote::BlockchainLMDB(); + + const auto temp_db_path = boost::filesystem::unique_path(); + const std::string dir_path = m_temp_db_dir + temp_db_path.string(); + + MDEBUG("Creating test db at path " << dir_path); + ASSERT_NO_THROW(this->m_db->open(dir_path)); + } + + void init_hardfork(cryptonote::HardFork *hardfork) + { + hardfork->init(); + this->m_db->set_hard_fork(hardfork); + } + + void remove_files() + { + boost::filesystem::remove_all(m_temp_db_dir); + } + + cryptonote::BlockchainDB* m_db{nullptr}; + const std::string m_temp_db_dir; + }; } +#define INIT_BLOCKCHAIN_LMDB_TEST_DB() \ + test_db.init_new_db(); \ + auto hardfork = cryptonote::HardFork(*test_db.m_db, 1, 0); \ + test_db.init_hardfork(&hardfork); \ + auto scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ \ + ASSERT_NO_THROW(test_db.m_db->close()); \ + delete test_db.m_db; \ + test_db.m_db = nullptr; \ + }) + # define ASSERT_EQ_MAP(val, map, key) \ do { \ auto found = map.find(key); \ From c7c6c6afffc8b15819855106892e9e5c973c4ca0 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 27 May 2024 16:03:58 -0700 Subject: [PATCH 028/127] CurveTreesUnitTest -> CurveTreesGlobalTree class --- src/fcmp/curve_trees.h | 11 ++-- tests/unit_tests/curve_trees.cpp | 109 +++++++++++++++---------------- tests/unit_tests/curve_trees.h | 16 +++-- 3 files changed, 65 insertions(+), 71 deletions(-) diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 1b8838cd767..361b9ade231 100644 --- 
a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -34,8 +34,6 @@ #include -// forward declarations -class CurveTreesUnitTest; namespace fcmp @@ -52,7 +50,7 @@ typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_ch template struct LayerExtension final { - std::size_t start_idx; + std::size_t start_idx{0}; std::vector hashes; }; @@ -86,7 +84,6 @@ struct LastChunkData final template class CurveTrees { - friend class ::CurveTreesUnitTest; public: CurveTrees(const C1 &c1, const C2 &c2, const std::size_t c1_width, const std::size_t c2_width): m_c1{c1}, @@ -105,11 +102,11 @@ class CurveTrees struct LeafTuple final { // Output ed25519 point x-coordinate - typename C2::Scalar O_x; + const typename C2::Scalar O_x; // Key image generator x-coordinate - typename C2::Scalar I_x; + const typename C2::Scalar I_x; // Commitment x-coordinate - typename C2::Scalar C_x; + const typename C2::Scalar C_x; }; static const std::size_t LEAF_TUPLE_SIZE = 3; static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index b37895a1226..522021f5cb3 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -35,7 +35,7 @@ //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// CurveTreesUnitTest helpers +// CurveTreesGlobalTree helpers //---------------------------------------------------------------------------------------------------------------------- template static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const bool update_last_parent, @@ -61,7 +61,7 @@ static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const bool 
//---------------------------------------------------------------------------------------------------------------------- template static bool validate_layer(const C &curve, - const CurveTreesUnitTest::Layer &parents, + const CurveTreesGlobalTree::Layer &parents, const std::vector &child_scalars, const std::size_t max_chunk_size) { @@ -98,13 +98,13 @@ static bool validate_layer(const C &curve, } //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// CurveTreesUnitTest implementations +// CurveTreesGlobalTree implementations //---------------------------------------------------------------------------------------------------------------------- -CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUnitTest::Tree &tree) +CurveTreesV1::LastChunks CurveTreesGlobalTree::get_last_chunks() { - const auto &leaves = tree.leaves; - const auto &c1_layers = tree.c1_layers; - const auto &c2_layers = tree.c2_layers; + const auto &leaves = m_tree.leaves; + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), @@ -201,18 +201,17 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni return last_chunks; } //---------------------------------------------------------------------------------------------------------------------- -void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_extension, - CurveTreesUnitTest::Tree &tree_inout) +void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_extension) { // Add the leaves - const std::size_t init_num_leaves = 
tree_inout.leaves.size() * m_curve_trees.LEAF_TUPLE_SIZE; + const std::size_t init_num_leaves = m_tree.leaves.size() * m_curve_trees.LEAF_TUPLE_SIZE; CHECK_AND_ASSERT_THROW_MES(init_num_leaves == tree_extension.leaves.start_idx, "unexpected leaf start idx"); - tree_inout.leaves.reserve(tree_inout.leaves.size() + tree_extension.leaves.tuples.size()); + m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); for (const auto &leaf : tree_extension.leaves.tuples) { - tree_inout.leaves.emplace_back(CurveTreesV1::LeafTuple{ + m_tree.leaves.emplace_back(CurveTreesV1::LeafTuple{ .O_x = leaf.O_x, .I_x = leaf.I_x, .C_x = leaf.C_x @@ -237,11 +236,11 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); - CHECK_AND_ASSERT_THROW_MES(c2_idx <= tree_inout.c2_layers.size(), "missing c2 layer"); - if (tree_inout.c2_layers.size() == c2_idx) - tree_inout.c2_layers.emplace_back(Layer{}); + CHECK_AND_ASSERT_THROW_MES(c2_idx <= m_tree.c2_layers.size(), "missing c2 layer"); + if (m_tree.c2_layers.size() == c2_idx) + m_tree.c2_layers.emplace_back(Layer{}); - auto &c2_inout = tree_inout.c2_layers[c2_idx]; + auto &c2_inout = m_tree.c2_layers[c2_idx]; const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx); const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); @@ -263,11 +262,11 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); - CHECK_AND_ASSERT_THROW_MES(c1_idx <= tree_inout.c1_layers.size(), "missing c1 layer"); - if (tree_inout.c1_layers.size() == c1_idx) - tree_inout.c1_layers.emplace_back(Layer{}); + CHECK_AND_ASSERT_THROW_MES(c1_idx <= m_tree.c1_layers.size(), "missing c1 layer"); + if (m_tree.c1_layers.size() == c1_idx) + m_tree.c1_layers.emplace_back(Layer{}); - auto &c1_inout = 
tree_inout.c1_layers[c1_idx]; + auto &c1_inout = m_tree.c1_layers[c1_idx]; const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); @@ -287,11 +286,11 @@ void CurveTreesUnitTest::extend_tree(const CurveTreesV1::TreeExtension &tree_ext } } //---------------------------------------------------------------------------------------------------------------------- -bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) +bool CurveTreesGlobalTree::audit_tree() { - const auto &leaves = tree.leaves; - const auto &c1_layers = tree.c1_layers; - const auto &c2_layers = tree.c2_layers; + const auto &leaves = m_tree.leaves; + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree"); CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); @@ -380,7 +379,7 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree) //---------------------------------------------------------------------------------------------------------------------- // Logging helpers //---------------------------------------------------------------------------------------------------------------------- -void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) +void CurveTreesGlobalTree::log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) { const auto &c1_last_chunks = last_chunks.c1_last_chunks; const auto &c2_last_chunks = last_chunks.c2_last_chunks; @@ -424,7 +423,7 @@ void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_ch } } //---------------------------------------------------------------------------------------------------------------------- -void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) +void 
CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) { const auto &c1_extensions = tree_extension.c1_layer_extensions; const auto &c2_extensions = tree_extension.c2_layer_extensions; @@ -481,14 +480,14 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t } } //---------------------------------------------------------------------------------------------------------------------- -void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) +void CurveTreesGlobalTree::log_tree() { - LOG_PRINT_L1("Tree has " << tree.leaves.size() << " leaves, " - << tree.c1_layers.size() << " helios layers, " << tree.c2_layers.size() << " selene layers"); + MDEBUG("Tree has " << m_tree.leaves.size() << " leaves, " + << m_tree.c1_layers.size() << " helios layers, " << m_tree.c2_layers.size() << " selene layers"); - for (std::size_t i = 0; i < tree.leaves.size(); ++i) + for (std::size_t i = 0; i < m_tree.leaves.size(); ++i) { - const auto &leaf = tree.leaves[i]; + const auto &leaf = m_tree.leaves[i]; const auto O_x = m_curve_trees.m_c2.to_string(leaf.O_x); const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); @@ -500,13 +499,13 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) bool use_c2 = true; std::size_t c1_idx = 0; std::size_t c2_idx = 0; - for (std::size_t i = 0; i < (tree.c1_layers.size() + tree.c2_layers.size()); ++i) + for (std::size_t i = 0; i < (m_tree.c1_layers.size() + m_tree.c2_layers.size()); ++i) { if (use_c2) { - CHECK_AND_ASSERT_THROW_MES(c2_idx < tree.c2_layers.size(), "unexpected c2 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "unexpected c2 layer"); - const CurveTreesUnitTest::Layer &c2_layer = tree.c2_layers[c2_idx]; + const CurveTreesGlobalTree::Layer &c2_layer = m_tree.c2_layers[c2_idx]; MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) @@ -516,9 +515,9 @@ 
void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree) } else { - CHECK_AND_ASSERT_THROW_MES(c1_idx < tree.c1_layers.size(), "unexpected c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "unexpected c1 layer"); - const CurveTreesUnitTest::Layer &c1_layer = tree.c1_layers[c1_idx]; + const CurveTreesGlobalTree::Layer &c1_layer = m_tree.c1_layers[c1_idx]; MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) @@ -557,47 +556,45 @@ const std::vector generate_random_leaves(const CurveTre } //---------------------------------------------------------------------------------------------------------------------- static bool grow_tree(CurveTreesV1 &curve_trees, - CurveTreesUnitTest &curve_trees_accessor, - const std::size_t num_leaves, - CurveTreesUnitTest::Tree &tree_inout) + CurveTreesGlobalTree &global_tree, + const std::size_t num_leaves) { // Get the last chunk from each layer in the tree; empty if tree is empty - const auto last_chunks = curve_trees_accessor.get_last_chunks(tree_inout); + const auto last_chunks = global_tree.get_last_chunks(); - curve_trees_accessor.log_last_chunks(last_chunks); + global_tree.log_last_chunks(last_chunks); // Get a tree extension object to the existing tree using randomly generated leaves // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves const auto tree_extension = curve_trees.get_tree_extension(last_chunks, generate_random_leaves(curve_trees, num_leaves)); - curve_trees_accessor.log_tree_extension(tree_extension); + global_tree.log_tree_extension(tree_extension); // Use the tree extension to extend the existing tree - curve_trees_accessor.extend_tree(tree_extension, tree_inout); + global_tree.extend_tree(tree_extension); - curve_trees_accessor.log_tree(tree_inout); + global_tree.log_tree(); // Validate tree structure and all hashes - return 
curve_trees_accessor.validate_tree(tree_inout); + return global_tree.audit_tree(); } //---------------------------------------------------------------------------------------------------------------------- static bool grow_tree_in_memory(const std::size_t init_leaves, const std::size_t ext_leaves, - CurveTreesV1 &curve_trees, - CurveTreesUnitTest &curve_trees_accessor) + CurveTreesV1 &curve_trees) { - LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves"); + LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree in memory, then extending by " + << ext_leaves << " leaves"); - CurveTreesUnitTest::Tree global_tree; + CurveTreesGlobalTree global_tree(curve_trees); // Initialize global tree with `init_leaves` MDEBUG("Adding " << init_leaves << " leaves to tree"); bool res = grow_tree(curve_trees, - curve_trees_accessor, - init_leaves, - global_tree); + global_tree, + init_leaves); CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); @@ -607,9 +604,8 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, MDEBUG("Extending tree by " << ext_leaves << " leaves"); res = grow_tree(curve_trees, - curve_trees_accessor, - ext_leaves, - global_tree); + global_tree, + ext_leaves); CHECK_AND_ASSERT_MES(res, false, "failed to extend tree in memory"); @@ -661,7 +657,6 @@ TEST(curve_trees, grow_tree) HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); - CurveTreesUnitTest curve_trees_accessor{curve_trees}; unit_test::BlockchainLMDBTest test_db; CHECK_AND_ASSERT_THROW_MES(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1"); @@ -699,7 +694,7 @@ TEST(curve_trees, grow_tree) if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS) continue; - ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees, curve_trees_accessor)); + ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees)); ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); } } diff --git 
a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 6d838ec1d66..6342497cee8 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -43,11 +43,12 @@ const std::vector generate_random_leaves(const CurveTre const std::size_t HELIOS_CHUNK_WIDTH = 38; const std::size_t SELENE_CHUNK_WIDTH = 18; -// Helper class that can access the private members of the CurveTrees class -class CurveTreesUnitTest +// Helper class to read/write a global tree in memory. It's only used in testing because normally the tree isn't kept +// in memory (it's stored in the db) +class CurveTreesGlobalTree { public: - CurveTreesUnitTest(CurveTreesV1 &curve_trees): m_curve_trees(curve_trees) {}; + CurveTreesGlobalTree(CurveTreesV1 &curve_trees): m_curve_trees(curve_trees) {}; //member structs public: @@ -65,20 +66,21 @@ class CurveTreesUnitTest //public member functions public: // Read the in-memory tree and get data from last chunks from each layer - CurveTreesV1::LastChunks get_last_chunks(const Tree &tree); + CurveTreesV1::LastChunks get_last_chunks(); // Use the tree extension to extend the in-memory tree - void extend_tree(const CurveTreesV1::TreeExtension &tree_extension, Tree &tree_inout); + void extend_tree(const CurveTreesV1::TreeExtension &tree_extension); // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer - bool validate_tree(const Tree &tree); + bool audit_tree(); // logging helpers void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks); void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); - void log_tree(const CurveTreesUnitTest::Tree &tree); + void log_tree(); private: CurveTreesV1 &m_curve_trees; + Tree m_tree = Tree{}; }; From ae89fddc00dbb44091915371119ba31a2f88044a Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 3 Jun 2024 17:15:02 -0700 Subject: [PATCH 029/127] Set up trim_tree_in_memory test --- tests/unit_tests/curve_trees.cpp | 105 
++++++++++++++++++++++++++++--- 1 file changed, 97 insertions(+), 8 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 522021f5cb3..b371cc24c4a 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -580,6 +580,13 @@ static bool grow_tree(CurveTreesV1 &curve_trees, return global_tree.audit_tree(); } //---------------------------------------------------------------------------------------------------------------------- +static bool trim_tree(CurveTreesV1 &curve_trees, + CurveTreesGlobalTree &global_tree, + const std::size_t num_leaves) +{ + return true; +} +//---------------------------------------------------------------------------------------------------------------------- static bool grow_tree_in_memory(const std::size_t init_leaves, const std::size_t ext_leaves, CurveTreesV1 &curve_trees) @@ -592,9 +599,7 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, // Initialize global tree with `init_leaves` MDEBUG("Adding " << init_leaves << " leaves to tree"); - bool res = grow_tree(curve_trees, - global_tree, - init_leaves); + bool res = grow_tree(curve_trees, global_tree, init_leaves); CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); @@ -603,9 +608,7 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, // Then extend the global tree by `ext_leaves` MDEBUG("Extending tree by " << ext_leaves << " leaves"); - res = grow_tree(curve_trees, - global_tree, - ext_leaves); + res = grow_tree(curve_trees, global_tree, ext_leaves); CHECK_AND_ASSERT_MES(res, false, "failed to extend tree in memory"); @@ -613,6 +616,34 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, return true; } //---------------------------------------------------------------------------------------------------------------------- +static bool trim_tree_in_memory(const std::size_t init_leaves, + const std::size_t trim_leaves, + CurveTreesV1 
&curve_trees)
+{
+    LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree in memory then trimming " << trim_leaves << " leaves");
+
+    CurveTreesGlobalTree global_tree(curve_trees);
+
+    // Initialize global tree with `init_leaves`
+    MDEBUG("Adding " << init_leaves << " leaves to tree");
+
+    bool res = grow_tree(curve_trees, global_tree, init_leaves);
+
+    CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory");
+
+    MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory");
+
+    // Then trim the global tree by `trim_leaves`
+    MDEBUG("Trimming " << trim_leaves << " leaves from tree");
+
+    res = trim_tree(curve_trees, global_tree, trim_leaves);
+
+    CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory");
+
+    MDEBUG("Successfully trimmed " << trim_leaves << " leaves in memory");
+    return true;
+}
+//----------------------------------------------------------------------------------------------------------------------
 static bool grow_tree_db(const std::size_t init_leaves,
     const std::size_t ext_leaves,
     CurveTreesV1 &curve_trees,
@@ -659,8 +690,8 @@ TEST(curve_trees, grow_tree)
 
     unit_test::BlockchainLMDBTest test_db;
 
-    CHECK_AND_ASSERT_THROW_MES(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1");
-    CHECK_AND_ASSERT_THROW_MES(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1");
+    static_assert(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1");
+    static_assert(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1");
 
     // Number of leaves for which x number of layers is required
     const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH;
     const std::size_t NEED_2_LAYERS = NEED_1_LAYER * HELIOS_CHUNK_WIDTH;
     const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH;
@@ -699,3 +730,61 @@ TEST(curve_trees, grow_tree)
         }
     }
 }
+//----------------------------------------------------------------------------------------------------------------------
+TEST(curve_trees, trim_tree)
+{
+    Helios helios;
+    Selene selene;
+
+    LOG_PRINT_L1("Test trim tree with helios chunk width " << HELIOS_CHUNK_WIDTH
+        << ", selene chunk width " << SELENE_CHUNK_WIDTH);
+
+    auto 
curve_trees = CurveTreesV1( + helios, + selene, + HELIOS_CHUNK_WIDTH, + SELENE_CHUNK_WIDTH); + + unit_test::BlockchainLMDBTest test_db; + + static_assert(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1"); + static_assert(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1"); + + // Number of leaves for which x number of layers is required + const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH; + const std::size_t NEED_2_LAYERS = NEED_1_LAYER * HELIOS_CHUNK_WIDTH; + const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH; + + const std::vector N_LEAVES{ + // Basic tests + 1, + 2, + + // Test with number of leaves {-1,0,+1} relative to chunk width boundaries + NEED_1_LAYER-1, + NEED_1_LAYER, + NEED_1_LAYER+1, + + NEED_2_LAYERS-1, + NEED_2_LAYERS, + NEED_2_LAYERS+1, + + NEED_3_LAYERS, + }; + + for (const std::size_t init_leaves : N_LEAVES) + { + for (const std::size_t trim_leaves : N_LEAVES) + { + // Can't trim more leaves than exist in tree + if (trim_leaves > init_leaves) + continue; + + // Only test 3rd layer once because it's a huge test + if (init_leaves == NEED_3_LAYERS && trim_leaves > 1) + continue; + + ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, curve_trees)); + } + } +} From e8af7090b091a5b088caf179b3bdc10211465e5a Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 5 Jun 2024 19:39:43 -0700 Subject: [PATCH 030/127] expose and test hash_trim from rust lib --- src/fcmp/fcmp_rust/fcmp++.h | 8 ++++ src/fcmp/fcmp_rust/src/lib.rs | 48 ++++++++++++++++++- src/fcmp/tower_cycle.cpp | 30 ++++++++++++ src/fcmp/tower_cycle.h | 15 ++++++ tests/unit_tests/curve_trees.cpp | 81 ++++++++++++++++++++++++++++++++ 5 files changed, 181 insertions(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index 695d9807bf5..f12cbb55be7 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -116,11 +116,19 @@ CResult hash_grow_helios(HeliosPoint existing_hash, HeliosScalar first_child_after_offset, 
HeliosScalarSlice new_children); +CResult hash_trim_helios(HeliosPoint existing_hash, + uintptr_t offset, + HeliosScalarSlice children); + CResult hash_grow_selene(SelenePoint existing_hash, uintptr_t offset, SeleneScalar first_child_after_offset, SeleneScalarSlice new_children); +CResult hash_trim_selene(SelenePoint existing_hash, + uintptr_t offset, + SeleneScalarSlice children); + } // extern "C" } diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 8b8f35d84cd..3b9610b2889 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -17,7 +17,7 @@ use transcript::RecommendedTranscript; use generalized_bulletproofs::Generators; use ec_divisors::DivisorCurve; -use full_chain_membership_proofs::tree::hash_grow; +use full_chain_membership_proofs::tree::{hash_grow, hash_trim}; // TODO: Use a macro to de-duplicate some of of this code @@ -173,6 +173,29 @@ pub extern "C" fn hash_grow_helios( } } +#[no_mangle] +pub extern "C" fn hash_trim_helios( + existing_hash: HeliosPoint, + offset: usize, + children: HeliosScalarSlice, +) -> CResult { + let hash = hash_trim( + helios_generators(), + existing_hash, + offset, + children.into(), + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + CResult::err( + HeliosPoint::identity(), + io::Error::new(io::ErrorKind::Other, "failed to trim hash"), + ) + } +} + #[no_mangle] pub extern "C" fn hash_grow_selene( existing_hash: SelenePoint, @@ -197,3 +220,26 @@ pub extern "C" fn hash_grow_selene( ) } } + +#[no_mangle] +pub extern "C" fn hash_trim_selene( + existing_hash: SelenePoint, + offset: usize, + children: SeleneScalarSlice, +) -> CResult { + let hash = hash_trim( + selene_generators(), + existing_hash, + offset, + children.into(), + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + CResult::err( + SelenePoint::identity(), + io::Error::new(io::ErrorKind::Other, "failed to trim hash"), + ) + } +} diff --git a/src/fcmp/tower_cycle.cpp 
b/src/fcmp/tower_cycle.cpp index bbac37f64d1..cd1ca939b4a 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -62,6 +62,21 @@ Helios::Point Helios::hash_grow( return res.value; } //---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_trim( + const Helios::Point &existing_hash, + const std::size_t offset, + const Helios::Chunk &children) const +{ + fcmp_rust::CResult res = fcmp_rust::hash_trim_helios( + existing_hash, + offset, + children); + if (res.err != 0) { + throw std::runtime_error("failed to hash trim"); + } + return res.value; +} +//---------------------------------------------------------------------------------------------------------------------- Selene::Point Selene::hash_grow( const Selene::Point &existing_hash, const std::size_t offset, @@ -79,6 +94,21 @@ Selene::Point Selene::hash_grow( return res.value; } //---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::hash_trim( + const Selene::Point &existing_hash, + const std::size_t offset, + const Selene::Chunk &children) const +{ + fcmp_rust::CResult res = fcmp_rust::hash_trim_selene( + existing_hash, + offset, + children); + if (res.err != 0) { + throw std::runtime_error("failed to hash trim"); + } + return res.value; +} +//---------------------------------------------------------------------------------------------------------------------- Helios::Scalar Helios::zero_scalar() const { return fcmp_rust::helios_zero_scalar(); diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index efb87b799f5..fde3d92ef3b 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -83,6 +83,11 @@ class Curve const typename C::Scalar &first_child_after_offset, const typename C::Chunk &new_children) const = 0; + virtual typename C::Point hash_trim( + const typename C::Point &existing_hash, + const 
std::size_t offset, + const typename C::Chunk &children) const = 0; + virtual typename C::Scalar zero_scalar() const = 0; virtual std::array to_bytes(const typename C::Scalar &scalar) const = 0; @@ -122,6 +127,11 @@ class Helios final : public Curve const Scalar &first_child_after_offset, const Chunk &new_children) const override; + Point hash_trim( + const Point &existing_hash, + const std::size_t offset, + const Chunk &children) const override; + Scalar zero_scalar() const override; std::array to_bytes(const Scalar &scalar) const override; @@ -156,6 +166,11 @@ class Selene final : public Curve const Scalar &first_child_after_offset, const Chunk &new_children) const override; + Point hash_trim( + const Point &existing_hash, + const std::size_t offset, + const Chunk &children) const override; + Scalar zero_scalar() const override; std::array to_bytes(const Scalar &scalar) const override; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index b371cc24c4a..d76958d6fd3 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -788,3 +788,84 @@ TEST(curve_trees, trim_tree) } } } +//---------------------------------------------------------------------------------------------------------------------- +// Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children +TEST(curve_trees, hash_trim) +{ + Helios helios; + Selene selene; + auto curve_trees = CurveTreesV1( + helios, + selene, + HELIOS_CHUNK_WIDTH, + SELENE_CHUNK_WIDTH); + + // Selene + // Generate 3 random leaf tuples + const std::size_t NUM_LEAF_TUPLES = 3; + const std::size_t NUM_LEAVES = NUM_LEAF_TUPLES * CurveTreesV1::LEAF_TUPLE_SIZE; + const auto grow_leaves = generate_random_leaves(curve_trees, NUM_LEAF_TUPLES); + const auto grow_children = curve_trees.flatten_leaves(grow_leaves); + const auto &grow_chunk = Selene::Chunk{grow_children.data(), grow_children.size()}; + + // Hash the leaves + const auto 
init_grow_result = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*first_child_after_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ grow_chunk); + + // Trim the initial result + const std::size_t trim_offset = NUM_LEAVES - CurveTreesV1::LEAF_TUPLE_SIZE; + const auto &trimmed_child = Selene::Chunk{grow_children.data() + trim_offset, CurveTreesV1::LEAF_TUPLE_SIZE}; + const auto trim_result = curve_trees.m_c2.hash_trim( + init_grow_result, + trim_offset, + trimmed_child); + const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_result); + + // Now compare to calling hash_grow with the remaining children, excluding the trimmed child + const auto &remaining_children = Selene::Chunk{grow_children.data(), trim_offset}; + const auto remaining_children_hash = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*first_child_after_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ remaining_children); + const auto grow_res_bytes = curve_trees.m_c2.to_bytes(remaining_children_hash); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + + // Helios + // Get 2 helios scalars + std::vector grow_helios_scalars; + fcmp::tower_cycle::extend_scalars_from_cycle_points(curve_trees.m_c2, + {init_grow_result, trim_result}, + grow_helios_scalars); + const auto &grow_helios_chunk = Helios::Chunk{grow_helios_scalars.data(), grow_helios_scalars.size()}; + + // Get the initial hash of the 2 helios scalars + const auto helios_grow_result = curve_trees.m_c1.hash_grow( + /*existing_hash*/ curve_trees.m_c1.m_hash_init_point, + /*offset*/ 0, + /*first_child_after_offset*/ curve_trees.m_c1.zero_scalar(), + /*children*/ grow_helios_chunk); + + // Trim the initial result by 1 child + const auto &trimmed_helios_child = Helios::Chunk{grow_helios_scalars.data() + 1, 1}; + const auto trim_helios_result = curve_trees.m_c1.hash_trim( + helios_grow_result, + 1, + trimmed_helios_child); + 
const auto trim_helios_res_bytes = curve_trees.m_c1.to_bytes(trim_helios_result); + + // Now compare to calling hash_grow with the remaining children, excluding the trimmed child + const auto &remaining_helios_children = Helios::Chunk{grow_helios_scalars.data(), 1}; + const auto remaining_helios_children_hash = curve_trees.m_c1.hash_grow( + /*existing_hash*/ curve_trees.m_c1.m_hash_init_point, + /*offset*/ 0, + /*first_child_after_offset*/ curve_trees.m_c1.zero_scalar(), + /*children*/ remaining_helios_children); + const auto grow_helios_res_bytes = curve_trees.m_c1.to_bytes(remaining_helios_children_hash); + + ASSERT_EQ(trim_helios_res_bytes, grow_helios_res_bytes); +} From ed040cacc1b1b45b5d3d9627b2931e1ca4540edd Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 6 Jun 2024 22:48:01 -0700 Subject: [PATCH 031/127] implement trim_tree_in_memory --- tests/unit_tests/curve_trees.cpp | 536 ++++++++++++++++++++++++++++++- tests/unit_tests/curve_trees.h | 3 + 2 files changed, 527 insertions(+), 12 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index d76958d6fd3..e75f726b87b 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -33,6 +33,8 @@ #include "misc_log_ex.h" #include "unit_tests_utils.h" +#include + //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTreesGlobalTree helpers @@ -286,6 +288,519 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e } } //---------------------------------------------------------------------------------------------------------------------- +// If we reached the new root, then clear all remaining elements in the tree above the root. 
Otherwise continue +template +static bool handle_root_after_trim(const std::size_t num_parents, + const std::size_t c1_expected_n_layers, + const std::size_t c2_expected_n_layers, + CurveTreesGlobalTree::Layer &parents_inout, + std::vector> &c1_layers_inout, + std::vector> &c2_layers_inout) +{ + // We're at the root if there should only be 1 element in the layer + if (num_parents > 1) + return false; + + MDEBUG("We have encountered the root, clearing remaining elements in the tree"); + + // Clear all parents after root + while (parents_inout.size() > 1) + parents_inout.pop_back(); + + // Clear all remaining layers, if any + while (c1_layers_inout.size() > c1_expected_n_layers) + c1_layers_inout.pop_back(); + + while (c2_layers_inout.size() > c2_expected_n_layers) + c2_layers_inout.pop_back(); + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +// Trims the child layer and caches values needed to update and trim the child's parent layer +// TODO: work on consolidating this function with the leaf layer logic and simplifying edge case handling +template +static typename C_PARENT::Point trim_children(const C_CHILD &c_child, + const C_PARENT &c_parent, + const std::size_t parent_width, + const CurveTreesGlobalTree::Layer &parents, + const typename C_CHILD::Point &old_last_child_hash, + CurveTreesGlobalTree::Layer &children_inout, + std::size_t &last_parent_idx_inout, + typename C_PARENT::Point &old_last_parent_hash_out) +{ + const std::size_t old_num_children = children_inout.size(); + const std::size_t old_last_parent_idx = (old_num_children - 1) / parent_width; + const std::size_t old_last_offset = old_num_children % parent_width; + + const std::size_t new_num_children = last_parent_idx_inout + 1; + const std::size_t new_last_parent_idx = (new_num_children - 1) / parent_width; + const std::size_t new_last_offset = new_num_children % parent_width; + + 
CHECK_AND_ASSERT_THROW_MES(old_num_children >= new_num_children, "unexpected new_num_children"); + + last_parent_idx_inout = new_last_parent_idx; + old_last_parent_hash_out = parents[new_last_parent_idx]; + + MDEBUG("old_num_children: " << old_num_children << + " , old_last_parent_idx: " << old_last_parent_idx << + " , old_last_offset: " << old_last_offset << + " , old_last_parent_hash_out: " << c_parent.to_string(old_last_parent_hash_out) << + " , new_num_children: " << new_num_children << + " , new_last_parent_idx: " << new_last_parent_idx << + " , new_last_offset: " << new_last_offset); + + // TODO: consolidate logic handling this function with the edge case at the end of this function + if (old_num_children == new_num_children) + { + // No new children means we only updated the last child, so use it to get the new last parent + const auto new_last_child = c_child.point_to_cycle_scalar(children_inout.back()); + std::vector new_child_v{new_last_child}; + const auto &chunk = typename C_PARENT::Chunk{new_child_v.data(), new_child_v.size()}; + + const auto new_last_parent = c_parent.hash_grow( + /*existing_hash*/ old_last_parent_hash_out, + /*offset*/ (new_num_children - 1) % parent_width, + /*first_child_after_offset*/ c_child.point_to_cycle_scalar(old_last_child_hash), + /*children*/ chunk); + + MDEBUG("New last parent using updated last child " << c_parent.to_string(new_last_parent)); + return new_last_parent; + } + + // Get the number of existing children in what will become the new last chunk after trimming + const std::size_t new_last_chunk_old_num_children = (old_last_parent_idx > new_last_parent_idx + || old_last_offset == 0) + ? parent_width + : old_last_offset; + + CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children > new_last_offset, + "unexpected new_last_chunk_old_num_children"); + + // Get the number of children we'll be trimming from the new last chunk + const std::size_t trim_n_children_from_new_last_chunk = new_last_offset == 0 + ? 
0 // it will remain full + : new_last_chunk_old_num_children - new_last_offset; + + // We use hash trim if we're removing fewer elems in the last chunk than the number of elems remaining + const bool last_chunk_use_hash_trim = trim_n_children_from_new_last_chunk > 0 + && trim_n_children_from_new_last_chunk < new_last_offset; + + MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children << + " , trim_n_children_from_new_last_chunk: " << trim_n_children_from_new_last_chunk << + " , last_chunk_use_hash_trim: " << last_chunk_use_hash_trim); + + // If we're using hash_trim for the last chunk, we'll need to collect the children we're removing + // TODO: use a separate function to handle last_chunk_use_hash_trim case + std::vector new_last_chunk_children_to_trim; + if (last_chunk_use_hash_trim) + new_last_chunk_children_to_trim.reserve(trim_n_children_from_new_last_chunk); + + // Trim the children starting at the back of the child layer + MDEBUG("Trimming " << (old_num_children - new_num_children) << " children"); + while (children_inout.size() > new_num_children) + { + // If we're using hash_trim for the last chunk, collect children from the last chunk + if (last_chunk_use_hash_trim) + { + const std::size_t cur_last_parent_idx = (children_inout.size() - 1) / parent_width; + if (cur_last_parent_idx == new_last_parent_idx) + new_last_chunk_children_to_trim.emplace_back(std::move(children_inout.back())); + } + + children_inout.pop_back(); + } + CHECK_AND_ASSERT_THROW_MES(children_inout.size() == new_num_children, "unexpected new children"); + // We're done trimming the children + + // If we're not using hash_trim for the last chunk, and we will be trimming from the new last chunk, then + // we'll need to collect the new last chunk's remaining children for hash_grow + // TODO: use a separate function to handle last_chunk_remaining_children case + std::vector last_chunk_remaining_children; + if (!last_chunk_use_hash_trim && new_last_offset > 0) + { + 
last_chunk_remaining_children.reserve(new_last_offset); + + const std::size_t start_child_idx = new_last_parent_idx * parent_width; + + CHECK_AND_ASSERT_THROW_MES((start_child_idx + new_last_offset) == children_inout.size(), + "unexpected start_child_idx"); + + for (std::size_t i = start_child_idx; i < children_inout.size(); ++i) + { + CHECK_AND_ASSERT_THROW_MES(i < children_inout.size(), "unexpected child idx"); + last_chunk_remaining_children.push_back(children_inout[i]); + } + } + + CHECK_AND_ASSERT_THROW_MES(!parents.empty(), "empty parent layer"); + CHECK_AND_ASSERT_THROW_MES(new_last_parent_idx < parents.size(), "unexpected new_last_parent_idx"); + + // Set the new last chunk's parent hash + if (last_chunk_use_hash_trim) + { + CHECK_AND_ASSERT_THROW_MES(new_last_chunk_children_to_trim.size() == trim_n_children_from_new_last_chunk, + "unexpected size of last child chunk"); + + // We need to reverse the order in order to match the order the children were initially inserted into the tree + std::reverse(new_last_chunk_children_to_trim.begin(), new_last_chunk_children_to_trim.end()); + + // Check if the last child changed + const auto &old_last_child = old_last_child_hash; + const auto &new_last_child = children_inout.back(); + + if (c_child.to_bytes(old_last_child) == c_child.to_bytes(new_last_child)) + { + // If the last child didn't change, then simply trim the collected children + std::vector child_scalars; + fcmp::tower_cycle::extend_scalars_from_cycle_points(c_child, + new_last_chunk_children_to_trim, + child_scalars); + + for (std::size_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Trimming child " << c_parent.to_string(child_scalars[i])); + + const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; + + const auto new_last_parent = c_parent.hash_trim( + old_last_parent_hash_out, + new_last_offset, + chunk); + + MDEBUG("New last parent using simple hash_trim " << c_parent.to_string(new_last_parent)); + return 
new_last_parent; + } + + // The last child changed, so trim the old child, then grow the chunk by 1 with the new child + // TODO: implement prior_child_at_offset in hash_trim + new_last_chunk_children_to_trim.insert(new_last_chunk_children_to_trim.begin(), old_last_child); + + std::vector child_scalars; + fcmp::tower_cycle::extend_scalars_from_cycle_points(c_child, + new_last_chunk_children_to_trim, + child_scalars); + + for (std::size_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Trimming child " << c_parent.to_string(child_scalars[i])); + + const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; + + CHECK_AND_ASSERT_THROW_MES(new_last_offset > 0, "new_last_offset must be >0"); + auto new_last_parent = c_parent.hash_trim( + old_last_parent_hash_out, + new_last_offset - 1, + chunk); + + std::vector new_last_child_scalar{c_child.point_to_cycle_scalar(new_last_child)}; + const auto &new_last_child_chunk = typename C_PARENT::Chunk{ + new_last_child_scalar.data(), + new_last_child_scalar.size()}; + + MDEBUG("Growing with new child: " << c_parent.to_string(new_last_child_scalar[0])); + + new_last_parent = c_parent.hash_grow( + new_last_parent, + new_last_offset - 1, + c_parent.zero_scalar(), + new_last_child_chunk); + + MDEBUG("New last parent using hash_trim AND updated last child " << c_parent.to_string(new_last_parent)); + return new_last_parent; + } + else if (!last_chunk_remaining_children.empty()) + { + // If we have remaining children in the new last chunk, and some children were trimmed from the chunk, then + // use hash_grow to calculate the new hash + std::vector child_scalars; + fcmp::tower_cycle::extend_scalars_from_cycle_points(c_child, + last_chunk_remaining_children, + child_scalars); + + const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; + + auto new_last_parent = c_parent.hash_grow( + /*existing_hash*/ c_parent.m_hash_init_point, + /*offset*/ 0, + /*first_child_after_offset*/ 
c_parent.zero_scalar(), + /*children*/ chunk); + + MDEBUG("New last parent from re-growing last chunk " << c_parent.to_string(new_last_parent)); + return new_last_parent; + } + + // Check if the last child updated + const auto &old_last_child = old_last_child_hash; + const auto &new_last_child = children_inout.back(); + const auto old_last_child_bytes = c_child.to_bytes(old_last_child); + const auto new_last_child_bytes = c_child.to_bytes(new_last_child); + + if (old_last_child_bytes == new_last_child_bytes) + { + MDEBUG("The last child didn't update, nothing left to do"); + return old_last_parent_hash_out; + } + + // TODO: try to consolidate handling this edge case with the case of old_num_children == new_num_children + MDEBUG("The last child changed, updating last chunk parent hash"); + + CHECK_AND_ASSERT_THROW_MES(new_last_offset == 0, "unexpected new last offset"); + + const auto old_last_child_scalar = c_child.point_to_cycle_scalar(old_last_child); + auto new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child); + + std::vector child_scalars{std::move(new_last_child_scalar)}; + const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; + + auto new_last_parent = c_parent.hash_grow( + /*existing_hash*/ old_last_parent_hash_out, + /*offset*/ parent_width - 1, + /*first_child_after_offset*/ old_last_child_scalar, + /*children*/ chunk); + + MDEBUG("New last parent from updated last child " << c_parent.to_string(new_last_parent)); + return new_last_parent; +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::trim_tree(const std::size_t new_num_leaves) +{ + // TODO: consolidate below logic with trim_children above + CHECK_AND_ASSERT_THROW_MES(new_num_leaves >= CurveTreesV1::LEAF_TUPLE_SIZE, + "tree must have at least 1 leaf tuple in it"); + CHECK_AND_ASSERT_THROW_MES(new_num_leaves % CurveTreesV1::LEAF_TUPLE_SIZE == 0, + 
"num leaves must be divisible by leaf tuple size"); + + auto &leaves_out = m_tree.leaves; + auto &c1_layers_out = m_tree.c1_layers; + auto &c2_layers_out = m_tree.c2_layers; + + const std::size_t old_num_leaves = leaves_out.size() * CurveTreesV1::LEAF_TUPLE_SIZE; + CHECK_AND_ASSERT_THROW_MES(old_num_leaves > new_num_leaves, "unexpected new num leaves"); + + const std::size_t old_last_leaf_parent_idx = (old_num_leaves - CurveTreesV1::LEAF_TUPLE_SIZE) + / m_curve_trees.m_leaf_layer_chunk_width; + const std::size_t old_last_leaf_offset = old_num_leaves % m_curve_trees.m_leaf_layer_chunk_width; + + const std::size_t new_last_leaf_parent_idx = (new_num_leaves - CurveTreesV1::LEAF_TUPLE_SIZE) + / m_curve_trees.m_leaf_layer_chunk_width; + const std::size_t new_last_leaf_offset = new_num_leaves % m_curve_trees.m_leaf_layer_chunk_width; + + MDEBUG("old_num_leaves: " << old_num_leaves << + ", old_last_leaf_parent_idx: " << old_last_leaf_parent_idx << + ", old_last_leaf_offset: " << old_last_leaf_offset << + ", new_num_leaves: " << new_num_leaves << + ", new_last_leaf_parent_idx: " << new_last_leaf_parent_idx << + ", new_last_leaf_offset: " << new_last_leaf_offset); + + // Get the number of existing leaves in what will become the new last chunk after trimming + const std::size_t new_last_chunk_old_num_leaves = (old_last_leaf_parent_idx > new_last_leaf_parent_idx + || old_last_leaf_offset == 0) + ? m_curve_trees.m_leaf_layer_chunk_width + : old_last_leaf_offset; + + CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_leaves > new_last_leaf_offset, + "unexpected last_chunk_old_num_leaves"); + + // Get the number of leaves we'll be trimming from the new last chunk + const std::size_t n_leaves_trim_from_new_last_chunk = new_last_leaf_offset == 0 + ? 
0 // the last chunk will remain full + : new_last_chunk_old_num_leaves - new_last_leaf_offset; + + // We use hash trim if we're removing fewer elems in the last chunk than the number of elems remaining + const bool last_chunk_use_hash_trim = n_leaves_trim_from_new_last_chunk > 0 + && n_leaves_trim_from_new_last_chunk < new_last_leaf_offset; + + MDEBUG("new_last_chunk_old_num_leaves: " << new_last_chunk_old_num_leaves << + ", n_leaves_trim_from_new_last_chunk: " << n_leaves_trim_from_new_last_chunk << + ", last_chunk_use_hash_trim: " << last_chunk_use_hash_trim); + + // If we're using hash_trim for the last chunk, we'll need to collect the leaves we're trimming from that chunk + std::vector new_last_chunk_leaves_to_trim; + if (last_chunk_use_hash_trim) + new_last_chunk_leaves_to_trim.reserve(n_leaves_trim_from_new_last_chunk); + + // Trim the leaves starting at the back of the leaf layer + const std::size_t new_num_leaf_tuples = new_num_leaves / CurveTreesV1::LEAF_TUPLE_SIZE; + while (leaves_out.size() > new_num_leaf_tuples) + { + // If we're using hash_trim for the last chunk, collect leaves from the last chunk to use later + if (last_chunk_use_hash_trim) + { + // Check if we're now trimming leaves from what will be the new last chunk + const std::size_t num_leaves_remaining = (leaves_out.size() - 1) * CurveTreesV1::LEAF_TUPLE_SIZE; + const std::size_t cur_last_leaf_parent_idx = num_leaves_remaining / m_curve_trees.m_leaf_layer_chunk_width; + + if (cur_last_leaf_parent_idx == new_last_leaf_parent_idx) + { + // Add leaves in reverse order, because we're going to reverse the entire vector later on to get the + // correct trim order + new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().C_x)); + new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().I_x)); + new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().O_x)); + } + } + + leaves_out.pop_back(); + } + CHECK_AND_ASSERT_THROW_MES(leaves_out.size() == 
new_num_leaf_tuples, "unexpected size of new leaves"); + + const std::size_t cur_last_leaf_parent_idx = ((leaves_out.size() - 1) * CurveTreesV1::LEAF_TUPLE_SIZE) + / m_curve_trees.m_leaf_layer_chunk_width; + CHECK_AND_ASSERT_THROW_MES(cur_last_leaf_parent_idx == new_last_leaf_parent_idx, "unexpected last leaf parent idx"); + + // If we're not using hash_trim for the last chunk, and the new last chunk is not full already, we'll need to + // collect the existing leaves to get the hash using hash_grow + std::vector last_chunk_remaining_leaves; + if (!last_chunk_use_hash_trim && new_last_leaf_offset > 0) + { + last_chunk_remaining_leaves.reserve(new_last_leaf_offset); + + const std::size_t start_leaf_idx = new_last_leaf_parent_idx * m_curve_trees.m_leaf_layer_chunk_width; + MDEBUG("start_leaf_idx: " << start_leaf_idx << ", leaves_out.size(): " << leaves_out.size()); + + CHECK_AND_ASSERT_THROW_MES((start_leaf_idx + new_last_leaf_offset) == new_num_leaves, + "unexpected start_leaf_idx"); + + for (std::size_t i = (start_leaf_idx / CurveTreesV1::LEAF_TUPLE_SIZE); i < leaves_out.size(); ++i) + { + CHECK_AND_ASSERT_THROW_MES(i < leaves_out.size(), "unexpected leaf idx"); + last_chunk_remaining_leaves.push_back(leaves_out[i].O_x); + last_chunk_remaining_leaves.push_back(leaves_out[i].I_x); + last_chunk_remaining_leaves.push_back(leaves_out[i].C_x); + } + } + + CHECK_AND_ASSERT_THROW_MES(!c2_layers_out.empty(), "empty leaf parent layer"); + CHECK_AND_ASSERT_THROW_MES(cur_last_leaf_parent_idx < c2_layers_out[0].size(), + "unexpected cur_last_leaf_parent_idx"); + + // Set the new last leaf parent + Selene::Point old_last_c2_hash = std::move(c2_layers_out[0][cur_last_leaf_parent_idx]); + if (last_chunk_use_hash_trim) + { + CHECK_AND_ASSERT_THROW_MES(new_last_chunk_leaves_to_trim.size() == n_leaves_trim_from_new_last_chunk, + "unexpected size of last leaf chunk"); + + // We need to reverse the order in order to match the order the leaves were initially inserted into the tree + 
std::reverse(new_last_chunk_leaves_to_trim.begin(), new_last_chunk_leaves_to_trim.end()); + + const Selene::Chunk trim_leaves{new_last_chunk_leaves_to_trim.data(), new_last_chunk_leaves_to_trim.size()}; + + for (std::size_t i = 0; i < new_last_chunk_leaves_to_trim.size(); ++i) + MDEBUG("Trimming leaf " << m_curve_trees.m_c2.to_string(new_last_chunk_leaves_to_trim[i])); + + auto new_last_leaf_parent = m_curve_trees.m_c2.hash_trim( + old_last_c2_hash, + new_last_leaf_offset, + trim_leaves); + + MDEBUG("New hash " << m_curve_trees.m_c2.to_string(new_last_leaf_parent)); + + c2_layers_out[0][cur_last_leaf_parent_idx] = std::move(new_last_leaf_parent); + } + else if (new_last_leaf_offset > 0) + { + for (std::size_t i = 0; i < last_chunk_remaining_leaves.size(); ++i) + MDEBUG("Hashing leaf " << m_curve_trees.m_c2.to_string(last_chunk_remaining_leaves[i])); + + const auto &leaves = Selene::Chunk{last_chunk_remaining_leaves.data(), last_chunk_remaining_leaves.size()}; + + auto new_last_leaf_parent = m_curve_trees.m_c2.hash_grow( + /*existing_hash*/ m_curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*first_child_after_offset*/ m_curve_trees.m_c2.zero_scalar(), + /*children*/ leaves); + + MDEBUG("Result hash " << m_curve_trees.m_c2.to_string(new_last_leaf_parent)); + + c2_layers_out[0][cur_last_leaf_parent_idx] = std::move(new_last_leaf_parent); + } + + if (handle_root_after_trim( + /*num_parents*/ cur_last_leaf_parent_idx + 1, + /*c1_expected_n_layers*/ 0, + /*c2_expected_n_layers*/ 1, + /*parents_inout*/ c2_layers_out[0], + /*c1_layers_inout*/ c1_layers_out, + /*c2_layers_inout*/ c2_layers_out)) + { + return; + } + + // Go layer-by-layer starting by trimming the c2 layer we just set, and updating the parent layer hashes + bool trim_c1 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + std::size_t last_parent_idx = cur_last_leaf_parent_idx; + Helios::Point old_last_c1_hash; + for (std::size_t i = 0; i < (c1_layers_out.size() + c2_layers_out.size()); ++i) + { 
+ MDEBUG("Trimming layer " << i); + + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers_out.size(), "unexpected c1 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers_out.size(), "unexpected c2 layer"); + + auto &c1_layer_out = c1_layers_out[c1_idx]; + auto &c2_layer_out = c2_layers_out[c2_idx]; + + if (trim_c1) + { + // TODO: fewer params + auto new_last_parent = trim_children(m_curve_trees.m_c2, + m_curve_trees.m_c1, + m_curve_trees.m_c1_width, + c1_layer_out, + old_last_c2_hash, + c2_layer_out, + last_parent_idx, + old_last_c1_hash); + + // Update the last parent + c1_layer_out[last_parent_idx] = std::move(new_last_parent); + + if (handle_root_after_trim(last_parent_idx + 1, + c1_idx + 1, + c2_idx + 1, + c1_layer_out, + c1_layers_out, + c2_layers_out)) + { + return; + } + + ++c2_idx; + } + else + { + // TODO: fewer params + auto new_last_parent = trim_children(m_curve_trees.m_c1, + m_curve_trees.m_c2, + m_curve_trees.m_c2_width, + c2_layer_out, + old_last_c1_hash, + c1_layer_out, + last_parent_idx, + old_last_c2_hash); + + // Update the last parent + c2_layer_out[last_parent_idx] = std::move(new_last_parent); + + if (handle_root_after_trim(last_parent_idx + 1, + c1_idx + 1, + c2_idx + 1, + c2_layer_out, + c1_layers_out, + c2_layers_out)) + { + return; + } + + ++c1_idx; + } + + trim_c1 = !trim_c1; + } +} +//---------------------------------------------------------------------------------------------------------------------- bool CurveTreesGlobalTree::audit_tree() { const auto &leaves = m_tree.leaves; @@ -580,13 +1095,6 @@ static bool grow_tree(CurveTreesV1 &curve_trees, return global_tree.audit_tree(); } //---------------------------------------------------------------------------------------------------------------------- -static bool trim_tree(CurveTreesV1 &curve_trees, - CurveTreesGlobalTree &global_tree, - const std::size_t num_leaves) -{ - return true; -} 
-//---------------------------------------------------------------------------------------------------------------------- static bool grow_tree_in_memory(const std::size_t init_leaves, const std::size_t ext_leaves, CurveTreesV1 &curve_trees) @@ -600,7 +1108,6 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, MDEBUG("Adding " << init_leaves << " leaves to tree"); bool res = grow_tree(curve_trees, global_tree, init_leaves); - CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); @@ -609,7 +1116,6 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, MDEBUG("Extending tree by " << ext_leaves << " leaves"); res = grow_tree(curve_trees, global_tree, ext_leaves); - CHECK_AND_ASSERT_MES(res, false, "failed to extend tree in memory"); MDEBUG("Successfully extended by " << ext_leaves << " leaves in memory"); @@ -628,16 +1134,22 @@ static bool trim_tree_in_memory(const std::size_t init_leaves, MDEBUG("Adding " << init_leaves << " leaves to tree"); bool res = grow_tree(curve_trees, global_tree, init_leaves); - CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); - // Then tim the global tree by `trim_leaves` + // Then trim the global tree by `trim_leaves` MDEBUG("Trimming " << trim_leaves << " leaves from tree"); - res = trim_tree(curve_trees, global_tree, trim_leaves); + CHECK_AND_ASSERT_MES(init_leaves > trim_leaves, false, "trimming too many leaves"); + const std::size_t new_num_leaves = init_leaves - trim_leaves; + global_tree.trim_tree(new_num_leaves * CurveTreesV1::LEAF_TUPLE_SIZE); + + MDEBUG("Finished trimming " << trim_leaves << " leaves from tree"); + + global_tree.log_tree(); + res = global_tree.audit_tree(); CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); MDEBUG("Successfully trimmed " << 
trim_leaves << " leaves in memory"); diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 6342497cee8..24629b58ad8 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -71,6 +71,9 @@ class CurveTreesGlobalTree // Use the tree extension to extend the in-memory tree void extend_tree(const CurveTreesV1::TreeExtension &tree_extension); + // Trim tree to the provided number of leaves + void trim_tree(const std::size_t new_num_leaves); + // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer bool audit_tree(); From 8287ba6f78e22a7c3cdfc6b0eff1ea0114223d2e Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 6 Jun 2024 23:47:29 -0700 Subject: [PATCH 032/127] faster trim_tree tests --- tests/unit_tests/curve_trees.cpp | 47 ++++++++++++++++---------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index e75f726b87b..7ea2fd0c23a 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -1124,22 +1124,10 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, //---------------------------------------------------------------------------------------------------------------------- static bool trim_tree_in_memory(const std::size_t init_leaves, const std::size_t trim_leaves, - CurveTreesV1 &curve_trees) + CurveTreesGlobalTree &&global_tree) { - LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree in memory then trimming " << trim_leaves << " leaves"); - - CurveTreesGlobalTree global_tree(curve_trees); - - // Initialize global tree with `init_leaves` - MDEBUG("Adding " << init_leaves << " leaves to tree"); - - bool res = grow_tree(curve_trees, global_tree, init_leaves); - CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); - - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); 
- - // Then trim the global tree by `trim_leaves` - MDEBUG("Trimming " << trim_leaves << " leaves from tree"); + // Trim the global tree by `trim_leaves` + LOG_PRINT_L1("Trimming " << trim_leaves << " leaves from tree"); CHECK_AND_ASSERT_MES(init_leaves > trim_leaves, false, "trimming too many leaves"); const std::size_t new_num_leaves = init_leaves - trim_leaves; @@ -1149,7 +1137,7 @@ static bool trim_tree_in_memory(const std::size_t init_leaves, global_tree.log_tree(); - res = global_tree.audit_tree(); + bool res = global_tree.audit_tree(); CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); MDEBUG("Successfully trimmed " << trim_leaves << " leaves in memory"); @@ -1229,6 +1217,8 @@ TEST(curve_trees, grow_tree) for (const std::size_t init_leaves : N_LEAVES) { + // TODO: init tree once, then extend a copy of that tree + for (const std::size_t ext_leaves : N_LEAVES) { // Only test 3rd layer once because it's a huge test @@ -1281,25 +1271,36 @@ TEST(curve_trees, trim_tree) NEED_2_LAYERS, NEED_2_LAYERS+1, + NEED_3_LAYERS-1, NEED_3_LAYERS, + NEED_3_LAYERS+1, }; for (const std::size_t init_leaves : N_LEAVES) { + if (init_leaves == 1) + continue; + + CurveTreesGlobalTree global_tree(curve_trees); + + // Initialize global tree with `init_leaves` + LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); + ASSERT_TRUE(grow_tree(curve_trees, global_tree, init_leaves)); + MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); + for (const std::size_t trim_leaves : N_LEAVES) { - // Can't trim more leaves than exist in tree - if (trim_leaves > init_leaves) - continue; - - // Only test 3rd layer once because it's a huge test - if (init_leaves == NEED_3_LAYERS && trim_leaves > 1) + // Can't trim more leaves than exist in tree, and tree must always have at least 1 leaf in it + if (trim_leaves >= init_leaves) continue; - ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, curve_trees)); + // Copy the already 
initialized tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, std::move(tree_copy))); } } } +// TODO: write tests with more layers, but smaller widths so the tests run in a reasonable amount of time //---------------------------------------------------------------------------------------------------------------------- // Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children TEST(curve_trees, hash_trim) From 36f1e1965fb492a5752a285d67975376b46388c4 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 28 Jun 2024 11:00:10 -0700 Subject: [PATCH 033/127] Fix grow_tree, restructure it, and clean the approach The initial impl didn't capture the following edge case: - Tree has 3 (or more) layers + 1 leaf layer - Leaf layer last chunk IS full - Layer 0 last chunk is NOT full - Layer 1 last chunk is NOT full - Layer 2 last chunk IS NOT full In this case, when updating layer 1, we need to use layer 0's old last hash to update layer 1's old last hash. Same for Layer 2. The solution is to use logic that checks the *prev* layer when updating a layer to determine if the old last hash from the prev layer is needed. This commit restructures the grow_tree impl to account for this and simplifies the approach as follows: 1. Read the tree to get num leaf tuples + last hashes in each layer 2. Get the tree extension using the above values + new leaf tuples 2a. Prior to updating the leaf layer, call the function get_update_leaf_layer_metadata. This function uses existing totals in the leaf layer, the new total of leaf tuples, and tree params to calculate how the layer after the leaf layer should be updated. 2b. For each subsequent layer, call the function get_update_layer_metadata. This function uses the existing totals in the *prev* layer, the new total of children in the *prev* layer, and tree params to calculate how the layer should be updated. 3. 
Grow the tree using the tree extension. This approach isolates update logic and actual hashing into neat structured functions, rather than mix the two. This makes the code easier to follow without needing to keep so much in your head at one time. --- src/blockchain_db/lmdb/db_lmdb.cpp | 140 ++----- src/blockchain_db/lmdb/db_lmdb.h | 8 +- src/fcmp/curve_trees.cpp | 580 ++++++++++++++++++++--------- src/fcmp/curve_trees.h | 77 ++-- tests/unit_tests/curve_trees.cpp | 238 +++++------- tests/unit_tests/curve_trees.h | 9 +- 6 files changed, 581 insertions(+), 471 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 4434781afe2..ff611076b28 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1312,11 +1312,14 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree CURSOR(leaves) - // Read every layer's last chunk data - const auto last_chunks = this->get_tree_last_chunks(curve_trees); + // Get the number of leaf tuples that exist in the tree + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); - // Using the last chunk data and new leaves, get a struct we can use to extend the tree - const auto tree_extension = curve_trees.get_tree_extension(last_chunks, new_leaves); + // Read every layer's last hashes + const auto last_hashes = this->get_tree_last_hashes(); + + // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree + const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, new_leaves); // Insert the leaves // TODO: grow_leaves @@ -1354,15 +1357,10 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree throw0(DB_ERROR(("Growing odd c2 layer, expected even layer idx for c1: " + std::to_string(layer_idx)).c_str())); - const auto *c2_last_chunk_ptr = (c2_idx >= last_chunks.c2_last_chunks.size()) - ? 
nullptr - : &last_chunks.c2_last_chunks[c2_idx]; - this->grow_layer(curve_trees.m_c2, c2_extensions, c2_idx, - layer_idx, - c2_last_chunk_ptr); + layer_idx); ++c2_idx; } @@ -1372,15 +1370,10 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree throw0(DB_ERROR(("Growing even c1 layer, expected odd layer idx for c2: " + std::to_string(layer_idx)).c_str())); - const auto *c1_last_chunk_ptr = (c1_idx >= last_chunks.c1_last_chunks.size()) - ? nullptr - : &last_chunks.c1_last_chunks[c1_idx]; - this->grow_layer(curve_trees.m_c1, c1_extensions, c1_idx, - layer_idx, - c1_last_chunk_ptr); + layer_idx); ++c1_idx; } @@ -1393,8 +1386,7 @@ template void BlockchainLMDB::grow_layer(const C &curve, const std::vector> &layer_extensions, const std::size_t ext_idx, - const std::size_t layer_idx, - const fcmp::curve_trees::LastChunkData *last_chunk_ptr) + const std::size_t layer_idx) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1407,12 +1399,11 @@ void BlockchainLMDB::grow_layer(const C &curve, CHECK_AND_ASSERT_THROW_MES(!ext.hashes.empty(), "empty layer extension"); - // TODO: make sure last_chunk_ptr->next_start_child_chunk_idx lines up + // TODO: make sure ext.start_idx lines up with the end of the layer MDB_val_copy k(layer_idx); - const bool update_last_parent = last_chunk_ptr != nullptr && last_chunk_ptr->update_last_parent; - if (update_last_parent) + if (ext.update_existing_last_hash) { // We updated the last hash, so update it layer_val lv; @@ -1421,14 +1412,14 @@ void BlockchainLMDB::grow_layer(const C &curve, MDB_val_set(v, lv); // We expect to overwrite the existing hash - // TODO: make sure the hash already exists + // TODO: make sure the hash already exists and is the existing last hash int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); } // Now add all the new hashes found in the extension - for (std::size_t i = 
update_last_parent ? 1 : 0; i < ext.hashes.size(); ++i) + for (std::size_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i) { layer_val lv; lv.child_chunk_idx = i + ext.start_idx; @@ -1444,62 +1435,46 @@ void BlockchainLMDB::grow_layer(const C &curve, } } -template -static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const bool update_last_parent, - const std::size_t parent_layer_size, - const typename C::Point &last_parent, - const typename C::Scalar &last_child) -{ - if (update_last_parent) - CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer"); - - // If updating last parent, the next start will be the last parent's index, else we start at the tip - const std::size_t next_start_child_chunk_index = update_last_parent - ? (parent_layer_size - 1) - : parent_layer_size; - - return fcmp::curve_trees::LastChunkData{ - .next_start_child_chunk_index = next_start_child_chunk_index, - .last_parent = last_parent, - .update_last_parent = update_last_parent, - .last_child = last_child - }; -} - -fcmp::curve_trees::CurveTreesV1::LastChunks BlockchainLMDB::get_tree_last_chunks( - const fcmp::curve_trees::CurveTreesV1 &curve_trees) const +std::size_t BlockchainLMDB::get_num_leaf_tuples() const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); TXN_PREFIX_RDONLY(); RCURSOR(leaves) - RCURSOR(layers) - fcmp::curve_trees::CurveTreesV1::LastChunks last_chunks; + fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes; + + // Get the number of leaf tuples in the tree + std::uint64_t n_leaf_tuples = 0; - // Get the number of leaves in the tree - std::uint64_t num_leaf_tuples = 0; { MDB_val k, v; int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_LAST); if (result == MDB_NOTFOUND) - num_leaf_tuples = 0; + n_leaf_tuples = 0; else if (result == MDB_SUCCESS) - num_leaf_tuples = (1 + (*(const std::size_t*)k.mv_data)) * fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + n_leaf_tuples = (1 + (*(const 
std::size_t*)k.mv_data)); else throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); } - last_chunks.next_start_leaf_index = num_leaf_tuples; - MDEBUG(num_leaf_tuples << " total leaf tuples in the tree"); + TXN_POSTFIX_RDONLY(); + + return n_leaf_tuples; +} + +fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); - // Now set the last chunk data from each layer - auto &c1_last_chunks_out = last_chunks.c1_last_chunks; - auto &c2_last_chunks_out = last_chunks.c2_last_chunks; + TXN_PREFIX_RDONLY(); + RCURSOR(layers) - // Check if we'll need to update the last parent in each layer - const bool update_last_parent = (num_leaf_tuples % curve_trees.m_leaf_layer_chunk_width) > 0; + fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes; + auto &c1_last_hashes = last_hashes.c1_last_hashes; + auto &c2_last_hashes = last_hashes.c2_last_hashes; // Traverse the tree layer-by-layer starting at the layer closest to leaf layer std::size_t layer_idx = 0; @@ -1522,57 +1497,18 @@ fcmp::curve_trees::CurveTreesV1::LastChunks BlockchainLMDB::get_tree_last_chunks if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get last record in layer: ", result).c_str())); - // First push the last leaf chunk data into c2 chunks - if (layer_idx == 0) - { - const auto *lv = (layer_val *)v.mv_data; - MDEBUG("Selene, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - - auto last_leaf_chunk = get_last_child_layer_chunk( - /*update_last_parent*/ update_last_parent, - /*parent_layer_size */ lv->child_chunk_idx + 1, - /*last_parent */ lv->child_chunk_hash, - // Since the leaf layer is append-only, we'll never need access to the last child - /*last_child */ curve_trees.m_c2.zero_scalar()); - - c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); - - ++layer_idx; - continue; - } - - // Then push last chunk data from subsequent layers, 
alternating c1 -> c2 -> c1 -> ... - // TODO: template below if statement const bool use_c2 = (layer_idx % 2) == 0; if (use_c2) { const auto *lv = (layer_val *)v.mv_data; MDEBUG("Selene, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - - const auto &last_child = curve_trees.m_c1.point_to_cycle_scalar(c1_last_chunks_out.back().last_parent); - - auto last_parent_chunk = get_last_child_layer_chunk( - update_last_parent, - lv->child_chunk_idx + 1, - lv->child_chunk_hash, - last_child); - - c2_last_chunks_out.push_back(std::move(last_parent_chunk)); + c2_last_hashes.emplace_back(std::move(lv->child_chunk_hash)); } else { const auto *lv = (layer_val *)v.mv_data; MDEBUG("Helios, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - - const auto &last_child = curve_trees.m_c2.point_to_cycle_scalar(c2_last_chunks_out.back().last_parent); - - auto last_parent_chunk = get_last_child_layer_chunk( - update_last_parent, - lv->child_chunk_idx + 1, - lv->child_chunk_hash, - last_child); - - c1_last_chunks_out.push_back(std::move(last_parent_chunk)); + c1_last_hashes.emplace_back(std::move(lv->child_chunk_hash)); } ++layer_idx; @@ -1580,7 +1516,7 @@ fcmp::curve_trees::CurveTreesV1::LastChunks BlockchainLMDB::get_tree_last_chunks TXN_POSTFIX_RDONLY(); - return last_chunks; + return last_hashes; } bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index cd43b2ca419..e2dfb60bc41 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -416,11 +416,11 @@ class BlockchainLMDB : public BlockchainDB void grow_layer(const C &curve, const std::vector> &layer_extensions, const std::size_t c_idx, - const std::size_t layer_idx, - const fcmp::curve_trees::LastChunkData *last_chunk_data); + const std::size_t layer_idx); + + std::size_t get_num_leaf_tuples() const; - 
fcmp::curve_trees::CurveTreesV1::LastChunks get_tree_last_chunks( - const fcmp::curve_trees::CurveTreesV1 &curve_trees) const; + fcmp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; template bool audit_layer(const C_CHILD &c_child, diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 859e9dcf803..2102cacf802 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -57,124 +57,109 @@ template Selene::Point get_new_parent(const Selene &curve, const typenam //---------------------------------------------------------------------------------------------------------------------- // Static functions //---------------------------------------------------------------------------------------------------------------------- -// Hash the first chunk of the children now being added to a layer -template -static typename C::Point get_first_parent(const C &curve, - const typename C::Chunk &new_children, - const std::size_t chunk_width, - const LastChunkData *last_chunk_ptr, - const std::size_t offset) -{ - // If no last chunk exists, we can get a new parent - if (last_chunk_ptr == nullptr) - return get_new_parent(curve, new_children); - - typename C::Scalar prior_child_after_offset; - if (last_chunk_ptr->update_last_parent) - { - // If the last parent has an updated child in it, then we need to get the delta to the old child - prior_child_after_offset = last_chunk_ptr->last_child; - } - else if (offset > 0) - { - // If we're not updating the last parent hash and offset is non-zero, then we must be adding new children - // to the existing last chunk. 
New children means no prior child after offset exists, use zero scalar - prior_child_after_offset = curve.zero_scalar(); - } - else - { - // If we're not updating the last parent and the last chunk is already full, we can get a new parent - return get_new_parent(curve, new_children); - } - - MDEBUG("Updating existing hash: " << curve.to_string(last_chunk_ptr->last_parent) << " , offset: " << offset - << ", prior_child_after_offset: " << curve.to_string(prior_child_after_offset)); - - return curve.hash_grow( - last_chunk_ptr->last_parent, - offset, - prior_child_after_offset, - new_children - ); -}; -//---------------------------------------------------------------------------------------------------------------------- // After hashing a layer of children points, convert those children x-coordinates into their respective cycle // scalars, and prepare them to be hashed for the next layer template -static std::size_t next_child_scalars_from_children(const C_CHILD &c_child, - const bool updating_root_layer, - const LastChunkData *last_child_chunk_ptr, - const LayerExtension &children, - std::vector &child_scalars_out) +static std::vector next_child_scalars_from_children(const C_CHILD &c_child, + const typename C_CHILD::Point *last_root, + const LayerExtension &children) { - child_scalars_out.clear(); + std::vector child_scalars_out; child_scalars_out.reserve(1 + children.hashes.size()); - std::uint64_t next_child_start_index = children.start_idx; - // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when // hashing the *existing* root layer - if (updating_root_layer) + if (last_root != nullptr) { - CHECK_AND_ASSERT_THROW_MES(last_child_chunk_ptr != nullptr, "last child chunk does not exist at root"); - // If the children don't already include the existing root, then we need to include it to be hashed // - the children would include the existing root already if the existing root was updated in the child // layer (the 
start_idx would be 0) - if (next_child_start_index > 0) + if (children.start_idx > 0) { MDEBUG("Updating root layer and including the existing root in next children"); - child_scalars_out.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent)); - --next_child_start_index; + child_scalars_out.emplace_back(c_child.point_to_cycle_scalar(*last_root)); } } // Convert child points to scalars tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars_out); - return next_child_start_index; + return child_scalars_out; }; //---------------------------------------------------------------------------------------------------------------------- // Hash chunks of a layer of new children, outputting the next layer's parents template -static void hash_layer(const C &curve, - const LastChunkData *last_chunk_ptr, - const std::vector &child_scalars, - const std::size_t child_start_idx, - const std::size_t chunk_width, - LayerExtension &parents_out) +static LayerExtension hash_children_chunks(const C &curve, + const typename C::Scalar *old_last_child, + const typename C::Point *old_last_parent, + const std::size_t start_offset, + const std::size_t next_parent_start_index, + const std::vector &new_child_scalars, + const std::size_t chunk_width) { - parents_out.start_idx = (last_chunk_ptr == nullptr) ? 
0 : last_chunk_ptr->next_start_child_chunk_index; - parents_out.hashes.clear(); + LayerExtension parents_out; + parents_out.start_idx = next_parent_start_index; + parents_out.update_existing_last_hash = old_last_parent != nullptr; + parents_out.hashes.reserve(1 + (new_child_scalars.size() / chunk_width)); - CHECK_AND_ASSERT_THROW_MES(!child_scalars.empty(), "empty child scalars"); - - const std::size_t offset = child_start_idx % chunk_width; + CHECK_AND_ASSERT_THROW_MES(!new_child_scalars.empty(), "empty child scalars"); + CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); // See how many children we need to fill up the existing last chunk - std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset); + std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); + + MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() + << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); + + // Hash the first chunk + // TODO: separate function + { + // Prepare to hash + const auto &existing_hash = old_last_parent != nullptr + ? *old_last_parent + : curve.m_hash_init_point; + + const auto &prior_child_after_offset = old_last_child != nullptr + ? 
*old_last_child + : curve.zero_scalar(); + + const auto chunk_start = new_child_scalars.data(); + const typename C::Chunk chunk{chunk_start, chunk_size}; + + MDEBUG("existing_hash: " << curve.to_string(existing_hash) << " , start_offset: " << start_offset + << " , prior_child_after_offset: " << curve.to_string(prior_child_after_offset)); + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing child " << curve.to_string(chunk_start[i])); + + // Do the hash + auto chunk_hash = curve.hash_grow( + existing_hash, + start_offset, + prior_child_after_offset, + chunk + ); + + MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve.to_string(chunk_hash) + << " , chunk_size: " << chunk_size); - MDEBUG("Starting chunk_size: " << chunk_size << " , num child scalars: " << child_scalars.size() - << " , offset: " << offset); + // We've got our hash + parents_out.hashes.emplace_back(std::move(chunk_hash)); + } // Hash chunks of child scalars to create the parent hashes - std::size_t chunk_start_idx = 0; - while (chunk_start_idx < child_scalars.size()) + std::size_t chunk_start_idx = chunk_size; + while (chunk_start_idx < new_child_scalars.size()) { - const auto chunk_start = child_scalars.data() + chunk_start_idx; + chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); + + const auto chunk_start = new_child_scalars.data() + chunk_start_idx; const typename C::Chunk chunk{chunk_start, chunk_size}; for (std::size_t i = 0; i < chunk_size; ++i) MDEBUG("Hashing child " << curve.to_string(chunk_start[i])); - // Hash the chunk of children - typename C::Point chunk_hash = chunk_start_idx == 0 - ? 
get_first_parent(curve, - chunk, - chunk_width, - last_chunk_ptr, - offset) - : get_new_parent(curve, chunk); + auto chunk_hash = get_new_parent(curve, chunk); MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) << " , chunk_size: " << chunk_size); @@ -185,16 +170,247 @@ static void hash_layer(const C &curve, // Advance to the next chunk chunk_start_idx += chunk_size; - // Prepare for next loop if there should be one - if (chunk_start_idx == child_scalars.size()) - break; - // Fill a complete chunk, or add the remaining new children to the last chunk - CHECK_AND_ASSERT_THROW_MES(chunk_start_idx < child_scalars.size(), "unexpected chunk start idx"); - chunk_size = std::min(chunk_width, child_scalars.size() - chunk_start_idx); + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx <= new_child_scalars.size(), "unexpected chunk start idx"); + } + + return parents_out; +}; +//---------------------------------------------------------------------------------------------------------------------- +static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total_children, + const std::size_t new_total_children, + const std::size_t parent_chunk_width, + const bool last_child_will_change) +{ + // 1. Check pre-conditions on total number of children + // - If there's only 1 old child, it must be the old root, and we must be setting a new parent layer after old root + const bool setting_next_layer_after_old_root = old_total_children == 1; + if (setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES(new_total_children > old_total_children, + "new_total_children must be > old_total_children when setting next layer after old root"); + } + else + { + CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, + "new_total_children must be >= old_total_children"); + } + + // 2. 
Calculate old and new total number of parents using totals for children + // If there's only 1 child, then it must be the old root and thus it would have no old parents + const std::size_t old_total_parents = old_total_children > 1 + ? (1 + ((old_total_children - 1) / parent_chunk_width)) + : 0; + const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + + // 3. Check pre-conditions on total number of parents + CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, + "new_total_parents must be >= old_total_parents"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents < new_total_children, + "new_total_parents must be < new_total_children"); + + if (setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES(old_total_parents == 0, + "old_total_parents expected to be 0 when setting next layer after old root"); + } + + // 4. Set the current offset in the last chunk + // - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're + // changing that last child + std::size_t offset = old_total_parents > 0 + ? (old_total_children % parent_chunk_width) + : 0; + + // 5. Check if the last chunk is full (keep in mind it's also possible it's empty) + const bool last_chunk_is_full = offset == 0; + + // 6. When the last child changes, we'll need to use its old value to update the parent + // - We only care if the child has a parent, otherwise we won't need the child's old value to update the parent + // (since there is no parent to update) + const bool need_old_last_child = old_total_parents > 0 && last_child_will_change; + + // 7. If we're changing the last child, we need to subtract the offset by 1 to account for that child + if (need_old_last_child) + { + CHECK_AND_ASSERT_THROW_MES(old_total_children > 0, "no old children but last child is supposed to change"); + + // If the chunk is full, must subtract the chunk width by 1 + offset = offset == 0 ? 
(parent_chunk_width - 1) : (offset - 1); + } + + // 8. When the last parent changes, we'll need to use its old value to update itself + const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full + && new_total_children > old_total_children; + const bool need_old_last_parent = need_old_last_child || adding_members_to_existing_last_chunk; + + // 9. Set the next parent's start index + std::size_t next_parent_start_index = old_total_parents; + if (need_old_last_parent) + { + // If we're updating the last parent, we need to bring the starting parent index back 1 + CHECK_AND_ASSERT_THROW_MES(old_total_parents > 0, "no old parents but last parent is supposed to change1"); + --next_parent_start_index; + } + + // Done + MDEBUG("parent_chunk_width: " << parent_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , setting_next_layer_after_old_root: " << setting_next_layer_after_old_root + << " , need_old_last_child: " << need_old_last_child + << " , need_old_last_parent: " << need_old_last_parent + << " , start_offset: " << offset + << " , next_parent_start_index: " << next_parent_start_index); + + return UpdateLayerMetadata{ + .parent_chunk_width = parent_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .setting_next_layer_after_old_root = setting_next_layer_after_old_root, + .need_old_last_child = need_old_last_child, + .need_old_last_parent = need_old_last_parent, + .start_offset = offset, + .next_parent_start_index = next_parent_start_index, + }; + +}; +//---------------------------------------------------------------------------------------------------------------------- +static UpdateLayerMetadata 
get_update_leaf_layer_metadata(const std::size_t old_n_leaf_tuples, + const std::size_t new_n_leaf_tuples, + const std::size_t leaf_tuple_size, + const std::size_t leaf_layer_chunk_width) +{ + // TODO: comments + + // The leaf layer can never be the root layer + const bool setting_next_layer_after_old_root = false; + + const std::size_t old_total_children = old_n_leaf_tuples * leaf_tuple_size; + const std::size_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size; + + const std::size_t old_total_parents = old_total_children > 0 + ? (1 + ((old_total_children - 1) / leaf_layer_chunk_width)) + : 0; + const std::size_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width); + + CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, + "new_total_children must be >= old_total_children"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, + "new_total_parents must be >= old_total_parents"); + + // Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf + const bool need_old_last_child = false; + + const std::size_t offset = old_total_children % leaf_layer_chunk_width; + + const bool last_chunk_is_full = offset == 0; + const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full + && new_total_children > old_total_children; + const bool need_old_last_parent = adding_members_to_existing_last_chunk; + + std::size_t next_parent_start_index = old_total_parents; + if (need_old_last_parent) + { + // If we're updating the last parent, we need to bring the starting parent index back 1 + CHECK_AND_ASSERT_THROW_MES(old_total_parents > 0, "no old parents but last parent is supposed to change2"); + --next_parent_start_index; } + + MDEBUG("parent_chunk_width: " << leaf_layer_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << 
old_total_parents + << " , new_total_parents: " << new_total_parents + << " , setting_next_layer_after_old_root: " << setting_next_layer_after_old_root + << " , need_old_last_child: " << need_old_last_child + << " , need_old_last_parent: " << need_old_last_parent + << " , start_offset: " << offset + << " , next_parent_start_index: " << next_parent_start_index); + + return UpdateLayerMetadata{ + .parent_chunk_width = leaf_layer_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .setting_next_layer_after_old_root = setting_next_layer_after_old_root, + .need_old_last_child = need_old_last_child, + .need_old_last_parent = need_old_last_parent, + .start_offset = offset, + .next_parent_start_index = next_parent_start_index, + }; }; //---------------------------------------------------------------------------------------------------------------------- +// Helper function used to get the next layer extension used to grow the next layer in the tree +// - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent +// layer of the leaf layer +template +static LayerExtension get_next_layer_extension(const C_CHILD &c_child, + const C_PARENT &c_parent, + const UpdateLayerMetadata &update_layer_metadata, + const std::vector &child_last_hashes, + const std::vector &parent_last_hashes, + const std::vector> child_layer_extensions, + const std::size_t last_updated_child_idx, + const std::size_t last_updated_parent_idx) +{ + // TODO: comments + const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size()) + ? nullptr + : &child_last_hashes[last_updated_child_idx]; + + const auto *parent_last_hash = (last_updated_parent_idx >= parent_last_hashes.size()) + ? 
nullptr + : &parent_last_hashes[last_updated_parent_idx]; + + // Pre-conditions + CHECK_AND_ASSERT_THROW_MES(last_updated_child_idx < child_layer_extensions.size(), "missing child layer"); + const auto &child_extension = child_layer_extensions[last_updated_child_idx]; + + if (update_layer_metadata.setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES((last_updated_child_idx + 1) == child_last_hashes.size(), + "unexpected last updated child idx"); + CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child when setting layer after old root"); + } + + const auto child_scalars = next_child_scalars_from_children(c_child, + update_layer_metadata.setting_next_layer_after_old_root ? child_last_hash : nullptr, + child_extension); + + if (update_layer_metadata.need_old_last_parent) + CHECK_AND_ASSERT_THROW_MES(parent_last_hash != nullptr, "missing last parent"); + + typename C_PARENT::Scalar last_child_scalar; + if (update_layer_metadata.need_old_last_child) + { + CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child"); + last_child_scalar = c_child.point_to_cycle_scalar(*child_last_hash); + } + + // Do the hashing + LayerExtension layer_extension = hash_children_chunks( + c_parent, + update_layer_metadata.need_old_last_child ? &last_child_scalar : nullptr, + update_layer_metadata.need_old_last_parent ? 
parent_last_hash : nullptr, + update_layer_metadata.start_offset, + update_layer_metadata.next_parent_start_index, + child_scalars, + update_layer_metadata.parent_chunk_width + ); + + CHECK_AND_ASSERT_THROW_MES((layer_extension.start_idx + layer_extension.hashes.size()) == + update_layer_metadata.new_total_parents, + "unexpected num parents extended"); + + return layer_extension; +} +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTrees public member functions //---------------------------------------------------------------------------------------------------------------------- @@ -214,8 +430,69 @@ CurveTrees::LeafTuple CurveTrees::output_to_leaf }; //---------------------------------------------------------------------------------------------------------------------- template +UpdateLayerMetadata CurveTrees::set_next_layer_extension( + const UpdateLayerMetadata &prev_layer_metadata, + const bool parent_is_c1, + const LastHashes &last_hashes, + std::size_t &c1_last_idx_inout, + std::size_t &c2_last_idx_inout, + TreeExtension &tree_extension_inout) const +{ + const auto &c1_last_hashes = last_hashes.c1_last_hashes; + const auto &c2_last_hashes = last_hashes.c2_last_hashes; + + auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions; + + const std::size_t parent_chunk_width = parent_is_c1 ? 
m_c1_width : m_c2_width; + + const auto update_layer_metadata = get_update_layer_metadata( + prev_layer_metadata.old_total_parents, + prev_layer_metadata.new_total_parents, + parent_chunk_width, + prev_layer_metadata.need_old_last_parent + ); + + if (parent_is_c1) + { + auto c1_layer_extension = get_next_layer_extension( + m_c2, + m_c1, + update_layer_metadata, + c2_last_hashes, + c1_last_hashes, + c2_layer_extensions_out, + c2_last_idx_inout, + c1_last_idx_inout + ); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + ++c2_last_idx_inout; + } + else + { + auto c2_layer_extension = get_next_layer_extension( + m_c1, + m_c2, + update_layer_metadata, + c1_last_hashes, + c2_last_hashes, + c1_layer_extensions_out, + c1_last_idx_inout, + c2_last_idx_inout + ); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + ++c1_last_idx_inout; + } + + return update_layer_metadata; +}; +//---------------------------------------------------------------------------------------------------------------------- +template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( - const LastChunks &existing_last_chunks, + const std::size_t old_n_leaf_tuples, + const LastHashes &existing_last_hashes, const std::vector &new_leaf_tuples) const { TreeExtension tree_extension; @@ -223,10 +500,13 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio if (new_leaf_tuples.empty()) return tree_extension; - const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks; - const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks; + auto update_layer_metadata = get_update_leaf_layer_metadata( + old_n_leaf_tuples, + new_leaf_tuples.size(), + LEAF_TUPLE_SIZE, + m_leaf_layer_chunk_width); - tree_extension.leaves.start_idx = existing_last_chunks.next_start_leaf_index; + tree_extension.leaves.start_idx = update_layer_metadata.old_total_children; // Copy the leaves // TODO: don't copy here @@ -240,109 +520,53 @@ typename 
CurveTrees::TreeExtension CurveTrees::get_tree_extensio }); } - auto &c1_layer_extensions_out = tree_extension.c1_layer_extensions; - auto &c2_layer_extensions_out = tree_extension.c2_layer_extensions; - - const std::vector flattened_leaves = this->flatten_leaves(new_leaf_tuples); + if (update_layer_metadata.need_old_last_parent) + CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent"); // Hash the leaf layer - LayerExtension leaf_parents; - hash_layer(m_c2, - c2_last_chunks.empty() ? nullptr : &c2_last_chunks[0], - flattened_leaves, - tree_extension.leaves.start_idx, - m_leaf_layer_chunk_width, - leaf_parents); - - c2_layer_extensions_out.emplace_back(std::move(leaf_parents)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; + auto leaf_parents = hash_children_chunks(m_c2, + nullptr, // We never need the old last child from leaf layer because the leaf layer is always append-only + update_layer_metadata.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr, + update_layer_metadata.start_offset, + update_layer_metadata.next_parent_start_index, + this->flatten_leaves(new_leaf_tuples), + m_leaf_layer_chunk_width + ); - const std::size_t next_root_layer_idx = c1_last_chunks.size() + c2_last_chunks.size(); + CHECK_AND_ASSERT_THROW_MES( + (leaf_parents.start_idx + leaf_parents.hashes.size()) == update_layer_metadata.new_total_parents, + "unexpected num leaf parents extended"); + + tree_extension.c2_layer_extensions.emplace_back(std::move(leaf_parents)); // Alternate between hashing c2 children, c1 children, c2, c1, ... 
bool parent_is_c1 = true; std::size_t c1_last_idx = 0; std::size_t c2_last_idx = 0; - // TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves) - while (true) + while (update_layer_metadata.new_total_parents > 1) { - const std::size_t updating_layer_idx = 1 + c1_last_idx + c2_last_idx; - const std::size_t updating_root_layer = updating_layer_idx == next_root_layer_idx; + MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); - const auto *c1_last_chunk_ptr = (c1_last_idx >= c1_last_chunks.size()) - ? nullptr - : &c1_last_chunks[c1_last_idx]; + const std::size_t new_total_children = update_layer_metadata.new_total_parents; - const auto *c2_last_chunk_ptr = (c2_last_idx >= c2_last_chunks.size()) - ? nullptr - : &c2_last_chunks[c2_last_idx]; + update_layer_metadata = this->set_next_layer_extension( + update_layer_metadata, + parent_is_c1, + existing_last_hashes, + c1_last_idx, + c2_last_idx, + tree_extension + ); - // TODO: templated function - if (parent_is_c1) - { - CHECK_AND_ASSERT_THROW_MES(c2_last_idx < c2_layer_extensions_out.size(), "missing c2 layer"); - - const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx]; - - std::vector c1_child_scalars; - const std::size_t next_child_start_idx = next_child_scalars_from_children(m_c2, - updating_root_layer, - c2_last_chunk_ptr, - c2_child_extension, - c1_child_scalars); - - LayerExtension c1_layer_extension; - hash_layer(m_c1, - c1_last_chunk_ptr, - c1_child_scalars, - next_child_start_idx, - m_c1_width, - c1_layer_extension); - - c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); - - // Check if we just added the root - if (c1_layer_extensions_out.back().hashes.size() == 1 && c1_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c2_last_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(c1_last_idx < c1_layer_extensions_out.size(), "missing c1 layer"); - - const auto &c1_child_extension = 
c1_layer_extensions_out[c1_last_idx]; - - std::vector c2_child_scalars; - const std::size_t next_child_start_idx = next_child_scalars_from_children(m_c1, - updating_root_layer, - c1_last_chunk_ptr, - c1_child_extension, - c2_child_scalars); - - LayerExtension c2_layer_extension; - hash_layer(m_c2, - c2_last_chunk_ptr, - c2_child_scalars, - next_child_start_idx, - m_c2_width, - c2_layer_extension); - - c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); - - // Check if we just added the root - if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0) - return tree_extension; - - ++c1_last_idx; - } + // Sanity check to make sure we're making progress to exit the while loop + CHECK_AND_ASSERT_THROW_MES(update_layer_metadata.new_total_parents < new_total_children, + "expect fewer parents than children in every layer"); parent_is_c1 = !parent_is_c1; } + + return tree_extension; }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 361b9ade231..415d88df7c4 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -51,30 +51,37 @@ template struct LayerExtension final { std::size_t start_idx{0}; + bool update_existing_last_hash; std::vector hashes; }; -// Useful data from the last chunk in a layer -template -struct LastChunkData final +// Useful metadata for updating a layer +struct UpdateLayerMetadata final { - // The next starting index in the layer (referencing the "next" child chunk) - const std::size_t next_start_child_chunk_index; - // The existing hash of the last chunk of child scalars - // - Used to grow the existing last chunk in the layer - // - Only must be set if the existing last chunk isn't full - const typename C::Point last_parent; 
- // Whether or not the existing last parent in the layer needs to be updated - // - True if the last leaf layer chunk is not yet full - // - If true, next_start_child_chunk_index == existing layer size - // - If false, next_start_child_chunk_index == (existing layer size - 1), since updating existing last parent - const bool update_last_parent; - // The last child in the last chunk (and therefore the last child in the child layer) - // - Used to get the delta from the existing last child to the new last child - // - Only needs to be set if update_last_parent is true - // - Since the leaf layer is append-only, the layer above leaf layer does not actually need this value since the - // last leaf will never change (and therefore, we'll never need the delta to a prior leaf) - const typename C::Scalar last_child; + // The max chunk width of children used to hash into a parent + std::size_t parent_chunk_width; + + // Total children refers to the total number of elements in a layer + std::size_t old_total_children; + std::size_t new_total_children; + + // Total parents refers to the total number of hashes of chunks of children + std::size_t old_total_parents; + std::size_t new_total_parents; + + // When updating the tree, we use this boolean to know when we'll need to use the tree's existing old root in order + // to set a new layer after that root + // - We'll need to be sure the old root gets hashed when setting the next layer + bool setting_next_layer_after_old_root; + // When the last child in the child layer changes, we'll need to use its old value to update its parent hash + bool need_old_last_child; + // When the last parent in the layer changes, we'll need to use its old value to update itself + bool need_old_last_parent; + + // The first chunk that needs to be updated's first child's offset within that chunk + std::size_t start_offset; + // The parent's starting index in the layer + std::size_t next_parent_start_index; }; 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- @@ -130,14 +137,13 @@ class CurveTrees std::vector> c2_layer_extensions; }; - // Last chunk data from each layer in the tree + // Last hashes from each layer in the tree // - layers alternate between C1 and C2 - // - c2_last_chunks[0] is first layer after leaves, then c1_last_chunks[0], then c2_last_chunks[1], etc - struct LastChunks final + // - c2_last_hashes[0] refers to the layer after leaves, then c1_last_hashes[0], then c2_last_hashes[1], etc + struct LastHashes final { - std::size_t next_start_leaf_index{0}; - std::vector> c1_last_chunks; - std::vector> c2_last_chunks; + std::vector c1_last_hashes; + std::vector c2_last_hashes; }; //member functions @@ -145,14 +151,27 @@ class CurveTrees // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const; - // Take in the existing last chunks of each layer in the tree, as well as new leaves to add to the tree, - // and return a tree extension struct that can be used to extend a global tree - TreeExtension get_tree_extension(const LastChunks &existing_last_chunks, + // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new + // leaves to add to the tree, and return a tree extension struct that can be used to extend a global tree + TreeExtension get_tree_extension(const std::size_t old_n_leaf_tuples, + const LastHashes &existing_last_hashes, const std::vector &new_leaf_tuples) const; // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] 
std::vector flatten_leaves(const std::vector &leaves) const; +private: + // Helper function used to set the next layer extension used to grow the next layer in the tree + // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent + // layer of the leaf layer + UpdateLayerMetadata set_next_layer_extension( + const UpdateLayerMetadata &prev_layer_metadata, + const bool parent_is_c1, + const LastHashes &last_hashes, + std::size_t &c1_last_idx_inout, + std::size_t &c2_last_idx_inout, + TreeExtension &tree_extension_inout) const; + //public member variables public: // The curve interfaces diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 7ea2fd0c23a..598e6e566fc 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -40,28 +40,6 @@ // CurveTreesGlobalTree helpers //---------------------------------------------------------------------------------------------------------------------- template -static fcmp::curve_trees::LastChunkData get_last_child_layer_chunk(const bool update_last_parent, - const std::size_t parent_layer_size, - const typename C::Point &last_parent, - const typename C::Scalar &last_child) -{ - if (update_last_parent) - CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer"); - - // If updating last parent, the next start will be the last parent's index, else we start at the tip - const std::size_t next_start_child_chunk_index = update_last_parent - ? 
(parent_layer_size - 1) - : parent_layer_size; - - return fcmp::curve_trees::LastChunkData{ - .next_start_child_chunk_index = next_start_child_chunk_index, - .last_parent = last_parent, - .update_last_parent = update_last_parent, - .last_child = last_child - }; -} -//---------------------------------------------------------------------------------------------------------------------- -template static bool validate_layer(const C &curve, const CurveTreesGlobalTree::Layer &parents, const std::vector &child_scalars, @@ -102,9 +80,17 @@ static bool validate_layer(const C &curve, //---------------------------------------------------------------------------------------------------------------------- // CurveTreesGlobalTree implementations //---------------------------------------------------------------------------------------------------------------------- -CurveTreesV1::LastChunks CurveTreesGlobalTree::get_last_chunks() +std::size_t CurveTreesGlobalTree::get_num_leaf_tuples() const { - const auto &leaves = m_tree.leaves; + return m_tree.leaves.size(); +} +//---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes() const +{ + CurveTreesV1::LastHashes last_hashes_out; + auto &c1_last_hashes_out = last_hashes_out.c1_last_hashes; + auto &c2_last_hashes_out = last_hashes_out.c2_last_hashes; + const auto &c1_layers = m_tree.c1_layers; const auto &c2_layers = m_tree.c2_layers; @@ -112,95 +98,37 @@ CurveTreesV1::LastChunks CurveTreesGlobalTree::get_last_chunks() CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), "unexpected number of curve layers"); - CurveTreesV1::LastChunks last_chunks; - - // Since leaf layer is append-only, we know the next start will be right after all existing leaf tuple - const std::size_t num_leaf_tuples = leaves.size() * CurveTreesV1::LEAF_TUPLE_SIZE; - 
last_chunks.next_start_leaf_index = num_leaf_tuples; + c1_last_hashes_out.reserve(c1_layers.size()); + c2_last_hashes_out.reserve(c2_layers.size()); if (c2_layers.empty()) - return last_chunks; + return last_hashes_out; - auto &c1_last_chunks_out = last_chunks.c1_last_chunks; - auto &c2_last_chunks_out = last_chunks.c2_last_chunks; - - c1_last_chunks_out.reserve(c1_layers.size()); - c2_last_chunks_out.reserve(c2_layers.size()); - - // First push the last leaf chunk data into c2 chunks - const bool update_last_parent = (num_leaf_tuples % m_curve_trees.m_leaf_layer_chunk_width) > 0; - auto last_leaf_chunk = get_last_child_layer_chunk( - /*update_last_parent*/ update_last_parent, - /*parent_layer_size */ c2_layers[0].size(), - /*last_parent */ c2_layers[0].back(), - // Since the leaf layer is append-only, we'll never need access to the last child - /*last_child */ m_curve_trees.m_c2.zero_scalar()); - - c2_last_chunks_out.push_back(std::move(last_leaf_chunk)); - - // If there are no c1 layers, we're done - if (c1_layers.empty()) - return last_chunks; - - // Next parents will be c1 - bool parent_is_c1 = true; + // Next parents will be c2 + bool use_c2 = true; // Then get last chunks up until the root std::size_t c1_idx = 0; std::size_t c2_idx = 0; - while (c1_last_chunks_out.size() < c1_layers.size() || c2_last_chunks_out.size() < c2_layers.size()) + while (c1_last_hashes_out.size() < c1_layers.size() || c2_last_hashes_out.size() < c2_layers.size()) { - CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); - - // TODO: template the below if statement into another function - if (parent_is_c1) + if (use_c2) { - const Layer &child_layer = c2_layers[c2_idx]; - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - - const Layer &parent_layer = c1_layers[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); - - const auto 
&last_child = m_curve_trees.m_c2.point_to_cycle_scalar(child_layer.back()); - - auto last_parent_chunk = get_last_child_layer_chunk(update_last_parent, - parent_layer.size(), - parent_layer.back(), - last_child); - - c1_last_chunks_out.push_back(std::move(last_parent_chunk)); - + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); + c2_last_hashes_out.push_back(c2_layers[c2_idx].back()); ++c2_idx; } else { - const Layer &child_layer = c1_layers[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(!child_layer.empty(), "child layer is empty"); - - const Layer &parent_layer = c2_layers[c2_idx]; - CHECK_AND_ASSERT_THROW_MES(!parent_layer.empty(), "parent layer is empty"); - - const auto &last_child = m_curve_trees.m_c1.point_to_cycle_scalar(child_layer.back()); - - auto last_parent_chunk = get_last_child_layer_chunk(update_last_parent, - parent_layer.size(), - parent_layer.back(), - last_child); - - c2_last_chunks_out.push_back(std::move(last_parent_chunk)); - + CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); + c1_last_hashes_out.push_back(c1_layers[c1_idx].back()); ++c1_idx; } - // Alternate curves every iteration - parent_is_c1 = !parent_is_c1; + use_c2 = !use_c2; } - CHECK_AND_ASSERT_THROW_MES(c1_last_chunks_out.size() == c1_layers.size(), "unexpected c1 last chunks"); - CHECK_AND_ASSERT_THROW_MES(c2_last_chunks_out.size() == c2_layers.size(), "unexpected c2 last chunks"); - - return last_chunks; + return last_hashes_out; } //---------------------------------------------------------------------------------------------------------------------- void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_extension) @@ -250,7 +178,14 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e // We updated the last hash if (started_at_tip) + { + CHECK_AND_ASSERT_THROW_MES(c2_ext.update_existing_last_hash, "expect to be updating last hash"); c2_inout.back() = c2_ext.hashes.front(); + } + else 
+ { + CHECK_AND_ASSERT_THROW_MES(!c2_ext.update_existing_last_hash, "unexpected last hash update"); + } for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i) c2_inout.emplace_back(c2_ext.hashes[i]); @@ -276,7 +211,14 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e // We updated the last hash if (started_at_tip) + { + CHECK_AND_ASSERT_THROW_MES(c1_ext.update_existing_last_hash, "expect to be updating last hash"); c1_inout.back() = c1_ext.hashes.front(); + } + else + { + CHECK_AND_ASSERT_THROW_MES(!c1_ext.update_existing_last_hash, "unexpected last hash update"); + } for (std::size_t i = started_at_tip ? 1 : 0; i < c1_ext.hashes.size(); ++i) c1_inout.emplace_back(c1_ext.hashes[i]); @@ -803,6 +745,8 @@ void CurveTreesGlobalTree::trim_tree(const std::size_t new_num_leaves) //---------------------------------------------------------------------------------------------------------------------- bool CurveTreesGlobalTree::audit_tree() { + MDEBUG("Auditing global tree"); + const auto &leaves = m_tree.leaves; const auto &c1_layers = m_tree.c1_layers; const auto &c2_layers = m_tree.c2_layers; @@ -894,42 +838,33 @@ bool CurveTreesGlobalTree::audit_tree() //---------------------------------------------------------------------------------------------------------------------- // Logging helpers //---------------------------------------------------------------------------------------------------------------------- -void CurveTreesGlobalTree::log_last_chunks(const CurveTreesV1::LastChunks &last_chunks) +void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_hashes) { - const auto &c1_last_chunks = last_chunks.c1_last_chunks; - const auto &c2_last_chunks = last_chunks.c2_last_chunks; + const auto &c1_last_hashes = last_hashes.c1_last_hashes; + const auto &c2_last_hashes = last_hashes.c2_last_hashes; - MDEBUG("Total of " << c1_last_chunks.size() << " Helios last chunks and " - << c2_last_chunks.size() 
<< " Selene last chunks"); + MDEBUG("Total of " << c1_last_hashes.size() << " Helios layers and " << c2_last_hashes.size() << " Selene layers"); bool use_c2 = true; std::size_t c1_idx = 0; std::size_t c2_idx = 0; - for (std::size_t i = 0; i < (c1_last_chunks.size() + c2_last_chunks.size()); ++i) + for (std::size_t i = 0; i < (c1_last_hashes.size() + c2_last_hashes.size()); ++i) { if (use_c2) { - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_chunks.size(), "unexpected c2 layer"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_hashes.size(), "unexpected c2 layer"); - const fcmp::curve_trees::LastChunkData &last_chunk = c2_last_chunks[c2_idx]; - - MDEBUG("next_start_child_chunk_index: " << last_chunk.next_start_child_chunk_index - << " , last_parent: " << m_curve_trees.m_c2.to_string(last_chunk.last_parent) - << " , update_last_parent: " << last_chunk.update_last_parent - << " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child)); + const auto &last_hash = c2_last_hashes[c2_idx]; + MDEBUG("c2_idx: " << c2_idx << " , last_hash: " << m_curve_trees.m_c2.to_string(last_hash)); ++c2_idx; } else { - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_chunks.size(), "unexpected c1 layer"); - - const fcmp::curve_trees::LastChunkData &last_chunk = c1_last_chunks[c1_idx]; + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_hashes.size(), "unexpected c1 layer"); - MDEBUG("next_start_child_chunk_index: " << last_chunk.next_start_child_chunk_index - << " , last_parent: " << m_curve_trees.m_c1.to_string(last_chunk.last_parent) - << " , update_last_parent: " << last_chunk.update_last_parent - << " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child)); + const auto &last_hash = c1_last_hashes[c1_idx]; + MDEBUG("c1_idx: " << c1_idx << " , last_hash: " << m_curve_trees.m_c1.to_string(last_hash)); ++c1_idx; } @@ -1074,14 +1009,16 @@ static bool grow_tree(CurveTreesV1 &curve_trees, CurveTreesGlobalTree &global_tree, const std::size_t num_leaves) { - // Get the last 
chunk from each layer in the tree; empty if tree is empty - const auto last_chunks = global_tree.get_last_chunks(); + // Do initial tree reads + const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); + const CurveTreesV1::LastHashes last_hashes = global_tree.get_last_hashes(); - global_tree.log_last_chunks(last_chunks); + global_tree.log_last_hashes(last_hashes); // Get a tree extension object to the existing tree using randomly generated leaves // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves - const auto tree_extension = curve_trees.get_tree_extension(last_chunks, + const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, + last_hashes, generate_random_leaves(curve_trees, num_leaves)); global_tree.log_tree_extension(tree_extension); @@ -1179,54 +1116,45 @@ TEST(curve_trees, grow_tree) Helios helios; Selene selene; - LOG_PRINT_L1("Test grow tree with helios chunk width " << HELIOS_CHUNK_WIDTH - << ", selene chunk width " << SELENE_CHUNK_WIDTH); + // Constant for how deep we want the tree + const std::size_t TEST_N_LAYERS = 4; - auto curve_trees = CurveTreesV1( - helios, - selene, - HELIOS_CHUNK_WIDTH, - SELENE_CHUNK_WIDTH); + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + const std::size_t helios_chunk_width = 3; + const std::size_t selene_chunk_width = 2; - unit_test::BlockchainLMDBTest test_db; + static_assert(helios_chunk_width > 1, "helios width must be > 1"); + static_assert(selene_chunk_width > 1, "selene width must be > 1"); - static_assert(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1"); - static_assert(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1"); + LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width); // Number of leaves for which x number of layers is required - const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH; - 
const std::size_t NEED_2_LAYERS = NEED_1_LAYER * HELIOS_CHUNK_WIDTH; - const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH; - - const std::vector N_LEAVES{ - // Basic tests - 1, - 2, + std::size_t leaves_needed_for_n_layers = selene_chunk_width; + for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) + { + const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; + leaves_needed_for_n_layers *= width; + } - // Test with number of leaves {-1,0,+1} relative to chunk width boundaries - NEED_1_LAYER-1, - NEED_1_LAYER, - NEED_1_LAYER+1, + auto curve_trees = CurveTreesV1( + helios, + selene, + helios_chunk_width, + selene_chunk_width); - NEED_2_LAYERS-1, - NEED_2_LAYERS, - NEED_2_LAYERS+1, + unit_test::BlockchainLMDBTest test_db; - NEED_3_LAYERS, - }; + // Increment to test for off-by-1 + ++leaves_needed_for_n_layers; - for (const std::size_t init_leaves : N_LEAVES) + // First initialize the tree with init_leaves + for (std::size_t init_leaves = 1; init_leaves < leaves_needed_for_n_layers; ++init_leaves) { // TODO: init tree once, then extend a copy of that tree - - for (const std::size_t ext_leaves : N_LEAVES) + // Then extend the tree with ext_leaves + for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) < leaves_needed_for_n_layers; ++ext_leaves) { - // Only test 3rd layer once because it's a huge test - if (init_leaves > 1 && ext_leaves == NEED_3_LAYERS) - continue; - if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS) - continue; - ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees)); ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); } diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 24629b58ad8..845a2d118c9 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -65,8 +65,11 @@ class CurveTreesGlobalTree //public member functions public: - // Read the in-memory tree and get data from last chunks from each layer - 
CurveTreesV1::LastChunks get_last_chunks(); + // Read the in-memory tree and get the number of leaf tuples + std::size_t get_num_leaf_tuples() const; + + // Read the in-memory tree and get the last hashes from each layer in the tree + CurveTreesV1::LastHashes get_last_hashes() const; // Use the tree extension to extend the in-memory tree void extend_tree(const CurveTreesV1::TreeExtension &tree_extension); @@ -78,7 +81,7 @@ class CurveTreesGlobalTree bool audit_tree(); // logging helpers - void log_last_chunks(const CurveTreesV1::LastChunks &last_chunks); + void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes); void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); void log_tree(); From 5ddca0ce111f45fb8bfcbbc21fef07f60161482a Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 8 Jul 2024 20:01:14 -0700 Subject: [PATCH 034/127] Implement and test trim_tree algo in memory --- src/fcmp/curve_trees.cpp | 467 ++++++++++++++++---- src/fcmp/curve_trees.h | 81 +++- src/fcmp/fcmp_rust/Cargo.lock | 61 ++- src/fcmp/fcmp_rust/fcmp++.h | 10 +- src/fcmp/fcmp_rust/src/lib.rs | 12 +- src/fcmp/tower_cycle.cpp | 20 +- src/fcmp/tower_cycle.h | 15 +- tests/unit_tests/curve_trees.cpp | 721 +++++++++++-------------------- tests/unit_tests/curve_trees.h | 27 +- 9 files changed, 794 insertions(+), 620 deletions(-) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 2102cacf802..eaac8328881 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -177,7 +177,7 @@ static LayerExtension hash_children_chunks(const C &curve, return parents_out; }; //---------------------------------------------------------------------------------------------------------------------- -static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total_children, +static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_total_children, const std::size_t new_total_children, const std::size_t parent_chunk_width, 
const bool last_child_will_change) @@ -265,7 +265,7 @@ static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total << " , start_offset: " << offset << " , next_parent_start_index: " << next_parent_start_index); - return UpdateLayerMetadata{ + return GrowLayerInstructions{ .parent_chunk_width = parent_chunk_width, .old_total_children = old_total_children, .new_total_children = new_total_children, @@ -280,7 +280,7 @@ static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total }; //---------------------------------------------------------------------------------------------------------------------- -static UpdateLayerMetadata get_update_leaf_layer_metadata(const std::size_t old_n_leaf_tuples, +static GrowLayerInstructions get_leaf_layer_grow_instructions(const std::size_t old_n_leaf_tuples, const std::size_t new_n_leaf_tuples, const std::size_t leaf_tuple_size, const std::size_t leaf_layer_chunk_width) @@ -332,7 +332,7 @@ static UpdateLayerMetadata get_update_leaf_layer_metadata(const std::size_t old_ << " , start_offset: " << offset << " , next_parent_start_index: " << next_parent_start_index); - return UpdateLayerMetadata{ + return GrowLayerInstructions{ .parent_chunk_width = leaf_layer_chunk_width, .old_total_children = old_total_children, .new_total_children = new_total_children, @@ -352,7 +352,7 @@ static UpdateLayerMetadata get_update_leaf_layer_metadata(const std::size_t old_ template static LayerExtension get_next_layer_extension(const C_CHILD &c_child, const C_PARENT &c_parent, - const UpdateLayerMetadata &update_layer_metadata, + const GrowLayerInstructions &grow_layer_instructions, const std::vector &child_last_hashes, const std::vector &parent_last_hashes, const std::vector> child_layer_extensions, @@ -372,7 +372,7 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, CHECK_AND_ASSERT_THROW_MES(last_updated_child_idx < child_layer_extensions.size(), "missing child layer"); const auto 
&child_extension = child_layer_extensions[last_updated_child_idx]; - if (update_layer_metadata.setting_next_layer_after_old_root) + if (grow_layer_instructions.setting_next_layer_after_old_root) { CHECK_AND_ASSERT_THROW_MES((last_updated_child_idx + 1) == child_last_hashes.size(), "unexpected last updated child idx"); @@ -380,14 +380,14 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, } const auto child_scalars = next_child_scalars_from_children(c_child, - update_layer_metadata.setting_next_layer_after_old_root ? child_last_hash : nullptr, + grow_layer_instructions.setting_next_layer_after_old_root ? child_last_hash : nullptr, child_extension); - if (update_layer_metadata.need_old_last_parent) + if (grow_layer_instructions.need_old_last_parent) CHECK_AND_ASSERT_THROW_MES(parent_last_hash != nullptr, "missing last parent"); typename C_PARENT::Scalar last_child_scalar; - if (update_layer_metadata.need_old_last_child) + if (grow_layer_instructions.need_old_last_child) { CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child"); last_child_scalar = c_child.point_to_cycle_scalar(*child_last_hash); @@ -396,21 +396,208 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, // Do the hashing LayerExtension layer_extension = hash_children_chunks( c_parent, - update_layer_metadata.need_old_last_child ? &last_child_scalar : nullptr, - update_layer_metadata.need_old_last_parent ? parent_last_hash : nullptr, - update_layer_metadata.start_offset, - update_layer_metadata.next_parent_start_index, + grow_layer_instructions.need_old_last_child ? &last_child_scalar : nullptr, + grow_layer_instructions.need_old_last_parent ? 
parent_last_hash : nullptr, + grow_layer_instructions.start_offset, + grow_layer_instructions.next_parent_start_index, child_scalars, - update_layer_metadata.parent_chunk_width + grow_layer_instructions.parent_chunk_width ); CHECK_AND_ASSERT_THROW_MES((layer_extension.start_idx + layer_extension.hashes.size()) == - update_layer_metadata.new_total_parents, + grow_layer_instructions.new_total_parents, "unexpected num parents extended"); return layer_extension; } //---------------------------------------------------------------------------------------------------------------------- +static TrimLayerInstructions get_trim_layer_instructions( + const std::size_t old_total_children, + const std::size_t new_total_children, + const std::size_t parent_chunk_width, + const bool last_child_will_change) +{ + CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0"); + CHECK_AND_ASSERT_THROW_MES(old_total_children >= new_total_children, + "old_total_children must be >= new_total_children"); + + // Calculate old and new total number of parents using totals for children + const std::size_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); + const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + + CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents, + "old_total_parents must be >= new_total_parents"); + CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents, + "new_total_children must be > new_total_parents"); + + const std::size_t old_offset = old_total_children % parent_chunk_width; + std::size_t new_offset = new_total_children % parent_chunk_width; + + // Get the number of existing children in what will become the new last chunk after trimming + const std::size_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) + ? 
parent_chunk_width + : old_offset; + + MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children << ", new_offset: " << new_offset); + + CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children >= new_offset, + "unexpected new_last_chunk_old_num_children"); + + // Get the number of children we'll be trimming from the new last chunk + const std::size_t trim_n_children = new_offset == 0 + ? 0 // The last chunk will remain full when the new_offset == 0 + : new_last_chunk_old_num_children - new_offset; + + // We use hash trim if we're trimming fewer elems in the last chunk than the number of elems remaining + const bool need_last_chunk_children_to_trim = trim_n_children > 0 && trim_n_children <= new_offset; + + // Otherwise we use hash_grow + const bool need_last_chunk_remaining_children = trim_n_children > 0 && trim_n_children > new_offset; + + CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children), + "cannot both need last children to trim and need the remaining children"); + + // TODO: cleaner conditional approach + // TODO: comments + const bool need_last_chunk_parent = !need_last_chunk_remaining_children && + (need_last_chunk_children_to_trim || last_child_will_change); + + const bool update_existing_last_hash = need_last_chunk_remaining_children || need_last_chunk_parent; + + std::size_t hash_offset = new_offset; + if (last_child_will_change) + { + hash_offset = hash_offset == 0 ?
(parent_chunk_width - 1) : (hash_offset - 1); + + if (need_last_chunk_children_to_trim || need_last_chunk_remaining_children) + --new_offset; + } + + if (need_last_chunk_remaining_children) + { + hash_offset = 0; + } + + MDEBUG("parent_chunk_width: " << parent_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim + << " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children + << " , need_last_chunk_parent: " << need_last_chunk_parent + << " , need_new_last_child: " << last_child_will_change + << " , update_existing_last_hash: " << update_existing_last_hash + << " , new_offset: " << new_offset + << " , hash_offset: " << hash_offset); + + return TrimLayerInstructions{ + .parent_chunk_width = parent_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .need_last_chunk_children_to_trim = need_last_chunk_children_to_trim, + .need_last_chunk_remaining_children = need_last_chunk_remaining_children, + .need_last_chunk_parent = need_last_chunk_parent, + .need_new_last_child = last_child_will_change, + .update_existing_last_hash = update_existing_last_hash, + .new_offset = new_offset, + .hash_offset = hash_offset, + }; +} +//---------------------------------------------------------------------------------------------------------------------- +template +static typename fcmp::curve_trees::LayerReduction get_next_layer_reduction( + const C_CHILD &c_child, + const C_PARENT &c_parent, + const TrimLayerInstructions &trim_layer_instructions, + const std::vector &parent_last_hashes, + const std::vector> &children_to_trim, + const std::vector &child_last_hashes, + const std::size_t 
parent_layer_idx, + const std::size_t child_layer_idx, + const std::vector> &child_reductions) +{ + LayerReduction layer_reduction_out; + + layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents; + layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash; + + typename C_PARENT::Point existing_hash = c_parent.m_hash_init_point; + if (trim_layer_instructions.need_last_chunk_parent) + { + CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash"); + existing_hash = parent_last_hashes[parent_layer_idx]; + } + + std::vector child_scalars; + if (trim_layer_instructions.need_last_chunk_children_to_trim + || trim_layer_instructions.need_last_chunk_remaining_children) + { + // TODO: a clean way to do this without copying + CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim"); + child_scalars = children_to_trim[parent_layer_idx]; + } + + typename C_PARENT::Scalar new_last_child_scalar = c_parent.zero_scalar(); + if (trim_layer_instructions.need_new_last_child) + { + CHECK_AND_ASSERT_THROW_MES(child_layer_idx > 0, "child index cannot be 0 here"); + CHECK_AND_ASSERT_THROW_MES(child_reductions.size() == child_layer_idx, "unexpected child layer idx"); + const std::size_t last_child_layer_idx = child_layer_idx - 1; + const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash; + + new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child); + + if (trim_layer_instructions.need_last_chunk_remaining_children) + { + child_scalars.emplace_back(std::move(new_last_child_scalar)); + } + else if (!trim_layer_instructions.need_last_chunk_children_to_trim) + { + // TODO: cleaner conditional for this case + CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); + + const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; + 
auto old_last_child_scalar = c_child.point_to_cycle_scalar(old_last_child); + + child_scalars.emplace_back(std::move(old_last_child_scalar)); + } + } + + for (std::size_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Hashing child " << c_parent.to_string(child_scalars[i])); + + if (trim_layer_instructions.need_last_chunk_remaining_children) + { + MDEBUG("hash_grow: existing_hash: " << c_parent.to_string(existing_hash) + << " , hash_offset: " << trim_layer_instructions.hash_offset); + + layer_reduction_out.new_last_hash = c_parent.hash_grow( + existing_hash, + trim_layer_instructions.hash_offset, + c_parent.zero_scalar(), + typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}); + } + else + { + MDEBUG("hash_trim: existing_hash: " << c_parent.to_string(existing_hash) + << " , hash_offset: " << trim_layer_instructions.hash_offset + << " , new_last_child_scalar: " << c_parent.to_string(new_last_child_scalar)); + + layer_reduction_out.new_last_hash = c_parent.hash_trim( + existing_hash, + trim_layer_instructions.hash_offset, + typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}, + new_last_child_scalar); + } + + MDEBUG("Result hash: " << c_parent.to_string(layer_reduction_out.new_last_hash)); + + return layer_reduction_out; +} +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTrees public member functions //---------------------------------------------------------------------------------------------------------------------- @@ -430,63 +617,20 @@ CurveTrees::LeafTuple CurveTrees::output_to_leaf }; //---------------------------------------------------------------------------------------------------------------------- template -UpdateLayerMetadata CurveTrees::set_next_layer_extension( - const UpdateLayerMetadata &prev_layer_metadata, - const 
bool parent_is_c1, - const LastHashes &last_hashes, - std::size_t &c1_last_idx_inout, - std::size_t &c2_last_idx_inout, - TreeExtension &tree_extension_inout) const +std::vector CurveTrees::flatten_leaves(const std::vector &leaves) const { - const auto &c1_last_hashes = last_hashes.c1_last_hashes; - const auto &c2_last_hashes = last_hashes.c2_last_hashes; - - auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions; - auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions; - - const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width; - - const auto update_layer_metadata = get_update_layer_metadata( - prev_layer_metadata.old_total_parents, - prev_layer_metadata.new_total_parents, - parent_chunk_width, - prev_layer_metadata.need_old_last_parent - ); - - if (parent_is_c1) - { - auto c1_layer_extension = get_next_layer_extension( - m_c2, - m_c1, - update_layer_metadata, - c2_last_hashes, - c1_last_hashes, - c2_layer_extensions_out, - c2_last_idx_inout, - c1_last_idx_inout - ); + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); - ++c2_last_idx_inout; - } - else + for (const auto &l : leaves) { - auto c2_layer_extension = get_next_layer_extension( - m_c1, - m_c2, - update_layer_metadata, - c1_last_hashes, - c2_last_hashes, - c1_layer_extensions_out, - c1_last_idx_inout, - c2_last_idx_inout - ); - - c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); - ++c1_last_idx_inout; + // TODO: implement without cloning + flattened_leaves.emplace_back(l.O_x); + flattened_leaves.emplace_back(l.I_x); + flattened_leaves.emplace_back(l.C_x); } - return update_layer_metadata; + return flattened_leaves; }; //---------------------------------------------------------------------------------------------------------------------- template @@ -500,13 +644,13 @@ typename CurveTrees::TreeExtension 
CurveTrees::get_tree_extensio if (new_leaf_tuples.empty()) return tree_extension; - auto update_layer_metadata = get_update_leaf_layer_metadata( + auto grow_layer_instructions = get_leaf_layer_grow_instructions( old_n_leaf_tuples, new_leaf_tuples.size(), LEAF_TUPLE_SIZE, m_leaf_layer_chunk_width); - tree_extension.leaves.start_idx = update_layer_metadata.old_total_children; + tree_extension.leaves.start_idx = grow_layer_instructions.old_total_children; // Copy the leaves // TODO: don't copy here @@ -520,21 +664,21 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio }); } - if (update_layer_metadata.need_old_last_parent) + if (grow_layer_instructions.need_old_last_parent) CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent"); // Hash the leaf layer auto leaf_parents = hash_children_chunks(m_c2, nullptr, // We never need the old last child from leaf layer because the leaf layer is always append-only - update_layer_metadata.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr, - update_layer_metadata.start_offset, - update_layer_metadata.next_parent_start_index, + grow_layer_instructions.need_old_last_parent ? 
&existing_last_hashes.c2_last_hashes[0] : nullptr, + grow_layer_instructions.start_offset, + grow_layer_instructions.next_parent_start_index, this->flatten_leaves(new_leaf_tuples), m_leaf_layer_chunk_width ); CHECK_AND_ASSERT_THROW_MES( - (leaf_parents.start_idx + leaf_parents.hashes.size()) == update_layer_metadata.new_total_parents, + (leaf_parents.start_idx + leaf_parents.hashes.size()) == grow_layer_instructions.new_total_parents, "unexpected num leaf parents extended"); tree_extension.c2_layer_extensions.emplace_back(std::move(leaf_parents)); @@ -544,14 +688,14 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio std::size_t c1_last_idx = 0; std::size_t c2_last_idx = 0; - while (update_layer_metadata.new_total_parents > 1) + while (grow_layer_instructions.new_total_parents > 1) { MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); - const std::size_t new_total_children = update_layer_metadata.new_total_parents; + const std::size_t new_total_children = grow_layer_instructions.new_total_parents; - update_layer_metadata = this->set_next_layer_extension( - update_layer_metadata, + grow_layer_instructions = this->set_next_layer_extension( + grow_layer_instructions, parent_is_c1, existing_last_hashes, c1_last_idx, @@ -560,7 +704,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio ); // Sanity check to make sure we're making progress to exit the while loop - CHECK_AND_ASSERT_THROW_MES(update_layer_metadata.new_total_parents < new_total_children, + CHECK_AND_ASSERT_THROW_MES(grow_layer_instructions.new_total_parents < new_total_children, "expect fewer parents than children in every layer"); parent_is_c1 = !parent_is_c1; @@ -569,24 +713,171 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio return tree_extension; }; //---------------------------------------------------------------------------------------------------------------------- +template +std::vector CurveTrees::get_trim_instructions( + 
const std::size_t old_n_leaf_tuples, + const std::size_t trim_n_leaf_tuples) +{ + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); + + std::vector trim_instructions; + + // Get trim instructions for the leaf layer + { + const std::size_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; + const std::size_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE; + + const std::size_t parent_chunk_width = m_leaf_layer_chunk_width; + + // Leaf layer's last child never changes since leaf layer is pop-/append-only + const bool last_child_will_change = false; + + auto trim_leaf_layer_instructions = get_trim_layer_instructions( + old_total_leaves, + new_total_leaves, + parent_chunk_width, + last_child_will_change); + + trim_instructions.emplace_back(std::move(trim_leaf_layer_instructions)); + } + + bool use_c2 = false; + while (trim_instructions.back().new_total_parents > 1) + { + auto trim_layer_instructions = get_trim_layer_instructions( + trim_instructions.back().old_total_parents, + trim_instructions.back().new_total_parents, + use_c2 ? 
m_c2_width : m_c1_width, + trim_instructions.back().update_existing_last_hash); + + trim_instructions.emplace_back(std::move(trim_layer_instructions)); + use_c2 = !use_c2; + } + + return trim_instructions; +} +//---------------------------------------------------------------------------------------------------------------------- +template +typename CurveTrees::TreeReduction CurveTrees::get_tree_reduction( + const std::vector &trim_instructions, + const LastChunkChildrenToTrim &children_to_trim, + const LastHashes &last_hashes) const +{ + TreeReduction tree_reduction_out; + tree_reduction_out.new_total_leaves = trim_instructions[0].new_total_children; + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + + for (const auto &trim_layer_instructions : trim_instructions) + { + MDEBUG("Trimming layer " << (c1_idx + c2_idx) << " (c1_idx: " << c1_idx << " , c2_idx: " << c2_idx << ")"); + + if (use_c2) + { + auto c2_layer_reduction_out = get_next_layer_reduction( + m_c1, + m_c2, + trim_layer_instructions, + last_hashes.c2_last_hashes, + children_to_trim.c2_children, + last_hashes.c1_last_hashes, + c2_idx, + c1_idx, + tree_reduction_out.c1_layer_reductions + ); + + tree_reduction_out.c2_layer_reductions.emplace_back(std::move(c2_layer_reduction_out)); + ++c2_idx; + } + else + { + auto c1_layer_reduction_out = get_next_layer_reduction( + m_c2, + m_c1, + trim_layer_instructions, + last_hashes.c1_last_hashes, + children_to_trim.c1_children, + last_hashes.c2_last_hashes, + c1_idx, + c2_idx, + tree_reduction_out.c2_layer_reductions + ); + + tree_reduction_out.c1_layer_reductions.emplace_back(std::move(c1_layer_reduction_out)); + ++c1_idx; + } + + + use_c2 = !use_c2; + } + + return tree_reduction_out; +}; +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTrees 
private member functions //---------------------------------------------------------------------------------------------------------------------- template -std::vector CurveTrees::flatten_leaves(const std::vector &leaves) const +GrowLayerInstructions CurveTrees::set_next_layer_extension( + const GrowLayerInstructions &prev_layer_instructions, + const bool parent_is_c1, + const LastHashes &last_hashes, + std::size_t &c1_last_idx_inout, + std::size_t &c2_last_idx_inout, + TreeExtension &tree_extension_inout) const { - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + const auto &c1_last_hashes = last_hashes.c1_last_hashes; + const auto &c2_last_hashes = last_hashes.c2_last_hashes; - for (const auto &l : leaves) + auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions; + auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions; + + const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width; + + const auto grow_layer_instructions = get_grow_layer_instructions( + prev_layer_instructions.old_total_parents, + prev_layer_instructions.new_total_parents, + parent_chunk_width, + prev_layer_instructions.need_old_last_parent + ); + + if (parent_is_c1) { - // TODO: implement without cloning - flattened_leaves.emplace_back(l.O_x); - flattened_leaves.emplace_back(l.I_x); - flattened_leaves.emplace_back(l.C_x); + auto c1_layer_extension = get_next_layer_extension( + m_c2, + m_c1, + grow_layer_instructions, + c2_last_hashes, + c1_last_hashes, + c2_layer_extensions_out, + c2_last_idx_inout, + c1_last_idx_inout + ); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + ++c2_last_idx_inout; } + else + { + auto c2_layer_extension = get_next_layer_extension( + m_c1, + m_c2, + grow_layer_instructions, + c1_last_hashes, + c2_last_hashes, + c1_layer_extensions_out, + c1_last_idx_inout, + c2_last_idx_inout + ); - return flattened_leaves; + 
c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + ++c1_last_idx_inout; + } + + return grow_layer_instructions; }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 415d88df7c4..b4c3f10d197 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -55,8 +55,17 @@ struct LayerExtension final std::vector hashes; }; -// Useful metadata for updating a layer -struct UpdateLayerMetadata final +// A struct useful to trim a layer and update its last hash if necessary +template +struct LayerReduction final +{ + std::size_t new_total_parents{0}; + bool update_existing_last_hash; + typename C::Point new_last_hash; +}; + +// Useful metadata for growing a layer +struct GrowLayerInstructions final { // The max chunk width of children used to hash into a parent std::size_t parent_chunk_width; @@ -83,6 +92,32 @@ struct UpdateLayerMetadata final // The parent's starting index in the layer std::size_t next_parent_start_index; }; + +// Useful metadata for trimming a layer +struct TrimLayerInstructions final +{ + // The max chunk width of children used to hash into a parent + std::size_t parent_chunk_width; + + // Total children refers to the total number of elements in a layer + std::size_t old_total_children; + std::size_t new_total_children; + + // Total parents refers to the total number of hashes of chunks of children + std::size_t old_total_parents; + std::size_t new_total_parents; + + bool need_last_chunk_children_to_trim; + bool need_last_chunk_remaining_children; + bool need_last_chunk_parent; + bool need_new_last_child; + + bool update_existing_last_hash; + + std::size_t new_offset; + std::size_t hash_offset; +}; + 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // This class is useful help update the curve trees tree without needing to keep the entire tree in memory @@ -137,6 +172,16 @@ class CurveTrees std::vector> c2_layer_extensions; }; + // A struct useful to reduce the number of leaves in an existing tree + // - layers alternate between C1 and C2 + // - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc + struct TreeReduction final + { + std::size_t new_total_leaves; + std::vector> c1_layer_reductions; + std::vector> c2_layer_reductions; + }; + // Last hashes from each layer in the tree // - layers alternate between C1 and C2 // - c2_last_hashes[0] refers to the layer after leaves, then c1_last_hashes[0], then c2_last_hashes[1], etc @@ -146,26 +191,48 @@ class CurveTrees std::vector c2_last_hashes; }; + // The children we'll trim from each last chunk in the tree + // - layers alternate between C1 and C2 + // - c2_children[0] refers to the layer after leaves, then c1_children[0], then c2_children[1], etc + struct LastChunkChildrenToTrim final + { + std::vector> c1_children; + std::vector> c2_children; + }; + //member functions public: // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const; + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] 
+ std::vector flatten_leaves(const std::vector &leaves) const; + // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new - // leaves to add to the tree, and return a tree extension struct that can be used to extend a global tree + // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const std::size_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, const std::vector &new_leaf_tuples) const; - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] - std::vector flatten_leaves(const std::vector &leaves) const; + // Get instructions useful for trimming all existing layers in the tree + std::vector get_trim_instructions( + const std::size_t old_n_leaf_tuples, + const std::size_t trim_n_leaf_tuples); + + // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from + // each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return + // a tree reduction struct that can be used to trim a tree + TreeReduction get_tree_reduction( + const std::vector &trim_instructions, + const LastChunkChildrenToTrim &children_to_trim, + const LastHashes &last_hashes) const; private: // Helper function used to set the next layer extension used to grow the next layer in the tree // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent // layer of the leaf layer - UpdateLayerMetadata set_next_layer_extension( - const UpdateLayerMetadata &prev_layer_metadata, + GrowLayerInstructions set_next_layer_extension( + const GrowLayerInstructions &prev_layer_instructions, const bool parent_is_c1, const LastHashes &last_hashes, std::size_t &c1_last_idx_inout, diff --git a/src/fcmp/fcmp_rust/Cargo.lock b/src/fcmp/fcmp_rust/Cargo.lock index 
fc27ca4b875..e9c67b67903 100644 --- a/src/fcmp/fcmp_rust/Cargo.lock +++ b/src/fcmp/fcmp_rust/Cargo.lock @@ -71,7 +71,7 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "ciphersuite" version = "0.4.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "blake2", "dalek-ff-group", @@ -130,9 +130,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", @@ -140,7 +140,6 @@ dependencies = [ "digest", "fiat-crypto", "group", - "platforms", "rand_core", "rustc_version", "subtle", @@ -161,7 +160,7 @@ dependencies = [ [[package]] name = "dalek-ff-group" version = "0.4.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "crypto-bigint", "curve25519-dalek", @@ -198,7 +197,7 @@ dependencies = [ [[package]] name = "ec-divisors" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "dalek-ff-group", "group", @@ -272,7 +271,7 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flexible-transcript" version = "0.3.2" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = 
"git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "blake2", "digest", @@ -285,7 +284,7 @@ dependencies = [ [[package]] name = "full-chain-membership-proofs" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "ciphersuite", "ec-divisors", @@ -305,7 +304,7 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "generalized-bulletproofs" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "ciphersuite", "flexible-transcript", @@ -327,9 +326,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe739944a5406424e080edccb6add95685130b9f160d5407c639c7df0c5836b0" +checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" dependencies = [ "typenum", ] @@ -368,7 +367,7 @@ dependencies = [ [[package]] name = "helioselene" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "crypto-bigint", "dalek-ff-group", @@ -429,11 +428,11 @@ dependencies = [ [[package]] name = "minimal-ed448" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "crypto-bigint", "ff", - "generic-array 1.0.0", + "generic-array 
1.1.0", "group", "rand_core", "rustversion", @@ -444,7 +443,7 @@ dependencies = [ [[package]] name = "multiexp" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "ff", "group", @@ -481,12 +480,6 @@ dependencies = [ "spki", ] -[[package]] -name = "platforms" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" - [[package]] name = "primeorder" version = "0.13.6" @@ -498,9 +491,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -613,7 +606,7 @@ dependencies = [ [[package]] name = "std-shims" version = "0.1.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" dependencies = [ "hashbrown", "spin", @@ -621,15 +614,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.66" +version = "2.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16" dependencies = [ 
"proc-macro2", "quote", @@ -677,18 +670,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", @@ -697,9 +690,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index f12cbb55be7..f3da68a2401 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -113,21 +113,23 @@ SeleneScalar selene_zero_scalar(); CResult hash_grow_helios(HeliosPoint existing_hash, uintptr_t offset, - HeliosScalar first_child_after_offset, + HeliosScalar existing_child_at_offset, HeliosScalarSlice new_children); CResult hash_trim_helios(HeliosPoint existing_hash, uintptr_t offset, - HeliosScalarSlice children); + HeliosScalarSlice children, + HeliosScalar child_to_grow_back); CResult hash_grow_selene(SelenePoint existing_hash, uintptr_t offset, - SeleneScalar first_child_after_offset, + SeleneScalar existing_child_at_offset, SeleneScalarSlice new_children); CResult hash_trim_selene(SelenePoint existing_hash, uintptr_t offset, - SeleneScalarSlice children); + SeleneScalarSlice 
children, + SeleneScalar child_to_grow_back); } // extern "C" diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 3b9610b2889..e6f25625d92 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -152,14 +152,14 @@ impl CResult { pub extern "C" fn hash_grow_helios( existing_hash: HeliosPoint, offset: usize, - first_child_after_offset: HeliosScalar, + existing_child_at_offset: HeliosScalar, new_children: HeliosScalarSlice, ) -> CResult { let hash = hash_grow( helios_generators(), existing_hash, offset, - first_child_after_offset, + existing_child_at_offset, new_children.into(), ); @@ -178,12 +178,14 @@ pub extern "C" fn hash_trim_helios( existing_hash: HeliosPoint, offset: usize, children: HeliosScalarSlice, + child_to_grow_back: HeliosScalar, ) -> CResult { let hash = hash_trim( helios_generators(), existing_hash, offset, children.into(), + child_to_grow_back, ); if let Some(hash) = hash { @@ -200,14 +202,14 @@ pub extern "C" fn hash_trim_helios( pub extern "C" fn hash_grow_selene( existing_hash: SelenePoint, offset: usize, - first_child_after_offset: SeleneScalar, + existing_child_at_offset: SeleneScalar, new_children: SeleneScalarSlice, ) -> CResult { let hash = hash_grow( selene_generators(), existing_hash, offset, - first_child_after_offset, + existing_child_at_offset, new_children.into(), ); @@ -226,12 +228,14 @@ pub extern "C" fn hash_trim_selene( existing_hash: SelenePoint, offset: usize, children: SeleneScalarSlice, + child_to_grow_back: SeleneScalar, ) -> CResult { let hash = hash_trim( selene_generators(), existing_hash, offset, children.into(), + child_to_grow_back, ); if let Some(hash) = hash { diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index cd1ca939b4a..9c9ddd839cc 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -48,13 +48,13 @@ Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) co Helios::Point Helios::hash_grow( const 
Helios::Point &existing_hash, const std::size_t offset, - const Helios::Scalar &first_child_after_offset, + const Helios::Scalar &existing_child_at_offset, const Helios::Chunk &new_children) const { fcmp_rust::CResult res = fcmp_rust::hash_grow_helios( existing_hash, offset, - first_child_after_offset, + existing_child_at_offset, new_children); if (res.err != 0) { throw std::runtime_error("failed to hash grow"); @@ -65,12 +65,14 @@ Helios::Point Helios::hash_grow( Helios::Point Helios::hash_trim( const Helios::Point &existing_hash, const std::size_t offset, - const Helios::Chunk &children) const + const Helios::Chunk &children, + const Helios::Scalar &child_to_grow_back) const { fcmp_rust::CResult res = fcmp_rust::hash_trim_helios( existing_hash, offset, - children); + children, + child_to_grow_back); if (res.err != 0) { throw std::runtime_error("failed to hash trim"); } @@ -80,13 +82,13 @@ Helios::Point Helios::hash_trim( Selene::Point Selene::hash_grow( const Selene::Point &existing_hash, const std::size_t offset, - const Selene::Scalar &first_child_after_offset, + const Selene::Scalar &existing_child_at_offset, const Selene::Chunk &new_children) const { fcmp_rust::CResult res = fcmp_rust::hash_grow_selene( existing_hash, offset, - first_child_after_offset, + existing_child_at_offset, new_children); if (res.err != 0) { throw std::runtime_error("failed to hash grow"); @@ -97,12 +99,14 @@ Selene::Point Selene::hash_grow( Selene::Point Selene::hash_trim( const Selene::Point &existing_hash, const std::size_t offset, - const Selene::Chunk &children) const + const Selene::Chunk &children, + const Selene::Scalar &child_to_grow_back) const { fcmp_rust::CResult res = fcmp_rust::hash_trim_selene( existing_hash, offset, - children); + children, + child_to_grow_back); if (res.err != 0) { throw std::runtime_error("failed to hash trim"); } diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index fde3d92ef3b..6e8cb6f507b 100644 --- a/src/fcmp/tower_cycle.h +++ 
b/src/fcmp/tower_cycle.h @@ -80,13 +80,14 @@ class Curve virtual typename C::Point hash_grow( const typename C::Point &existing_hash, const std::size_t offset, - const typename C::Scalar &first_child_after_offset, + const typename C::Scalar &existing_child_at_offset, const typename C::Chunk &new_children) const = 0; virtual typename C::Point hash_trim( const typename C::Point &existing_hash, const std::size_t offset, - const typename C::Chunk &children) const = 0; + const typename C::Chunk &children, + const typename C::Scalar &child_to_grow_back) const = 0; virtual typename C::Scalar zero_scalar() const = 0; @@ -124,13 +125,14 @@ class Helios final : public Curve Point hash_grow( const Point &existing_hash, const std::size_t offset, - const Scalar &first_child_after_offset, + const Scalar &existing_child_at_offset, const Chunk &new_children) const override; Point hash_trim( const Point &existing_hash, const std::size_t offset, - const Chunk &children) const override; + const Chunk &children, + const Scalar &child_to_grow_back) const override; Scalar zero_scalar() const override; @@ -163,13 +165,14 @@ class Selene final : public Curve Point hash_grow( const Point &existing_hash, const std::size_t offset, - const Scalar &first_child_after_offset, + const Scalar &existing_child_at_offset, const Chunk &new_children) const override; Point hash_trim( const Point &existing_hash, const std::size_t offset, - const Chunk &children) const override; + const Chunk &children, + const Scalar &child_to_grow_back) const override; Scalar zero_scalar() const override; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 598e6e566fc..6bb99e7b870 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -230,520 +230,311 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e } } 
//---------------------------------------------------------------------------------------------------------------------- -// If we reached the new root, then clear all remaining elements in the tree above the root. Otherwise continue -template -static bool handle_root_after_trim(const std::size_t num_parents, - const std::size_t c1_expected_n_layers, - const std::size_t c2_expected_n_layers, - CurveTreesGlobalTree::Layer &parents_inout, - std::vector> &c1_layers_inout, - std::vector> &c2_layers_inout) +void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction) { - // We're at the root if there should only be 1 element in the layer - if (num_parents > 1) - return false; + // Trim the leaves + const std::size_t init_num_leaves = m_tree.leaves.size() * m_curve_trees.LEAF_TUPLE_SIZE; + CHECK_AND_ASSERT_THROW_MES(init_num_leaves > tree_reduction.new_total_leaves, "expected fewer new total leaves"); + CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaves % m_curve_trees.LEAF_TUPLE_SIZE) == 0, + "unexpected new total leaves"); + const std::size_t new_total_leaf_tuples = tree_reduction.new_total_leaves / m_curve_trees.LEAF_TUPLE_SIZE; + while (m_tree.leaves.size() > new_total_leaf_tuples) + m_tree.leaves.pop_back(); + + // Trim the layers + const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; + const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; + CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions"); - MDEBUG("We have encountered the root, clearing remaining elements in the tree"); + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) + { + // TODO: template below if statement + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); + const auto &c2_reduction = c2_layer_reductions[c2_idx]; - // 
Clear all parents after root - while (parents_inout.size() > 1) - parents_inout.pop_back(); + CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "missing c2 layer"); + auto &c2_inout = m_tree.c2_layers[c2_idx]; - // Clear all remaining layers, if any - while (c1_layers_inout.size() > c1_expected_n_layers) - c1_layers_inout.pop_back(); + CHECK_AND_ASSERT_THROW_MES(c2_reduction.new_total_parents <= c2_inout.size(), + "unexpected c2 new total parents"); - while (c2_layers_inout.size() > c2_expected_n_layers) - c2_layers_inout.pop_back(); + c2_inout.resize(c2_reduction.new_total_parents); + c2_inout.shrink_to_fit(); - return true; -} -//---------------------------------------------------------------------------------------------------------------------- -// Trims the child layer and caches values needed to update and trim the child's parent layer -// TODO: work on consolidating this function with the leaf layer logic and simplifying edge case handling -template -static typename C_PARENT::Point trim_children(const C_CHILD &c_child, - const C_PARENT &c_parent, - const std::size_t parent_width, - const CurveTreesGlobalTree::Layer &parents, - const typename C_CHILD::Point &old_last_child_hash, - CurveTreesGlobalTree::Layer &children_inout, - std::size_t &last_parent_idx_inout, - typename C_PARENT::Point &old_last_parent_hash_out) -{ - const std::size_t old_num_children = children_inout.size(); - const std::size_t old_last_parent_idx = (old_num_children - 1) / parent_width; - const std::size_t old_last_offset = old_num_children % parent_width; + // We updated the last hash + if (c2_reduction.update_existing_last_hash) + { + c2_inout.back() = c2_reduction.new_last_hash; + } - const std::size_t new_num_children = last_parent_idx_inout + 1; - const std::size_t new_last_parent_idx = (new_num_children - 1) / parent_width; - const std::size_t new_last_offset = new_num_children % parent_width; + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < 
c1_layer_reductions.size(), "unexpected c1 layer reduction"); + const auto &c1_reduction = c1_layer_reductions[c1_idx]; - CHECK_AND_ASSERT_THROW_MES(old_num_children >= new_num_children, "unexpected new_num_children"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "missing c1 layer"); + auto &c1_inout = m_tree.c1_layers[c1_idx]; - last_parent_idx_inout = new_last_parent_idx; - old_last_parent_hash_out = parents[new_last_parent_idx]; + CHECK_AND_ASSERT_THROW_MES(c1_reduction.new_total_parents <= c1_inout.size(), + "unexpected c1 new total parents"); - MDEBUG("old_num_children: " << old_num_children << - " , old_last_parent_idx: " << old_last_parent_idx << - " , old_last_offset: " << old_last_offset << - " , old_last_parent_hash_out: " << c_parent.to_string(old_last_parent_hash_out) << - " , new_num_children: " << new_num_children << - " , new_last_parent_idx: " << new_last_parent_idx << - " , new_last_offset: " << new_last_offset); + c1_inout.resize(c1_reduction.new_total_parents); + c1_inout.shrink_to_fit(); - // TODO: consolidate logic handling this function with the edge case at the end of this function - if (old_num_children == new_num_children) - { - // No new children means we only updated the last child, so use it to get the new last parent - const auto new_last_child = c_child.point_to_cycle_scalar(children_inout.back()); - std::vector new_child_v{new_last_child}; - const auto &chunk = typename C_PARENT::Chunk{new_child_v.data(), new_child_v.size()}; - - const auto new_last_parent = c_parent.hash_grow( - /*existing_hash*/ old_last_parent_hash_out, - /*offset*/ (new_num_children - 1) % parent_width, - /*first_child_after_offset*/ c_child.point_to_cycle_scalar(old_last_child_hash), - /*children*/ chunk); - - MDEBUG("New last parent using updated last child " << c_parent.to_string(new_last_parent)); - return new_last_parent; - } + // We updated the last hash + if (c1_reduction.update_existing_last_hash) + { + c1_inout.back() = 
c1_reduction.new_last_hash; + } - // Get the number of existing children in what will become the new last chunk after trimming - const std::size_t new_last_chunk_old_num_children = (old_last_parent_idx > new_last_parent_idx - || old_last_offset == 0) - ? parent_width - : old_last_offset; - - CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children > new_last_offset, - "unexpected new_last_chunk_old_num_children"); - - // Get the number of children we'll be trimming from the new last chunk - const std::size_t trim_n_children_from_new_last_chunk = new_last_offset == 0 - ? 0 // it wil remain full - : new_last_chunk_old_num_children - new_last_offset; - - // We use hash trim if we're removing fewer elems in the last chunk than the number of elems remaining - const bool last_chunk_use_hash_trim = trim_n_children_from_new_last_chunk > 0 - && trim_n_children_from_new_last_chunk < new_last_offset; - - MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children << - " , trim_n_children_from_new_last_chunk: " << trim_n_children_from_new_last_chunk << - " , last_chunk_use_hash_trim: " << last_chunk_use_hash_trim); - - // If we're using hash_trim for the last chunk, we'll need to collect the children we're removing - // TODO: use a separate function to handle last_chunk_use_hash_trim case - std::vector new_last_chunk_children_to_trim; - if (last_chunk_use_hash_trim) - new_last_chunk_children_to_trim.reserve(trim_n_children_from_new_last_chunk); - - // Trim the children starting at the back of the child layer - MDEBUG("Trimming " << (old_num_children - new_num_children) << " children"); - while (children_inout.size() > new_num_children) - { - // If we're using hash_trim for the last chunk, collect children from the last chunk - if (last_chunk_use_hash_trim) - { - const std::size_t cur_last_parent_idx = (children_inout.size() - 1) / parent_width; - if (cur_last_parent_idx == new_last_parent_idx) - 
new_last_chunk_children_to_trim.emplace_back(std::move(children_inout.back())); + ++c1_idx; } - children_inout.pop_back(); + use_c2 = !use_c2; } - CHECK_AND_ASSERT_THROW_MES(children_inout.size() == new_num_children, "unexpected new children"); - // We're done trimming the children - - // If we're not using hash_trim for the last chunk, and we will be trimming from the new last chunk, then - // we'll need to collect the new last chunk's remaining children for hash_grow - // TODO: use a separate function to handle last_chunk_remaining_children case - std::vector last_chunk_remaining_children; - if (!last_chunk_use_hash_trim && new_last_offset > 0) - { - last_chunk_remaining_children.reserve(new_last_offset); - const std::size_t start_child_idx = new_last_parent_idx * parent_width; + // Delete remaining layers + m_tree.c1_layers.resize(c1_layer_reductions.size()); + m_tree.c2_layers.resize(c2_layer_reductions.size()); - CHECK_AND_ASSERT_THROW_MES((start_child_idx + new_last_offset) == children_inout.size(), - "unexpected start_child_idx"); + m_tree.c1_layers.shrink_to_fit(); + m_tree.c2_layers.shrink_to_fit(); +} +//---------------------------------------------------------------------------------------------------------------------- +template +static std::vector get_last_chunk_children_to_trim(const C_CHILD &c_child, + const fcmp::curve_trees::TrimLayerInstructions &trim_instructions, + const CurveTreesGlobalTree::Layer &child_layer) +{ + std::vector children_to_trim_out; - for (std::size_t i = start_child_idx; i < children_inout.size(); ++i) - { - CHECK_AND_ASSERT_THROW_MES(i < children_inout.size(), "unexpected child idx"); - last_chunk_remaining_children.push_back(children_inout[i]); - } - } + const std::size_t new_total_children = trim_instructions.new_total_children; + const std::size_t old_total_children = trim_instructions.old_total_children; + const std::size_t new_total_parents = trim_instructions.new_total_parents; + const std::size_t parent_chunk_width = 
trim_instructions.parent_chunk_width; + const std::size_t new_offset = trim_instructions.new_offset; - CHECK_AND_ASSERT_THROW_MES(!parents.empty(), "empty parent layer"); - CHECK_AND_ASSERT_THROW_MES(new_last_parent_idx < parents.size(), "unexpected new_last_parent_idx"); + CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "expected some new children"); + CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents"); - // Set the new last chunk's parent hash - if (last_chunk_use_hash_trim) + if (trim_instructions.need_last_chunk_children_to_trim) { - CHECK_AND_ASSERT_THROW_MES(new_last_chunk_children_to_trim.size() == trim_n_children_from_new_last_chunk, - "unexpected size of last child chunk"); - - // We need to reverse the order in order to match the order the children were initially inserted into the tree - std::reverse(new_last_chunk_children_to_trim.begin(), new_last_chunk_children_to_trim.end()); - - // Check if the last child changed - const auto &old_last_child = old_last_child_hash; - const auto &new_last_child = children_inout.back(); - - if (c_child.to_bytes(old_last_child) == c_child.to_bytes(new_last_child)) + std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset; + MDEBUG("Start trim from idx: " << idx); + do { - // If the last child didn't change, then simply trim the collected children - std::vector child_scalars; - fcmp::tower_cycle::extend_scalars_from_cycle_points(c_child, - new_last_chunk_children_to_trim, - child_scalars); + // TODO: consolidate do while inner logic with below + CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); + const auto &child_point = child_layer[idx]; - for (std::size_t i = 0; i < child_scalars.size(); ++i) - MDEBUG("Trimming child " << c_parent.to_string(child_scalars[i])); + auto child_scalar = c_child.point_to_cycle_scalar(child_point); + 
children_to_trim_out.push_back(std::move(child_scalar)); - const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; - - const auto new_last_parent = c_parent.hash_trim( - old_last_parent_hash_out, - new_last_offset, - chunk); - - MDEBUG("New last parent using simple hash_trim " << c_parent.to_string(new_last_parent)); - return new_last_parent; + ++idx; } - - // The last child changed, so trim the old child, then grow the chunk by 1 with the new child - // TODO: implement prior_child_at_offset in hash_trim - new_last_chunk_children_to_trim.insert(new_last_chunk_children_to_trim.begin(), old_last_child); - - std::vector child_scalars; - fcmp::tower_cycle::extend_scalars_from_cycle_points(c_child, - new_last_chunk_children_to_trim, - child_scalars); - - for (std::size_t i = 0; i < child_scalars.size(); ++i) - MDEBUG("Trimming child " << c_parent.to_string(child_scalars[i])); - - const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; - - CHECK_AND_ASSERT_THROW_MES(new_last_offset > 0, "new_last_offset must be >0"); - auto new_last_parent = c_parent.hash_trim( - old_last_parent_hash_out, - new_last_offset - 1, - chunk); - - std::vector new_last_child_scalar{c_child.point_to_cycle_scalar(new_last_child)}; - const auto &new_last_child_chunk = typename C_PARENT::Chunk{ - new_last_child_scalar.data(), - new_last_child_scalar.size()}; - - MDEBUG("Growing with new child: " << c_parent.to_string(new_last_child_scalar[0])); - - new_last_parent = c_parent.hash_grow( - new_last_parent, - new_last_offset - 1, - c_parent.zero_scalar(), - new_last_child_chunk); - - MDEBUG("New last parent using hash_trim AND updated last child " << c_parent.to_string(new_last_parent)); - return new_last_parent; + while ((idx < old_total_children) && (idx % parent_chunk_width != 0)); } - else if (!last_chunk_remaining_children.empty()) + else if (trim_instructions.need_last_chunk_remaining_children && new_offset > 0) { - // If we 
have reamining children in the new last chunk, and some children were trimmed from the chunk, then - // use hash_grow to calculate the new hash - std::vector child_scalars; - fcmp::tower_cycle::extend_scalars_from_cycle_points(c_child, - last_chunk_remaining_children, - child_scalars); - - const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; - - auto new_last_parent = c_parent.hash_grow( - /*existing_hash*/ c_parent.m_hash_init_point, - /*offset*/ 0, - /*first_child_after_offset*/ c_parent.zero_scalar(), - /*children*/ chunk); - - MDEBUG("New last parent from re-growing last chunk " << c_parent.to_string(new_last_parent)); - return new_last_parent; - } + std::size_t idx = new_total_children - new_offset; + MDEBUG("Start grow remaining from idx: " << idx); + do + { + CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); + const auto &child_point = child_layer[idx]; - // Check if the last child updated - const auto &old_last_child = old_last_child_hash; - const auto &new_last_child = children_inout.back(); - const auto old_last_child_bytes = c_child.to_bytes(old_last_child); - const auto new_last_child_bytes = c_child.to_bytes(new_last_child); + auto child_scalar = c_child.point_to_cycle_scalar(child_point); + children_to_trim_out.push_back(std::move(child_scalar)); - if (old_last_child_bytes == new_last_child_bytes) - { - MDEBUG("The last child didn't update, nothing left to do"); - return old_last_parent_hash_out; + ++idx; + } + while ((idx < new_total_children) && (idx % parent_chunk_width != 0)); } - // TODO: try to consolidate handling this edge case with the case of old_num_children == new_num_children - MDEBUG("The last child changed, updating last chunk parent hash"); + return children_to_trim_out; +} +//---------------------------------------------------------------------------------------------------------------------- +// TODO: template +CurveTreesV1::LastChunkChildrenToTrim 
CurveTreesGlobalTree::get_all_last_chunk_children_to_trim( + const std::vector &trim_instructions) +{ + CurveTreesV1::LastChunkChildrenToTrim all_children_to_trim; - CHECK_AND_ASSERT_THROW_MES(new_last_offset == 0, "unexpected new last offset"); + // Leaf layer + CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); + const auto &trim_leaf_layer_instructions = trim_instructions[0]; - const auto old_last_child_scalar = c_child.point_to_cycle_scalar(old_last_child); - auto new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child); + const std::size_t new_total_children = trim_leaf_layer_instructions.new_total_children; + const std::size_t old_total_children = trim_leaf_layer_instructions.old_total_children; + const std::size_t new_total_parents = trim_leaf_layer_instructions.new_total_parents; + const std::size_t parent_chunk_width = trim_leaf_layer_instructions.parent_chunk_width; + const std::size_t new_offset = trim_leaf_layer_instructions.new_offset; - std::vector child_scalars{std::move(new_last_child_scalar)}; - const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}; + CHECK_AND_ASSERT_THROW_MES(new_total_children >= CurveTreesV1::LEAF_TUPLE_SIZE, "expected some new leaves"); + CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents"); - auto new_last_parent = c_parent.hash_grow( - /*existing_hash*/ old_last_parent_hash_out, - /*offset*/ parent_width - 1, - /*first_child_after_offset*/ old_last_child_scalar, - /*children*/ chunk); + std::vector leaves_to_trim; - MDEBUG("New last parent from updated last child " << c_parent.to_string(new_last_parent)); - return new_last_parent; -} -//---------------------------------------------------------------------------------------------------------------------- -void CurveTreesGlobalTree::trim_tree(const std::size_t new_num_leaves) -{ - 
// TODO: consolidate below logic with trim_children above - CHECK_AND_ASSERT_THROW_MES(new_num_leaves >= CurveTreesV1::LEAF_TUPLE_SIZE, - "tree must have at least 1 leaf tuple in it"); - CHECK_AND_ASSERT_THROW_MES(new_num_leaves % CurveTreesV1::LEAF_TUPLE_SIZE == 0, - "num leaves must be divisible by leaf tuple size"); - - auto &leaves_out = m_tree.leaves; - auto &c1_layers_out = m_tree.c1_layers; - auto &c2_layers_out = m_tree.c2_layers; - - const std::size_t old_num_leaves = leaves_out.size() * CurveTreesV1::LEAF_TUPLE_SIZE; - CHECK_AND_ASSERT_THROW_MES(old_num_leaves > new_num_leaves, "unexpected new num leaves"); - - const std::size_t old_last_leaf_parent_idx = (old_num_leaves - CurveTreesV1::LEAF_TUPLE_SIZE) - / m_curve_trees.m_leaf_layer_chunk_width; - const std::size_t old_last_leaf_offset = old_num_leaves % m_curve_trees.m_leaf_layer_chunk_width; - - const std::size_t new_last_leaf_parent_idx = (new_num_leaves - CurveTreesV1::LEAF_TUPLE_SIZE) - / m_curve_trees.m_leaf_layer_chunk_width; - const std::size_t new_last_leaf_offset = new_num_leaves % m_curve_trees.m_leaf_layer_chunk_width; - - MDEBUG("old_num_leaves: " << old_num_leaves << - ", old_last_leaf_parent_idx: " << old_last_leaf_parent_idx << - ", old_last_leaf_offset: " << old_last_leaf_offset << - ", new_num_leaves: " << new_num_leaves << - ", new_last_leaf_parent_idx: " << new_last_leaf_parent_idx << - ", new_last_leaf_offset: " << new_last_leaf_offset); - - // Get the number of existing leaves in what will become the new last chunk after trimming - const std::size_t new_last_chunk_old_num_leaves = (old_last_leaf_parent_idx > new_last_leaf_parent_idx - || old_last_leaf_offset == 0) - ? 
m_curve_trees.m_leaf_layer_chunk_width - : old_last_leaf_offset; - - CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_leaves > new_last_leaf_offset, - "unexpected last_chunk_old_num_leaves"); - - // Get the number of leaves we'll be trimming from the new last chunk - const std::size_t n_leaves_trim_from_new_last_chunk = new_last_leaf_offset == 0 - ? 0 // the last chunk wil remain full - : new_last_chunk_old_num_leaves - new_last_leaf_offset; - - // We use hash trim if we're removing fewer elems in the last chunk than the number of elems remaining - const bool last_chunk_use_hash_trim = n_leaves_trim_from_new_last_chunk > 0 - && n_leaves_trim_from_new_last_chunk < new_last_leaf_offset; - - MDEBUG("new_last_chunk_old_num_leaves: " << new_last_chunk_old_num_leaves << - ", n_leaves_trim_from_new_last_chunk: " << n_leaves_trim_from_new_last_chunk << - ", last_chunk_use_hash_trim: " << last_chunk_use_hash_trim); - - // If we're using hash_trim for the last chunk, we'll need to collect the leaves we're trimming from that chunk - std::vector new_last_chunk_leaves_to_trim; - if (last_chunk_use_hash_trim) - new_last_chunk_leaves_to_trim.reserve(n_leaves_trim_from_new_last_chunk); - - // Trim the leaves starting at the back of the leaf layer - const std::size_t new_num_leaf_tuples = new_num_leaves / CurveTreesV1::LEAF_TUPLE_SIZE; - while (leaves_out.size() > new_num_leaf_tuples) + // TODO: separate function + // TODO: calculate starting indexes in trim instructions, perhaps calculate end indexes also + if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim) { - // If we're using hash_trim for the last chunk, collect leaves from the last chunk to use later - if (last_chunk_use_hash_trim) + std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset; + MDEBUG("Start trim from idx: " << idx); + do { - // Check if we're now trimming leaves from what will be the new last chunk - const std::size_t num_leaves_remaining = (leaves_out.size() - 1) * 
CurveTreesV1::LEAF_TUPLE_SIZE; - const std::size_t cur_last_leaf_parent_idx = num_leaves_remaining / m_curve_trees.m_leaf_layer_chunk_width; + CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisble by leaf tuple size"); + const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE; - if (cur_last_leaf_parent_idx == new_last_leaf_parent_idx) - { - // Add leaves in reverse order, because we're going to reverse the entire vector later on to get the - // correct trim order - new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().C_x)); - new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().I_x)); - new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().O_x)); - } - } + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high"); + const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; - leaves_out.pop_back(); - } - CHECK_AND_ASSERT_THROW_MES(leaves_out.size() == new_num_leaf_tuples, "unexpected size of new leaves"); - - const std::size_t cur_last_leaf_parent_idx = ((leaves_out.size() - 1) * CurveTreesV1::LEAF_TUPLE_SIZE) - / m_curve_trees.m_leaf_layer_chunk_width; - CHECK_AND_ASSERT_THROW_MES(cur_last_leaf_parent_idx == new_last_leaf_parent_idx, "unexpected last leaf parent idx"); + leaves_to_trim.push_back(leaf_tuple.O_x); + leaves_to_trim.push_back(leaf_tuple.I_x); + leaves_to_trim.push_back(leaf_tuple.C_x); - // If we're not using hash_trim for the last chunk, and the new last chunk is not full already, we'll need to - // collect the existing leaves to get the hash using hash_grow - std::vector last_chunk_remaining_leaves; - if (!last_chunk_use_hash_trim && new_last_leaf_offset > 0) + idx += CurveTreesV1::LEAF_TUPLE_SIZE; + } + while ((idx < old_total_children) && (idx % parent_chunk_width != 0)); + } + else if (trim_leaf_layer_instructions.need_last_chunk_remaining_children && new_offset > 0) { - 
last_chunk_remaining_leaves.reserve(new_last_leaf_offset); + std::size_t idx = new_total_children - new_offset; + do + { + CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisble by leaf tuple size"); + const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE; - const std::size_t start_leaf_idx = new_last_leaf_parent_idx * m_curve_trees.m_leaf_layer_chunk_width; - MDEBUG("start_leaf_idx: " << start_leaf_idx << ", leaves_out.size(): " << leaves_out.size()); + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high"); + const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; - CHECK_AND_ASSERT_THROW_MES((start_leaf_idx + new_last_leaf_offset) == new_num_leaves, - "unexpected start_leaf_idx"); + leaves_to_trim.push_back(leaf_tuple.O_x); + leaves_to_trim.push_back(leaf_tuple.I_x); + leaves_to_trim.push_back(leaf_tuple.C_x); - for (std::size_t i = (start_leaf_idx / CurveTreesV1::LEAF_TUPLE_SIZE); i < leaves_out.size(); ++i) - { - CHECK_AND_ASSERT_THROW_MES(i < leaves_out.size(), "unexpected leaf idx"); - last_chunk_remaining_leaves.push_back(leaves_out[i].O_x); - last_chunk_remaining_leaves.push_back(leaves_out[i].I_x); - last_chunk_remaining_leaves.push_back(leaves_out[i].C_x); + idx += CurveTreesV1::LEAF_TUPLE_SIZE; } + while ((idx < new_total_children) && (idx % parent_chunk_width != 0)); } - CHECK_AND_ASSERT_THROW_MES(!c2_layers_out.empty(), "empty leaf parent layer"); - CHECK_AND_ASSERT_THROW_MES(cur_last_leaf_parent_idx < c2_layers_out[0].size(), - "unexpected cur_last_leaf_parent_idx"); + all_children_to_trim.c2_children.emplace_back(std::move(leaves_to_trim)); - // Set the new last leaf parent - Selene::Point old_last_c2_hash = std::move(c2_layers_out[0][cur_last_leaf_parent_idx]); - if (last_chunk_use_hash_trim) + bool parent_is_c2 = false; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 1; i < trim_instructions.size(); ++i) { - 
CHECK_AND_ASSERT_THROW_MES(new_last_chunk_leaves_to_trim.size() == n_leaves_trim_from_new_last_chunk, - "unexpected size of last leaf chunk"); + const auto &trim_layer_instructions = trim_instructions[i]; - // We need to reverse the order in order to match the order the leaves were initially inserted into the tree - std::reverse(new_last_chunk_leaves_to_trim.begin(), new_last_chunk_leaves_to_trim.end()); - - const Selene::Chunk trim_leaves{new_last_chunk_leaves_to_trim.data(), new_last_chunk_leaves_to_trim.size()}; - - for (std::size_t i = 0; i < new_last_chunk_leaves_to_trim.size(); ++i) - MDEBUG("Trimming leaf " << m_curve_trees.m_c2.to_string(new_last_chunk_leaves_to_trim[i])); - - auto new_last_leaf_parent = m_curve_trees.m_c2.hash_trim( - old_last_c2_hash, - new_last_leaf_offset, - trim_leaves); - - MDEBUG("New hash " << m_curve_trees.m_c2.to_string(new_last_leaf_parent)); + if (parent_is_c2) + { + CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high"); - c2_layers_out[0][cur_last_leaf_parent_idx] = std::move(new_last_leaf_parent); - } - else if (new_last_leaf_offset > 0) - { - for (std::size_t i = 0; i < last_chunk_remaining_leaves.size(); ++i) - MDEBUG("Hashing leaf " << m_curve_trees.m_c2.to_string(last_chunk_remaining_leaves[i])); + auto children_to_trim = get_last_chunk_children_to_trim( + m_curve_trees.m_c1, + trim_layer_instructions, + m_tree.c1_layers[c1_idx]); - const auto &leaves = Selene::Chunk{last_chunk_remaining_leaves.data(), last_chunk_remaining_leaves.size()}; + all_children_to_trim.c2_children.emplace_back(std::move(children_to_trim)); + ++c1_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high"); - auto new_last_leaf_parent = m_curve_trees.m_c2.hash_grow( - /*existing_hash*/ m_curve_trees.m_c2.m_hash_init_point, - /*offset*/ 0, - /*first_child_after_offset*/ m_curve_trees.m_c2.zero_scalar(), - /*children*/ leaves); + auto children_to_trim = 
get_last_chunk_children_to_trim( + m_curve_trees.m_c2, + trim_layer_instructions, + m_tree.c2_layers[c2_idx]); - MDEBUG("Result hash " << m_curve_trees.m_c2.to_string(new_last_leaf_parent)); + all_children_to_trim.c1_children.emplace_back(std::move(children_to_trim)); + ++c2_idx; + } - c2_layers_out[0][cur_last_leaf_parent_idx] = std::move(new_last_leaf_parent); + parent_is_c2 = !parent_is_c2; } - if (handle_root_after_trim( - /*num_parents*/ cur_last_leaf_parent_idx + 1, - /*c1_expected_n_layers*/ 0, - /*c2_expected_n_layers*/ 1, - /*parents_inout*/ c2_layers_out[0], - /*c1_layers_inout*/ c1_layers_out, - /*c2_layers_inout*/ c2_layers_out)) - { - return; - } + return all_children_to_trim; +} +//---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim( + const std::vector &trim_instructions) const +{ + CurveTreesV1::LastHashes last_hashes; + CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); - // Go layer-by-layer starting by trimming the c2 layer we just set, and updating the parent layer hashes - bool trim_c1 = true; + bool parent_is_c2 = true; std::size_t c1_idx = 0; std::size_t c2_idx = 0; - std::size_t last_parent_idx = cur_last_leaf_parent_idx; - Helios::Point old_last_c1_hash; - for (std::size_t i = 0; i < (c1_layers_out.size() + c2_layers_out.size()); ++i) + for (const auto &trim_layer_instructions : trim_instructions) { - MDEBUG("Trimming layer " << i); - - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers_out.size(), "unexpected c1 layer"); - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers_out.size(), "unexpected c2 layer"); - - auto &c1_layer_out = c1_layers_out[c1_idx]; - auto &c2_layer_out = c2_layers_out[c2_idx]; + const std::size_t new_total_parents = trim_layer_instructions.new_total_parents; + CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "no new parents"); - if (trim_c1) + if (parent_is_c2) { - 
// TODO: fewer params - auto new_last_parent = trim_children(m_curve_trees.m_c2, - m_curve_trees.m_c1, - m_curve_trees.m_c1_width, - c1_layer_out, - old_last_c2_hash, - c2_layer_out, - last_parent_idx, - old_last_c1_hash); - - // Update the last parent - c1_layer_out[last_parent_idx] = std::move(new_last_parent); - - if (handle_root_after_trim(last_parent_idx + 1, - c1_idx + 1, - c2_idx + 1, - c1_layer_out, - c1_layers_out, - c2_layers_out)) - { - return; - } + CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high"); + const auto &c2_layer = m_tree.c2_layers[c2_idx]; + CHECK_AND_ASSERT_THROW_MES(c2_layer.size() >= new_total_parents, "not enough c2 parents"); + + last_hashes.c2_last_hashes.push_back(c2_layer[new_total_parents - 1]); ++c2_idx; } else { - // TODO: fewer params - auto new_last_parent = trim_children(m_curve_trees.m_c1, - m_curve_trees.m_c2, - m_curve_trees.m_c2_width, - c2_layer_out, - old_last_c1_hash, - c1_layer_out, - last_parent_idx, - old_last_c2_hash); - - // Update the last parent - c2_layer_out[last_parent_idx] = std::move(new_last_parent); - - if (handle_root_after_trim(last_parent_idx + 1, - c1_idx + 1, - c2_idx + 1, - c2_layer_out, - c1_layers_out, - c2_layers_out)) - { - return; - } + CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high"); + const auto &c1_layer = m_tree.c1_layers[c1_idx]; + CHECK_AND_ASSERT_THROW_MES(c1_layer.size() >= new_total_parents, "not enough c1 parents"); + + last_hashes.c1_last_hashes.push_back(c1_layer[new_total_parents - 1]); ++c1_idx; } - trim_c1 = !trim_c1; + parent_is_c2 = !parent_is_c2; } + + return last_hashes; +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::trim_tree(const std::size_t trim_n_leaf_tuples) +{ + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + MDEBUG(old_n_leaf_tuples << " leaves in the tree, trimming " << 
trim_n_leaf_tuples); + + // Get trim instructions + const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers"); + + // Do initial tree reads + const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions); + const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = m_curve_trees.get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + this->reduce_tree(tree_reduction); + + const std::size_t new_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES((new_n_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected num leaves after trim"); } //---------------------------------------------------------------------------------------------------------------------- -bool CurveTreesGlobalTree::audit_tree() +bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) { MDEBUG("Auditing global tree"); @@ -752,6 +543,8 @@ bool CurveTreesGlobalTree::audit_tree() const auto &c2_layers = m_tree.c2_layers; CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree"); + CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves"); + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), false, "unexpected mismatch of c2 and c1 layers"); @@ -983,7 +776,7 @@ void CurveTreesGlobalTree::log_tree() //---------------------------------------------------------------------------------------------------------------------- // Test helpers 
//---------------------------------------------------------------------------------------------------------------------- -const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t num_leaves) { std::vector tuples; @@ -1005,9 +798,18 @@ const std::vector generate_random_leaves(const CurveTre return tuples; } //---------------------------------------------------------------------------------------------------------------------- +static const Selene::Scalar generate_random_selene_scalar() +{ + crypto::secret_key s; + crypto::public_key S; + + crypto::generate_keys(S, s, s, false); + return fcmp::tower_cycle::ed_25519_point_to_scalar(S); +} +//---------------------------------------------------------------------------------------------------------------------- static bool grow_tree(CurveTreesV1 &curve_trees, CurveTreesGlobalTree &global_tree, - const std::size_t num_leaves) + const std::size_t new_n_leaf_tuples) { // Do initial tree reads const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); @@ -1019,7 +821,7 @@ static bool grow_tree(CurveTreesV1 &curve_trees, // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, - generate_random_leaves(curve_trees, num_leaves)); + generate_random_leaves(curve_trees, new_n_leaf_tuples)); global_tree.log_tree_extension(tree_extension); @@ -1029,7 +831,8 @@ static bool grow_tree(CurveTreesV1 &curve_trees, global_tree.log_tree(); // Validate tree structure and all hashes - return global_tree.audit_tree(); + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples + new_n_leaf_tuples; + return global_tree.audit_tree(expected_n_leaf_tuples); } 
//---------------------------------------------------------------------------------------------------------------------- static bool grow_tree_in_memory(const std::size_t init_leaves, @@ -1059,25 +862,27 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, return true; } //---------------------------------------------------------------------------------------------------------------------- -static bool trim_tree_in_memory(const std::size_t init_leaves, - const std::size_t trim_leaves, +static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples, CurveTreesGlobalTree &&global_tree) { - // Trim the global tree by `trim_leaves` - LOG_PRINT_L1("Trimming " << trim_leaves << " leaves from tree"); + const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); + + // Trim the global tree by `trim_n_leaf_tuples` + LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); - CHECK_AND_ASSERT_MES(init_leaves > trim_leaves, false, "trimming too many leaves"); - const std::size_t new_num_leaves = init_leaves - trim_leaves; - global_tree.trim_tree(new_num_leaves * CurveTreesV1::LEAF_TUPLE_SIZE); + global_tree.trim_tree(trim_n_leaf_tuples); - MDEBUG("Finished trimming " << trim_leaves << " leaves from tree"); + MDEBUG("Finished trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); global_tree.log_tree(); - bool res = global_tree.audit_tree(); + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples - trim_n_leaf_tuples; + bool res = global_tree.audit_tree(expected_n_leaf_tuples); CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); - MDEBUG("Successfully trimmed " << trim_leaves << " leaves in memory"); + MDEBUG("Successfully trimmed " << trim_n_leaf_tuples << " leaves in memory"); return true; } 
//---------------------------------------------------------------------------------------------------------------------- @@ -1116,12 +921,9 @@ TEST(curve_trees, grow_tree) Helios helios; Selene selene; - // Constant for how deep we want the tree - const std::size_t TEST_N_LAYERS = 4; - // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree - const std::size_t helios_chunk_width = 3; - const std::size_t selene_chunk_width = 2; + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 2; static_assert(helios_chunk_width > 1, "helios width must be > 1"); static_assert(selene_chunk_width > 1, "selene width must be > 1"); @@ -1129,6 +931,9 @@ TEST(curve_trees, grow_tree) LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width << ", selene chunk width " << selene_chunk_width); + // Constant for how deep we want the tree + static const std::size_t TEST_N_LAYERS = 4; + // Number of leaves for which x number of layers is required std::size_t leaves_needed_for_n_layers = selene_chunk_width; for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) @@ -1153,7 +958,7 @@ TEST(curve_trees, grow_tree) { // TODO: init tree once, then extend a copy of that tree // Then extend the tree with ext_leaves - for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) < leaves_needed_for_n_layers; ++ext_leaves) + for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= leaves_needed_for_n_layers; ++ext_leaves) { ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees)); ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 845a2d118c9..b0a0f173c0a 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -35,14 +35,6 @@ using Helios = fcmp::curve_trees::Helios; using Selene = fcmp::curve_trees::Selene; using CurveTreesV1 = 
fcmp::curve_trees::CurveTreesV1; -const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, - const std::size_t num_leaves); - -// https://github.com/kayabaNerve/fcmp-plus-plus/blob -// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 -const std::size_t HELIOS_CHUNK_WIDTH = 38; -const std::size_t SELENE_CHUNK_WIDTH = 18; - // Helper class to read/write a global tree in memory. It's only used in testing because normally the tree isn't kept // in memory (it's stored in the db) class CurveTreesGlobalTree @@ -74,17 +66,30 @@ class CurveTreesGlobalTree // Use the tree extension to extend the in-memory tree void extend_tree(const CurveTreesV1::TreeExtension &tree_extension); - // Trim tree to the provided number of leaves - void trim_tree(const std::size_t new_num_leaves); + // Use the tree reduction to reduce the in-memory tree + void reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction); + + // Trim the provided number of leaf tuples from the tree + void trim_tree(const std::size_t trim_n_leaf_tuples); // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer - bool audit_tree(); + bool audit_tree(const std::size_t expected_n_leaf_tuples); // logging helpers void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes); void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); void log_tree(); + // Read the in-memory tree and get data from what will be the last chunks after trimming the tree to the provided + // number of leaves + // - This function is useful to collect all tree data necessary to perform the actual trim operation + // - This function can return elems from each last chunk that will need to be trimmed + CurveTreesV1::LastHashes get_last_hashes_to_trim( + const std::vector &trim_instructions) const; + + CurveTreesV1::LastChunkChildrenToTrim get_all_last_chunk_children_to_trim( + const std::vector &trim_instructions); + private: 
CurveTreesV1 &m_curve_trees; Tree m_tree = Tree{}; From 4be2d7cf9143f1209e49aef564a1585914cd2573 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 8 Jul 2024 20:01:45 -0700 Subject: [PATCH 035/127] whitespace fixes --- tests/unit_tests/curve_trees.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 6bb99e7b870..d1b3e2334b6 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -396,7 +396,7 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE; CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high"); - const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; + const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; leaves_to_trim.push_back(leaf_tuple.O_x); leaves_to_trim.push_back(leaf_tuple.I_x); @@ -415,7 +415,7 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE; CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high"); - const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; + const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; leaves_to_trim.push_back(leaf_tuple.O_x); leaves_to_trim.push_back(leaf_tuple.I_x); @@ -512,7 +512,7 @@ void CurveTreesGlobalTree::trim_tree(const std::size_t trim_n_leaf_tuples) const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); MDEBUG(old_n_leaf_tuples << " leaves in the tree, trimming " << trim_n_leaf_tuples); - // Get trim instructions + // Get trim instructions const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers"); @@ -520,7 +520,7 @@ void CurveTreesGlobalTree::trim_tree(const 
std::size_t trim_n_leaf_tuples) const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions); const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); - // Get the new hashes, wrapped in a simple struct we can use to trim the tree + // Get the new hashes, wrapped in a simple struct we can use to trim the tree const auto tree_reduction = m_curve_trees.get_tree_reduction( trim_instructions, last_chunk_children_to_trim, From 55caee9a100075ab1b5107b22f34960da865084b Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 8 Jul 2024 20:01:59 -0700 Subject: [PATCH 036/127] Better tests for hash_trim --- tests/unit_tests/curve_trees.cpp | 305 +++++++++++++++++++------------ 1 file changed, 190 insertions(+), 115 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index d1b3e2334b6..7dbdf14b826 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -968,150 +968,225 @@ TEST(curve_trees, grow_tree) //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, trim_tree) { + // TODO: consolidate code from grow_tree test Helios helios; Selene selene; - LOG_PRINT_L1("Test trim tree with helios chunk width " << HELIOS_CHUNK_WIDTH - << ", selene chunk width " << SELENE_CHUNK_WIDTH); + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 3; - auto curve_trees = CurveTreesV1( - helios, - selene, - HELIOS_CHUNK_WIDTH, - SELENE_CHUNK_WIDTH); + static_assert(helios_chunk_width > 1, "helios width must be > 1"); + static_assert(selene_chunk_width > 1, "selene width must be > 1"); - unit_test::BlockchainLMDBTest test_db; + LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << 
selene_chunk_width); - static_assert(HELIOS_CHUNK_WIDTH > 1, "helios width must be > 1"); - static_assert(SELENE_CHUNK_WIDTH > 1, "selene width must be > 1"); + // Constant for how deep we want the tree + static const std::size_t TEST_N_LAYERS = 4; // Number of leaves for which x number of layers is required - const std::size_t NEED_1_LAYER = SELENE_CHUNK_WIDTH; - const std::size_t NEED_2_LAYERS = NEED_1_LAYER * HELIOS_CHUNK_WIDTH; - const std::size_t NEED_3_LAYERS = NEED_2_LAYERS * SELENE_CHUNK_WIDTH; - - const std::vector N_LEAVES{ - // Basic tests - 1, - 2, - - // Test with number of leaves {-1,0,+1} relative to chunk width boundaries - NEED_1_LAYER-1, - NEED_1_LAYER, - NEED_1_LAYER+1, - - NEED_2_LAYERS-1, - NEED_2_LAYERS, - NEED_2_LAYERS+1, - - NEED_3_LAYERS-1, - NEED_3_LAYERS, - NEED_3_LAYERS+1, - }; - - for (const std::size_t init_leaves : N_LEAVES) + std::size_t leaves_needed_for_n_layers = selene_chunk_width; + for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) { - if (init_leaves == 1) - continue; + const std::size_t width = i % 2 == 0 ? 
selene_chunk_width : helios_chunk_width; + leaves_needed_for_n_layers *= width; + } - CurveTreesGlobalTree global_tree(curve_trees); + auto curve_trees = CurveTreesV1( + helios, + selene, + helios_chunk_width, + selene_chunk_width); + + // Increment to test for off-by-1 + ++leaves_needed_for_n_layers; - // Initialize global tree with `init_leaves` + // First initialize the tree with init_leaves + for (std::size_t init_leaves = 2; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) +{ LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); + CurveTreesGlobalTree global_tree(curve_trees); + ASSERT_TRUE(grow_tree(curve_trees, global_tree, init_leaves)); - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); - for (const std::size_t trim_leaves : N_LEAVES) + // Then extend the tree with ext_leaves + for (std::size_t trim_leaves = 1; trim_leaves < leaves_needed_for_n_layers; ++trim_leaves) { - // Can't trim more leaves than exist in tree, and tree must always have at least 1 leaf in it if (trim_leaves >= init_leaves) continue; - // Copy the already initialized tree + // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, std::move(tree_copy))); + + ASSERT_TRUE(trim_tree_in_memory(trim_leaves, std::move(tree_copy))); } } } -// TODO: write tests with more layers, but smaller widths so the tests run in a reasonable amount of time //---------------------------------------------------------------------------------------------------------------------- // Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children TEST(curve_trees, hash_trim) { + // https://github.com/kayabaNerve/fcmp-plus-plus/blob + // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 + const std::size_t helios_chunk_width = 38; + const std::size_t selene_chunk_width = 18; + Helios helios; 
Selene selene; auto curve_trees = CurveTreesV1( helios, selene, - HELIOS_CHUNK_WIDTH, - SELENE_CHUNK_WIDTH); - - // Selene - // Generate 3 random leaf tuples - const std::size_t NUM_LEAF_TUPLES = 3; - const std::size_t NUM_LEAVES = NUM_LEAF_TUPLES * CurveTreesV1::LEAF_TUPLE_SIZE; - const auto grow_leaves = generate_random_leaves(curve_trees, NUM_LEAF_TUPLES); - const auto grow_children = curve_trees.flatten_leaves(grow_leaves); - const auto &grow_chunk = Selene::Chunk{grow_children.data(), grow_children.size()}; - - // Hash the leaves - const auto init_grow_result = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, - /*offset*/ 0, - /*first_child_after_offset*/ curve_trees.m_c2.zero_scalar(), - /*children*/ grow_chunk); - - // Trim the initial result - const std::size_t trim_offset = NUM_LEAVES - CurveTreesV1::LEAF_TUPLE_SIZE; - const auto &trimmed_child = Selene::Chunk{grow_children.data() + trim_offset, CurveTreesV1::LEAF_TUPLE_SIZE}; - const auto trim_result = curve_trees.m_c2.hash_trim( - init_grow_result, - trim_offset, - trimmed_child); - const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_result); - - // Now compare to calling hash_grow with the remaining children, excluding the trimmed child - const auto &remaining_children = Selene::Chunk{grow_children.data(), trim_offset}; - const auto remaining_children_hash = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, - /*offset*/ 0, - /*first_child_after_offset*/ curve_trees.m_c2.zero_scalar(), - /*children*/ remaining_children); - const auto grow_res_bytes = curve_trees.m_c2.to_bytes(remaining_children_hash); - - ASSERT_EQ(trim_res_bytes, grow_res_bytes); - - // Helios - // Get 2 helios scalars - std::vector grow_helios_scalars; - fcmp::tower_cycle::extend_scalars_from_cycle_points(curve_trees.m_c2, - {init_grow_result, trim_result}, - grow_helios_scalars); - const auto &grow_helios_chunk = Helios::Chunk{grow_helios_scalars.data(), 
grow_helios_scalars.size()}; - - // Get the initial hash of the 2 helios scalars - const auto helios_grow_result = curve_trees.m_c1.hash_grow( - /*existing_hash*/ curve_trees.m_c1.m_hash_init_point, - /*offset*/ 0, - /*first_child_after_offset*/ curve_trees.m_c1.zero_scalar(), - /*children*/ grow_helios_chunk); - - // Trim the initial result by 1 child - const auto &trimmed_helios_child = Helios::Chunk{grow_helios_scalars.data() + 1, 1}; - const auto trim_helios_result = curve_trees.m_c1.hash_trim( - helios_grow_result, - 1, - trimmed_helios_child); - const auto trim_helios_res_bytes = curve_trees.m_c1.to_bytes(trim_helios_result); - - // Now compare to calling hash_grow with the remaining children, excluding the trimmed child - const auto &remaining_helios_children = Helios::Chunk{grow_helios_scalars.data(), 1}; - const auto remaining_helios_children_hash = curve_trees.m_c1.hash_grow( - /*existing_hash*/ curve_trees.m_c1.m_hash_init_point, - /*offset*/ 0, - /*first_child_after_offset*/ curve_trees.m_c1.zero_scalar(), - /*children*/ remaining_helios_children); - const auto grow_helios_res_bytes = curve_trees.m_c1.to_bytes(remaining_helios_children_hash); - - ASSERT_EQ(trim_helios_res_bytes, grow_helios_res_bytes); + helios_chunk_width, + selene_chunk_width); + + // 1. 
Trim 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 scalars + std::vector init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim selene_scalar_1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees.m_c2.hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees.m_c2.zero_scalar()); + const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 3. 
Trim 2 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; + const auto init_hash = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim the initial result by 2 children + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; + const auto trim_res = curve_trees.m_c2.hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees.m_c2.zero_scalar()); + const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 3. 
Change 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then change to: {selene_scalar_0, selene_scalar_2} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Trim the 2nd child and grow with new child + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees.m_c2.hash_trim( + init_hash, + 1, + trimmed_children, + selene_scalar_2); + const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_2} + std::vector remaining_children{selene_scalar_0, selene_scalar_2}; + const auto grow_res = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 4. 
Trim 2 then grow by 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then trim+grow to: {selene_scalar_0, selene_scalar_3} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; + const auto init_hash = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + const auto selene_scalar_3 = generate_random_selene_scalar(); + + // Trim the initial result by 2 children+grow by 1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; + const auto trim_res = curve_trees.m_c2.hash_trim( + init_hash, + 1, + trimmed_children, + selene_scalar_3); + const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_3} + std::vector remaining_children{selene_scalar_0, selene_scalar_3}; + const auto grow_res = curve_trees.m_c2.hash_grow( + /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } } From 42fd22c4eee91727041d9f9e0587c683fe8babe6 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 8 Jul 2024 22:26:53 -0700 Subject: [PATCH 037/127] Better organization --- src/fcmp/curve_trees.cpp | 2 +- src/fcmp/curve_trees.h | 2 +- tests/unit_tests/curve_trees.cpp | 108 +++++++++++++++---------------- 3 files changed, 56 insertions(+), 56 
deletions(-) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index eaac8328881..52071bd31a8 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -716,7 +716,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio template std::vector CurveTrees::get_trim_instructions( const std::size_t old_n_leaf_tuples, - const std::size_t trim_n_leaf_tuples) + const std::size_t trim_n_leaf_tuples) const { CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index b4c3f10d197..77bd7bd5889 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -217,7 +217,7 @@ class CurveTrees // Get instructions useful for trimming all existing layers in the tree std::vector get_trim_instructions( const std::size_t old_n_leaf_tuples, - const std::size_t trim_n_leaf_tuples); + const std::size_t trim_n_leaf_tuples) const; // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from // each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 7dbdf14b826..d552344d428 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -77,6 +77,60 @@ static bool validate_layer(const C &curve, return true; } //---------------------------------------------------------------------------------------------------------------------- +template +static std::vector get_last_chunk_children_to_trim(const C_CHILD &c_child, + const fcmp::curve_trees::TrimLayerInstructions &trim_instructions, + const CurveTreesGlobalTree::Layer &child_layer) +{ + std::vector children_to_trim_out; + + const std::size_t new_total_children = 
trim_instructions.new_total_children; + const std::size_t old_total_children = trim_instructions.old_total_children; + const std::size_t new_total_parents = trim_instructions.new_total_parents; + const std::size_t parent_chunk_width = trim_instructions.parent_chunk_width; + const std::size_t new_offset = trim_instructions.new_offset; + + CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "expected some new children"); + CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents"); + + if (trim_instructions.need_last_chunk_children_to_trim) + { + std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset; + MDEBUG("Start trim from idx: " << idx); + do + { + // TODO: consolidate do while inner logic with below + CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); + const auto &child_point = child_layer[idx]; + + auto child_scalar = c_child.point_to_cycle_scalar(child_point); + children_to_trim_out.push_back(std::move(child_scalar)); + + ++idx; + } + while ((idx < old_total_children) && (idx % parent_chunk_width != 0)); + } + else if (trim_instructions.need_last_chunk_remaining_children && new_offset > 0) + { + std::size_t idx = new_total_children - new_offset; + MDEBUG("Start grow remaining from idx: " << idx); + do + { + CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); + const auto &child_point = child_layer[idx]; + + auto child_scalar = c_child.point_to_cycle_scalar(child_point); + children_to_trim_out.push_back(std::move(child_scalar)); + + ++idx; + } + while ((idx < new_total_children) && (idx % parent_chunk_width != 0)); + } + + return children_to_trim_out; +} +//---------------------------------------------------------------------------------------------------------------------- 
//---------------------------------------------------------------------------------------------------------------------- // CurveTreesGlobalTree implementations //---------------------------------------------------------------------------------------------------------------------- @@ -308,60 +362,6 @@ void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_r m_tree.c2_layers.shrink_to_fit(); } //---------------------------------------------------------------------------------------------------------------------- -template -static std::vector get_last_chunk_children_to_trim(const C_CHILD &c_child, - const fcmp::curve_trees::TrimLayerInstructions &trim_instructions, - const CurveTreesGlobalTree::Layer &child_layer) -{ - std::vector children_to_trim_out; - - const std::size_t new_total_children = trim_instructions.new_total_children; - const std::size_t old_total_children = trim_instructions.old_total_children; - const std::size_t new_total_parents = trim_instructions.new_total_parents; - const std::size_t parent_chunk_width = trim_instructions.parent_chunk_width; - const std::size_t new_offset = trim_instructions.new_offset; - - CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "expected some new children"); - CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset"); - CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents"); - - if (trim_instructions.need_last_chunk_children_to_trim) - { - std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset; - MDEBUG("Start trim from idx: " << idx); - do - { - // TODO: consolidate do while inner logic with below - CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); - const auto &child_point = child_layer[idx]; - - auto child_scalar = c_child.point_to_cycle_scalar(child_point); - children_to_trim_out.push_back(std::move(child_scalar)); - - ++idx; - } - while ((idx < old_total_children) && (idx % 
parent_chunk_width != 0)); - } - else if (trim_instructions.need_last_chunk_remaining_children && new_offset > 0) - { - std::size_t idx = new_total_children - new_offset; - MDEBUG("Start grow remaining from idx: " << idx); - do - { - CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); - const auto &child_point = child_layer[idx]; - - auto child_scalar = c_child.point_to_cycle_scalar(child_point); - children_to_trim_out.push_back(std::move(child_scalar)); - - ++idx; - } - while ((idx < new_total_children) && (idx % parent_chunk_width != 0)); - } - - return children_to_trim_out; -} -//---------------------------------------------------------------------------------------------------------------------- // TODO: template CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_children_to_trim( const std::vector &trim_instructions) From f50ad5baacbff42f210d2a43ed813e007857ee1f Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 9 Jul 2024 21:02:21 -0700 Subject: [PATCH 038/127] trim_tree db impl + db test + some housekeeping --- src/blockchain_db/blockchain_db.h | 4 +- src/blockchain_db/lmdb/db_lmdb.cpp | 364 ++++++++++++++++++++++++++++- src/blockchain_db/lmdb/db_lmdb.h | 15 +- src/blockchain_db/testdb.h | 3 +- src/fcmp/curve_trees.cpp | 41 +++- src/fcmp/curve_trees.h | 9 +- tests/unit_tests/curve_trees.cpp | 163 +++++++------ 7 files changed, 495 insertions(+), 104 deletions(-) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index ef42ce41fe3..528d7777b66 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1769,8 +1769,10 @@ class BlockchainDB virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves) = 0; + virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) = 0; + // TODO: description - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 
&curve_trees) const = 0; + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t expected_n_leaf_tuples) const = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index ff611076b28..3de6c016ebd 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1326,7 +1326,7 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree const auto &leaves = tree_extension.leaves; for (std::size_t i = 0; i < leaves.tuples.size(); ++i) { - MDB_val_copy k(i + leaves.start_idx); + MDB_val_copy k(i + leaves.start_leaf_tuple_idx); MDB_val_set(v, leaves.tuples[i]); // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. @@ -1435,6 +1435,189 @@ void BlockchainLMDB::grow_layer(const C &curve, } } +void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) +{ + // TODO: block_wtxn_start like pop_block, then call BlockchainDB::trim_tree + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(leaves) + CURSOR(layers) + + CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); + + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); + + const auto trim_instructions = curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + + // Do initial tree reads + const auto last_chunk_children_to_trim = this->get_last_chunk_children_to_trim(curve_trees, trim_instructions); + const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = 
curve_trees.get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected new total leaves"); + + MDEBUG("Trimming " << trim_n_leaf_tuples << " leaf tuples"); + + // Trim the leaves + // TODO: trim_leaves + for (std::size_t i = 0; i < trim_n_leaf_tuples; ++i) + { + std::size_t last_leaf_tuple_idx = (old_n_leaf_tuples - 1 - i); + + MDB_val_copy k(last_leaf_tuple_idx); + MDB_val v; + int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + + result = mdb_cursor_del(m_cur_leaves, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing leaf: ", result).c_str())); + + MDEBUG("Successfully removed leaf at last_leaf_tuple_idx: " << last_leaf_tuple_idx); + } + + // Trim the layers + // TODO: trim_layers + const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; + const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; + CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); + const auto &c2_reduction = c2_layer_reductions[c2_idx]; + trim_layer(c2_reduction, i); + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction"); + const auto &c1_reduction = c1_layer_reductions[c1_idx]; + trim_layer(c1_reduction, i); + ++c1_idx; + } + + 
use_c2 = !use_c2; + } + + // Trim any remaining layers in layers after the root + // TODO: trim_leftovers_after_root + const std::size_t expected_root_idx = c2_layer_reductions.size() + c1_layer_reductions.size() - 1; + while (1) + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + const std::size_t last_layer_idx = *(std::size_t *)k.mv_data; + + if (last_layer_idx > expected_root_idx) + { + // Delete all elements in layers after the root + result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); + } + else if (last_layer_idx < expected_root_idx) + { + throw0(DB_ERROR("Encountered unexpected last elem in tree before the root")); + } + else // last_layer_idx == expected_root_idx + { + // We've trimmed all layers past the root, we're done + break; + } + } +} + +template +void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, + const std::size_t layer_idx) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(layers) + + MDB_val_copy k(layer_idx); + + // Get the number of existing elements in the layer + // TODO: get_num_elems_in_layer + std::size_t old_n_elems_in_layer = 0; + { + // Get the first record in a layer so we can then get the last record + MDB_val v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_SET); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); + + // TODO: why can't I just use MDB_LAST_DUP once and get the last record? 
+ + result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + old_n_elems_in_layer = (1 + lv->child_chunk_idx); + } + + CHECK_AND_ASSERT_THROW_MES(old_n_elems_in_layer >= layer_reduction.new_total_parents, + "unexpected old n elems in layer"); + const std::size_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents; + + // Delete the elements + for (std::size_t i = 0; i < trim_n_elems_in_layer; ++i) + { + std::size_t last_elem_idx = (old_n_elems_in_layer - 1 - i); + MDB_val_set(v, last_elem_idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get elem: ", result).c_str())); + + result = mdb_cursor_del(m_cur_layers, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elem: ", result).c_str())); + + MDEBUG("Successfully removed elem at layer_idx: " << layer_idx << " , last_elem_idx: " << last_elem_idx); + } + + // Update the last element if needed + if (layer_reduction.update_existing_last_hash) + { + layer_val lv; + lv.child_chunk_idx = layer_reduction.new_total_parents - 1; + lv.child_chunk_hash = layer_reduction.new_last_hash; + MDB_val_set(v, lv); + + // We expect to overwrite the existing hash + // TODO: make sure the hash already exists and is the existing last hash + int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); + } +} + std::size_t BlockchainLMDB::get_num_leaf_tuples() const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1443,8 +1626,6 @@ std::size_t BlockchainLMDB::get_num_leaf_tuples() const TXN_PREFIX_RDONLY(); 
RCURSOR(leaves) - fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes; - // Get the number of leaf tuples in the tree std::uint64_t n_leaf_tuples = 0; @@ -1519,7 +1700,171 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes return last_hashes; } -bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const +fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_last_chunk_children_to_trim( + const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::vector &trim_instructions) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim last_chunk_children_to_trim; + auto &c1_last_children_out = last_chunk_children_to_trim.c1_children; + auto &c2_last_children_out = last_chunk_children_to_trim.c2_children; + + // Get the leaves to trim + // TODO: separate function for leaves + { + CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); + const auto &trim_leaf_layer_instructions = trim_instructions[0]; + + std::vector leaves_to_trim; + + if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim || + (trim_leaf_layer_instructions.need_last_chunk_remaining_children && trim_leaf_layer_instructions.new_offset > 0)) + { + std::size_t idx = trim_leaf_layer_instructions.start_trim_idx; + CHECK_AND_ASSERT_THROW_MES(idx % fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, + "expected divisble by leaf tuple size"); + + const std::size_t leaf_tuple_idx = idx / fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + MDB_val_copy k(leaf_tuple_idx); + + MDB_cursor_op leaf_op = MDB_SET; + do + { + MDB_val v; + int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); + leaf_op = MDB_NEXT; + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + 
throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + + const auto leaf = *(fcmp::curve_trees::CurveTreesV1::LeafTuple *)v.mv_data; + + leaves_to_trim.push_back(leaf.O_x); + leaves_to_trim.push_back(leaf.I_x); + leaves_to_trim.push_back(leaf.C_x); + + idx += fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + } + while (idx < trim_leaf_layer_instructions.end_trim_idx); + } + + c2_last_children_out.emplace_back(std::move(leaves_to_trim)); + } + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer, getting children to trim + // TODO: separate function for layers + bool parent_is_c1 = true; + for (std::size_t i = 1; i < trim_instructions.size(); ++i) + { + const auto &trim_layer_instructions = trim_instructions[i]; + + std::vector c1_children; + std::vector c2_children; + + if (trim_layer_instructions.need_last_chunk_children_to_trim || + (trim_layer_instructions.need_last_chunk_remaining_children && trim_layer_instructions.new_offset > 0)) + { + const std::size_t layer_idx = (i - 1); + std::size_t idx = trim_layer_instructions.start_trim_idx; + + MDB_val_set(k, layer_idx); + MDB_val_set(v, idx); + MDB_cursor_op op = MDB_GET_BOTH; + do + { + MDEBUG("Getting child to trim at layer_idx: " << layer_idx << " , idx: " << idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, op); + op = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("layer elem not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + + if (parent_is_c1) + { + const auto *lv = (layer_val *)v.mv_data; + auto child_scalar = curve_trees.m_c2.point_to_cycle_scalar(lv->child_chunk_hash); + c1_children.emplace_back(std::move(child_scalar)); + } + else + { + const auto *lv = (layer_val *)v.mv_data; + auto child_scalar = curve_trees.m_c1.point_to_cycle_scalar(lv->child_chunk_hash); + c2_children.emplace_back(std::move(child_scalar)); + 
} + + ++idx; + } + while (idx < trim_layer_instructions.end_trim_idx); + } + + if (parent_is_c1) + c1_last_children_out.emplace_back(std::move(c1_children)); + else + c2_last_children_out.emplace_back(std::move(c2_children)); + + parent_is_c1 = !parent_is_c1; + } + + TXN_POSTFIX_RDONLY(); + + return last_chunk_children_to_trim; +} + +fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_trim( + const std::vector &trim_instructions) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer + std::size_t layer_idx = 0; + for (const auto &trim_layer_instructions : trim_instructions) + { + const std::size_t new_last_idx = trim_layer_instructions.new_total_parents - 1; + + MDB_val_copy k(layer_idx); + MDB_val_set(v, new_last_idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("layer elem not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + + if ((layer_idx % 2) == 0) + { + const auto *lv = (layer_val *)v.mv_data; + last_hashes_out.c2_last_hashes.push_back(lv->child_chunk_hash); + } + else + { + const auto *lv = (layer_val *)v.mv_data; + last_hashes_out.c1_last_hashes.push_back(lv->child_chunk_hash); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return last_hashes_out; +} + +bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::size_t expected_n_leaf_tuples) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1528,6 +1873,9 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre RCURSOR(leaves) RCURSOR(layers) + const std::size_t actual_n_leaf_tuples = 
this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(actual_n_leaf_tuples == expected_n_leaf_tuples, false, "unexpected num leaf tuples"); + // Check chunks of leaves hash into first layer as expected std::size_t layer_idx = 0; std::size_t child_chunk_idx = 0; @@ -1696,7 +2044,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, // No more children, expect to be done auditing layer and ready to move up a layer if (result != MDB_NOTFOUND) throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx) - + " , child_chunk_idx " + std::to_string(child_chunk_idx), result).c_str())); + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); MDEBUG("Finished auditing layer " << layer_idx); TXN_POSTFIX_RDONLY(); @@ -1704,8 +2052,12 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, } // End condition B: check if finished auditing the tree - if (child_chunk_idx == 0 && child_chunk.size() == 1 && result == MDB_NOTFOUND) + if (child_chunk_idx == 0 && child_chunk.size() == 1) { + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + MDEBUG("Encountered root at layer_idx " << layer_idx); TXN_POSTFIX_RDONLY(); return true; diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index e2dfb60bc41..317d238820a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -367,7 +367,10 @@ class BlockchainLMDB : public BlockchainDB virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves); - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const; + virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples); + + virtual bool 
audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::size_t expected_n_leaf_tuples) const; private: void do_resize(uint64_t size_increase=0); @@ -418,10 +421,20 @@ class BlockchainLMDB : public BlockchainDB const std::size_t c_idx, const std::size_t layer_idx); + template + void trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, const std::size_t layer_idx); + std::size_t get_num_leaf_tuples() const; fcmp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; + fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( + const fcmp::curve_trees::CurveTreesV1 &curve_trees, + const std::vector &trim_instructions) const; + + fcmp::curve_trees::CurveTreesV1::LastHashes get_last_hashes_to_trim( + const std::vector &trim_instructions) const; + template bool audit_layer(const C_CHILD &c_child, const C_PARENT &c_parent, diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 618b102d161..2b3e3e8412b 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -118,7 +118,8 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void remove_spent_key(const crypto::key_image& k_image) override {} virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves) override {}; - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees) const override { return false; }; + virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) override {}; + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t expected_n_leaf_tuples) const override { return false; }; virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp 
index 52071bd31a8..070b23c3183 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -478,6 +478,22 @@ static TrimLayerInstructions get_trim_layer_instructions( hash_offset = 0; } + std::size_t start_trim_idx = 0; + std::size_t end_trim_idx = 0; + if (need_last_chunk_children_to_trim) + { + const std::size_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width; + const std::size_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width; + + start_trim_idx = chunk_boundary_start + new_offset; + end_trim_idx = std::min(chunk_boundary_end, old_total_children); + } + else if (need_last_chunk_remaining_children && new_offset > 0) + { + start_trim_idx = new_total_children - new_offset; + end_trim_idx = new_total_children; + } + MDEBUG("parent_chunk_width: " << parent_chunk_width << " , old_total_children: " << old_total_children << " , new_total_children: " << new_total_children @@ -489,7 +505,9 @@ static TrimLayerInstructions get_trim_layer_instructions( << " , need_new_last_child: " << last_child_will_change << " , update_existing_last_hash: " << update_existing_last_hash << " , new_offset: " << new_offset - << " , hash_offset: " << hash_offset); + << " , hash_offset: " << hash_offset + << " , start_trim_idx: " << start_trim_idx + << " , end_trim_idx: " << end_trim_idx); return TrimLayerInstructions{ .parent_chunk_width = parent_chunk_width, @@ -504,6 +522,8 @@ static TrimLayerInstructions get_trim_layer_instructions( .update_existing_last_hash = update_existing_last_hash, .new_offset = new_offset, .hash_offset = hash_offset, + .start_trim_idx = start_trim_idx, + .end_trim_idx = end_trim_idx, }; } //---------------------------------------------------------------------------------------------------------------------- @@ -545,9 +565,9 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc { CHECK_AND_ASSERT_THROW_MES(child_layer_idx > 0, "child index cannot be 0 here"); 
CHECK_AND_ASSERT_THROW_MES(child_reductions.size() == child_layer_idx, "unexpected child layer idx"); - const std::size_t last_child_layer_idx = child_layer_idx - 1; - const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash; + CHECK_AND_ASSERT_THROW_MES(child_reductions.back().update_existing_last_hash, "expected new last child"); + const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash; new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child); if (trim_layer_instructions.need_last_chunk_remaining_children) @@ -557,6 +577,7 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc else if (!trim_layer_instructions.need_last_chunk_children_to_trim) { // TODO: cleaner conditional for this case + const std::size_t last_child_layer_idx = child_layer_idx - 1; CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; @@ -583,8 +604,8 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc else { MDEBUG("hash_trim: existing_hash: " << c_parent.to_string(existing_hash) - << " , hash_offset: " << trim_layer_instructions.hash_offset - << " , new_last_child_scalar: " << c_parent.to_string(new_last_child_scalar)); + << " , hash_offset: " << trim_layer_instructions.hash_offset + << " , child_to_grow_back: " << c_parent.to_string(new_last_child_scalar)); layer_reduction_out.new_last_hash = c_parent.hash_trim( existing_hash, @@ -650,7 +671,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio LEAF_TUPLE_SIZE, m_leaf_layer_chunk_width); - tree_extension.leaves.start_idx = grow_layer_instructions.old_total_children; + tree_extension.leaves.start_leaf_tuple_idx = grow_layer_instructions.old_total_children / LEAF_TUPLE_SIZE; // Copy the leaves // TODO: don't copy here @@ -764,8 +785,12 @@ typename CurveTrees::TreeReduction 
CurveTrees::get_tree_reductio const LastChunkChildrenToTrim &children_to_trim, const LastHashes &last_hashes) const { - TreeReduction tree_reduction_out; - tree_reduction_out.new_total_leaves = trim_instructions[0].new_total_children; + TreeReduction tree_reduction_out; + + CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0, + "unexpected new total leaves"); + const std::size_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; + tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples; bool use_c2 = true; std::size_t c1_idx = 0; diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 77bd7bd5889..c31307e6646 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -116,6 +116,9 @@ struct TrimLayerInstructions final std::size_t new_offset; std::size_t hash_offset; + + std::size_t start_trim_idx; + std::size_t end_trim_idx; }; //---------------------------------------------------------------------------------------------------------------------- @@ -156,8 +159,8 @@ class CurveTrees // Contiguous leaves in the tree, starting a specified start_idx in the leaf layer struct Leaves final { - // Starting index in the leaf layer - std::size_t start_idx{0}; + // Starting leaf tuple index in the leaf layer + std::size_t start_leaf_tuple_idx{0}; // Contiguous leaves in a tree that start at the start_idx std::vector tuples; }; @@ -177,7 +180,7 @@ class CurveTrees // - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc struct TreeReduction final { - std::size_t new_total_leaves; + std::size_t new_total_leaf_tuples; std::vector> c1_layer_reductions; std::vector> c2_layer_reductions; }; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index d552344d428..2e0103ff770 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -79,42 +79,18 @@ static bool 
validate_layer(const C &curve, //---------------------------------------------------------------------------------------------------------------------- template static std::vector get_last_chunk_children_to_trim(const C_CHILD &c_child, - const fcmp::curve_trees::TrimLayerInstructions &trim_instructions, - const CurveTreesGlobalTree::Layer &child_layer) + const CurveTreesGlobalTree::Layer &child_layer, + const bool need_last_chunk_children_to_trim, + const bool need_last_chunk_remaining_children, + const std::size_t new_offset, + const std::size_t start_trim_idx, + const std::size_t end_trim_idx) { std::vector children_to_trim_out; - - const std::size_t new_total_children = trim_instructions.new_total_children; - const std::size_t old_total_children = trim_instructions.old_total_children; - const std::size_t new_total_parents = trim_instructions.new_total_parents; - const std::size_t parent_chunk_width = trim_instructions.parent_chunk_width; - const std::size_t new_offset = trim_instructions.new_offset; - - CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "expected some new children"); - CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset"); - CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents"); - - if (trim_instructions.need_last_chunk_children_to_trim) - { - std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset; - MDEBUG("Start trim from idx: " << idx); - do - { - // TODO: consolidate do while inner logic with below - CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); - const auto &child_point = child_layer[idx]; - - auto child_scalar = c_child.point_to_cycle_scalar(child_point); - children_to_trim_out.push_back(std::move(child_scalar)); - - ++idx; - } - while ((idx < old_total_children) && (idx % parent_chunk_width != 0)); - } - else if (trim_instructions.need_last_chunk_remaining_children && new_offset > 0) + if (need_last_chunk_children_to_trim 
|| (need_last_chunk_remaining_children && new_offset > 0)) { - std::size_t idx = new_total_children - new_offset; - MDEBUG("Start grow remaining from idx: " << idx); + std::size_t idx = start_trim_idx; + MDEBUG("Start trim from idx: " << idx << " , ending trim at: " << end_trim_idx); do { CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); @@ -125,7 +101,7 @@ static std::vector get_last_chunk_children_to_trim(co ++idx; } - while ((idx < new_total_children) && (idx % parent_chunk_width != 0)); + while (idx < end_trim_idx); } return children_to_trim_out; @@ -188,8 +164,7 @@ CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes() const void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_extension) { // Add the leaves - const std::size_t init_num_leaves = m_tree.leaves.size() * m_curve_trees.LEAF_TUPLE_SIZE; - CHECK_AND_ASSERT_THROW_MES(init_num_leaves == tree_extension.leaves.start_idx, + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() == tree_extension.leaves.start_leaf_tuple_idx, "unexpected leaf start idx"); m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); @@ -287,12 +262,9 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction) { // Trim the leaves - const std::size_t init_num_leaves = m_tree.leaves.size() * m_curve_trees.LEAF_TUPLE_SIZE; - CHECK_AND_ASSERT_THROW_MES(init_num_leaves > tree_reduction.new_total_leaves, "expected fewer new total leaves"); - CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaves % m_curve_trees.LEAF_TUPLE_SIZE) == 0, - "unexpected new total leaves"); - const std::size_t new_total_leaf_tuples = tree_reduction.new_total_leaves / m_curve_trees.LEAF_TUPLE_SIZE; - while (m_tree.leaves.size() > new_total_leaf_tuples) + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > tree_reduction.new_total_leaf_tuples, + "expected fewer new total 
leaves"); + while (m_tree.leaves.size() > tree_reduction.new_total_leaf_tuples) m_tree.leaves.pop_back(); // Trim the layers @@ -372,23 +344,15 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); const auto &trim_leaf_layer_instructions = trim_instructions[0]; - const std::size_t new_total_children = trim_leaf_layer_instructions.new_total_children; - const std::size_t old_total_children = trim_leaf_layer_instructions.old_total_children; - const std::size_t new_total_parents = trim_leaf_layer_instructions.new_total_parents; - const std::size_t parent_chunk_width = trim_leaf_layer_instructions.parent_chunk_width; - const std::size_t new_offset = trim_leaf_layer_instructions.new_offset; - - CHECK_AND_ASSERT_THROW_MES(new_total_children >= CurveTreesV1::LEAF_TUPLE_SIZE, "expected some new leaves"); - CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset"); - CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents"); + const std::size_t new_offset = trim_leaf_layer_instructions.new_offset; std::vector leaves_to_trim; // TODO: separate function - // TODO: calculate starting indexes in trim instructions, perhaps calculate end indexes also - if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim) + if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim || + (trim_leaf_layer_instructions.need_last_chunk_remaining_children && new_offset > 0)) { - std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset; + std::size_t idx = trim_leaf_layer_instructions.start_trim_idx; MDEBUG("Start trim from idx: " << idx); do { @@ -404,26 +368,7 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c idx += CurveTreesV1::LEAF_TUPLE_SIZE; } - while ((idx < old_total_children) && (idx % parent_chunk_width != 0)); - } - else if 
(trim_leaf_layer_instructions.need_last_chunk_remaining_children && new_offset > 0) - { - std::size_t idx = new_total_children - new_offset; - do - { - CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisble by leaf tuple size"); - const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE; - - CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high"); - const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; - - leaves_to_trim.push_back(leaf_tuple.O_x); - leaves_to_trim.push_back(leaf_tuple.I_x); - leaves_to_trim.push_back(leaf_tuple.C_x); - - idx += CurveTreesV1::LEAF_TUPLE_SIZE; - } - while ((idx < new_total_children) && (idx % parent_chunk_width != 0)); + while (idx < trim_leaf_layer_instructions.end_trim_idx); } all_children_to_trim.c2_children.emplace_back(std::move(leaves_to_trim)); @@ -433,16 +378,28 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c std::size_t c2_idx = 0; for (std::size_t i = 1; i < trim_instructions.size(); ++i) { + MDEBUG("Getting trim instructions for layer " << i); + const auto &trim_layer_instructions = trim_instructions[i]; + const bool need_last_chunk_children_to_trim = trim_layer_instructions.need_last_chunk_children_to_trim; + const bool need_last_chunk_remaining_children = trim_layer_instructions.need_last_chunk_remaining_children; + const std::size_t new_offset = trim_layer_instructions.new_offset; + const std::size_t start_trim_idx = trim_layer_instructions.start_trim_idx; + const std::size_t end_trim_idx = trim_layer_instructions.end_trim_idx; + if (parent_is_c2) { CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high"); auto children_to_trim = get_last_chunk_children_to_trim( m_curve_trees.m_c1, - trim_layer_instructions, - m_tree.c1_layers[c1_idx]); + m_tree.c1_layers[c1_idx], + need_last_chunk_children_to_trim, + need_last_chunk_remaining_children, + new_offset, + start_trim_idx, + 
end_trim_idx); all_children_to_trim.c2_children.emplace_back(std::move(children_to_trim)); ++c1_idx; @@ -453,8 +410,12 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c auto children_to_trim = get_last_chunk_children_to_trim( m_curve_trees.m_c2, - trim_layer_instructions, - m_tree.c2_layers[c2_idx]); + m_tree.c2_layers[c2_idx], + need_last_chunk_children_to_trim, + need_last_chunk_remaining_children, + new_offset, + start_trim_idx, + end_trim_idx); all_children_to_trim.c1_children.emplace_back(std::move(children_to_trim)); ++c2_idx; @@ -674,7 +635,7 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension MDEBUG("Tree extension has " << tree_extension.leaves.tuples.size() << " leaves, " << c1_extensions.size() << " helios layers, " << c2_extensions.size() << " selene layers"); - MDEBUG("Leaf start idx: " << tree_extension.leaves.start_idx); + MDEBUG("Leaf start idx: " << tree_extension.leaves.start_leaf_tuple_idx); for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) { const auto &leaf = tree_extension.leaves.tuples[i]; @@ -683,7 +644,7 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); const auto C_x = m_curve_trees.m_c2.to_string(leaf.C_x); - MDEBUG("Leaf idx " << ((i*CurveTreesV1::LEAF_TUPLE_SIZE) + tree_extension.leaves.start_idx) + MDEBUG("Leaf tuple idx " << (tree_extension.leaves.start_leaf_tuple_idx) << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); } @@ -899,13 +860,15 @@ static bool grow_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees), false, "failed to add initial leaves to db"); + 
CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, + "failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " << ext_leaves << " leaves"); test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, ext_leaves)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees), false, "failed to extend tree in db"); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves + ext_leaves), false, + "failed to extend tree in db"); MDEBUG("Successfully extended tree in db by " << ext_leaves << " leaves"); } @@ -913,6 +876,35 @@ static bool grow_tree_db(const std::size_t init_leaves, return true; } //---------------------------------------------------------------------------------------------------------------------- +static bool trim_tree_db(const std::size_t init_leaves, + const std::size_t trim_leaves, + CurveTreesV1 &curve_trees, + unit_test::BlockchainLMDBTest &test_db) +{ + INIT_BLOCKCHAIN_LMDB_TEST_DB(); + + { + cryptonote::db_wtxn_guard guard(test_db.m_db); + + LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); + + test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, + "failed to add initial leaves to db"); + + MDEBUG("Successfully added initial " << init_leaves << " leaves to db, trimming by " + << trim_leaves << " leaves"); + + test_db.m_db->trim_tree(curve_trees, trim_leaves); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves - trim_leaves), false, + "failed to trim tree in db"); + + MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); + } + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- 
//---------------------------------------------------------------------------------------------------------------------- // Test //---------------------------------------------------------------------------------------------------------------------- @@ -999,6 +991,8 @@ TEST(curve_trees, trim_tree) helios_chunk_width, selene_chunk_width); + unit_test::BlockchainLMDBTest test_db; + // Increment to test for off-by-1 ++leaves_needed_for_n_layers; @@ -1020,6 +1014,7 @@ TEST(curve_trees, trim_tree) CurveTreesGlobalTree tree_copy(global_tree); ASSERT_TRUE(trim_tree_in_memory(trim_leaves, std::move(tree_copy))); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); } } } @@ -1076,7 +1071,7 @@ TEST(curve_trees, hash_trim) ASSERT_EQ(trim_res_bytes, grow_res_bytes); } - // 3. Trim 2 + // 2. Trim 2 { // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} // Then trim to: {selene_scalar_0} @@ -1151,7 +1146,7 @@ TEST(curve_trees, hash_trim) ASSERT_EQ(trim_res_bytes, grow_res_bytes); } - // 4. Trim 2 then grow by 1 + // 4. 
Trim 2 and grow back by 1 { // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} // Then trim+grow to: {selene_scalar_0, selene_scalar_3} From 8a89c20f3b8f4af22bda67291d26af5799145bf3 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 17 Jul 2024 19:03:23 -0700 Subject: [PATCH 039/127] lmdb migration to init curve trees tree from existing outputs still rough --- src/blockchain_db/lmdb/db_lmdb.cpp | 474 ++++++++++++++++++++++++++++- src/blockchain_db/lmdb/db_lmdb.h | 9 + src/fcmp/curve_trees.h | 8 + tests/unit_tests/curve_trees.cpp | 91 +++--- 4 files changed, 514 insertions(+), 68 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 3de6c016ebd..8df9f25ff62 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -54,7 +54,7 @@ using epee::string_tools::pod_to_hex; using namespace crypto; // Increase when the DB structure changes -#define VERSION 5 +#define VERSION 6 namespace { @@ -89,6 +89,23 @@ inline void throw1(const T &e) #define MDB_val_str(var, val) MDB_val var = {strlen(val) + 1, (void *)val} +#define DELETE_DB(x) do { \ + result = mdb_txn_begin(m_env, NULL, 0, txn); \ + if (result) \ + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); \ + result = mdb_dbi_open(txn, x, 0, &dbi); \ + if (!result) { \ + result = mdb_drop(txn, dbi, 1); \ + if (result) \ + throw0(DB_ERROR(lmdb_error("Failed to delete " x ": ", result).c_str())); \ + txn.commit(); \ + } \ + else \ + { \ + txn.abort(); \ + }; \ + } while(0) + template struct MDB_val_copy: public MDB_val { @@ -199,7 +216,8 @@ namespace * * spent_keys input hash - * - * leaves leaf_idx {O.x, I.x, C.x} + * locked_outputs block ID [{leaf tuple}...] + * leaves leaf_idx {leaf tuple} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] 
* * txpool_meta txn hash txn metadata @@ -232,6 +250,7 @@ const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts"; const char* const LMDB_SPENT_KEYS = "spent_keys"; // Curve trees tree types +const char* const LMDB_LOCKED_OUTPUTS = "locked_outputs"; const char* const LMDB_LEAVES = "leaves"; const char* const LMDB_LAYERS = "layers"; @@ -333,7 +352,22 @@ typedef struct mdb_block_info_4 uint64_t bi_long_term_block_weight; } mdb_block_info_4; -typedef mdb_block_info_4 mdb_block_info; +typedef struct mdb_block_info_5 +{ + uint64_t bi_height; + uint64_t bi_timestamp; + uint64_t bi_coins; + uint64_t bi_weight; // a size_t really but we need 32-bit compat + uint64_t bi_diff_lo; + uint64_t bi_diff_hi; + crypto::hash bi_hash; + uint64_t bi_cum_rct; + uint64_t bi_long_term_block_weight; + uint64_t bi_n_leaf_tuples; + std::array bi_tree_root; +} mdb_block_info_5; + +typedef mdb_block_info_5 mdb_block_info; typedef struct blk_height { crypto::hash bh_hash; @@ -1305,6 +1339,9 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves) { + if (new_leaves.empty()) + return; + // TODO: block_wtxn_start like pop_block, then call BlockchainDB::grow_tree LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1645,6 +1682,42 @@ std::size_t BlockchainLMDB::get_num_leaf_tuples() const return n_leaf_tuples; } +std::array BlockchainLMDB::get_tree_root() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + std::array root; + + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result == MDB_SUCCESS) + { + const std::size_t layer_idx = *(std::size_t*)k.mv_data; + if ((layer_idx % 2) == 0) + { + const auto *lv = (layer_val *)v.mv_data; + root = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(lv->child_chunk_hash); + } + else + { + const auto *lv = 
(layer_val *)v.mv_data; + root = fcmp::curve_trees::curve_trees_v1.m_c1.to_bytes(lv->child_chunk_hash); + } + } + else if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); + } + + TXN_POSTFIX_RDONLY(); + + return root; +} + fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes() const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2245,6 +2318,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); @@ -2267,6 +2341,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_block_heights, compare_hash32); mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); + mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); mdb_set_dupsort(txn, m_leaves, compare_uint64); mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); @@ -2446,6 +2521,8 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str())); if (auto result = mdb_drop(txn, m_spent_keys, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str())); + if (auto result = mdb_drop(txn, m_locked_outputs, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_outputs: ", result).c_str())); if (auto result = 
mdb_drop(txn, m_leaves, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); if (auto result = mdb_drop(txn, m_layers, 0)) @@ -5747,19 +5824,6 @@ void BlockchainLMDB::migrate_0_1() } txn.abort(); -#define DELETE_DB(x) do { \ - LOG_PRINT_L1(" " x ":"); \ - result = mdb_txn_begin(m_env, NULL, 0, txn); \ - if (result) \ - throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); \ - result = mdb_dbi_open(txn, x, 0, &dbi); \ - if (!result) { \ - result = mdb_drop(txn, dbi, 1); \ - if (result) \ - throw0(DB_ERROR(lmdb_error("Failed to delete " x ": ", result).c_str())); \ - txn.commit(); \ - } } while(0) - DELETE_DB("tx_heights"); DELETE_DB("output_txs"); DELETE_DB("output_indices"); @@ -6494,6 +6558,382 @@ void BlockchainLMDB::migrate_4_5() txn.commit(); } +void BlockchainLMDB::migrate_5_6() +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + uint64_t i; + int result; + mdb_txn_safe txn(false); + MDB_val k, v; + char *ptr; + + MGINFO_YELLOW("Migrating blockchain from DB version 5 to 6 - this may take a while:"); + + // Reset the locked outputs table since not sure of a simple way to continue from where it left off (outputs aren't inserted in order) + MDB_dbi dbi; + DELETE_DB("locked_outputs"); + DELETE_DB("leaves"); + DELETE_DB("layers"); + DELETE_DB("block_infn"); + + // TODO: if I instead iterate over every block's outputs and go in order that way, I'd know where to leave off based on + // the new block_infn table. Problem is that's less efficient (read block tx hashes, use tx hashes to read output ID's, read outputs) + + do + { + // 1. 
Set up locked outputs table + { + LOG_PRINT_L1("Setting up a locked outputs table (step 1/2 of full-chain membership proof migration)"); + + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); + mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); + txn.commit(); + + if (!m_batch_transactions) + set_batch_transactions(true); + batch_start(1000); + txn.m_txn = m_write_txn->m_txn; + + MDB_cursor *c_output_amounts, *c_locked_outputs; + MDB_val k, v; + + MDB_cursor_op op = MDB_FIRST; + + i = 0; + while (1) + { + if (!(i % 1000)) + { + if (i) + { + LOGIF(el::Level::Info) + { + // TODO: total num elems in m_output_amounts + std::cout << i << " / TODO outputs \r" << std::flush; + } + txn.commit(); + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + m_write_txn->m_txn = txn.m_txn; + m_write_batch_txn->m_txn = txn.m_txn; + memset(&m_wcursors, 0, sizeof(m_wcursors)); + } + + result = mdb_cursor_open(txn, m_output_amounts, &c_output_amounts); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str())); + + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + + // Advance the output_amounts cursor to the current + if (i) + { + result = mdb_cursor_get(c_output_amounts, &k, &v, MDB_GET_BOTH); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to advance cursor for output amounts: ", result).c_str())); + } + } + + result = mdb_cursor_get(c_output_amounts, &k, &v, op); + op = MDB_NEXT; + if (result == MDB_NOTFOUND) + { + batch_stop(); + 
break; + } + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get a record from output amounts: ", result).c_str())); + + uint64_t amount = *(const uint64_t*)k.mv_data; + output_data_t output_data; + if (amount == 0) + { + const outkey *okp = (const outkey *)v.mv_data; + output_data = okp->data; + } + else + { + const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; + memcpy(&output_data, &okp->data, sizeof(pre_rct_output_data_t)); + output_data.commitment = rct::zeroCommit(amount); + } + + // Only valid keys can be used to construct fcmp's + if (!check_key(output_data.pubkey)) + { + MERROR("Invalid output pub key: " << output_data.pubkey); + continue; + } + + // Torsion clear the output pub key + // TODO: don't need to decompress and recompress points, can be optimized + rct::key torsion_cleared_pubkey = rct::scalarmultKey(rct::pk2rct(output_data.pubkey), rct::INV_EIGHT); + torsion_cleared_pubkey = rct::scalarmult8(torsion_cleared_pubkey); + + // Get the block in which the output will unlock + // TODO: separate function that should also be used when syncing + uint64_t unlock_height; + // TODO: double triple check off by 1 + if (output_data.unlock_time == 0) + { + unlock_height = output_data.height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE; + } + else if (output_data.unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) + { + unlock_height = output_data.unlock_time; + } + else + { + // Interpret the output_data.unlock_time as time + // TODO: hardcode correct times for each network and take in nettype + const auto hf_v15_time = 1656629118; + const auto hf_v15_height = 2689608; + + // Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block + // TODO: consider taking into account 60s block times when that was consensus + if (hf_v15_time > output_data.unlock_time) + { + const auto seconds_since_unlock = hf_v15_time - output_data.unlock_time; + const auto blocks_since_unlock = seconds_since_unlock / 
DIFFICULTY_TARGET_V2; + CHECK_AND_ASSERT_THROW_MES(hf_v15_height > blocks_since_unlock, "unexpected blocks since unlock"); + unlock_height = hf_v15_height - blocks_since_unlock; + } + else + { + const auto seconds_until_unlock = output_data.unlock_time - hf_v15_time; + const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2; + unlock_height = hf_v15_height + blocks_until_unlock; + } + + /* Note: it's possible for the output to be spent before it reaches the unlock_height; this is ok. It can't + be spent again using an fcmp because it'll have a duplicate key image. It's possible for the output to + unlock by old rules, and then re-lock again. This is also ok, we just need to be sure that the new hf rules + use this unlock_height. + */ + + // TODO: double check the accuracy of this calculation + MDEBUG("unlock time: " << output_data.unlock_time << " , unlock_height: " << unlock_height); + } + + // Get the leaf tuple + const auto leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple( + rct::rct2pk(torsion_cleared_pubkey), + rct::rct2pk(output_data.commitment)); + + if (unlock_height == 60) + MDEBUG(fcmp::curve_trees::curve_trees_v1.m_c2.to_string(leaf_tuple.O_x)); + + // Now add the leaf tuple to the locked outputs table + MDB_val_set(k_height, unlock_height); + MDB_val_set(v_tuple, leaf_tuple); + + // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent + // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height + // FIXME: if a dupe is removed from the locked outputs table and then re-inserted, the tree from a migration can look different than a tree constructed from syncing + result = mdb_cursor_put(c_locked_outputs, &k_height, &v_tuple, MDB_NODUPDATA); + if (result != MDB_SUCCESS && result != MDB_KEYEXIST) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + if (result == MDB_KEYEXIST) + MDEBUG("Duplicate output pub key 
encountered: " << output_data.pubkey); + + ++i; + } + } + + // 2. Set up the curve trees merkle tree + { + LOG_PRINT_L1("Setting up a merkle tree using existing cryptonote outputs (step 2/2 of full-chain membership proof migration)"); + + if (!m_batch_transactions) + set_batch_transactions(true); + batch_start(1000); + txn.m_txn = m_write_txn->m_txn; + + /* the block_info table name is the same but the old version and new version + * have incompatible data. Create a new table. We want the name to be similar + * to the old name so that it will occupy the same location in the DB. + */ + MDB_dbi o_block_info = m_block_info; + lmdb_db_open(txn, "block_infn", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); + mdb_set_dupsort(txn, m_block_info, compare_uint64); + + // Open new leaves and layers tables + lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); + lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); + + mdb_set_dupsort(txn, m_leaves, compare_uint64); + mdb_set_dupsort(txn, m_layers, compare_uint64); + + MDB_cursor *c_locked_outputs, *c_new_block_info, *c_old_block_info; + + MDB_val k, v; + MDB_val k_blk, v_blk; + + MDB_cursor_op op = MDB_FIRST; + + const uint64_t n_blocks = height(); + + i = 0; + while (i < n_blocks) + { + if (!(i % 1000)) + { + if (i) + { + LOGIF(el::Level::Info) + { + std::cout << i << " / " << n_blocks << " blocks \r" << std::flush; + } + txn.commit(); + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + m_write_txn->m_txn = txn.m_txn; + m_write_batch_txn->m_txn = txn.m_txn; + memset(&m_wcursors, 0, sizeof(m_wcursors)); + } + + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + if 
(result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + + result = mdb_cursor_open(txn, m_block_info, &c_new_block_info); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_infn: ", result).c_str())); + result = mdb_cursor_open(txn, o_block_info, &c_old_block_info); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_info: ", result).c_str())); + + // Advance the c_old_block_info cursor to the current + if (i) + { + result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_GET_BOTH); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to advance cursor for old block infos: ", result).c_str())); + } + } + + MDB_val_set(k_height, i); + + // Get all the locked outputs at that height + std::vector leaf_tuples; + + // TODO: double check this gets all leaf tuples when it does multiple iters + MDB_cursor_op op = MDB_SET; + while (1) + { + result = mdb_cursor_get(c_locked_outputs, &k_height, &v, op); + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str())); + op = MDB_NEXT_MULTIPLE; + + const uint64_t h = *(const uint64_t*)k_height.mv_data; + if (h != i) + throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected" + std::to_string(i)).c_str())); + + const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTuple*)v.mv_data); + const auto range_end = range_begin + v.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTuple); + + auto it = range_begin; + + // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it + if (leaf_tuples.size() == 1) + ++it; + + while (it < range_end) + { + leaf_tuples.push_back(*it); + ++it; + } + } + + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree"); + this->grow_tree(fcmp::curve_trees::curve_trees_v1, leaf_tuples); + + // TODO: Remove locked outputs from the 
locked outputs table after adding them to tree + + // Now update block info with num leaves in tree and new merkle root + const std::size_t n_leaf_tuples = this->get_num_leaf_tuples(); + const auto root = this->get_tree_root(); + + MDEBUG("n_leaf_tuples: " << n_leaf_tuples); + + // Get old block_info and use it to set the new one with new values + result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to get a record from block_info: ", result).c_str())); + const mdb_block_info_4 *bi_old = (const mdb_block_info_4*)v_blk.mv_data; + mdb_block_info_5 bi; + bi.bi_height = bi_old->bi_height; + bi.bi_timestamp = bi_old->bi_timestamp; + bi.bi_coins = bi_old->bi_coins; + bi.bi_weight = bi_old->bi_weight; + bi.bi_diff_lo = bi_old->bi_diff_lo; + bi.bi_diff_hi = bi_old->bi_diff_hi; + bi.bi_hash = bi_old->bi_hash; + bi.bi_cum_rct = bi_old->bi_cum_rct; + bi.bi_long_term_block_weight = bi_old->bi_long_term_block_weight; + bi.bi_n_leaf_tuples = n_leaf_tuples; + bi.bi_tree_root = root; + + MDB_val_set(nv, bi); + result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to put a record into block_infn: ", result).c_str())); + + // TODO: delete old block info records + // /* we delete the old records immediately, so the overall DB and mapsize should not grow. + // * This is a little slower than just letting mdb_drop() delete it all at the end, but + // * it saves a significant amount of disk space. 
+ // */ + // result = mdb_cursor_del(c_old_block_info, 0); + // if (result) + // throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_info: ", result).c_str())); + + ++i; + } + batch_stop(); + + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + /* Delete the old table */ + result = mdb_drop(txn, o_block_info, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete old block_info table: ", result).c_str())); + + MDB_cursor *c_cur = c_new_block_info; + RENAME_DB("block_infn"); + mdb_dbi_close(m_env, m_block_info); + + lmdb_db_open(txn, "block_info", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); + mdb_set_dupsort(txn, m_block_info, compare_uint64); + + txn.commit(); + } + } while(0); + + uint32_t version = 6; + v.mv_data = (void *)&version; + v.mv_size = sizeof(version); + MDB_val_str(vk, "version"); + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + result = mdb_put(txn, m_properties, &vk, &v, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str())); + txn.commit(); +} + void BlockchainLMDB::migrate(const uint32_t oldversion) { if (oldversion < 1) @@ -6506,6 +6946,8 @@ void BlockchainLMDB::migrate(const uint32_t oldversion) migrate_3_4(); if (oldversion < 5) migrate_4_5(); + if (oldversion < 6) + migrate_5_6(); } } // namespace cryptonote diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 317d238820a..e71d17bbde5 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -64,6 +64,7 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; + MDB_cursor *m_txc_locked_outputs; MDB_cursor *m_txc_leaves; MDB_cursor *m_txc_layers; @@ -90,6 +91,7 @@ typedef 
struct mdb_txn_cursors #define m_cur_tx_indices m_cursors->m_txc_tx_indices #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys +#define m_cur_locked_outputs m_cursors->m_txc_locked_outputs #define m_cur_leaves m_cursors->m_txc_leaves #define m_cur_layers m_cursors->m_txc_layers #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta @@ -114,6 +116,7 @@ typedef struct mdb_rflags bool m_rf_tx_indices; bool m_rf_tx_outputs; bool m_rf_spent_keys; + bool m_rf_locked_outputs; bool m_rf_leaves; bool m_rf_layers; bool m_rf_txpool_meta; @@ -426,6 +429,8 @@ class BlockchainLMDB : public BlockchainDB std::size_t get_num_leaf_tuples() const; + std::array get_tree_root() const; + fcmp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( @@ -485,6 +490,9 @@ class BlockchainLMDB : public BlockchainDB // migrate from DB version 4 to 5 void migrate_4_5(); + // migrate from DB version 5 to 6 + void migrate_5_6(); + void cleanup_batch(); private: @@ -507,6 +515,7 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; + MDB_dbi m_locked_outputs; MDB_dbi m_leaves; MDB_dbi m_layers; diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index c31307e6646..9a25963ae42 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -260,6 +260,14 @@ class CurveTrees using Helios = tower_cycle::Helios; using Selene = tower_cycle::Selene; using CurveTreesV1 = CurveTrees; + +// https://github.com/kayabaNerve/fcmp-plus-plus/blob +// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 +static const std::size_t HELIOS_CHUNK_WIDTH = 38; +static const std::size_t SELENE_CHUNK_WIDTH = 18; +static const Helios HELIOS; +static const Selene SELENE; +static const CurveTreesV1 curve_trees_v1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace curve_trees diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 2e0103ff770..b264b675690 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -998,7 +998,7 @@ TEST(curve_trees, trim_tree) // First initialize the tree with init_leaves for (std::size_t init_leaves = 2; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) -{ + { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); CurveTreesGlobalTree global_tree(curve_trees); @@ -1022,19 +1022,6 @@ TEST(curve_trees, trim_tree) // Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children TEST(curve_trees, hash_trim) { - // https://github.com/kayabaNerve/fcmp-plus-plus/blob - // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 - const std::size_t helios_chunk_width = 38; - const std::size_t selene_chunk_width = 18; - - Helios helios; - Selene selene; - auto curve_trees = CurveTreesV1( - helios, - selene, - helios_chunk_width, - selene_chunk_width); - // 1. 
Trim 1 { // Start by hashing: {selene_scalar_0, selene_scalar_1} @@ -1044,29 +1031,29 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 2 scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); // Trim selene_scalar_1 const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; - const auto trim_res = curve_trees.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( init_hash, 1, trimmed_children, - curve_trees.m_c2.zero_scalar()); - const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar()); + const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0} std::vector remaining_children{selene_scalar_0}; - const auto grow_res = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); 
ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1081,29 +1068,29 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; - const auto init_hash = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); // Trim the initial result by 2 children const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; - const auto trim_res = curve_trees.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( init_hash, 1, trimmed_children, - curve_trees.m_c2.zero_scalar()); - const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar()); + const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0} std::vector remaining_children{selene_scalar_0}; - const auto grow_res = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = 
fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1117,31 +1104,31 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); const auto selene_scalar_2 = generate_random_selene_scalar(); // Trim the 2nd child and grow with new child const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; - const auto trim_res = curve_trees.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( init_hash, 1, trimmed_children, selene_scalar_2); - const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_2} std::vector remaining_children{selene_scalar_0, selene_scalar_2}; - const auto grow_res = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = curve_trees.m_c2.to_bytes(grow_res); + const auto 
grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1156,31 +1143,31 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; - const auto init_hash = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); const auto selene_scalar_3 = generate_random_selene_scalar(); // Trim the initial result by 2 children+grow by 1 const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; - const auto trim_res = curve_trees.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( init_hash, 1, trimmed_children, selene_scalar_3); - const auto trim_res_bytes = curve_trees.m_c2.to_bytes(trim_res); + const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_3} std::vector remaining_children{selene_scalar_0, selene_scalar_3}; - const auto grow_res = curve_trees.m_c2.hash_grow( - /*existing_hash*/ curve_trees.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ curve_trees.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = 
curve_trees.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } From 306488b690d6e6454d333128bd929cf52d0e2792 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 24 Jul 2024 12:13:39 -0700 Subject: [PATCH 040/127] Implemented growing the tree on sync + lots of cleaning - validate output and commitment in tuple conversion function - function to get_unlock_height from height in chain + unlock_time - tx_outs_to_leaf_tuples function - cleaned up trim impl (reduced num params in instructions and conditional complexity) - renamed locked_outputs table to locked_leaves (clearer tie to merkle tree) - size_t -> uint64_t for db compatibility across 32-bit and 64-bit machines - added hash_grow tests --- src/blockchain_db/blockchain_db.cpp | 24 +- src/blockchain_db/blockchain_db.h | 17 +- src/blockchain_db/lmdb/db_lmdb.cpp | 398 +++++++++--------- src/blockchain_db/lmdb/db_lmdb.h | 33 +- src/blockchain_db/testdb.h | 6 +- .../cryptonote_format_utils.cpp | 53 +++ .../cryptonote_format_utils.h | 3 + src/fcmp/CMakeLists.txt | 2 + src/fcmp/curve_trees.cpp | 235 +++++++---- src/fcmp/curve_trees.h | 93 ++-- src/ringct/rctTypes.h | 1 + tests/block_weight/CMakeLists.txt | 1 + tests/block_weight/block_weight.cpp | 2 + tests/core_tests/chaingen.cpp | 3 +- tests/unit_tests/curve_trees.cpp | 73 +++- tests/unit_tests/hardfork.cpp | 37 +- tests/unit_tests/long_term_block_weight.cpp | 1 + 17 files changed, 616 insertions(+), 366 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 894eb15c7c5..b5840240e4d 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -294,9 +294,31 @@ uint64_t BlockchainDB::add_block( const std::pair& blck TIME_MEASURE_FINISH(time1); time_add_transaction += time1; + // When adding a block, we also need to add all the leaf tuples included in + // the block to a table keeping 
track of locked leaf tuples. Once those leaf + // tuples unlock, we use them to grow the tree. + std::multimap leaf_tuples_by_unlock_height; + + // Get miner tx's leaf tuples + fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( + blk.miner_tx, + prev_height, + true/*miner_tx*/, + leaf_tuples_by_unlock_height); + + // Get all other txs' leaf tuples + for (const auto &txp : txs) + { + fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( + txp.first, + prev_height, + false/*miner_tx*/, + leaf_tuples_by_unlock_height); + } + // call out to subclass implementation to add the block & metadata time1 = epee::misc_utils::get_tick_count(); - add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash); + add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, leaf_tuples_by_unlock_height); TIME_MEASURE_FINISH(time1); time_add_block1 += time1; diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 528d7777b66..522fe5838b8 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -399,6 +399,7 @@ class BlockchainDB * @param cumulative_difficulty the accumulated difficulty after this block * @param coins_generated the number of coins generated total after this block * @param blk_hash the hash of the block + * @param leaf_tuples_by_unlock_height the leaves from this block to add to the merkle tree */ virtual void add_block( const block& blk , size_t block_weight @@ -407,6 +408,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const std::multimap& leaf_tuples_by_unlock_height ) = 0; /** @@ -1394,6 +1396,17 @@ class BlockchainDB */ virtual uint64_t get_num_outputs(const uint64_t& amount) const = 0; + // returns the total number of global outputs + /** + * @brief fetches the total number of global outputs + * + * The 
subclass should return a count of all outputs, + * or zero if there are none. + * * + * @return the number of global outputs + */ + virtual uint64_t get_num_global_outputs() const = 0; + /** * @brief return index of the first element (should be hidden, but isn't) * @@ -1769,10 +1782,10 @@ class BlockchainDB virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves) = 0; - virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) = 0; + virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) = 0; // TODO: description - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t expected_n_leaf_tuples) const = 0; + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 8df9f25ff62..a2f3470cd60 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -216,7 +216,7 @@ namespace * * spent_keys input hash - * - * locked_outputs block ID [{leaf tuple}...] + * locked_leaves block ID [{leaf tuple}...] * leaves leaf_idx {leaf tuple} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] 
* @@ -249,8 +249,8 @@ const char* const LMDB_OUTPUT_TXS = "output_txs"; const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts"; const char* const LMDB_SPENT_KEYS = "spent_keys"; -// Curve trees tree types -const char* const LMDB_LOCKED_OUTPUTS = "locked_outputs"; +// Curve trees merkle tree tables +const char* const LMDB_LOCKED_LEAVES = "locked_leaves"; const char* const LMDB_LEAVES = "leaves"; const char* const LMDB_LAYERS = "layers"; @@ -817,7 +817,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_height) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -845,6 +845,14 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l throw0(BLOCK_PARENT_DNE("Top block is not new block's parent")); } + // Grow the tree with outputs that unlock at this block height + const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_height(m_height); + + // TODO: double check consistent order for inserting outputs into the tree + this->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked_leaf_tuples); + + // TODO: remove unlocked_leaf_tuples from the locked outputs table + int result = 0; MDB_val_set(key, m_height); @@ -878,6 +886,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l bi.bi_cum_rct += bi_prev->bi_cum_rct; } bi.bi_long_term_block_weight = long_term_block_weight; + bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); + bi.bi_tree_root = this->get_tree_root(); MDB_val_set(val, bi); result = mdb_cursor_put(m_cur_block_info, (MDB_val *)&zerokval, &val, MDB_APPENDDUP); @@ -888,6 +898,21 @@ void BlockchainLMDB::add_block(const 
block& blk, size_t block_weight, uint64_t l if (result) throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str())); + CURSOR(locked_leaves) + + // Add the locked leaf tuples from this block to the locked outputs table + for (const auto &locked_tuple : leaf_tuples_by_unlock_height) + { + MDB_val_set(k_height, locked_tuple.first); + MDB_val_set(v_tuple, locked_tuple.second); + + // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent + // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height + result = mdb_cursor_put(m_cur_locked_leaves, &k_height, &v_tuple, MDB_NODUPDATA); + if (result != MDB_SUCCESS && result != MDB_KEYEXIST) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + } + // we use weight as a proxy for size, since we don't have size but weight is >= size // and often actually equal m_cum_size += block_weight; @@ -1347,10 +1372,12 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree"); + CURSOR(leaves) // Get the number of leaf tuples that exist in the tree - const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); // Read every layer's last hashes const auto last_hashes = this->get_tree_last_hashes(); @@ -1361,9 +1388,9 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree // Insert the leaves // TODO: grow_leaves const auto &leaves = tree_extension.leaves; - for (std::size_t i = 0; i < leaves.tuples.size(); ++i) + for (uint64_t i = 0; i < leaves.tuples.size(); ++i) { - MDB_val_copy k(i + leaves.start_leaf_tuple_idx); + MDB_val_copy k(i + leaves.start_leaf_tuple_idx); MDB_val_set(v, leaves.tuples[i]); // TODO: according to the 
docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. @@ -1381,11 +1408,11 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); bool use_c2 = true; - std::size_t c2_idx = 0; - std::size_t c1_idx = 0; - for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + uint64_t c2_idx = 0; + uint64_t c1_idx = 0; + for (uint64_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) { - const std::size_t layer_idx = c2_idx + c1_idx; + const uint64_t layer_idx = c2_idx + c1_idx; MDEBUG("Growing layer " << layer_idx); if (use_c2) @@ -1422,8 +1449,8 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree template void BlockchainLMDB::grow_layer(const C &curve, const std::vector> &layer_extensions, - const std::size_t ext_idx, - const std::size_t layer_idx) + const uint64_t ext_idx, + const uint64_t layer_idx) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1438,7 +1465,7 @@ void BlockchainLMDB::grow_layer(const C &curve, // TODO: make sure ext.start_idx lines up with the end of the layer - MDB_val_copy k(layer_idx); + MDB_val_copy k(layer_idx); if (ext.update_existing_last_hash) { @@ -1456,7 +1483,7 @@ void BlockchainLMDB::grow_layer(const C &curve, } // Now add all the new hashes found in the extension - for (std::size_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i) + for (uint64_t i = ext.update_existing_last_hash ? 
1 : 0; i < ext.hashes.size(); ++i) { layer_val lv; lv.child_chunk_idx = i + ext.start_idx; @@ -1472,7 +1499,7 @@ void BlockchainLMDB::grow_layer(const C &curve, } } -void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) +void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) { // TODO: block_wtxn_start like pop_block, then call BlockchainDB::trim_tree LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1484,7 +1511,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); - const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); const auto trim_instructions = curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); @@ -1507,11 +1534,11 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree // Trim the leaves // TODO: trim_leaves - for (std::size_t i = 0; i < trim_n_leaf_tuples; ++i) + for (uint64_t i = 0; i < trim_n_leaf_tuples; ++i) { - std::size_t last_leaf_tuple_idx = (old_n_leaf_tuples - 1 - i); + uint64_t last_leaf_tuple_idx = (old_n_leaf_tuples - 1 - i); - MDB_val_copy k(last_leaf_tuple_idx); + MDB_val_copy k(last_leaf_tuple_idx); MDB_val v; int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_SET); if (result == MDB_NOTFOUND) @@ -1533,9 +1560,9 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions"); bool use_c2 = true; - std::size_t c2_idx = 0; - std::size_t c1_idx = 0; - for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) + uint64_t c2_idx = 0; 
+ uint64_t c1_idx = 0; + for (uint64_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) { if (use_c2) { @@ -1557,7 +1584,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree // Trim any remaining layers in layers after the root // TODO: trim_leftovers_after_root - const std::size_t expected_root_idx = c2_layer_reductions.size() + c1_layer_reductions.size() - 1; + const uint64_t expected_root_idx = c2_layer_reductions.size() + c1_layer_reductions.size() - 1; while (1) { MDB_val k, v; @@ -1565,7 +1592,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); - const std::size_t last_layer_idx = *(std::size_t *)k.mv_data; + const uint64_t last_layer_idx = *(uint64_t *)k.mv_data; if (last_layer_idx > expected_root_idx) { @@ -1586,7 +1613,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree template void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, - const std::size_t layer_idx) + const uint64_t layer_idx) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1594,11 +1621,11 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &laye CURSOR(layers) - MDB_val_copy k(layer_idx); + MDB_val_copy k(layer_idx); // Get the number of existing elements in the layer // TODO: get_num_elems_in_layer - std::size_t old_n_elems_in_layer = 0; + uint64_t old_n_elems_in_layer = 0; { // Get the first record in a layer so we can then get the last record MDB_val v; @@ -1606,8 +1633,6 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &laye if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); - // TODO: why can't I just use MDB_LAST_DUP once and get the last record? 
- result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); @@ -1618,12 +1643,12 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &laye CHECK_AND_ASSERT_THROW_MES(old_n_elems_in_layer >= layer_reduction.new_total_parents, "unexpected old n elems in layer"); - const std::size_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents; + const uint64_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents; // Delete the elements - for (std::size_t i = 0; i < trim_n_elems_in_layer; ++i) + for (uint64_t i = 0; i < trim_n_elems_in_layer; ++i) { - std::size_t last_elem_idx = (old_n_elems_in_layer - 1 - i); + uint64_t last_elem_idx = (old_n_elems_in_layer - 1 - i); MDB_val_set(v, last_elem_idx); int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); @@ -1655,7 +1680,7 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &laye } } -std::size_t BlockchainLMDB::get_num_leaf_tuples() const +uint64_t BlockchainLMDB::get_num_leaf_tuples() const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1672,7 +1697,7 @@ std::size_t BlockchainLMDB::get_num_leaf_tuples() const if (result == MDB_NOTFOUND) n_leaf_tuples = 0; else if (result == MDB_SUCCESS) - n_leaf_tuples = (1 + (*(const std::size_t*)k.mv_data)); + n_leaf_tuples = (1 + (*(const uint64_t*)k.mv_data)); else throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); } @@ -1697,7 +1722,7 @@ std::array BlockchainLMDB::get_tree_root() const int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); if (result == MDB_SUCCESS) { - const std::size_t layer_idx = *(std::size_t*)k.mv_data; + const uint64_t layer_idx = *(uint64_t*)k.mv_data; if ((layer_idx % 2) == 0) { const auto *lv = (layer_val *)v.mv_data; @@ -1731,10 +1756,10 @@ fcmp::curve_trees::CurveTreesV1::LastHashes 
BlockchainLMDB::get_tree_last_hashes auto &c2_last_hashes = last_hashes.c2_last_hashes; // Traverse the tree layer-by-layer starting at the layer closest to leaf layer - std::size_t layer_idx = 0; + uint64_t layer_idx = 0; while (1) { - MDB_val_copy k(layer_idx); + MDB_val_copy k(layer_idx); MDB_val v; // Get the first record in a layer so we can then get the last record @@ -1744,8 +1769,6 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); - // TODO: why can't I just use MDB_LAST_DUP once and get the last record? - // Get the last record in a layer result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); if (result != MDB_SUCCESS) @@ -1795,15 +1818,14 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las std::vector leaves_to_trim; - if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim || - (trim_leaf_layer_instructions.need_last_chunk_remaining_children && trim_leaf_layer_instructions.new_offset > 0)) + if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx) { - std::size_t idx = trim_leaf_layer_instructions.start_trim_idx; + uint64_t idx = trim_leaf_layer_instructions.start_trim_idx; CHECK_AND_ASSERT_THROW_MES(idx % fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisble by leaf tuple size"); - const std::size_t leaf_tuple_idx = idx / fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; - MDB_val_copy k(leaf_tuple_idx); + const uint64_t leaf_tuple_idx = idx / fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + MDB_val_copy k(leaf_tuple_idx); MDB_cursor_op leaf_op = MDB_SET; do @@ -1833,18 +1855,17 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las // Traverse the tree layer-by-layer starting at the layer closest to leaf layer, getting children to trim // TODO: separate function for layers 
bool parent_is_c1 = true; - for (std::size_t i = 1; i < trim_instructions.size(); ++i) + for (uint64_t i = 1; i < trim_instructions.size(); ++i) { const auto &trim_layer_instructions = trim_instructions[i]; std::vector c1_children; std::vector c2_children; - if (trim_layer_instructions.need_last_chunk_children_to_trim || - (trim_layer_instructions.need_last_chunk_remaining_children && trim_layer_instructions.new_offset > 0)) + if (trim_layer_instructions.end_trim_idx > trim_layer_instructions.start_trim_idx) { - const std::size_t layer_idx = (i - 1); - std::size_t idx = trim_layer_instructions.start_trim_idx; + const uint64_t layer_idx = (i - 1); + uint64_t idx = trim_layer_instructions.start_trim_idx; MDB_val_set(k, layer_idx); MDB_val_set(v, idx); @@ -1903,12 +1924,12 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; // Traverse the tree layer-by-layer starting at the layer closest to leaf layer - std::size_t layer_idx = 0; + uint64_t layer_idx = 0; for (const auto &trim_layer_instructions : trim_instructions) { - const std::size_t new_last_idx = trim_layer_instructions.new_total_parents - 1; + const uint64_t new_last_idx = trim_layer_instructions.new_total_parents - 1; - MDB_val_copy k(layer_idx); + MDB_val_copy k(layer_idx); MDB_val_set(v, new_last_idx); int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); @@ -1937,7 +1958,7 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t } bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::size_t expected_n_leaf_tuples) const + const uint64_t expected_n_leaf_tuples) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1946,12 +1967,23 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre RCURSOR(leaves) RCURSOR(layers) - const std::size_t actual_n_leaf_tuples = this->get_num_leaf_tuples(); + 
const uint64_t actual_n_leaf_tuples = this->get_num_leaf_tuples(); CHECK_AND_ASSERT_MES(actual_n_leaf_tuples == expected_n_leaf_tuples, false, "unexpected num leaf tuples"); + if (actual_n_leaf_tuples == 0) + { + // Make sure layers table is also empty + MDB_stat db_stats; + int result = mdb_stat(m_txn, m_layers, &db_stats); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to query m_layers: ", result).c_str())); + CHECK_AND_ASSERT_MES(db_stats.ms_entries == 0, false, "unexpected num layer entries"); + return true; + } + // Check chunks of leaves hash into first layer as expected - std::size_t layer_idx = 0; - std::size_t child_chunk_idx = 0; + uint64_t layer_idx = 0; + uint64_t child_chunk_idx = 0; MDB_cursor_op leaf_op = MDB_FIRST; while (1) { @@ -1978,7 +2010,7 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre } // Get the actual leaf chunk hash from the db - MDB_val_copy k_parent(layer_idx); + MDB_val_copy k_parent(layer_idx); MDB_val_set(v_parent, child_chunk_idx); MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx); @@ -2004,11 +2036,12 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; // Hash the chunk of leaves - for (std::size_t i = 0; i < leaves.size(); ++i) + for (uint64_t i = 0; i < leaves.size(); ++i) MDEBUG("Hashing " << curve_trees.m_c2.to_string(leaves[i])); const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve_trees.m_c2, chunk); - MDEBUG("chunk_hash " << curve_trees.m_c2.to_string(chunk_hash) << " (" << leaves.size() << " leaves)"); + MDEBUG("chunk_hash " << curve_trees.m_c2.to_string(chunk_hash) << " , hash init point: " + << curve_trees.m_c2.to_string(curve_trees.m_c2.m_hash_init_point) << " (" << leaves.size() << " leaves)"); // Now compare to value from the db const auto *lv = (layer_val *)v_parent.mv_data; @@ -2064,10 +2097,10 @@ 
bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre template bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, const C_PARENT &c_parent, - const std::size_t layer_idx, - const std::size_t child_start_idx, - const std::size_t child_chunk_idx, - const std::size_t chunk_width) const + const uint64_t layer_idx, + const uint64_t child_start_idx, + const uint64_t child_chunk_idx, + const uint64_t chunk_width) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -2082,7 +2115,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, std::vector child_chunk; child_chunk.reserve(chunk_width); - MDB_val_copy k_child(layer_idx); + MDB_val_copy k_child(layer_idx); MDB_val_set(v_child, child_start_idx); MDB_cursor_op op_child = MDB_GET_BOTH; while (1) @@ -2102,8 +2135,8 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, } // Get the actual chunk hash from the db - const std::size_t parent_layer_idx = layer_idx + 1; - MDB_val_copy k_parent(parent_layer_idx); + const uint64_t parent_layer_idx = layer_idx + 1; + MDB_val_copy k_parent(parent_layer_idx); MDB_val_set(v_parent, child_chunk_idx); // Check for end conditions @@ -2146,11 +2179,12 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, child_scalars.emplace_back(c_child.point_to_cycle_scalar(child)); const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; - for (std::size_t i = 0; i < child_scalars.size(); ++i) + for (uint64_t i = 0; i < child_scalars.size(); ++i) MDEBUG("Hashing " << c_parent.to_string(child_scalars[i])); const auto chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk); - MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " (" << child_scalars.size() << " children)"); + MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " , hash init point: " + << c_parent.to_string(c_parent.m_hash_init_point) << " (" << child_scalars.size() << " children)"); const auto *lv = (layer_val 
*)v_parent.mv_data; MDEBUG("Actual chunk hash " << c_parent.to_string(lv->child_chunk_hash)); @@ -2169,6 +2203,57 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, chunk_width); } +std::vector BlockchainLMDB::get_locked_leaf_tuples_at_height( + const uint64_t height) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(locked_leaves) + + MDB_val_set(k_height, height); + MDB_val v_tuple; + + // Get all the locked outputs at that height + std::vector leaf_tuples; + + // TODO: double check this gets all leaf tuples when it does multiple iters + MDB_cursor_op op = MDB_SET; + while (1) + { + int result = mdb_cursor_get(m_cur_locked_leaves, &k_height, &v_tuple, op); + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str())); + op = MDB_NEXT_MULTIPLE; + + const uint64_t h = *(const uint64_t*)k_height.mv_data; + if (h != height) + throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected" + std::to_string(height)).c_str())); + + const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTuple*)v_tuple.mv_data); + const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTuple); + + auto it = range_begin; + + // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it + if (leaf_tuples.size() == 1) + ++it; + + while (it < range_end) + { + leaf_tuples.push_back(*it); + ++it; + } + } + + TXN_POSTFIX_RDONLY(); + + return leaf_tuples; +} + BlockchainLMDB::~BlockchainLMDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2318,7 +2403,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); - lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | 
MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); + lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves"); lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); @@ -2341,7 +2426,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_block_heights, compare_hash32); mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); - mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); + mdb_set_dupsort(txn, m_locked_leaves, compare_uint64); mdb_set_dupsort(txn, m_leaves, compare_uint64); mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); @@ -2521,8 +2606,8 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str())); if (auto result = mdb_drop(txn, m_spent_keys, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str())); - if (auto result = mdb_drop(txn, m_locked_outputs, 0)) - throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_outputs: ", result).c_str())); + if (auto result = mdb_drop(txn, m_locked_leaves, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_leaves: ", result).c_str())); if (auto result = mdb_drop(txn, m_leaves, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); if (auto result = mdb_drop(txn, m_layers, 0)) @@ -4314,6 +4399,27 @@ uint64_t BlockchainLMDB::get_num_outputs(const uint64_t& amount) const return num_elems; } +uint64_t BlockchainLMDB::get_num_global_outputs() const +{ + LOG_PRINT_L3("BlockchainLMDB:: " << __func__); + check_open(); + 
+ TXN_PREFIX_RDONLY(); + RCURSOR(output_amounts); + + MDB_stat db_stats; + int result = mdb_stat(m_txn, m_output_amounts, &db_stats); + uint64_t count = 0; + if (result != MDB_NOTFOUND) + { + if (result) + throw0(DB_ERROR(lmdb_error("Failed to query m_output_amounts: ", result).c_str())); + count = db_stats.ms_entries; + } + TXN_POSTFIX_RDONLY(); + return count; +} + output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -6569,15 +6675,18 @@ void BlockchainLMDB::migrate_5_6() MGINFO_YELLOW("Migrating blockchain from DB version 5 to 6 - this may take a while:"); - // Reset the locked outputs table since not sure of a simple way to continue from where it left off (outputs aren't inserted in order) + // Reset all updated tables from migration since not sure of a simple and efficient way to continue if the migration + // stops before it's finished (outputs aren't inserted in order) MDB_dbi dbi; - DELETE_DB("locked_outputs"); + DELETE_DB("locked_leaves"); DELETE_DB("leaves"); DELETE_DB("layers"); DELETE_DB("block_infn"); // TODO: if I instead iterate over every block's outputs and go in order that way, I'd know where to leave off based on // the new block_infn table. Problem is that's less efficient (read block tx hashes, use tx hashes to read output ID's, read outputs) + // ... 
Could also require outputs be inserted all-or-nothing first, and then can pick up where left off for the tree + // if any of leaves, layers, or block_infn tables exist, then locked_leaves migration should be complete do { @@ -6588,8 +6697,8 @@ void BlockchainLMDB::migrate_5_6() result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); - lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); - mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); + lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves"); + mdb_set_dupsort(txn, m_locked_leaves, compare_uint64); txn.commit(); if (!m_batch_transactions) @@ -6597,11 +6706,13 @@ void BlockchainLMDB::migrate_5_6() batch_start(1000); txn.m_txn = m_write_txn->m_txn; - MDB_cursor *c_output_amounts, *c_locked_outputs; + MDB_cursor *c_output_amounts, *c_locked_leaves; MDB_val k, v; MDB_cursor_op op = MDB_FIRST; + const uint64_t n_outputs = this->get_num_global_outputs(); + i = 0; while (1) { @@ -6627,7 +6738,7 @@ void BlockchainLMDB::migrate_5_6() if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str())); - result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); @@ -6664,70 +6775,22 @@ void BlockchainLMDB::migrate_5_6() output_data.commitment = rct::zeroCommit(amount); } - // Only valid keys can be used to construct fcmp's - if (!check_key(output_data.pubkey)) - { - MERROR("Invalid output pub key: " << output_data.pubkey); - continue; - } - - // Torsion clear the output pub key - // TODO: don't 
need to decompress and recompress points, can be optimized - rct::key torsion_cleared_pubkey = rct::scalarmultKey(rct::pk2rct(output_data.pubkey), rct::INV_EIGHT); - torsion_cleared_pubkey = rct::scalarmult8(torsion_cleared_pubkey); - - // Get the block in which the output will unlock - // TODO: separate function that should also be used when syncing - uint64_t unlock_height; - // TODO: double triple check off by 1 - if (output_data.unlock_time == 0) + // Convert the output into a leaf tuple + fcmp::curve_trees::CurveTreesV1::LeafTuple leaf_tuple; + try { - unlock_height = output_data.height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE; + leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple( + output_data.pubkey, + rct::rct2pk(output_data.commitment)); } - else if (output_data.unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) + catch(...) { - unlock_height = output_data.unlock_time; - } - else - { - // Interpret the output_data.unlock_time as time - // TODO: hardcode correct times for each network and take in nettype - const auto hf_v15_time = 1656629118; - const auto hf_v15_height = 2689608; - - // Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block - // TODO: consider taking into account 60s block times when that was consensus - if (hf_v15_time > output_data.unlock_time) - { - const auto seconds_since_unlock = hf_v15_time - output_data.unlock_time; - const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2; - CHECK_AND_ASSERT_THROW_MES(hf_v15_height > blocks_since_unlock, "unexpected blocks since unlock"); - unlock_height = hf_v15_height - blocks_since_unlock; - } - else - { - const auto seconds_until_unlock = output_data.unlock_time - hf_v15_time; - const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2; - unlock_height = hf_v15_height + blocks_until_unlock; - } - - /* Note: it's possible for the output to be spent before it reaches the unlock_height; this is ok. 
It can't - be spent again using an fcmp because it'll have a duplicate key image. It's possible for the output to - unlock by old rules, and then re-lock again. This is also ok, we just need to be sure that the new hf rules - use this unlock_height. - */ - - // TODO: double check the accuracy of this calculation - MDEBUG("unlock time: " << output_data.unlock_time << " , unlock_height: " << unlock_height); + // Invalid outputs can't be added to the tree + continue; } - // Get the leaf tuple - const auto leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple( - rct::rct2pk(torsion_cleared_pubkey), - rct::rct2pk(output_data.commitment)); - - if (unlock_height == 60) - MDEBUG(fcmp::curve_trees::curve_trees_v1.m_c2.to_string(leaf_tuple.O_x)); + // Get the block in which the output will unlock + const uint64_t unlock_height = cryptonote::get_unlock_height(output_data.unlock_time, output_data.height); // Now add the leaf tuple to the locked outputs table MDB_val_set(k_height, unlock_height); @@ -6735,8 +6798,7 @@ void BlockchainLMDB::migrate_5_6() // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - // FIXME: if a dupe is removed from the locked outputs table and then re-inserted, the tree from a migration can look different than a tree constructed from syncing - result = mdb_cursor_put(c_locked_outputs, &k_height, &v_tuple, MDB_NODUPDATA); + result = mdb_cursor_put(c_locked_leaves, &k_height, &v_tuple, MDB_NODUPDATA); if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); if (result == MDB_KEYEXIST) @@ -6770,13 +6832,10 @@ void BlockchainLMDB::migrate_5_6() mdb_set_dupsort(txn, m_leaves, compare_uint64); mdb_set_dupsort(txn, m_layers, compare_uint64); - MDB_cursor *c_locked_outputs, *c_new_block_info, *c_old_block_info; + MDB_cursor 
*c_locked_leaves, *c_new_block_info, *c_old_block_info; - MDB_val k, v; MDB_val k_blk, v_blk; - MDB_cursor_op op = MDB_FIRST; - const uint64_t n_blocks = height(); i = 0; @@ -6799,7 +6858,7 @@ void BlockchainLMDB::migrate_5_6() memset(&m_wcursors, 0, sizeof(m_wcursors)); } - result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); @@ -6819,53 +6878,12 @@ void BlockchainLMDB::migrate_5_6() } } - MDB_val_set(k_height, i); - // Get all the locked outputs at that height - std::vector leaf_tuples; - - // TODO: double check this gets all leaf tuples when it does multiple iters - MDB_cursor_op op = MDB_SET; - while (1) - { - result = mdb_cursor_get(c_locked_outputs, &k_height, &v, op); - if (result == MDB_NOTFOUND) - break; - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str())); - op = MDB_NEXT_MULTIPLE; - - const uint64_t h = *(const uint64_t*)k_height.mv_data; - if (h != i) - throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected" + std::to_string(i)).c_str())); - - const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTuple*)v.mv_data); - const auto range_end = range_begin + v.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTuple); - - auto it = range_begin; - - // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it - if (leaf_tuples.size() == 1) - ++it; - - while (it < range_end) - { - leaf_tuples.push_back(*it); - ++it; - } - } - - CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree"); + const auto leaf_tuples = this->get_locked_leaf_tuples_at_height(i); this->grow_tree(fcmp::curve_trees::curve_trees_v1, leaf_tuples); // TODO: Remove locked outputs from the locked outputs table after adding them to tree - // Now update block info with 
num leaves in tree and new merkle root - const std::size_t n_leaf_tuples = this->get_num_leaf_tuples(); - const auto root = this->get_tree_root(); - - MDEBUG("n_leaf_tuples: " << n_leaf_tuples); - // Get old block_info and use it to set the new one with new values result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT); if (result) @@ -6881,8 +6899,10 @@ void BlockchainLMDB::migrate_5_6() bi.bi_hash = bi_old->bi_hash; bi.bi_cum_rct = bi_old->bi_cum_rct; bi.bi_long_term_block_weight = bi_old->bi_long_term_block_weight; - bi.bi_n_leaf_tuples = n_leaf_tuples; - bi.bi_tree_root = root; + bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); + bi.bi_tree_root = this->get_tree_root(); + + MDEBUG("Height: " << i << " , n_leaf_tuples: " << bi.bi_n_leaf_tuples); MDB_val_set(nv, bi); result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index e71d17bbde5..1944779623b 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -27,6 +27,7 @@ #pragma once #include +#include #include "blockchain_db/blockchain_db.h" #include "cryptonote_basic/blobdatatype.h" // for type blobdata @@ -64,7 +65,7 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; - MDB_cursor *m_txc_locked_outputs; + MDB_cursor *m_txc_locked_leaves; MDB_cursor *m_txc_leaves; MDB_cursor *m_txc_layers; @@ -91,7 +92,7 @@ typedef struct mdb_txn_cursors #define m_cur_tx_indices m_cursors->m_txc_tx_indices #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys -#define m_cur_locked_outputs m_cursors->m_txc_locked_outputs +#define m_cur_locked_leaves m_cursors->m_txc_locked_leaves #define m_cur_leaves m_cursors->m_txc_leaves #define m_cur_layers m_cursors->m_txc_layers #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta @@ -116,7 +117,7 @@ typedef struct mdb_rflags bool m_rf_tx_indices; bool m_rf_tx_outputs; 
bool m_rf_spent_keys; - bool m_rf_locked_outputs; + bool m_rf_locked_leaves; bool m_rf_leaves; bool m_rf_layers; bool m_rf_txpool_meta; @@ -277,6 +278,7 @@ class BlockchainLMDB : public BlockchainDB virtual uint64_t get_tx_block_height(const crypto::hash& h) const; virtual uint64_t get_num_outputs(const uint64_t& amount) const; + virtual uint64_t get_num_global_outputs() const; virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const; virtual void get_output_key(const epee::span &amounts, const std::vector &offsets, std::vector &outputs, bool allow_partial = false) const; @@ -370,10 +372,10 @@ class BlockchainLMDB : public BlockchainDB virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &new_leaves); - virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples); + virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples); virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::size_t expected_n_leaf_tuples) const; + const uint64_t expected_n_leaf_tuples) const; private: void do_resize(uint64_t size_increase=0); @@ -389,6 +391,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash + , const std::multimap& leaf_tuples_by_unlock_height ); virtual void remove_block(); @@ -421,13 +424,13 @@ class BlockchainLMDB : public BlockchainDB template void grow_layer(const C &curve, const std::vector> &layer_extensions, - const std::size_t c_idx, - const std::size_t layer_idx); + const uint64_t c_idx, + const uint64_t layer_idx); template - void trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, const std::size_t layer_idx); + void trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx); - std::size_t 
get_num_leaf_tuples() const; + uint64_t get_num_leaf_tuples() const; std::array get_tree_root() const; @@ -443,10 +446,12 @@ class BlockchainLMDB : public BlockchainDB template bool audit_layer(const C_CHILD &c_child, const C_PARENT &c_parent, - const std::size_t layer_idx, - const std::size_t child_start_idx, - const std::size_t child_chunk_idx, - const std::size_t chunk_width) const; + const uint64_t layer_idx, + const uint64_t child_start_idx, + const uint64_t child_chunk_idx, + const uint64_t chunk_width) const; + + std::vector get_locked_leaf_tuples_at_height(const uint64_t height); uint64_t num_outputs() const; @@ -515,7 +520,7 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; - MDB_dbi m_locked_outputs; + MDB_dbi m_locked_leaves; MDB_dbi m_leaves; MDB_dbi m_layers; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 2b3e3e8412b..5ca53b8c065 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -100,6 +100,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual std::vector get_tx_list(const std::vector& hlist) const override { return std::vector(); } virtual uint64_t get_tx_block_height(const crypto::hash& h) const override { return 0; } virtual uint64_t get_num_outputs(const uint64_t& amount) const override { return 1; } + virtual uint64_t get_num_global_outputs() const override { return 1; } virtual uint64_t get_indexing_base() const override { return 0; } virtual cryptonote::output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const override { return cryptonote::output_data_t(); } virtual cryptonote::tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const override { return cryptonote::tx_out_index(); } @@ -118,8 +119,8 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void remove_spent_key(const crypto::key_image& k_image) override {} virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 
&curve_trees, const std::vector &new_leaves) override {}; - virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) override {}; - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t expected_n_leaf_tuples) const override { return false; }; + virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) override {}; + virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const override { return false; }; virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } @@ -148,6 +149,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const std::multimap& leaf_tuples_by_unlock_height ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp b/src/cryptonote_basic/cryptonote_format_utils.cpp index ca56c2bc346..cb400c35869 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -1644,4 +1644,57 @@ namespace cryptonote sc_sub((unsigned char*)key.data, (const unsigned char*)key.data, (const unsigned char*)hash.data); return key; } + //--------------------------------------------------------------- + // TODO: write tests for this func + uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain) + { + uint64_t unlock_height = 0; + const uint64_t default_unlock_height = height_included_in_chain + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE; + + // TODO: 
double triple check off by 1 + if (unlock_time == 0) + { + unlock_height = default_unlock_height; + } + else if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) + { + unlock_height = unlock_time; + } + else + { + // Interpret the unlock_time as time + // TODO: hardcode correct times for each network and take in nettype + const auto hf_v15_time = 1656629118; + const auto hf_v15_height = 2689608; + + // Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block + // TODO: consider taking into account 60s block times when that was consensus + if (hf_v15_time > unlock_time) + { + const auto seconds_since_unlock = hf_v15_time - unlock_time; + const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2; + CHECK_AND_ASSERT_THROW_MES(hf_v15_height > blocks_since_unlock, "unexpected blocks since unlock"); + unlock_height = hf_v15_height - blocks_since_unlock; + } + else + { + const auto seconds_until_unlock = unlock_time - hf_v15_time; + const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2; + unlock_height = hf_v15_height + blocks_until_unlock; + } + + /* Note: since this function was introduced for the hf that included fcmp's, it's possible for an output to be + spent before it reaches the unlock_height going by the old rules; this is ok. It can't be spent again because + it'll have a duplicate key image. It's also possible for an output to unlock by old rules, and then re-lock + again at the fork. This is also ok, we just need to be sure that the new hf rules use this unlock_height + starting at the fork for fcmp's. 
+ */ + + // TODO: double check the accuracy of this calculation + MDEBUG("unlock time: " << unlock_time << " , unlock_height: " << unlock_height); + } + + // Can't unlock earlier than the default unlock height + return std::max(unlock_height, default_unlock_height); + } } diff --git a/src/cryptonote_basic/cryptonote_format_utils.h b/src/cryptonote_basic/cryptonote_format_utils.h index fc7dfcd8590..f81e57fdca9 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.h +++ b/src/cryptonote_basic/cryptonote_format_utils.h @@ -265,6 +265,9 @@ namespace cryptonote crypto::secret_key encrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); crypto::secret_key decrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); + + uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain); + #define CHECKED_GET_SPECIFIC_VARIANT(variant_var, specific_type, variable_name, fail_return_val) \ CHECK_AND_ASSERT_MES(variant_var.type() == typeid(specific_type), fail_return_val, "wrong variant type: " << variant_var.type().name() << ", expected " << typeid(specific_type).name()); \ specific_type& variable_name = boost::get(variant_var); diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index b7aca03fb82..22e5e5a80cb 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -44,7 +44,9 @@ monero_add_library_with_deps( target_link_libraries(fcmp PUBLIC crypto + cryptonote_basic epee + ringct PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust/libfcmp_rust.a ${EXTRA_LIBRARIES}) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 070b23c3183..c797f4f7c05 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -26,7 +26,9 @@ // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#include "cryptonote_basic/cryptonote_format_utils.h" #include "curve_trees.h" +#include "ringct/rctOps.h" namespace fcmp @@ -92,10 +94,10 @@ template static LayerExtension hash_children_chunks(const C &curve, const typename C::Scalar *old_last_child, const typename C::Point *old_last_parent, - const std::size_t start_offset, - const std::size_t next_parent_start_index, + const uint64_t start_offset, + const uint64_t next_parent_start_index, const std::vector &new_child_scalars, - const std::size_t chunk_width) + const uint64_t chunk_width) { LayerExtension parents_out; parents_out.start_idx = next_parent_start_index; @@ -106,7 +108,7 @@ static LayerExtension hash_children_chunks(const C &curve, CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); // See how many children we need to fill up the existing last chunk - std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); + uint64_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); @@ -148,7 +150,7 @@ static LayerExtension hash_children_chunks(const C &curve, } // Hash chunks of child scalars to create the parent hashes - std::size_t chunk_start_idx = chunk_size; + uint64_t chunk_start_idx = chunk_size; while (chunk_start_idx < new_child_scalars.size()) { chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); @@ -177,9 +179,9 @@ static LayerExtension hash_children_chunks(const C &curve, return parents_out; }; //---------------------------------------------------------------------------------------------------------------------- -static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_total_children, - const std::size_t new_total_children, - const std::size_t 
parent_chunk_width, +static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_total_children, + const uint64_t new_total_children, + const uint64_t parent_chunk_width, const bool last_child_will_change) { // 1. Check pre-conditions on total number of children @@ -198,10 +200,10 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t // 2. Calculate old and new total number of parents using totals for children // If there's only 1 child, then it must be the old root and thus it would have no old parents - const std::size_t old_total_parents = old_total_children > 1 + const uint64_t old_total_parents = old_total_children > 1 ? (1 + ((old_total_children - 1) / parent_chunk_width)) : 0; - const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); // 3. Check pre-conditions on total number of parents CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, @@ -218,7 +220,7 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t // 4. Set the current offset in the last chunk // - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're // changing that last child - std::size_t offset = old_total_parents > 0 + uint64_t offset = old_total_parents > 0 ? (old_total_children % parent_chunk_width) : 0; @@ -245,7 +247,7 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t const bool need_old_last_parent = need_old_last_child || adding_members_to_existing_last_chunk; // 9. 
Set the next parent's start index - std::size_t next_parent_start_index = old_total_parents; + uint64_t next_parent_start_index = old_total_parents; if (need_old_last_parent) { // If we're updating the last parent, we need to bring the starting parent index back 1 @@ -280,23 +282,21 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t }; //---------------------------------------------------------------------------------------------------------------------- -static GrowLayerInstructions get_leaf_layer_grow_instructions(const std::size_t old_n_leaf_tuples, - const std::size_t new_n_leaf_tuples, - const std::size_t leaf_tuple_size, - const std::size_t leaf_layer_chunk_width) +static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old_n_leaf_tuples, + const uint64_t new_n_leaf_tuples, + const uint64_t leaf_tuple_size, + const uint64_t leaf_layer_chunk_width) { - // TODO: comments - // The leaf layer can never be the root layer const bool setting_next_layer_after_old_root = false; - const std::size_t old_total_children = old_n_leaf_tuples * leaf_tuple_size; - const std::size_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size; + const uint64_t old_total_children = old_n_leaf_tuples * leaf_tuple_size; + const uint64_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size; - const std::size_t old_total_parents = old_total_children > 0 + const uint64_t old_total_parents = old_total_children > 0 ? 
(1 + ((old_total_children - 1) / leaf_layer_chunk_width)) : 0; - const std::size_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width); + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width); CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, "new_total_children must be >= old_total_children"); @@ -306,14 +306,14 @@ static GrowLayerInstructions get_leaf_layer_grow_instructions(const std::size_t // Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf const bool need_old_last_child = false; - const std::size_t offset = old_total_children % leaf_layer_chunk_width; + const uint64_t offset = old_total_children % leaf_layer_chunk_width; const bool last_chunk_is_full = offset == 0; const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full && new_total_children > old_total_children; const bool need_old_last_parent = adding_members_to_existing_last_chunk; - std::size_t next_parent_start_index = old_total_parents; + uint64_t next_parent_start_index = old_total_parents; if (need_old_last_parent) { // If we're updating the last parent, we need to bring the starting parent index back 1 @@ -356,8 +356,8 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, const std::vector &child_last_hashes, const std::vector &parent_last_hashes, const std::vector> child_layer_extensions, - const std::size_t last_updated_child_idx, - const std::size_t last_updated_parent_idx) + const uint64_t last_updated_child_idx, + const uint64_t last_updated_parent_idx) { // TODO: comments const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size()) @@ -412,9 +412,9 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, } //---------------------------------------------------------------------------------------------------------------------- static TrimLayerInstructions 
get_trim_layer_instructions( - const std::size_t old_total_children, - const std::size_t new_total_children, - const std::size_t parent_chunk_width, + const uint64_t old_total_children, + const uint64_t new_total_children, + const uint64_t parent_chunk_width, const bool last_child_will_change) { CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0"); @@ -422,19 +422,19 @@ static TrimLayerInstructions get_trim_layer_instructions( "old_total_children must be >= new_total_children"); // Calculate old and new total number of parents using totals for children - const std::size_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); - const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + const uint64_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents, "old_total_parents must be >= new_total_parents"); CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents, "new_total_children must be > new_total_parents"); - const std::size_t old_offset = old_total_children % parent_chunk_width; - std::size_t new_offset = new_total_children % parent_chunk_width; + const uint64_t old_offset = old_total_children % parent_chunk_width; + const uint64_t new_offset = new_total_children % parent_chunk_width; // Get the number of existing children in what will become the new last chunk after trimming - const std::size_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) + const uint64_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) ? 
parent_chunk_width : old_offset; @@ -444,7 +444,7 @@ static TrimLayerInstructions get_trim_layer_instructions( "unexpected new_last_chunk_old_num_children"); // Get the number of children we'll be trimming from the new last chunk - const std::size_t trim_n_children = new_offset == 0 + const uint64_t trim_n_children = new_offset == 0 ? 0 // The last chunk wil remain full when the new_offset == 0 : new_last_chunk_old_num_children - new_offset; @@ -457,43 +457,49 @@ static TrimLayerInstructions get_trim_layer_instructions( CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children), "cannot both need last children to trim and need the remaining children"); - // TODO: cleaner conditional approach - // TODO: comments - const bool need_last_chunk_parent = !need_last_chunk_remaining_children && - (need_last_chunk_children_to_trim || last_child_will_change); + // If we're trimming from the new last chunk OR an element in the new last chunk will change, then we're going to + // update the existing last hash, since its children are changing + const bool update_existing_last_hash = trim_n_children > 0 || last_child_will_change; - const bool update_existing_last_hash = need_last_chunk_remaining_children || need_last_chunk_parent; + // If we're trimming using remaining children, then we're just going to call hash_grow as if the chunk is being + // hashed for the first time, and so we don't need the existing last hash in that case, even if the hash is updating + const bool need_existing_last_hash = update_existing_last_hash && !need_last_chunk_remaining_children; - std::size_t hash_offset = new_offset; + // We need to decrement the offset we use to hash the chunk if the last child is changing + uint64_t hash_offset = new_offset; if (last_child_will_change) { - hash_offset = hash_offset == 0 ? 
(parent_chunk_width - 1) : (hash_offset - 1); - - if (need_last_chunk_children_to_trim || need_last_chunk_remaining_children) - --new_offset; - } - - if (need_last_chunk_remaining_children) - { - hash_offset = 0; + hash_offset = hash_offset == 0 + ? (parent_chunk_width - 1) // chunk is full, so decrement full width by 1 + : (hash_offset - 1); } - std::size_t start_trim_idx = 0; - std::size_t end_trim_idx = 0; + // Set the child index range so the caller knows which children to read from the tree + uint64_t start_trim_idx = 0; + uint64_t end_trim_idx = 0; if (need_last_chunk_children_to_trim) { - const std::size_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width; - const std::size_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width; + // We'll call hash_trim to trim the children between [offset, last chunk end] + const uint64_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width; + const uint64_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width; - start_trim_idx = chunk_boundary_start + new_offset; + start_trim_idx = chunk_boundary_start + hash_offset; end_trim_idx = std::min(chunk_boundary_end, old_total_children); } - else if (need_last_chunk_remaining_children && new_offset > 0) + else if (need_last_chunk_remaining_children) { - start_trim_idx = new_total_children - new_offset; + // We'll call hash_grow with the remaining children between [0, offset] + CHECK_AND_ASSERT_THROW_MES(new_total_children >= hash_offset, "hash_offset is unexpectedly high"); + start_trim_idx = new_total_children - hash_offset; end_trim_idx = new_total_children; } + // If we're trimming using remaining children, then we're just going to call hash_grow with offset 0 + if (need_last_chunk_remaining_children) + { + hash_offset = 0; + } + MDEBUG("parent_chunk_width: " << parent_chunk_width << " , old_total_children: " << old_total_children << " , new_total_children: " << new_total_children @@ -501,10 +507,9 @@ static 
TrimLayerInstructions get_trim_layer_instructions( << " , new_total_parents: " << new_total_parents << " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim << " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children - << " , need_last_chunk_parent: " << need_last_chunk_parent + << " , need_existing_last_hash: " << need_existing_last_hash << " , need_new_last_child: " << last_child_will_change << " , update_existing_last_hash: " << update_existing_last_hash - << " , new_offset: " << new_offset << " , hash_offset: " << hash_offset << " , start_trim_idx: " << start_trim_idx << " , end_trim_idx: " << end_trim_idx); @@ -515,12 +520,11 @@ static TrimLayerInstructions get_trim_layer_instructions( .new_total_children = new_total_children, .old_total_parents = old_total_parents, .new_total_parents = new_total_parents, + .update_existing_last_hash = update_existing_last_hash, .need_last_chunk_children_to_trim = need_last_chunk_children_to_trim, .need_last_chunk_remaining_children = need_last_chunk_remaining_children, - .need_last_chunk_parent = need_last_chunk_parent, + .need_existing_last_hash = need_existing_last_hash, .need_new_last_child = last_child_will_change, - .update_existing_last_hash = update_existing_last_hash, - .new_offset = new_offset, .hash_offset = hash_offset, .start_trim_idx = start_trim_idx, .end_trim_idx = end_trim_idx, @@ -535,8 +539,8 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc const std::vector &parent_last_hashes, const std::vector> &children_to_trim, const std::vector &child_last_hashes, - const std::size_t parent_layer_idx, - const std::size_t child_layer_idx, + const uint64_t parent_layer_idx, + const uint64_t child_layer_idx, const std::vector> &child_reductions) { LayerReduction layer_reduction_out; @@ -544,18 +548,17 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc layer_reduction_out.new_total_parents = 
trim_layer_instructions.new_total_parents; layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash; - typename C_PARENT::Point existing_hash = c_parent.m_hash_init_point; - if (trim_layer_instructions.need_last_chunk_parent) - { + if (trim_layer_instructions.need_existing_last_hash) CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash"); - existing_hash = parent_last_hashes[parent_layer_idx]; - } + + const typename C_PARENT::Point &existing_hash = trim_layer_instructions.need_existing_last_hash + ? parent_last_hashes[parent_layer_idx] + : c_parent.m_hash_init_point; std::vector child_scalars; if (trim_layer_instructions.need_last_chunk_children_to_trim || trim_layer_instructions.need_last_chunk_remaining_children) { - // TODO: a clean way to do this without copying CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim"); child_scalars = children_to_trim[parent_layer_idx]; } @@ -576,8 +579,8 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc } else if (!trim_layer_instructions.need_last_chunk_children_to_trim) { - // TODO: cleaner conditional for this case - const std::size_t last_child_layer_idx = child_layer_idx - 1; + // Falling to this conditional means we're not trimming at all, just updating the old last child + const uint64_t last_child_layer_idx = child_layer_idx - 1; CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; @@ -624,9 +627,29 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc //---------------------------------------------------------------------------------------------------------------------- template<> CurveTrees::LeafTuple CurveTrees::output_to_leaf_tuple( - const crypto::public_key &O, - const crypto::public_key &C) const + const 
crypto::public_key &output_pubkey, + const crypto::public_key &commitment) const { + CHECK_AND_ASSERT_THROW_MES(crypto::check_key(output_pubkey), "invalid output pub key"); + + const auto clear_torsion = [](const crypto::public_key &key) + { + // TODO: don't need to decompress and recompress points, can be optimized + rct::key torsion_cleared_key = rct::scalarmultKey(rct::pk2rct(key), rct::INV_EIGHT); + torsion_cleared_key = rct::scalarmult8(torsion_cleared_key); + + CHECK_AND_ASSERT_THROW_MES(torsion_cleared_key != rct::I, "cannot equal identity"); + + return torsion_cleared_key; + }; + + // Torsion clear the output pub key and commitment + const rct::key rct_O = clear_torsion(output_pubkey); + const rct::key rct_C = clear_torsion(commitment); + + const crypto::public_key O = rct::rct2pk(rct_O); + const crypto::public_key C = rct::rct2pk(rct_C); + crypto::ec_point I; crypto::derive_key_image_generator(O, I); @@ -654,9 +677,43 @@ std::vector CurveTrees::flatten_leaves(const std::v return flattened_leaves; }; //---------------------------------------------------------------------------------------------------------------------- +template <> +void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transaction &tx, + const uint64_t tx_height, + const bool miner_tx, + std::multimap::LeafTuple> &leaf_tuples_by_unlock_height_inout) const +{ + const uint64_t unlock_height = cryptonote::get_unlock_height(tx.unlock_time, tx_height); + + for (std::size_t i = 0; i < tx.vout.size(); ++i) + { + const auto &out = tx.vout[i]; + + crypto::public_key output_public_key; + if (!cryptonote::get_output_public_key(out, output_public_key)) + throw std::runtime_error("Could not get an output public key from a tx output."); + + const rct::key commitment = (miner_tx || tx.version < 2) + ? 
rct::zeroCommit(out.amount) + : tx.rct_signatures.outPk[i].mask; + + try + { + // Throws an error if output is invalid; we don't want leaf tuples from invalid outputs + auto leaf_tuple = output_to_leaf_tuple( + output_public_key, + rct::rct2pk(commitment)); + + leaf_tuples_by_unlock_height_inout.emplace(unlock_height, std::move(leaf_tuple)); + } + catch (...) + { /*continue*/ }; + } +} +//---------------------------------------------------------------------------------------------------------------------- template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( - const std::size_t old_n_leaf_tuples, + const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, const std::vector &new_leaf_tuples) const { @@ -707,13 +764,13 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio // Alternate between hashing c2 children, c1 children, c2, c1, ... bool parent_is_c1 = true; - std::size_t c1_last_idx = 0; - std::size_t c2_last_idx = 0; + uint64_t c1_last_idx = 0; + uint64_t c2_last_idx = 0; while (grow_layer_instructions.new_total_parents > 1) { MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); - const std::size_t new_total_children = grow_layer_instructions.new_total_parents; + const uint64_t new_total_children = grow_layer_instructions.new_total_parents; grow_layer_instructions = this->set_next_layer_extension( grow_layer_instructions, @@ -736,8 +793,8 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio //---------------------------------------------------------------------------------------------------------------------- template std::vector CurveTrees::get_trim_instructions( - const std::size_t old_n_leaf_tuples, - const std::size_t trim_n_leaf_tuples) const + const uint64_t old_n_leaf_tuples, + const uint64_t trim_n_leaf_tuples) const { CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); 
CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); @@ -746,10 +803,10 @@ std::vector CurveTrees::get_trim_instructions( // Get trim instructions for the leaf layer { - const std::size_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; - const std::size_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE; + const uint64_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; + const uint64_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE; - const std::size_t parent_chunk_width = m_leaf_layer_chunk_width; + const uint64_t parent_chunk_width = m_leaf_layer_chunk_width; // Leaf layer's last child never changes since leaf layer is pop-/append-only const bool last_child_will_change = false; @@ -789,12 +846,12 @@ typename CurveTrees::TreeReduction CurveTrees::get_tree_reductio CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0, "unexpected new total leaves"); - const std::size_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; + const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples; bool use_c2 = true; - std::size_t c1_idx = 0; - std::size_t c2_idx = 0; + uint64_t c1_idx = 0; + uint64_t c2_idx = 0; for (const auto &trim_layer_instructions : trim_instructions) { @@ -850,8 +907,8 @@ GrowLayerInstructions CurveTrees::set_next_layer_extension( const GrowLayerInstructions &prev_layer_instructions, const bool parent_is_c1, const LastHashes &last_hashes, - std::size_t &c1_last_idx_inout, - std::size_t &c2_last_idx_inout, + uint64_t &c1_last_idx_inout, + uint64_t &c2_last_idx_inout, TreeExtension &tree_extension_inout) const { const auto &c1_last_hashes = last_hashes.c1_last_hashes; @@ -860,7 +917,7 @@ GrowLayerInstructions CurveTrees::set_next_layer_extension( auto &c1_layer_extensions_out = 
tree_extension_inout.c1_layer_extensions; auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions; - const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width; + const uint64_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width; const auto grow_layer_instructions = get_grow_layer_instructions( prev_layer_instructions.old_total_parents, diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 9a25963ae42..6bc6b599f31 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -28,10 +28,12 @@ #pragma once +#include "cryptonote_basic/cryptonote_basic.h" #include "crypto/crypto.h" #include "misc_log_ex.h" #include "tower_cycle.h" +#include #include @@ -50,7 +52,7 @@ typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_ch template struct LayerExtension final { - std::size_t start_idx{0}; + uint64_t start_idx{0}; bool update_existing_last_hash; std::vector hashes; }; @@ -59,7 +61,7 @@ struct LayerExtension final template struct LayerReduction final { - std::size_t new_total_parents{0}; + uint64_t new_total_parents{0}; bool update_existing_last_hash; typename C::Point new_last_hash; }; @@ -68,15 +70,15 @@ struct LayerReduction final struct GrowLayerInstructions final { // The max chunk width of children used to hash into a parent - std::size_t parent_chunk_width; + uint64_t parent_chunk_width; // Total children refers to the total number of elements in a layer - std::size_t old_total_children; - std::size_t new_total_children; + uint64_t old_total_children; + uint64_t new_total_children; // Total parents refers to the total number of hashes of chunks of children - std::size_t old_total_parents; - std::size_t new_total_parents; + uint64_t old_total_parents; + uint64_t new_total_parents; // When updating the tree, we use this boolean to know when we'll need to use the tree's existing old root in order // to set a new layer after that root @@ -88,37 +90,43 @@ struct 
GrowLayerInstructions final bool need_old_last_parent; // The first chunk that needs to be updated's first child's offset within that chunk - std::size_t start_offset; + uint64_t start_offset; // The parent's starting index in the layer - std::size_t next_parent_start_index; + uint64_t next_parent_start_index; }; // Useful metadata for trimming a layer struct TrimLayerInstructions final { // The max chunk width of children used to hash into a parent - std::size_t parent_chunk_width; + uint64_t parent_chunk_width; // Total children refers to the total number of elements in a layer - std::size_t old_total_children; - std::size_t new_total_children; + uint64_t old_total_children; + uint64_t new_total_children; // Total parents refers to the total number of hashes of chunks of children - std::size_t old_total_parents; - std::size_t new_total_parents; + uint64_t old_total_parents; + uint64_t new_total_parents; + // True if the new last chunk's existing parent hash will need to be updated + bool update_existing_last_hash; + + // Whether we need to explicitly trim children from the new last chunk bool need_last_chunk_children_to_trim; + // Whether we need to trim by growing using the remaining children from the new last chunk bool need_last_chunk_remaining_children; - bool need_last_chunk_parent; + // Whether we need the new last chunk's existing parent hash in order to complete the trim + bool need_existing_last_hash; + // Whether we need the new last child from the new last chunk in order to complete the trim bool need_new_last_child; - bool update_existing_last_hash; + // The offset to use when hashing the last chunk + uint64_t hash_offset; - std::size_t new_offset; - std::size_t hash_offset; - - std::size_t start_trim_idx; - std::size_t end_trim_idx; + // The starting and ending indexes of the children we're going to need to trim the last chunk + uint64_t start_trim_idx; + uint64_t end_trim_idx; }; 
//---------------------------------------------------------------------------------------------------------------------- @@ -130,7 +138,7 @@ template class CurveTrees { public: - CurveTrees(const C1 &c1, const C2 &c2, const std::size_t c1_width, const std::size_t c2_width): + CurveTrees(const C1 &c1, const C2 &c2, const uint64_t c1_width, const uint64_t c2_width): m_c1{c1}, m_c2{c2}, m_c1_width{c1_width}, @@ -147,20 +155,20 @@ class CurveTrees struct LeafTuple final { // Output ed25519 point x-coordinate - const typename C2::Scalar O_x; + typename C2::Scalar O_x; // Key image generator x-coordinate - const typename C2::Scalar I_x; + typename C2::Scalar I_x; // Commitment x-coordinate - const typename C2::Scalar C_x; + typename C2::Scalar C_x; }; - static const std::size_t LEAF_TUPLE_SIZE = 3; + static const uint64_t LEAF_TUPLE_SIZE = 3; static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); // Contiguous leaves in the tree, starting a specified start_idx in the leaf layer struct Leaves final { // Starting leaf tuple index in the leaf layer - std::size_t start_leaf_tuple_idx{0}; + uint64_t start_leaf_tuple_idx{0}; // Contiguous leaves in a tree that start at the start_idx std::vector tuples; }; @@ -180,7 +188,7 @@ class CurveTrees // - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc struct TreeReduction final { - std::size_t new_total_leaf_tuples; + uint64_t new_total_leaf_tuples; std::vector> c1_layer_reductions; std::vector> c2_layer_reductions; }; @@ -206,21 +214,27 @@ class CurveTrees //member functions public: // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree - LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const; + LeafTuple output_to_leaf_tuple(const crypto::public_key &output_pubkey, const crypto::public_key &C) const; // Flatten leaves [(O.x, I.x, 
C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] std::vector flatten_leaves(const std::vector &leaves) const; + // Convert cryptonote tx outs to leaf tuples, grouped by the leaf tuple unlock height + void tx_outs_to_leaf_tuples(const cryptonote::transaction &tx, + const uint64_t tx_height, + const bool miner_tx, + std::multimap &leaf_tuples_by_unlock_height_inout) const; + // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree - TreeExtension get_tree_extension(const std::size_t old_n_leaf_tuples, + TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, const std::vector &new_leaf_tuples) const; // Get instructions useful for trimming all existing layers in the tree std::vector get_trim_instructions( - const std::size_t old_n_leaf_tuples, - const std::size_t trim_n_leaf_tuples) const; + const uint64_t old_n_leaf_tuples, + const uint64_t trim_n_leaf_tuples) const; // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from // each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return @@ -238,8 +252,8 @@ class CurveTrees const GrowLayerInstructions &prev_layer_instructions, const bool parent_is_c1, const LastHashes &last_hashes, - std::size_t &c1_last_idx_inout, - std::size_t &c2_last_idx_inout, + uint64_t &c1_last_idx_inout, + uint64_t &c2_last_idx_inout, TreeExtension &tree_extension_inout) const; //public member variables @@ -249,12 +263,11 @@ class CurveTrees const C2 &m_c2; // The leaf layer has a distinct chunk width than the other layers - // TODO: public function for update_last_parent, and make this private - const std::size_t m_leaf_layer_chunk_width; + const uint64_t m_leaf_layer_chunk_width; // The chunk widths of 
the layers in the tree tied to each curve - const std::size_t m_c1_width; - const std::size_t m_c2_width; + const uint64_t m_c1_width; + const uint64_t m_c2_width; }; //---------------------------------------------------------------------------------------------------------------------- using Helios = tower_cycle::Helios; @@ -263,8 +276,8 @@ using CurveTreesV1 = CurveTrees; // https://github.com/kayabaNerve/fcmp-plus-plus/blob // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 -static const std::size_t HELIOS_CHUNK_WIDTH = 38; -static const std::size_t SELENE_CHUNK_WIDTH = 18; +static const uint64_t HELIOS_CHUNK_WIDTH = 38; +static const uint64_t SELENE_CHUNK_WIDTH = 18; static const Helios HELIOS; static const Selene SELENE; static const CurveTreesV1 curve_trees_v1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index 247f25fffbb..20b952c5e62 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -84,6 +84,7 @@ namespace rct { return bytes[i]; } bool operator==(const key &k) const { return !crypto_verify_32(bytes, k.bytes); } + bool operator!=(const key &k) const { return crypto_verify_32(bytes, k.bytes); } unsigned char bytes[32]; }; typedef std::vector keyV; //vector of keys diff --git a/tests/block_weight/CMakeLists.txt b/tests/block_weight/CMakeLists.txt index f622d5a3d30..be6b12350d1 100644 --- a/tests/block_weight/CMakeLists.txt +++ b/tests/block_weight/CMakeLists.txt @@ -38,6 +38,7 @@ target_link_libraries(block_weight PRIVATE cryptonote_core blockchain_db + fcmp ${EXTRA_LIBRARIES}) add_test( diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index 44ccf1e6462..26b0f1ab589 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -32,6 +32,7 @@ #include #include "cryptonote_core/cryptonote_core.h" #include "blockchain_db/testdb.h" +#include "fcmp/curve_trees.h" #define 
LONG_TERM_BLOCK_WEIGHT_WINDOW 5000 @@ -64,6 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index 05a6ce1f90f..b0f77a0242c 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,6 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back({blk, blk_hash}); @@ -171,7 +172,7 @@ static std::unique_ptr init_blockchain(const std: const block *blk = &boost::get(ev); auto blk_hash = get_block_hash(*blk); - bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash); + bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash, {}); } bool r = bap->blockchain.init(bdb, nettype, true, test_options, 2, nullptr); diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index b264b675690..2dc98d0ad4f 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -82,12 +82,11 @@ static std::vector get_last_chunk_children_to_trim(co const CurveTreesGlobalTree::Layer &child_layer, const bool need_last_chunk_children_to_trim, const bool need_last_chunk_remaining_children, - const std::size_t new_offset, const std::size_t start_trim_idx, const std::size_t end_trim_idx) { std::vector children_to_trim_out; - if (need_last_chunk_children_to_trim || (need_last_chunk_remaining_children && new_offset > 0)) + if (end_trim_idx > start_trim_idx) { std::size_t idx = start_trim_idx; MDEBUG("Start trim from idx: " << idx << " , ending trim at: " << end_trim_idx); @@ -344,13 +343,10 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), 
"no instructions"); const auto &trim_leaf_layer_instructions = trim_instructions[0]; - const std::size_t new_offset = trim_leaf_layer_instructions.new_offset; - std::vector leaves_to_trim; // TODO: separate function - if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim || - (trim_leaf_layer_instructions.need_last_chunk_remaining_children && new_offset > 0)) + if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx) { std::size_t idx = trim_leaf_layer_instructions.start_trim_idx; MDEBUG("Start trim from idx: " << idx); @@ -384,7 +380,6 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c const bool need_last_chunk_children_to_trim = trim_layer_instructions.need_last_chunk_children_to_trim; const bool need_last_chunk_remaining_children = trim_layer_instructions.need_last_chunk_remaining_children; - const std::size_t new_offset = trim_layer_instructions.new_offset; const std::size_t start_trim_idx = trim_layer_instructions.start_trim_idx; const std::size_t end_trim_idx = trim_layer_instructions.end_trim_idx; @@ -397,7 +392,6 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c m_tree.c1_layers[c1_idx], need_last_chunk_children_to_trim, need_last_chunk_remaining_children, - new_offset, start_trim_idx, end_trim_idx); @@ -413,7 +407,6 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c m_tree.c2_layers[c2_idx], need_last_chunk_children_to_trim, need_last_chunk_remaining_children, - new_offset, start_trim_idx, end_trim_idx); @@ -1102,7 +1095,7 @@ TEST(curve_trees, hash_trim) const auto selene_scalar_0 = generate_random_selene_scalar(); const auto selene_scalar_1 = generate_random_selene_scalar(); - // Get the initial hash of the 3 selene scalars + // Get the initial hash of the 2 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( 
/*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, @@ -1172,3 +1165,63 @@ TEST(curve_trees, hash_trim) ASSERT_EQ(trim_res_bytes, grow_res_bytes); } } +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, hash_grow) +{ + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 selene scalars + std::vector all_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + + // Extend with a new child + const auto selene_scalar_2 = generate_random_selene_scalar(); + std::vector new_children{selene_scalar_2}; + const auto ext_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + init_hash, + all_children.size(), + fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + Selene::Chunk{new_children.data(), new_children.size()}); + const auto ext_hash_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(ext_hash); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2} + all_children.push_back(selene_scalar_2); + const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*children*/ 
Selene::Chunk{all_children.data(), all_children.size()}); + const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); + + ASSERT_EQ(ext_hash_bytes, grow_res_bytes); + + // Extend again with a new child + const auto selene_scalar_3 = generate_random_selene_scalar(); + new_children.clear(); + new_children = {selene_scalar_3}; + const auto ext_hash2 = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + ext_hash, + all_children.size(), + fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + Selene::Chunk{new_children.data(), new_children.size()}); + const auto ext_hash_bytes2 = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(ext_hash2); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} + all_children.push_back(selene_scalar_3); + const auto grow_res2 = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + /*offset*/ 0, + /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + const auto grow_res_bytes2 = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res2); + + ASSERT_EQ(ext_hash_bytes2, grow_res_bytes2); +} diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index 56958a0d85f..6c7a221d0ee 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -54,6 +54,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back(blk); } @@ -107,20 +108,20 @@ TEST(major, Only) ASSERT_FALSE(hf.add(mkblock(0, 2), 0)); ASSERT_FALSE(hf.add(mkblock(2, 2), 0)); ASSERT_TRUE(hf.add(mkblock(1, 2), 0)); - db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, 
crypto::hash(), {}); // block height 1, only version 1 is accepted ASSERT_FALSE(hf.add(mkblock(0, 2), 1)); ASSERT_FALSE(hf.add(mkblock(2, 2), 1)); ASSERT_TRUE(hf.add(mkblock(1, 2), 1)); - db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); // block height 2, only version 2 is accepted ASSERT_FALSE(hf.add(mkblock(0, 2), 2)); ASSERT_FALSE(hf.add(mkblock(1, 2), 2)); ASSERT_FALSE(hf.add(mkblock(3, 2), 2)); ASSERT_TRUE(hf.add(mkblock(2, 2), 2)); - db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); } TEST(empty_hardforks, Success) @@ -134,7 +135,7 @@ TEST(empty_hardforks, Success) ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready); for (uint64_t h = 0; h <= 10; ++h) { - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } ASSERT_EQ(hf.get(0), 1); @@ -168,14 +169,14 @@ TEST(check_for_height, Success) for (uint64_t h = 0; h <= 4; ++h) { ASSERT_TRUE(hf.check_for_height(mkblock(1, 1), h)); ASSERT_FALSE(hf.check_for_height(mkblock(2, 2), h)); // block version is too high - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 5; h <= 10; ++h) { ASSERT_FALSE(hf.check_for_height(mkblock(1, 1), h)); // block version is too low ASSERT_TRUE(hf.check_for_height(mkblock(2, 2), h)); - db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -192,19 +193,19 @@ TEST(get, next_version) for (uint64_t h = 0; h <= 4; ++h) { ASSERT_EQ(2, hf.get_next_version()); - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 
0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 5; h <= 9; ++h) { ASSERT_EQ(4, hf.get_next_version()); - db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 10; h <= 15; ++h) { ASSERT_EQ(4, hf.get_next_version()); - db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -245,7 +246,7 @@ TEST(steps_asap, Success) hf.init(); for (uint64_t h = 0; h < 10; ++h) { - db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -272,7 +273,7 @@ TEST(steps_1, Success) hf.init(); for (uint64_t h = 0 ; h < 10; ++h) { - db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -297,7 +298,7 @@ TEST(reorganize, Same) // index 0 1 2 3 4 5 6 7 8 9 static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; for (uint64_t h = 0; h < 20; ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -328,7 +329,7 @@ TEST(reorganize, Changed) static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9 }; for (uint64_t h = 0; h < 16; ++h) { - db.add_block(mkblock(hf, h, 
block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE (hf.add(db.get_block_from_height(h), h)); } @@ -348,7 +349,7 @@ TEST(reorganize, Changed) ASSERT_EQ(db.height(), 3); hf.reorganize_from_block_height(2); for (uint64_t h = 3; h < 16; ++h) { - db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); ASSERT_EQ (ret, h < 15); } @@ -372,7 +373,7 @@ TEST(voting, threshold) for (uint64_t h = 0; h <= 8; ++h) { uint8_t v = 1 + !!(h % 8); - db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); if (h >= 8 && threshold == 87) { // for threshold 87, we reach the threshold at height 7, so from height 8, hard fork to version 2, but 8 tries to add 1 @@ -406,7 +407,7 @@ TEST(voting, different_thresholds) static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4 }; for (uint64_t h = 0; h < sizeof(block_versions) / sizeof(block_versions[0]); ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); ASSERT_EQ(ret, true); } @@ -459,7 +460,7 @@ TEST(voting, info) ASSERT_EQ(expected_thresholds[h], threshold); ASSERT_EQ(4, voting); - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -522,7 +523,7 @@ TEST(reorganize, changed) #define ADD(v, h, a) \ do { \ cryptonote::block b = mkblock(hf, h, v); \ - db.add_block(b, 
0, 0, 0, 0, 0, crypto::hash()); \ + db.add_block(b, 0, 0, 0, 0, 0, crypto::hash(), {}); \ ASSERT_##a(hf.add(b, h)); \ } while(0) #define ADD_TRUE(v, h) ADD(v, h, TRUE) diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index f7ef262e61e..6e76de218a6 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,6 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back({block_weight, long_term_block_weight}); } From 634e12e9ad2b4aa24ca528681833329de9725824 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 25 Jul 2024 12:37:41 -0700 Subject: [PATCH 041/127] Guarantee insertion order into the tree using global output ID - Leaves enter the tree in the block they unlock, in the order they appear in the chain --- src/blockchain_db/blockchain_db.cpp | 37 ++++++++--- src/blockchain_db/blockchain_db.h | 36 +++++------ src/blockchain_db/lmdb/db_lmdb.cpp | 63 ++++++++----------- src/blockchain_db/lmdb/db_lmdb.h | 11 ++-- src/blockchain_db/testdb.h | 7 +-- .../cryptonote_format_utils.cpp | 6 +- src/fcmp/curve_trees.cpp | 56 ++++++++++++----- src/fcmp/curve_trees.h | 13 +++- tests/block_weight/block_weight.cpp | 2 +- tests/core_tests/chaingen.cpp | 2 +- tests/unit_tests/curve_trees.cpp | 24 ++++--- tests/unit_tests/hardfork.cpp | 2 +- tests/unit_tests/long_term_block_weight.cpp | 2 +- 13 files changed, 152 insertions(+), 109 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index b5840240e4d..e34149e48ed 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -179,7 +179,7 @@ void BlockchainDB::pop_block() pop_block(blk, txs); } -void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair& txp, const 
crypto::hash* tx_hash_ptr, const crypto::hash* tx_prunable_hash_ptr) +std::vector BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash* tx_hash_ptr, const crypto::hash* tx_prunable_hash_ptr) { const transaction &tx = txp.first; @@ -223,7 +223,7 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair uint64_t tx_id = add_transaction_data(blk_hash, txp, tx_hash, tx_prunable_hash); - std::vector amount_output_indices(tx.vout.size()); + std::vector output_indices(tx.vout.size()); // iterate tx.vout using indices instead of C++11 foreach syntax because // we need the index @@ -231,21 +231,35 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair { // miner v2 txes have their coinbase output in one single out to save space, // and we store them as rct outputs with an identity mask + // note: tx_outs_to_leaf_tuples in curve_trees.cpp mirrors this logic if (miner_tx && tx.version == 2) { cryptonote::tx_out vout = tx.vout[i]; rct::key commitment = rct::zeroCommit(vout.amount); vout.amount = 0; - amount_output_indices[i] = add_output(tx_hash, vout, i, tx.unlock_time, + output_indices[i] = add_output(tx_hash, vout, i, tx.unlock_time, &commitment); } else { - amount_output_indices[i] = add_output(tx_hash, tx.vout[i], i, tx.unlock_time, + output_indices[i] = add_output(tx_hash, tx.vout[i], i, tx.unlock_time, tx.version > 1 ? 
&tx.rct_signatures.outPk[i].mask : NULL); } } + + std::vector amount_output_indices; + std::vector output_ids; + amount_output_indices.reserve(output_indices.size()); + output_ids.reserve(output_indices.size()); + for (const auto &o_idx : output_indices) + { + amount_output_indices.push_back(o_idx.amount_index); + output_ids.push_back(o_idx.output_id); + } + add_tx_amount_output_indices(tx_id, amount_output_indices); + + return output_ids; } uint64_t BlockchainDB::add_block( const std::pair& blck @@ -273,9 +287,12 @@ uint64_t BlockchainDB::add_block( const std::pair& blck time1 = epee::misc_utils::get_tick_count(); + std::vector> output_ids; + output_ids.reserve(1 + txs.size()); + uint64_t num_rct_outs = 0; blobdata miner_bd = tx_to_blob(blk.miner_tx); - add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd))); + output_ids.push_back(add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd)))); if (blk.miner_tx.version == 2) num_rct_outs += blk.miner_tx.vout.size(); int tx_i = 0; @@ -283,7 +300,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck for (const std::pair& tx : txs) { tx_hash = blk.tx_hashes[tx_i]; - add_transaction(blk_hash, tx, &tx_hash); + output_ids.push_back(add_transaction(blk_hash, tx, &tx_hash)); for (const auto &vout: tx.first.vout) { if (vout.amount == 0) @@ -297,20 +314,22 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // When adding a block, we also need to add all the leaf tuples included in // the block to a table keeping track of locked leaf tuples. Once those leaf // tuples unlock, we use them to grow the tree. 
- std::multimap leaf_tuples_by_unlock_height; + std::multimap leaf_tuples_by_unlock_height; // Get miner tx's leaf tuples fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( blk.miner_tx, + output_ids[0], prev_height, true/*miner_tx*/, leaf_tuples_by_unlock_height); // Get all other txs' leaf tuples - for (const auto &txp : txs) + for (std::size_t i = 0; i < txs.size(); ++i) { fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( - txp.first, + txs[i].first, + output_ids[i+1], prev_height, false/*miner_tx*/, leaf_tuples_by_unlock_height); diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 522fe5838b8..05d95bdac14 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -188,6 +188,14 @@ struct txpool_tx_meta_t } }; +/** + * @brief a struct containing output indexes for convenience + */ +struct output_indexes_t +{ + uint64_t amount_index; + uint64_t output_id; +}; #define DBF_SAFE 1 #define DBF_FAST 2 @@ -408,7 +416,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ) = 0; /** @@ -473,8 +481,9 @@ class BlockchainDB * future, this tracking (of the number, at least) should be moved to * this class, as it is necessary and the same among all BlockchainDB. * - * It returns an amount output index, which is the index of the output - * for its specified amount. + * It returns the output indexes, which contains an amount output index (the + * index of the output for its specified amount) and output id (the global + * index of the output among all outputs of any amount). * * This data should be stored in such a manner that the only thing needed to * reverse the process is the tx_out. 
@@ -487,9 +496,9 @@ class BlockchainDB * @param local_index index of the output in its transaction * @param unlock_time unlock time/height of the output * @param commitment the rct commitment to the output amount - * @return amount output index + * @return output indexes */ - virtual uint64_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0; + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0; /** * @brief store amount output indices for a tx's outputs @@ -570,8 +579,10 @@ class BlockchainDB * @param tx the transaction to add * @param tx_hash_ptr the hash of the transaction, if already calculated * @param tx_prunable_hash_ptr the hash of the prunable part of the transaction, if already calculated + * + * @return the global output ids of all outputs inserted */ - void add_transaction(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL); + std::vector add_transaction(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL); mutable uint64_t time_tx_exists = 0; //!< a performance metric uint64_t time_commit1 = 0; //!< a performance metric @@ -1396,17 +1407,6 @@ class BlockchainDB */ virtual uint64_t get_num_outputs(const uint64_t& amount) const = 0; - // returns the total number of global outputs - /** - * @brief fetches the total number of global outputs - * - * The subclass should return a count of all outputs, - * or zero if there are none. 
- * * - * @return the number of global outputs - */ - virtual uint64_t get_num_global_outputs() const = 0; - /** * @brief return index of the first element (should be hidden, but isn't) * @@ -1780,7 +1780,7 @@ class BlockchainDB // TODO: description and make private virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) = 0; + const std::vector &new_leaves) = 0; virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index a2f3470cd60..dc2057120bd 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -216,7 +216,7 @@ namespace * * spent_keys input hash - * - * locked_leaves block ID [{leaf tuple}...] + * locked_leaves block ID [{output ID, leaf tuple}...] * leaves leaf_idx {leaf tuple} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * @@ -817,7 +817,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_height) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_height) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -848,10 +848,9 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l // Grow the tree with outputs that unlock at this block height const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_height(m_height); - // TODO: double check consistent order for inserting outputs into the tree this->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked_leaf_tuples); - // TODO: remove unlocked_leaf_tuples from the 
locked outputs table + // TODO: remove locked from the locked outputs table int result = 0; @@ -1119,7 +1118,7 @@ void BlockchainLMDB::remove_transaction_data(const crypto::hash& tx_hash, const throw1(DB_ERROR("Failed to add removal of tx index to db transaction")); } -uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, +output_indexes_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, @@ -1183,7 +1182,10 @@ uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, if ((result = mdb_cursor_put(m_cur_output_amounts, &val_amount, &data, MDB_APPENDDUP))) throw0(DB_ERROR(lmdb_error("Failed to add output pubkey to db transaction: ", result).c_str())); - return ok.amount_index; + return output_indexes_t{ + .amount_index = ok.amount_index, + .output_id = ok.output_id + }; } void BlockchainLMDB::add_tx_amount_output_indices(const uint64_t tx_id, @@ -1362,12 +1364,11 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) + const std::vector &new_leaves) { if (new_leaves.empty()) return; - // TODO: block_wtxn_start like pop_block, then call BlockchainDB::grow_tree LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; @@ -1970,6 +1971,8 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre const uint64_t actual_n_leaf_tuples = this->get_num_leaf_tuples(); CHECK_AND_ASSERT_MES(actual_n_leaf_tuples == expected_n_leaf_tuples, false, "unexpected num leaf tuples"); + MDEBUG("Auditing tree with " << actual_n_leaf_tuples << " leaf tuples"); + if (actual_n_leaf_tuples == 0) { // Make sure layers table is also empty @@ -2203,7 +2206,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, chunk_width); } -std::vector 
BlockchainLMDB::get_locked_leaf_tuples_at_height( +std::vector BlockchainLMDB::get_locked_leaf_tuples_at_height( const uint64_t height) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2216,7 +2219,7 @@ std::vector BlockchainLMDB::get_lock MDB_val v_tuple; // Get all the locked outputs at that height - std::vector leaf_tuples; + std::vector leaf_tuples; // TODO: double check this gets all leaf tuples when it does multiple iters MDB_cursor_op op = MDB_SET; @@ -2233,8 +2236,8 @@ std::vector BlockchainLMDB::get_lock if (h != height) throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected" + std::to_string(height)).c_str())); - const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTuple*)v_tuple.mv_data); - const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTuple); + const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTupleContext*)v_tuple.mv_data); + const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTupleContext); auto it = range_begin; @@ -4399,27 +4402,6 @@ uint64_t BlockchainLMDB::get_num_outputs(const uint64_t& amount) const return num_elems; } -uint64_t BlockchainLMDB::get_num_global_outputs() const -{ - LOG_PRINT_L3("BlockchainLMDB:: " << __func__); - check_open(); - - TXN_PREFIX_RDONLY(); - RCURSOR(output_amounts); - - MDB_stat db_stats; - int result = mdb_stat(m_txn, m_output_amounts, &db_stats); - uint64_t count = 0; - if (result != MDB_NOTFOUND) - { - if (result) - throw0(DB_ERROR(lmdb_error("Failed to query m_output_amounts: ", result).c_str())); - count = db_stats.ms_entries; - } - TXN_POSTFIX_RDONLY(); - return count; -} - output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -6688,6 +6670,9 @@ void BlockchainLMDB::migrate_5_6() // ... 
Could also require outputs be inserted all-or-nothing first, and then can pick up where left off for the tree // if any of leaves, layers, or block_infn tables exist, then locked_leaves migration should be complete + // TODO: I can keep track of the contiguous output_id inserted in a separate table used strictly for this migration + // On next run, read all outputs until we reach the highest contiguous output_id, then continue from there + do { // 1. Set up locked outputs table @@ -6711,7 +6696,7 @@ void BlockchainLMDB::migrate_5_6() MDB_cursor_op op = MDB_FIRST; - const uint64_t n_outputs = this->get_num_global_outputs(); + const uint64_t n_outputs = this->num_outputs(); i = 0; while (1) @@ -6763,23 +6748,25 @@ void BlockchainLMDB::migrate_5_6() uint64_t amount = *(const uint64_t*)k.mv_data; output_data_t output_data; + fcmp::curve_trees::CurveTreesV1::LeafTupleContext tuple_context; if (amount == 0) { const outkey *okp = (const outkey *)v.mv_data; output_data = okp->data; + tuple_context.output_id = okp->output_id; } else { const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; memcpy(&output_data, &okp->data, sizeof(pre_rct_output_data_t)); output_data.commitment = rct::zeroCommit(amount); + tuple_context.output_id = okp->output_id; } // Convert the output into a leaf tuple - fcmp::curve_trees::CurveTreesV1::LeafTuple leaf_tuple; try { - leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple( + tuple_context.leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple( output_data.pubkey, rct::rct2pk(output_data.commitment)); } @@ -6792,9 +6779,9 @@ void BlockchainLMDB::migrate_5_6() // Get the block in which the output will unlock const uint64_t unlock_height = cryptonote::get_unlock_height(output_data.unlock_time, output_data.height); - // Now add the leaf tuple to the locked outputs table + // Now add the leaf tuple to the locked leaves table MDB_val_set(k_height, unlock_height); - MDB_val_set(v_tuple, leaf_tuple); + 
MDB_val_set(v_tuple, tuple_context); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 1944779623b..ab2d37e2871 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -278,7 +278,6 @@ class BlockchainLMDB : public BlockchainDB virtual uint64_t get_tx_block_height(const crypto::hash& h) const; virtual uint64_t get_num_outputs(const uint64_t& amount) const; - virtual uint64_t get_num_global_outputs() const; virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const; virtual void get_output_key(const epee::span &amounts, const std::vector &offsets, std::vector &outputs, bool allow_partial = false) const; @@ -370,7 +369,7 @@ class BlockchainLMDB : public BlockchainDB // make private virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves); + const std::vector &new_leaves); virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples); @@ -391,7 +390,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ); virtual void remove_block(); @@ -400,7 +399,7 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx); - virtual uint64_t add_output(const crypto::hash& tx_hash, + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, @@ -451,7 +450,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t 
child_chunk_idx, const uint64_t chunk_width) const; - std::vector get_locked_leaf_tuples_at_height(const uint64_t height); + std::vector get_locked_leaf_tuples_at_height(const uint64_t height); uint64_t num_outputs() const; @@ -547,6 +546,8 @@ class BlockchainLMDB : public BlockchainDB mdb_txn_cursors m_wcursors; mutable boost::thread_specific_ptr m_tinfo; + // TODO: m_curve_trees + #if defined(__arm__) // force a value so it can compile with 32-bit ARM constexpr static uint64_t DEFAULT_MAPSIZE = 1LL << 31; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 5ca53b8c065..6925141cba2 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -100,7 +100,6 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual std::vector get_tx_list(const std::vector& hlist) const override { return std::vector(); } virtual uint64_t get_tx_block_height(const crypto::hash& h) const override { return 0; } virtual uint64_t get_num_outputs(const uint64_t& amount) const override { return 1; } - virtual uint64_t get_num_global_outputs() const override { return 1; } virtual uint64_t get_indexing_base() const override { return 0; } virtual cryptonote::output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const override { return cryptonote::output_data_t(); } virtual cryptonote::tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const override { return cryptonote::tx_out_index(); } @@ -113,12 +112,12 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void remove_block() override { } virtual uint64_t add_transaction_data(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) override {return 0;} virtual void remove_transaction_data(const crypto::hash& tx_hash, const cryptonote::transaction& tx) override {} - virtual uint64_t add_output(const crypto::hash& tx_hash, const cryptonote::tx_out& tx_output, const 
uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) override {return 0;} + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const cryptonote::tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) override {return {0, 0};} virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) override {}; + const std::vector &new_leaves) override {}; virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const override { return false; }; @@ -149,7 +148,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp b/src/cryptonote_basic/cryptonote_format_utils.cpp index cb400c35869..03f77c05148 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -1673,8 +1673,10 @@ namespace cryptonote { const auto seconds_since_unlock = hf_v15_time - unlock_time; const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2; - CHECK_AND_ASSERT_THROW_MES(hf_v15_height > 
blocks_since_unlock, "unexpected blocks since unlock"); - unlock_height = hf_v15_height - blocks_since_unlock; + + unlock_height = hf_v15_height >= blocks_since_unlock + ? (hf_v15_height - blocks_since_unlock) + : default_unlock_height; } else { diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index c797f4f7c05..13c265c9f5f 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -630,25 +630,27 @@ CurveTrees::LeafTuple CurveTrees::output_to_leaf const crypto::public_key &output_pubkey, const crypto::public_key &commitment) const { - CHECK_AND_ASSERT_THROW_MES(crypto::check_key(output_pubkey), "invalid output pub key"); + if (!crypto::check_key(output_pubkey)) + throw std::runtime_error("invalid output pub key"); - const auto clear_torsion = [](const crypto::public_key &key) + const auto clear_torsion = [](const crypto::public_key &key, const std::string &s) { // TODO: don't need to decompress and recompress points, can be optimized rct::key torsion_cleared_key = rct::scalarmultKey(rct::pk2rct(key), rct::INV_EIGHT); torsion_cleared_key = rct::scalarmult8(torsion_cleared_key); - CHECK_AND_ASSERT_THROW_MES(torsion_cleared_key != rct::I, "cannot equal identity"); + if (torsion_cleared_key == rct::I) + throw std::runtime_error(s + " cannot equal identity"); return torsion_cleared_key; }; // Torsion clear the output pub key and commitment - const rct::key rct_O = clear_torsion(output_pubkey); - const rct::key rct_C = clear_torsion(commitment); + const rct::key rct_O = clear_torsion(output_pubkey, "output pub key"); + const rct::key rct_C = clear_torsion(commitment, "commitment"); - const crypto::public_key O = rct::rct2pk(rct_O); - const crypto::public_key C = rct::rct2pk(rct_C); + const crypto::public_key &O = rct::rct2pk(rct_O); + const crypto::public_key &C = rct::rct2pk(rct_C); crypto::ec_point I; crypto::derive_key_image_generator(O, I); @@ -679,12 +681,15 @@ std::vector CurveTrees::flatten_leaves(const std::v 
//---------------------------------------------------------------------------------------------------------------------- template <> void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transaction &tx, + const std::vector &output_ids, const uint64_t tx_height, const bool miner_tx, - std::multimap::LeafTuple> &leaf_tuples_by_unlock_height_inout) const + std::multimap::LeafTupleContext> &leaf_tuples_by_unlock_height_inout) const { const uint64_t unlock_height = cryptonote::get_unlock_height(tx.unlock_time, tx_height); + CHECK_AND_ASSERT_THROW_MES(tx.vout.size() == output_ids.size(), "unexpected size of output ids"); + for (std::size_t i = 0; i < tx.vout.size(); ++i) { const auto &out = tx.vout[i]; @@ -693,7 +698,13 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa if (!cryptonote::get_output_public_key(out, output_public_key)) throw std::runtime_error("Could not get an output public key from a tx output."); - const rct::key commitment = (miner_tx || tx.version < 2) + static_assert(CURRENT_TRANSACTION_VERSION == 2, "This section of code was written with 2 tx versions in mind. " + "Revisit this section and update for the new tx version."); + + if (!miner_tx && tx.version == 2) + CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); + + const rct::key commitment = (miner_tx || tx.version != 2) ? rct::zeroCommit(out.amount) : tx.rct_signatures.outPk[i].mask; @@ -704,7 +715,12 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa output_public_key, rct::rct2pk(commitment)); - leaf_tuples_by_unlock_height_inout.emplace(unlock_height, std::move(leaf_tuple)); + auto tuple_context = CurveTrees::LeafTupleContext{ + .output_id = output_ids[i], + .leaf_tuple = std::move(leaf_tuple), + }; + + leaf_tuples_by_unlock_height_inout.emplace(unlock_height, std::move(tuple_context)); } catch (...) 
{ /*continue*/ }; @@ -715,7 +731,7 @@ template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - const std::vector &new_leaf_tuples) const + const std::vector &new_leaf_tuples) const { TreeExtension tree_extension; @@ -730,15 +746,21 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio tree_extension.leaves.start_leaf_tuple_idx = grow_layer_instructions.old_total_children / LEAF_TUPLE_SIZE; - // Copy the leaves + // Sort the leaves by order they appear in the chain + // TODO: don't copy here + std::vector sorted_leaf_tuples = new_leaf_tuples; + const auto sort_fn = [](const LeafTupleContext &a, const LeafTupleContext &b) { return a.output_id < b.output_id; }; + std::sort(sorted_leaf_tuples.begin(), sorted_leaf_tuples.end(), sort_fn); + + // Copy the sorted leaves into the tree extension struct // TODO: don't copy here tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (const auto &leaf : new_leaf_tuples) + for (const auto &leaf : sorted_leaf_tuples) { tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = leaf.O_x, - .I_x = leaf.I_x, - .C_x = leaf.C_x + .O_x = leaf.leaf_tuple.O_x, + .I_x = leaf.leaf_tuple.I_x, + .C_x = leaf.leaf_tuple.C_x }); } @@ -751,7 +773,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio grow_layer_instructions.need_old_last_parent ? 
&existing_last_hashes.c2_last_hashes[0] : nullptr, grow_layer_instructions.start_offset, grow_layer_instructions.next_parent_start_index, - this->flatten_leaves(new_leaf_tuples), + this->flatten_leaves(tree_extension.leaves.tuples), m_leaf_layer_chunk_width ); diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 6bc6b599f31..05802c68c2f 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -164,6 +164,14 @@ class CurveTrees static const uint64_t LEAF_TUPLE_SIZE = 3; static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); + // Contextual wrapper for leaf tuple + struct LeafTupleContext final + { + // Global output ID useful to order the leaf tuple for insertion into the tree + uint64_t output_id; + LeafTuple leaf_tuple; + }; + // Contiguous leaves in the tree, starting a specified start_idx in the leaf layer struct Leaves final { @@ -221,15 +229,16 @@ class CurveTrees // Convert cryptonote tx outs to leaf tuples, grouped by the leaf tuple unlock height void tx_outs_to_leaf_tuples(const cryptonote::transaction &tx, + const std::vector &output_ids, const uint64_t tx_height, const bool miner_tx, - std::multimap &leaf_tuples_by_unlock_height_inout) const; + std::multimap &leaf_tuples_by_unlock_height_inout) const; // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - const std::vector &new_leaf_tuples) const; + const std::vector &new_leaf_tuples) const; // Get instructions useful for trimming all existing layers in the tree std::vector get_trim_instructions( diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index 26b0f1ab589..281bdb17ca2 100644 --- 
a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -65,7 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index b0f77a0242c..c1876dbc136 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,7 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back({blk, blk_hash}); diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 2dc98d0ad4f..7acac6c4c02 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -730,13 +730,14 @@ void CurveTreesGlobalTree::log_tree() //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, - const std::size_t num_leaves) +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, + const std::size_t old_n_leaf_tuples, + const std::size_t new_n_leaf_tuples) { - std::vector tuples; - tuples.reserve(num_leaves); + std::vector tuples; + tuples.reserve(new_n_leaf_tuples); - for (std::size_t i = 0; i < num_leaves; ++i) + for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) { // Generate random output tuple crypto::secret_key o,c; @@ -746,7 +747,10 @@ static const std::vector 
generate_random_leaves(const C auto leaf_tuple = curve_trees.output_to_leaf_tuple(O, C); - tuples.emplace_back(std::move(leaf_tuple)); + tuples.emplace_back(fcmp::curve_trees::CurveTreesV1::LeafTupleContext{ + .output_id = old_n_leaf_tuples + i, + .leaf_tuple = std::move(leaf_tuple), + }); } return tuples; @@ -775,7 +779,7 @@ static bool grow_tree(CurveTreesV1 &curve_trees, // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, - generate_random_leaves(curve_trees, new_n_leaf_tuples)); + generate_random_leaves(curve_trees, old_n_leaf_tuples, new_n_leaf_tuples)); global_tree.log_tree_extension(tree_extension); @@ -852,14 +856,14 @@ static bool grow_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves)); + test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, 0, init_leaves)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, "failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " << ext_leaves << " leaves"); - test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, ext_leaves)); + test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves, ext_leaves)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves + ext_leaves), false, "failed to extend tree in db"); @@ -881,7 +885,7 @@ static bool trim_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); - test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves)); + test_db.m_db->grow_tree(curve_trees, 
generate_random_leaves(curve_trees, 0, init_leaves)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, "failed to add initial leaves to db"); diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index 6c7a221d0ee..d5103809cb3 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -54,7 +54,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back(blk); } diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index 6e76de218a6..68352775982 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,7 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_height ) override { blocks.push_back({block_weight, long_term_block_weight}); } From 93795b4c9d0ab551c5f4f6876500154e113fae23 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 25 Jul 2024 14:02:06 -0700 Subject: [PATCH 042/127] Match output unlock time (fix off by 1) --- src/blockchain_db/blockchain_db.cpp | 16 ++++----- src/blockchain_db/blockchain_db.h | 4 +-- src/blockchain_db/lmdb/db_lmdb.cpp | 32 +++++++++--------- src/blockchain_db/lmdb/db_lmdb.h | 4 +-- src/blockchain_db/testdb.h | 2 +- .../cryptonote_format_utils.cpp | 33 ++++++++++--------- .../cryptonote_format_utils.h | 3 +- src/fcmp/curve_trees.cpp | 25 +++++++------- src/fcmp/curve_trees.h | 2 +- tests/block_weight/block_weight.cpp | 2 +- tests/core_tests/chaingen.cpp | 2 +- tests/unit_tests/curve_trees.cpp | 3 +- tests/unit_tests/hardfork.cpp | 2 +- 
tests/unit_tests/long_term_block_weight.cpp | 2 +- 14 files changed, 69 insertions(+), 63 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index e34149e48ed..59ef9564259 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -288,11 +288,11 @@ uint64_t BlockchainDB::add_block( const std::pair& blck time1 = epee::misc_utils::get_tick_count(); std::vector> output_ids; - output_ids.reserve(1 + txs.size()); + output_ids.reserve(txs.size()); uint64_t num_rct_outs = 0; blobdata miner_bd = tx_to_blob(blk.miner_tx); - output_ids.push_back(add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd)))); + std::vector miner_output_ids = add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd))); if (blk.miner_tx.version == 2) num_rct_outs += blk.miner_tx.vout.size(); int tx_i = 0; @@ -314,30 +314,30 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // When adding a block, we also need to add all the leaf tuples included in // the block to a table keeping track of locked leaf tuples. Once those leaf // tuples unlock, we use them to grow the tree. 
- std::multimap leaf_tuples_by_unlock_height; + std::multimap leaf_tuples_by_unlock_block; // Get miner tx's leaf tuples fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( blk.miner_tx, - output_ids[0], + miner_output_ids, prev_height, true/*miner_tx*/, - leaf_tuples_by_unlock_height); + leaf_tuples_by_unlock_block); // Get all other txs' leaf tuples for (std::size_t i = 0; i < txs.size(); ++i) { fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( txs[i].first, - output_ids[i+1], + output_ids[i], prev_height, false/*miner_tx*/, - leaf_tuples_by_unlock_height); + leaf_tuples_by_unlock_block); } // call out to subclass implementation to add the block & metadata time1 = epee::misc_utils::get_tick_count(); - add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, leaf_tuples_by_unlock_height); + add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, leaf_tuples_by_unlock_block); TIME_MEASURE_FINISH(time1); time_add_block1 += time1; diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 05d95bdac14..7ef14df8275 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -407,7 +407,7 @@ class BlockchainDB * @param cumulative_difficulty the accumulated difficulty after this block * @param coins_generated the number of coins generated total after this block * @param blk_hash the hash of the block - * @param leaf_tuples_by_unlock_height the leaves from this block to add to the merkle tree + * @param leaf_tuples_by_unlock_block the leaves from this block to add to the merkle tree */ virtual void add_block( const block& blk , size_t block_weight @@ -416,7 +416,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ) 
= 0; /** diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index dc2057120bd..bf04f07404f 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -817,7 +817,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_height) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_block) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -846,7 +846,7 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l } // Grow the tree with outputs that unlock at this block height - const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_height(m_height); + const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_block_id(m_height); this->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked_leaf_tuples); @@ -900,14 +900,14 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l CURSOR(locked_leaves) // Add the locked leaf tuples from this block to the locked outputs table - for (const auto &locked_tuple : leaf_tuples_by_unlock_height) + for (const auto &locked_tuple : leaf_tuples_by_unlock_block) { - MDB_val_set(k_height, locked_tuple.first); + MDB_val_set(k_block_id, locked_tuple.first); MDB_val_set(v_tuple, locked_tuple.second); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(m_cur_locked_leaves, &k_height, &v_tuple, MDB_NODUPDATA); + result = mdb_cursor_put(m_cur_locked_leaves, &k_block_id, &v_tuple, 
MDB_NODUPDATA); if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); } @@ -2206,8 +2206,8 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, chunk_width); } -std::vector BlockchainLMDB::get_locked_leaf_tuples_at_height( - const uint64_t height) +std::vector BlockchainLMDB::get_locked_leaf_tuples_at_block_id( + uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -2215,7 +2215,7 @@ std::vector BlockchainLMDB::g TXN_PREFIX_RDONLY(); RCURSOR(locked_leaves) - MDB_val_set(k_height, height); + MDB_val_set(k_block_id, block_id); MDB_val v_tuple; // Get all the locked outputs at that height @@ -2225,16 +2225,16 @@ std::vector BlockchainLMDB::g MDB_cursor_op op = MDB_SET; while (1) { - int result = mdb_cursor_get(m_cur_locked_leaves, &k_height, &v_tuple, op); + int result = mdb_cursor_get(m_cur_locked_leaves, &k_block_id, &v_tuple, op); if (result == MDB_NOTFOUND) break; if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str())); op = MDB_NEXT_MULTIPLE; - const uint64_t h = *(const uint64_t*)k_height.mv_data; - if (h != height) - throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected" + std::to_string(height)).c_str())); + const uint64_t blk_id = *(const uint64_t*)k_block_id.mv_data; + if (blk_id != block_id) + throw0(DB_ERROR(("Height " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTupleContext*)v_tuple.mv_data); const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTupleContext); @@ -6777,15 +6777,15 @@ void BlockchainLMDB::migrate_5_6() } // Get the block in which the output will unlock - const uint64_t unlock_height = cryptonote::get_unlock_height(output_data.unlock_time, output_data.height); + const uint64_t unlock_block = 
cryptonote::get_unlock_block_index(output_data.unlock_time, output_data.height); // Now add the leaf tuple to the locked leaves table - MDB_val_set(k_height, unlock_height); + MDB_val_set(k_block_id, unlock_block); MDB_val_set(v_tuple, tuple_context); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(c_locked_leaves, &k_height, &v_tuple, MDB_NODUPDATA); + result = mdb_cursor_put(c_locked_leaves, &k_block_id, &v_tuple, MDB_NODUPDATA); if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); if (result == MDB_KEYEXIST) @@ -6866,7 +6866,7 @@ void BlockchainLMDB::migrate_5_6() } // Get all the locked outputs at that height - const auto leaf_tuples = this->get_locked_leaf_tuples_at_height(i); + const auto leaf_tuples = this->get_locked_leaf_tuples_at_block_id(i); this->grow_tree(fcmp::curve_trees::curve_trees_v1, leaf_tuples); // TODO: Remove locked outputs from the locked outputs table after adding them to tree diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index ab2d37e2871..3391cd6f81e 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -390,7 +390,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ); virtual void remove_block(); @@ -450,7 +450,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t child_chunk_idx, const uint64_t chunk_width) const; - std::vector get_locked_leaf_tuples_at_height(const uint64_t height); + std::vector get_locked_leaf_tuples_at_block_id(uint64_t block_id); uint64_t num_outputs() const; diff --git a/src/blockchain_db/testdb.h 
b/src/blockchain_db/testdb.h index 6925141cba2..48fab60760e 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -148,7 +148,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp b/src/cryptonote_basic/cryptonote_format_utils.cpp index 03f77c05148..b8fbd12cc81 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -1645,20 +1645,23 @@ namespace cryptonote return key; } //--------------------------------------------------------------- - // TODO: write tests for this func - uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain) + // TODO: write tests for this func that match with current daemon logic + uint64_t get_unlock_block_index(uint64_t unlock_time, uint64_t block_included_in_chain) { - uint64_t unlock_height = 0; - const uint64_t default_unlock_height = height_included_in_chain + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE; + uint64_t unlock_block_index = 0; + + static_assert(CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE > 0, "unexpected default spendable age"); + const uint64_t default_block_index = block_included_in_chain + (CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE - 1); - // TODO: double triple check off by 1 if (unlock_time == 0) { - unlock_height = default_unlock_height; + unlock_block_index = default_block_index; } else if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) { - unlock_height = unlock_time; + // The unlock_time in this case is supposed to be the chain height at which the output unlocks + 
// The chain height is 1 higher than the highest block index, so we subtract 1 for this delta + unlock_block_index = unlock_time > 1 ? (unlock_time - 1) : 0; } else { @@ -1674,29 +1677,29 @@ namespace cryptonote const auto seconds_since_unlock = hf_v15_time - unlock_time; const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2; - unlock_height = hf_v15_height >= blocks_since_unlock + unlock_block_index = hf_v15_height > blocks_since_unlock ? (hf_v15_height - blocks_since_unlock) - : default_unlock_height; + : default_block_index; } else { const auto seconds_until_unlock = unlock_time - hf_v15_time; const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2; - unlock_height = hf_v15_height + blocks_until_unlock; + unlock_block_index = hf_v15_height + blocks_until_unlock; } /* Note: since this function was introduced for the hf that included fcmp's, it's possible for an output to be - spent before it reaches the unlock_height going by the old rules; this is ok. It can't be spent again because + spent before it reaches the unlock_block_index going by the old rules; this is ok. It can't be spent again b/c it'll have a duplicate key image. It's also possible for an output to unlock by old rules, and then re-lock - again at the fork. This is also ok, we just need to be sure that the new hf rules use this unlock_height + again at the fork. This is also ok, we just need to be sure that the new hf rules use this unlock_block_index starting at the fork for fcmp's. 
*/ // TODO: double check the accuracy of this calculation - MDEBUG("unlock time: " << unlock_time << " , unlock_height: " << unlock_height); + MDEBUG("unlock time: " << unlock_time << " , unlock_block_index: " << unlock_block_index); } - // Can't unlock earlier than the default unlock height - return std::max(unlock_height, default_unlock_height); + // Can't unlock earlier than the default unlock block + return std::max(unlock_block_index, default_block_index); } } diff --git a/src/cryptonote_basic/cryptonote_format_utils.h b/src/cryptonote_basic/cryptonote_format_utils.h index f81e57fdca9..c1757c7a702 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.h +++ b/src/cryptonote_basic/cryptonote_format_utils.h @@ -266,7 +266,8 @@ namespace cryptonote crypto::secret_key encrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); crypto::secret_key decrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); - uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain); + // Returns the block index in which the provided unlock_time unlocks + uint64_t get_unlock_block_index(uint64_t unlock_time, uint64_t block_included_in_chain); #define CHECKED_GET_SPECIFIC_VARIANT(variant_var, specific_type, variable_name, fail_return_val) \ CHECK_AND_ASSERT_MES(variant_var.type() == typeid(specific_type), fail_return_val, "wrong variant type: " << variant_var.type().name() << ", expected " << typeid(specific_type).name()); \ diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 13c265c9f5f..25efaf85822 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -684,9 +684,9 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa const std::vector &output_ids, const uint64_t tx_height, const bool miner_tx, - std::multimap::LeafTupleContext> &leaf_tuples_by_unlock_height_inout) const + std::multimap::LeafTupleContext> &leaf_tuples_by_unlock_block_inout) const { - const uint64_t 
unlock_height = cryptonote::get_unlock_height(tx.unlock_time, tx_height); + const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, tx_height); CHECK_AND_ASSERT_THROW_MES(tx.vout.size() == output_ids.size(), "unexpected size of output ids"); @@ -708,22 +708,23 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa ? rct::zeroCommit(out.amount) : tx.rct_signatures.outPk[i].mask; + CurveTrees::LeafTupleContext tuple_context; + tuple_context.output_id = output_ids[i]; + try { - // Throws an error if output is invalid; we don't want leaf tuples from invalid outputs - auto leaf_tuple = output_to_leaf_tuple( + // Convert output to leaf tuple; throws if output is invalid + tuple_context.leaf_tuple = output_to_leaf_tuple( output_public_key, rct::rct2pk(commitment)); - - auto tuple_context = CurveTrees::LeafTupleContext{ - .output_id = output_ids[i], - .leaf_tuple = std::move(leaf_tuple), - }; - - leaf_tuples_by_unlock_height_inout.emplace(unlock_height, std::move(tuple_context)); } catch (...) 
- { /*continue*/ }; + { + // We don't want leaf tuples from invalid outputs in the tree + continue; + }; + + leaf_tuples_by_unlock_block_inout.emplace(unlock_block, std::move(tuple_context)); } } //---------------------------------------------------------------------------------------------------------------------- diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 05802c68c2f..3230d41ff24 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -232,7 +232,7 @@ class CurveTrees const std::vector &output_ids, const uint64_t tx_height, const bool miner_tx, - std::multimap &leaf_tuples_by_unlock_height_inout) const; + std::multimap &leaf_tuples_by_unlock_block_inout) const; // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index 281bdb17ca2..30f94ddfd6f 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -65,7 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index c1876dbc136..30e9f920fb1 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,7 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({blk, blk_hash}); diff --git a/tests/unit_tests/curve_trees.cpp 
b/tests/unit_tests/curve_trees.cpp index 7acac6c4c02..d9879ca436f 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -828,7 +828,8 @@ static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples, CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); // Trim the global tree by `trim_n_leaf_tuples` - LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); + LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree with " + << old_n_leaf_tuples << " leaves in memory"); global_tree.trim_tree(trim_n_leaf_tuples); diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index d5103809cb3..6061b58e2db 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -54,7 +54,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back(blk); } diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index 68352775982..d405a632529 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,7 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_height + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } From db12610d941af67474f3e79e6f840bc6fda26ecb Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 25 Jul 2024 16:12:17 -0700 Subject: [PATCH 043/127] Remove leaves from locked leaves table upon insertion to tree --- src/blockchain_db/lmdb/db_lmdb.cpp | 37 ++++++++++++++----- src/blockchain_db/lmdb/db_lmdb.h | 2 + 
.../cryptonote_format_utils.cpp | 2 +- src/fcmp/curve_trees.cpp | 2 +- 4 files changed, 32 insertions(+), 11 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index bf04f07404f..9290998c620 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -847,10 +847,10 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l // Grow the tree with outputs that unlock at this block height const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_block_id(m_height); - this->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked_leaf_tuples); - // TODO: remove locked from the locked outputs table + // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table + this->del_locked_leaf_tuples_at_block_id(m_height); int result = 0; @@ -899,7 +899,7 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l CURSOR(locked_leaves) - // Add the locked leaf tuples from this block to the locked outputs table + // Add the locked leaf tuples from this block to the locked leaves table for (const auto &locked_tuple : leaf_tuples_by_unlock_block) { MDB_val_set(k_block_id, locked_tuple.first); @@ -2218,10 +2218,9 @@ std::vector BlockchainLMDB::g MDB_val_set(k_block_id, block_id); MDB_val v_tuple; - // Get all the locked outputs at that height + // Get all the locked outputs at the provided block id std::vector leaf_tuples; - // TODO: double check this gets all leaf tuples when it does multiple iters MDB_cursor_op op = MDB_SET; while (1) { @@ -2234,7 +2233,7 @@ std::vector BlockchainLMDB::g const uint64_t blk_id = *(const uint64_t*)k_block_id.mv_data; if (blk_id != block_id) - throw0(DB_ERROR(("Height " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); + throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); const 
auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTupleContext*)v_tuple.mv_data); const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTupleContext); @@ -2257,6 +2256,27 @@ std::vector BlockchainLMDB::g return leaf_tuples; } +void BlockchainLMDB::del_locked_leaf_tuples_at_block_id(uint64_t block_id) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(locked_leaves) + + MDB_val_set(k_block_id, block_id); + + int result = mdb_cursor_get(m_cur_locked_leaves, &k_block_id, NULL, MDB_SET); + if (result == MDB_NOTFOUND) + return; + if (result != MDB_SUCCESS) + throw1(DB_ERROR(lmdb_error("Error finding locked leaf tuples to remove: ", result).c_str())); + + result = mdb_cursor_del(m_cur_locked_leaves, MDB_NODUPDATA); + if (result) + throw1(DB_ERROR(lmdb_error("Error removing locked leaf tuples: ", result).c_str())); +} + BlockchainLMDB::~BlockchainLMDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -6869,7 +6889,8 @@ void BlockchainLMDB::migrate_5_6() const auto leaf_tuples = this->get_locked_leaf_tuples_at_block_id(i); this->grow_tree(fcmp::curve_trees::curve_trees_v1, leaf_tuples); - // TODO: Remove locked outputs from the locked outputs table after adding them to tree + // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table + this->del_locked_leaf_tuples_at_block_id(i); // Get old block_info and use it to set the new one with new values result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT); @@ -6889,8 +6910,6 @@ void BlockchainLMDB::migrate_5_6() bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); bi.bi_tree_root = this->get_tree_root(); - MDEBUG("Height: " << i << " , n_leaf_tuples: " << bi.bi_n_leaf_tuples); - MDB_val_set(nv, bi); result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP); if (result) diff --git 
a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 3391cd6f81e..b363077bc86 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -452,6 +452,8 @@ class BlockchainLMDB : public BlockchainDB std::vector get_locked_leaf_tuples_at_block_id(uint64_t block_id); + void del_locked_leaf_tuples_at_block_id(uint64_t block_id); + uint64_t num_outputs() const; // Hard fork diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp b/src/cryptonote_basic/cryptonote_format_utils.cpp index b8fbd12cc81..05dcac4e20d 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -1661,7 +1661,7 @@ namespace cryptonote { // The unlock_time in this case is supposed to be the chain height at which the output unlocks // The chain height is 1 higher than the highest block index, so we subtract 1 for this delta - unlock_block_index = unlock_time > 1 ? (unlock_time - 1) : 0; + unlock_block_index = unlock_time > 0 ? 
(unlock_time - 1) : 0; } else { diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 25efaf85822..0c48f55d2d6 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -710,7 +710,7 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa CurveTrees::LeafTupleContext tuple_context; tuple_context.output_id = output_ids[i]; - + try { // Convert output to leaf tuple; throws if output is invalid From b585a7f40818c9ea3bec3608f725f62f6e8c5268 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 25 Jul 2024 16:30:50 -0700 Subject: [PATCH 044/127] Remove copy in get_tree_extension and better named funcs --- src/blockchain_db/blockchain_db.h | 2 +- src/blockchain_db/lmdb/db_lmdb.cpp | 16 ++++++++-------- src/blockchain_db/lmdb/db_lmdb.h | 4 ++-- src/blockchain_db/testdb.h | 2 +- src/fcmp/curve_trees.cpp | 8 +++----- src/fcmp/curve_trees.h | 2 +- tests/unit_tests/curve_trees.cpp | 16 ++++++++++++---- 7 files changed, 28 insertions(+), 22 deletions(-) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 7ef14df8275..309a366415c 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1780,7 +1780,7 @@ class BlockchainDB // TODO: description and make private virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) = 0; + std::vector &&new_leaves) = 0; virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 9290998c620..36c89ab5088 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -846,8 +846,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l } // Grow the tree with outputs that unlock at this block height - const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_block_id(m_height); - 
this->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked_leaf_tuples); + auto unlocked_leaf_tuples = this->get_leaf_tuples_at_unlock_block_id(m_height); + this->grow_tree(fcmp::curve_trees::curve_trees_v1, std::move(unlocked_leaf_tuples)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table this->del_locked_leaf_tuples_at_block_id(m_height); @@ -1364,7 +1364,7 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) + std::vector &&new_leaves) { if (new_leaves.empty()) return; @@ -1384,7 +1384,7 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree const auto last_hashes = this->get_tree_last_hashes(); // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree - const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, new_leaves); + const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_leaves)); // Insert the leaves // TODO: grow_leaves @@ -2206,7 +2206,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, chunk_width); } -std::vector BlockchainLMDB::get_locked_leaf_tuples_at_block_id( +std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -6885,9 +6885,9 @@ void BlockchainLMDB::migrate_5_6() } } - // Get all the locked outputs at that height - const auto leaf_tuples = this->get_locked_leaf_tuples_at_block_id(i); - this->grow_tree(fcmp::curve_trees::curve_trees_v1, leaf_tuples); + // Get the leaf tuples that unlock at the given block + auto unlocked_leaf_tuples = this->get_leaf_tuples_at_unlock_block_id(i); + this->grow_tree(fcmp::curve_trees::curve_trees_v1, std::move(unlocked_leaf_tuples)); // Now that we've used the 
unlocked leaves to grow the tree, we can delete them from the locked leaves table this->del_locked_leaf_tuples_at_block_id(i); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index b363077bc86..d490ce6e6d2 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -369,7 +369,7 @@ class BlockchainLMDB : public BlockchainDB // make private virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves); + std::vector &&new_leaves); virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples); @@ -450,7 +450,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t child_chunk_idx, const uint64_t chunk_width) const; - std::vector get_locked_leaf_tuples_at_block_id(uint64_t block_id); + std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); void del_locked_leaf_tuples_at_block_id(uint64_t block_id); diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 48fab60760e..bdbd6f2ad80 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -117,7 +117,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const std::vector &new_leaves) override {}; + std::vector &&new_leaves) override {}; virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const override { return false; }; diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 0c48f55d2d6..bcdc85c7cb4 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -732,7 +732,7 @@ 
template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - const std::vector &new_leaf_tuples) const + std::vector &&new_leaf_tuples) const { TreeExtension tree_extension; @@ -748,15 +748,13 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio tree_extension.leaves.start_leaf_tuple_idx = grow_layer_instructions.old_total_children / LEAF_TUPLE_SIZE; // Sort the leaves by order they appear in the chain - // TODO: don't copy here - std::vector sorted_leaf_tuples = new_leaf_tuples; const auto sort_fn = [](const LeafTupleContext &a, const LeafTupleContext &b) { return a.output_id < b.output_id; }; - std::sort(sorted_leaf_tuples.begin(), sorted_leaf_tuples.end(), sort_fn); + std::sort(new_leaf_tuples.begin(), new_leaf_tuples.end(), sort_fn); // Copy the sorted leaves into the tree extension struct // TODO: don't copy here tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (const auto &leaf : sorted_leaf_tuples) + for (const auto &leaf : new_leaf_tuples) { tree_extension.leaves.tuples.emplace_back(LeafTuple{ .O_x = leaf.leaf_tuple.O_x, diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 3230d41ff24..fcad2a0d9b0 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -238,7 +238,7 @@ class CurveTrees // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - const std::vector &new_leaf_tuples) const; + std::vector &&new_leaf_tuples) const; // Get instructions useful for trimming all existing layers in the tree std::vector get_trim_instructions( diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index d9879ca436f..5a3bb555520 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -775,11 +775,13 @@ static bool 
grow_tree(CurveTreesV1 &curve_trees, global_tree.log_last_hashes(last_hashes); + auto new_leaf_tuples = generate_random_leaves(curve_trees, old_n_leaf_tuples, new_n_leaf_tuples); + // Get a tree extension object to the existing tree using randomly generated leaves // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, - generate_random_leaves(curve_trees, old_n_leaf_tuples, new_n_leaf_tuples)); + std::move(new_leaf_tuples)); global_tree.log_tree_extension(tree_extension); @@ -857,14 +859,18 @@ static bool grow_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, 0, init_leaves)); + auto init_leaf_tuples = generate_random_leaves(curve_trees, 0, init_leaves); + + test_db.m_db->grow_tree(curve_trees, std::move(init_leaf_tuples)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, "failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " << ext_leaves << " leaves"); - test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, init_leaves, ext_leaves)); + auto ext_leaf_tuples = generate_random_leaves(curve_trees, init_leaves, ext_leaves); + + test_db.m_db->grow_tree(curve_trees, std::move(ext_leaf_tuples)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves + ext_leaves), false, "failed to extend tree in db"); @@ -886,7 +892,9 @@ static bool trim_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); - test_db.m_db->grow_tree(curve_trees, generate_random_leaves(curve_trees, 0, init_leaves)); + auto init_leaf_tuples = 
generate_random_leaves(curve_trees, 0, init_leaves); + + test_db.m_db->grow_tree(curve_trees, std::move(init_leaf_tuples)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, "failed to add initial leaves to db"); From af4de996cb145cc9b2c4b8af9b1b493350d686ad Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 May 2024 20:14:39 -0400 Subject: [PATCH 045/127] Use a pointer for the value in CResult Some toolchains complained CResult was an incomplete type. This attempts to resolve that. Do not merge unless it actually fixes things. --- src/fcmp/fcmp_rust/fcmp++.h | 2 +- src/fcmp/fcmp_rust/src/lib.rs | 18 ++++++------------ src/fcmp/tower_cycle.cpp | 18 ++++++++++++------ 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index f3da68a2401..ccfa2b90d26 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -74,7 +74,7 @@ struct SelenePoint { template struct CResult { - T value; + T* value; void* err; }; diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index e6f25625d92..16d17510ba0 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -130,19 +130,19 @@ impl<'a, T> From> for &'a [T] { #[repr(C)] pub struct CResult { - value: T, + value: *const T, err: *const E, } impl CResult { fn ok(value: T) -> Self { CResult { - value, + value: Box::into_raw(Box::new(value)), err: core::ptr::null(), } } - fn err(default: T, err: E) -> Self { + fn err(err: E) -> Self { CResult { - value: default, + value: core::ptr::null(), err: Box::into_raw(Box::new(err)), } } @@ -166,10 +166,7 @@ pub extern "C" fn hash_grow_helios( if let Some(hash) = hash { CResult::ok(hash) } else { - CResult::err( - HeliosPoint::identity(), - io::Error::new(io::ErrorKind::Other, "failed to grow hash"), - ) + CResult::err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) } } @@ -216,10 +213,7 @@ pub extern "C" fn 
hash_grow_selene( if let Some(hash) = hash { CResult::ok(hash) } else { - CResult::err( - SelenePoint::identity(), - io::Error::new(io::ErrorKind::Other, "failed to grow hash"), - ) + CResult::err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) } } diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 9c9ddd839cc..2bf6f7459fc 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -51,15 +51,18 @@ Helios::Point Helios::hash_grow( const Helios::Scalar &existing_child_at_offset, const Helios::Chunk &new_children) const { - fcmp_rust::CResult res = fcmp_rust::hash_grow_helios( + fcmp_rust::CResult result = fcmp_rust::hash_grow_helios( existing_hash, offset, existing_child_at_offset, new_children); - if (res.err != 0) { + if (result.err != nullptr) { throw std::runtime_error("failed to hash grow"); } - return res.value; + typename Helios::Point res; + memcpy(&res, result.value, sizeof(typename Selene::Point)); + free(result.value); + return res; } //---------------------------------------------------------------------------------------------------------------------- Helios::Point Helios::hash_trim( @@ -85,15 +88,18 @@ Selene::Point Selene::hash_grow( const Selene::Scalar &existing_child_at_offset, const Selene::Chunk &new_children) const { - fcmp_rust::CResult res = fcmp_rust::hash_grow_selene( + fcmp_rust::CResult result = fcmp_rust::hash_grow_selene( existing_hash, offset, existing_child_at_offset, new_children); - if (res.err != 0) { + if (result.err != nullptr) { throw std::runtime_error("failed to hash grow"); } - return res.value; + typename Selene::Point res; + memcpy(&res, result.value, sizeof(typename Selene::Point)); + free(result.value); + return res; } //---------------------------------------------------------------------------------------------------------------------- Selene::Point Selene::hash_trim( From d6ca63618ecea98cc53405607b2103b5baab8670 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 24 May 2024 
19:12:08 -0700 Subject: [PATCH 046/127] use void * to try to fix CResult --- src/fcmp/fcmp_rust/fcmp++.h | 12 +++--------- src/fcmp/tower_cycle.cpp | 20 ++++++++++++++------ 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index ccfa2b90d26..2648d3c7ff4 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -72,9 +72,8 @@ struct SelenePoint { // ----- End deps C bindings ----- -template struct CResult { - T* value; + void* value; void* err; }; @@ -111,17 +110,12 @@ HeliosScalar helios_zero_scalar(); SeleneScalar selene_zero_scalar(); -CResult hash_grow_helios(HeliosPoint existing_hash, +CResult hash_grow_helios(HeliosPoint existing_hash, uintptr_t offset, HeliosScalar existing_child_at_offset, HeliosScalarSlice new_children); -CResult hash_trim_helios(HeliosPoint existing_hash, - uintptr_t offset, - HeliosScalarSlice children, - HeliosScalar child_to_grow_back); - -CResult hash_grow_selene(SelenePoint existing_hash, +CResult hash_grow_selene(SelenePoint existing_hash, uintptr_t offset, SeleneScalar existing_child_at_offset, SeleneScalarSlice new_children); diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 2bf6f7459fc..5679a310d78 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -51,14 +51,18 @@ Helios::Point Helios::hash_grow( const Helios::Scalar &existing_child_at_offset, const Helios::Chunk &new_children) const { - fcmp_rust::CResult result = fcmp_rust::hash_grow_helios( + auto result = fcmp_rust::hash_grow_helios( existing_hash, offset, existing_child_at_offset, new_children); - if (result.err != nullptr) { - throw std::runtime_error("failed to hash grow"); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash grow"); } + typename Helios::Point res; memcpy(&res, result.value, sizeof(typename Selene::Point)); free(result.value); @@ -88,14 +92,18 @@ Selene::Point Selene::hash_grow( 
const Selene::Scalar &existing_child_at_offset, const Selene::Chunk &new_children) const { - fcmp_rust::CResult result = fcmp_rust::hash_grow_selene( + auto result = fcmp_rust::hash_grow_selene( existing_hash, offset, existing_child_at_offset, new_children); - if (result.err != nullptr) { - throw std::runtime_error("failed to hash grow"); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash grow"); } + typename Selene::Point res; memcpy(&res, result.value, sizeof(typename Selene::Point)); free(result.value); From 8b76958485986d065a56d905be588f5f0b1f3732 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 19:18:39 -0400 Subject: [PATCH 047/127] Rust cross compilation --- .github/workflows/depends.yml | 13 +++++++++ src/fcmp/fcmp_rust/CMakeLists.txt | 48 +++++++++++++++++++++++++++++-- 2 files changed, 59 insertions(+), 2 deletions(-) diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index aafb8e56a91..c6d42f3fa80 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -31,36 +31,47 @@ jobs: toolchain: - name: "RISCV 64bit" host: "riscv64-linux-gnu" + rust_host: "riscv64gc-unknown-linux-gnu" packages: "python3 gperf g++-riscv64-linux-gnu" - name: "ARM v7" host: "arm-linux-gnueabihf" + rust_host: "armv7-unknown-linux-gnueabihf" packages: "python3 gperf g++-arm-linux-gnueabihf" - name: "ARM v8" host: "aarch64-linux-gnu" + rust_host: "aarch64-unknown-linux-gnu" packages: "python3 gperf g++-aarch64-linux-gnu" - name: "i686 Win" host: "i686-w64-mingw32" + rust_host: "i686-pc-windows-gnu" packages: "python3 g++-mingw-w64-i686" - name: "i686 Linux" host: "i686-pc-linux-gnu" + rust_host: "i686-unknown-linux-gnu" packages: "gperf cmake g++-multilib python3-zmq" - name: "Win64" host: "x86_64-w64-mingw32" + rust_host: "x86_64-pc-windows-gnu" packages: "cmake python3 g++-mingw-w64-x86-64" - name: "x86_64 Linux" host: "x86_64-unknown-linux-gnu" + rust_host: 
"x86_64-unknown-linux-gnu" packages: "gperf cmake python3-zmq libdbus-1-dev libharfbuzz-dev" - name: "Cross-Mac x86_64" host: "x86_64-apple-darwin" + rust_host: "x86_64-apple-darwin" packages: "cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" - name: "Cross-Mac aarch64" host: "aarch64-apple-darwin" + rust_host: "aarch64-apple-darwin" packages: "cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" - name: "x86_64 Freebsd" host: "x86_64-unknown-freebsd" + rust_host: "x86_64-unknown-freebsd" packages: "clang-8 gperf cmake python3-zmq libdbus-1-dev libharfbuzz-dev" - name: "ARMv8 Android" host: "aarch64-linux-android" + rust_host: "aarch64-linux-android" packages: "gperf cmake python3" name: ${{ matrix.toolchain.name }} steps: @@ -95,6 +106,8 @@ jobs: run: ${{env.APT_SET_CONF}} - name: install dependencies run: sudo apt update; sudo apt -y install build-essential libtool cmake autotools-dev automake pkg-config bsdmainutils curl git ca-certificates ccache ${{ matrix.toolchain.packages }} + - name: install rust target + run: rustup update stable; rustup target add ${{ matrix.toolchain.rust_host }} - name: prepare w64-mingw32 if: ${{ matrix.toolchain.host == 'x86_64-w64-mingw32' || matrix.toolchain.host == 'i686-w64-mingw32' }} run: | diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index cb25b33783c..ae97858b6cb 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -26,11 +26,55 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+if(MINGW) + set(RUST_PLATFORM "pc-windows") + set(RUST_TOOLCHAIN "-gnu") +elseif(MSVC) + set(RUST_PLATFORM "pc-windows") + set(RUST_TOOLCHAIN "-msvc") +elseif(APPLE) + set(RUST_PLATFORM "apple-darwin") + set(RUST_TOOLCHAIN "") +elseif(FREEBSD) + set(RUST_PLATFORM "unknown-freebsd") + set(RUST_TOOLCHAIN "") +elseif(OPENBSD) + set(RUST_PLATFORM "unknown-openbsd") + set(RUST_TOOLCHAIN "") +elseif(ANDROID) + set(RUST_PLATFORM "linux-android") + if(ARCH_ID MATCHES "(arm|armv7)") + set(RUST_TOOLCHAIN "eabi") + else() + set(RUST_TOOLCHAIN "") + endif() +elseif(DRAGONFLY) + set(RUST_PLATFORM "unknown-dragonfly") + set(RUST_TOOLCHAIN "") +elseif(CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)") + if(ARCH_ID MATCHES "x86_64") + set(RUST_PLATFORM "pc-solaris") + set(RUST_TOOLCHAIN "") + elseif(ARCH_ID MATCHES "sparcv9") + set(RUST_PLATFORM "sun-solaris") + set(RUST_TOOLCHAIN "") + endif() +else() + set(RUST_PLATFORM "unknown-linux") + if(ARCH_ID MATCHES "armv7") + set(RUST_TOOLCHAIN "-gnueabi") + else() + set(RUST_TOOLCHAIN "-gnu") + endif() +endif() + +set(RUST_TARGET "${ARCH_ID}-${RUST_PLATFORM}${RUST_TOOLCHAIN}") + if (CMAKE_BUILD_TYPE STREQUAL "Debug") - set(CARGO_CMD cargo build) + set(CARGO_CMD cargo build --target "${RUST_TARGET}") set(TARGET_DIR "debug") else () - set(CARGO_CMD cargo build --release) + set(CARGO_CMD cargo build --target "${RUST_TARGET}" --release) set(TARGET_DIR "release") endif () From 389274aee9b6d2c9760d30cab3e251eec2ad8642 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 19:29:00 -0400 Subject: [PATCH 048/127] Correct path to the staticlib --- src/fcmp/fcmp_rust/CMakeLists.txt | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index ae97858b6cb..df0a6018bae 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -26,6 +26,12 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 
WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +if(ARCH_ID MATCHES "arm64") + set(RUST_ARCH "aarch64") +else() + set(RUST_ARCH "${ARCH_ID}") +endif() + if(MINGW) set(RUST_PLATFORM "pc-windows") set(RUST_TOOLCHAIN "-gnu") @@ -68,7 +74,7 @@ else() endif() endif() -set(RUST_TARGET "${ARCH_ID}-${RUST_PLATFORM}${RUST_TOOLCHAIN}") +set(RUST_TARGET "${RUST_ARCH}-${RUST_PLATFORM}${RUST_TOOLCHAIN}") if (CMAKE_BUILD_TYPE STREQUAL "Debug") set(CARGO_CMD cargo build --target "${RUST_TARGET}") @@ -94,7 +100,7 @@ add_custom_command( OUTPUT ${FCMP_RUST_LIB} COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD} COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_RUST_HEADER} - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_DIR}/libfcmp_rust.a ${FCMP_RUST_LIB} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_rust.a ${FCMP_RUST_LIB} COMMAND echo "Finished copying fcmp rust targets" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} VERBATIM From 75faba1db65645e44762c21fae434375c2e13462 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 19:54:09 -0400 Subject: [PATCH 049/127] Increase misc discrepancies in ARM ARCH spec which we support --- src/fcmp/fcmp_rust/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index df0a6018bae..5e37bb099a0 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -26,7 +26,7 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-if(ARCH_ID MATCHES "arm64") +if(ARCH_ID MATCHES "(arm64|armv8a)") set(RUST_ARCH "aarch64") else() set(RUST_ARCH "${ARCH_ID}") From 8b279a06668caad686725fad145e110fe1c71fb5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 19:56:20 -0400 Subject: [PATCH 050/127] i386 -> i686, riscv64 -> riscv64gc Also includes most of what was intended for the prior commit. --- src/fcmp/fcmp_rust/CMakeLists.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index 5e37bb099a0..218eff0fb57 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -26,8 +26,14 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -if(ARCH_ID MATCHES "(arm64|armv8a)") +if(ARCH_ID MATCHES "(arm64|armv8a|armv8-a)") set(RUST_ARCH "aarch64") +elseif(ARCH_ID MATCHES "armv7-a") + set(RUST_ARCH "armv7") +elseif(ARCH_ID MATCHES "i386") + set(RUST_ARCH "i686") +elseif(ARCH_ID MATCHES "riscv64") + set(RUST_ARCH "riscv64gc") else() set(RUST_ARCH "${ARCH_ID}") endif() From 8c47c0d282728556c6c3359d54035bad3e4dd922 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 20:44:02 -0400 Subject: [PATCH 051/127] Further match off RUST_ARCH, not ARCH_ID --- src/fcmp/fcmp_rust/CMakeLists.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index 218eff0fb57..1e4879ab06b 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -55,7 +55,7 @@ elseif(OPENBSD) set(RUST_TOOLCHAIN "") elseif(ANDROID) set(RUST_PLATFORM "linux-android") - if(ARCH_ID MATCHES "(arm|armv7)") + if(RUST_ARCH MATCHES "(arm|armv7)") set(RUST_TOOLCHAIN "eabi") else() set(RUST_TOOLCHAIN "") @@ -64,16 +64,16 @@ elseif(DRAGONFLY) set(RUST_PLATFORM 
"unknown-dragonfly") set(RUST_TOOLCHAIN "") elseif(CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)") - if(ARCH_ID MATCHES "x86_64") + if(RUST_ARCH MATCHES "x86_64") set(RUST_PLATFORM "pc-solaris") set(RUST_TOOLCHAIN "") - elseif(ARCH_ID MATCHES "sparcv9") + elseif(RUST_ARCH MATCHES "sparcv9") set(RUST_PLATFORM "sun-solaris") set(RUST_TOOLCHAIN "") endif() else() set(RUST_PLATFORM "unknown-linux") - if(ARCH_ID MATCHES "armv7") + if(RUST_ARCH MATCHES "armv7") set(RUST_TOOLCHAIN "-gnueabi") else() set(RUST_TOOLCHAIN "-gnu") From 03679d1342ccb988a75aaa37a88f8a4973bf9d8f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 20:44:15 -0400 Subject: [PATCH 052/127] Install Rust when doing the Windows build --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 89c86af2e7a..43d2f2df8cf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -74,6 +74,8 @@ jobs: curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst pacman --noconfirm -U mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst + - name: Install rust + run: rustup update stable - name: build run: | ${{env.CCACHE_SETTINGS}} From e5ed23208ddbf763a57187be921a587f94995138 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 20:49:07 -0400 Subject: [PATCH 053/127] Use the armv7 HF Rust toolchain --- src/fcmp/fcmp_rust/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index 1e4879ab06b..d89d4e3b6d9 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -74,7 +74,8 @@ elseif(CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)") else() set(RUST_PLATFORM "unknown-linux") 
if(RUST_ARCH MATCHES "armv7") - set(RUST_TOOLCHAIN "-gnueabi") + # Rust does support non-HF, yet Monero assumes HF for armv7 + set(RUST_TOOLCHAIN "-gnueabihf") else() set(RUST_TOOLCHAIN "-gnu") endif() From d69e6bda1c93972d5e5b71f035fe7fb5c81e6911 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 21:02:28 -0400 Subject: [PATCH 054/127] Install Rust via the msys2 package on Windows (not the unavailable rustup) --- .github/workflows/build.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 43d2f2df8cf..f35c4185c97 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -68,14 +68,12 @@ jobs: - uses: msys2/setup-msys2@v2 with: update: true - install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git + install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git mingw-w64_x86_64-rust - shell: msys2 {0} run: | curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst pacman --noconfirm -U mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst - - name: Install rust - run: rustup update stable - name: build run: | ${{env.CCACHE_SETTINGS}} From 0620be1f5a4d7d019f6fae265224282935c91853 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 21:29:23 -0400 Subject: [PATCH 055/127] Cross-compile from Ubuntu 22.04 LLVM 17 can't talk with binutils 2.34 for RISC-V 
specifically. This updates binutils to 2.38. Upstream issue is https://github.com/rust-lang/rust/issues/117101. --- .github/workflows/depends.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index c6d42f3fa80..148a72832a2 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -22,7 +22,7 @@ env: jobs: build-cross: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 env: CCACHE_TEMPDIR: /tmp/.ccache-temp strategy: From 38f19350209a39c3a45b8e4eeb61df31b27b6ac4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 25 May 2024 22:08:57 -0400 Subject: [PATCH 056/127] Ubuntu 20.04, Rust 1.72 --- .github/workflows/depends.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index 148a72832a2..2c1f3d6ea6b 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -22,7 +22,7 @@ env: jobs: build-cross: - runs-on: ubuntu-22.04 + runs-on: ubuntu-20.04 env: CCACHE_TEMPDIR: /tmp/.ccache-temp strategy: @@ -107,7 +107,9 @@ jobs: - name: install dependencies run: sudo apt update; sudo apt -y install build-essential libtool cmake autotools-dev automake pkg-config bsdmainutils curl git ca-certificates ccache ${{ matrix.toolchain.packages }} - name: install rust target - run: rustup update stable; rustup target add ${{ matrix.toolchain.rust_host }} + # We can't use the latest Rust due to LLVM 17 not working with old `ld`s (such as in Ubuntu 20.04) for RISC-V + # We could update ld (a pain), update Ubuntu (requires a large amount of changes), or downgrade Rust + run: rustup toolchain install 1.72; rustup default 1.72; rustup target add ${{ matrix.toolchain.rust_host }} - name: prepare w64-mingw32 if: ${{ matrix.toolchain.host == 'x86_64-w64-mingw32' || matrix.toolchain.host == 'i686-w64-mingw32' }} run: | From 5d6a7fd0b593c6121c5211c128f103efe7eab4a2 Mon Sep 17 00:00:00 2001 
From: Luke Parker Date: Sat, 25 May 2024 22:29:16 -0400 Subject: [PATCH 057/127] _x86_64 -> -x86_64 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f35c4185c97..73741297794 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -68,7 +68,7 @@ jobs: - uses: msys2/setup-msys2@v2 with: update: true - install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git mingw-w64_x86_64-rust + install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git mingw-w64-x86_64-rust - shell: msys2 {0} run: | curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst From 866473675c113ff98627374d02ba0c6c4543b361 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 02:57:04 -0400 Subject: [PATCH 058/127] Normalize x86-64 to x86_64 --- src/fcmp/fcmp_rust/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index d89d4e3b6d9..20c205a9aae 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -26,7 +26,9 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-if(ARCH_ID MATCHES "(arm64|armv8a|armv8-a)") +if(ARCH_ID MATCHES "x86-64)") + set(RUST_ARCH "x86_64") +elseif(ARCH_ID MATCHES "(arm64|armv8a|armv8-a)") set(RUST_ARCH "aarch64") elseif(ARCH_ID MATCHES "armv7-a") set(RUST_ARCH "armv7") From 170324ae68e2fb4cb6382a2185dd9621dfff2a71 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 03:07:52 -0400 Subject: [PATCH 059/127] Rust 1.69 --- .github/workflows/build.yml | 2 ++ .github/workflows/depends.yml | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 73741297794..317534c0b29 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -74,6 +74,8 @@ jobs: curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst pacman --noconfirm -U mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst + # Update binutils if MinGW due to https://github.com/rust-lang/rust/issues/112368 + pacman -Syu --needed mingw-w64-x86_64-gcc --noconfirm - name: build run: | ${{env.CCACHE_SETTINGS}} diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index 2c1f3d6ea6b..e5daa4e814d 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -109,7 +109,8 @@ jobs: - name: install rust target # We can't use the latest Rust due to LLVM 17 not working with old `ld`s (such as in Ubuntu 20.04) for RISC-V # We could update ld (a pain), update Ubuntu (requires a large amount of changes), or downgrade Rust - run: rustup toolchain install 1.72; rustup default 1.72; rustup target add ${{ matrix.toolchain.rust_host }} + # We can't use Rust 1.70 due to LLVM 16 requiring ld >= 2.40 when building for Windows + run: rustup toolchain install 1.69; rustup default 1.69; rustup target add ${{ matrix.toolchain.rust_host }} - name: prepare 
w64-mingw32 if: ${{ matrix.toolchain.host == 'x86_64-w64-mingw32' || matrix.toolchain.host == 'i686-w64-mingw32' }} run: | From 98569b0e7f8bb0e330cb1276598024c8dd8a5f51 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 03:08:08 -0400 Subject: [PATCH 060/127] LTO off Attempts to solve conflicts on armv7. --- src/fcmp/fcmp_rust/Cargo.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp/fcmp_rust/Cargo.toml index b6ac65d6c5b..828cd2c2ecc 100644 --- a/src/fcmp/fcmp_rust/Cargo.toml +++ b/src/fcmp/fcmp_rust/Cargo.toml @@ -21,3 +21,9 @@ full-chain-membership-proofs = { git = "https://github.com/kayabaNerve/fcmp-plus [patch.crates-io] crypto-bigint = { git = "https://github.com/kayabaNerve/crypto-bigint", branch = "c-repr" } + +[profile.dev] +lto = "off" + +[profile.release] +lto = "off" From 6d6a2e4bd274a75812f9203d3392cbc0f5c578d2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 03:10:57 -0400 Subject: [PATCH 061/127] Correct typo in MATCHES statement --- src/fcmp/fcmp_rust/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index 20c205a9aae..0dab5c08bea 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -26,7 +26,7 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-if(ARCH_ID MATCHES "x86-64)") +if(ARCH_ID MATCHES "x86-64") set(RUST_ARCH "x86_64") elseif(ARCH_ID MATCHES "(arm64|armv8a|armv8-a)") set(RUST_ARCH "aarch64") From c6327cc0357e3a60af083a378bf61f6be11e3b3c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 03:39:02 -0400 Subject: [PATCH 062/127] Correct in-tree code to Rust 1.69 --- src/fcmp/fcmp_rust/Cargo.lock | 1 + src/fcmp/fcmp_rust/Cargo.toml | 2 ++ src/fcmp/fcmp_rust/src/lib.rs | 10 +++++----- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/fcmp/fcmp_rust/Cargo.lock b/src/fcmp/fcmp_rust/Cargo.lock index e9c67b67903..18edc208f94 100644 --- a/src/fcmp/fcmp_rust/Cargo.lock +++ b/src/fcmp/fcmp_rust/Cargo.lock @@ -249,6 +249,7 @@ dependencies = [ "generalized-bulletproofs", "helioselene", "rand_core", + "std-shims", ] [[package]] diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp/fcmp_rust/Cargo.toml index 828cd2c2ecc..f4efeff9c11 100644 --- a/src/fcmp/fcmp_rust/Cargo.toml +++ b/src/fcmp/fcmp_rust/Cargo.toml @@ -8,6 +8,8 @@ name = "fcmp_rust" crate-type = ["staticlib"] [dependencies] +std-shims = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + rand_core = { version = "0.6", features = ["getrandom"] } transcript = { package = "flexible-transcript", git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["recommended"] } diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 16d17510ba0..7907e692702 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -1,4 +1,4 @@ -use std::{io, sync::OnceLock}; +use std_shims::sync::OnceLock; use rand_core::OsRng; @@ -154,7 +154,7 @@ pub extern "C" fn hash_grow_helios( offset: usize, existing_child_at_offset: HeliosScalar, new_children: HeliosScalarSlice, -) -> CResult { +) -> CResult { let hash = hash_grow( helios_generators(), existing_hash, @@ -166,7 +166,7 @@ pub extern "C" fn hash_grow_helios( if let Some(hash) = hash { CResult::ok(hash) } else { - 
CResult::err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) + CResult::err(()) } } @@ -201,7 +201,7 @@ pub extern "C" fn hash_grow_selene( offset: usize, existing_child_at_offset: SeleneScalar, new_children: SeleneScalarSlice, -) -> CResult { +) -> CResult { let hash = hash_grow( selene_generators(), existing_hash, @@ -213,7 +213,7 @@ pub extern "C" fn hash_grow_selene( if let Some(hash) = hash { CResult::ok(hash) } else { - CResult::err(io::Error::new(io::ErrorKind::Other, "failed to grow hash")) + CResult::err(()) } } From 8eb3f29e687d4549b4ab6587871167ef02e49305 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 04:24:42 -0400 Subject: [PATCH 063/127] Link additional libs on Windows --- src/fcmp/fcmp_rust/CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index 0dab5c08bea..e1401d76eb2 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -115,6 +115,4 @@ add_custom_command( VERBATIM ) -#monero_find_all_headers(fcmp_rust_headers "${FCMP_RUST_HEADER_DIR}") add_custom_target(fcmp_rust DEPENDS ${FCMP_RUST_LIB}) -#target_link_libraries(fcmp ${FCMP_RUST_LIB}) From da9f1017038e0e06e80c2d6c2e583fa921e4cadf Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 04:27:50 -0400 Subject: [PATCH 064/127] Abort on panic, fix 32-bit Windows undefined reference --- src/fcmp/fcmp_rust/Cargo.toml | 2 ++ src/fcmp/fcmp_rust/src/lib.rs | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp/fcmp_rust/Cargo.toml index f4efeff9c11..871a6cb664a 100644 --- a/src/fcmp/fcmp_rust/Cargo.toml +++ b/src/fcmp/fcmp_rust/Cargo.toml @@ -26,6 +26,8 @@ crypto-bigint = { git = "https://github.com/kayabaNerve/crypto-bigint", branch = [profile.dev] lto = "off" +panic = "abort" [profile.release] lto = "off" +panic = "abort" diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 
7907e692702..69f66eb49ea 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -241,3 +241,7 @@ pub extern "C" fn hash_trim_selene( ) } } + +// https://github.com/rust-lang/rust/issues/79609 +#[no_mangle] +pub extern "C" fn _Unwind_Resume() {} From 16536f3d2b277820a3c93ba6ab536f94d0324605 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 05:01:40 -0400 Subject: [PATCH 065/127] Only provide dummy _Unwind_Resume on x86 Windows --- src/fcmp/fcmp_rust/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 69f66eb49ea..78aa50edada 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -243,5 +243,6 @@ pub extern "C" fn hash_trim_selene( } // https://github.com/rust-lang/rust/issues/79609 +#[cfg(all(target_os = "windows", target_arch = "x86"))] #[no_mangle] pub extern "C" fn _Unwind_Resume() {} From 1a44ceb905b798c111ac21c9c7cffeee0d9fcf68 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 05:02:42 -0400 Subject: [PATCH 066/127] Link additional libs on Windows (yet actually adding the relevant changes) --- src/fcmp/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index 22e5e5a80cb..4357875340e 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -41,6 +41,10 @@ monero_add_library_with_deps( ${fcmp_sources} ${fcmp_headers}) +if(WIN32) + target_link_libraries(fcmp ws2_32 ntdll userenv) +endif() + target_link_libraries(fcmp PUBLIC crypto From 23be5f6c2896a6da0fdf48a6b84fe5d919922224 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 26 May 2024 05:25:32 -0400 Subject: [PATCH 067/127] Use a single target_link_libraries call --- src/fcmp/CMakeLists.txt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index 4357875340e..5204931db80 100644 --- 
a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -42,7 +42,9 @@ monero_add_library_with_deps( ${fcmp_headers}) if(WIN32) - target_link_libraries(fcmp ws2_32 ntdll userenv) + set(EXTRA_RUST_LIBRARIES ws2_32 ntdll userenv) +else() + set(EXTRA_RUST_LIBRARIES ) endif() target_link_libraries(fcmp @@ -53,4 +55,5 @@ target_link_libraries(fcmp ringct PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust/libfcmp_rust.a - ${EXTRA_LIBRARIES}) + ${EXTRA_LIBRARIES} + ${EXTRA_RUST_LIBRARIES}) From aadea07b5152b4a63322de245a30b8c38a34f153 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 25 Jul 2024 18:06:53 -0700 Subject: [PATCH 068/127] Touch up merge for cross-compilation fixes --- src/fcmp/fcmp_rust/src/lib.rs | 14 ++++---------- src/fcmp/tower_cycle.cpp | 32 +++++++++++++++++++++++--------- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 78aa50edada..6e48ed5e6dc 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -176,7 +176,7 @@ pub extern "C" fn hash_trim_helios( offset: usize, children: HeliosScalarSlice, child_to_grow_back: HeliosScalar, -) -> CResult { +) -> CResult { let hash = hash_trim( helios_generators(), existing_hash, @@ -188,10 +188,7 @@ pub extern "C" fn hash_trim_helios( if let Some(hash) = hash { CResult::ok(hash) } else { - CResult::err( - HeliosPoint::identity(), - io::Error::new(io::ErrorKind::Other, "failed to trim hash"), - ) + CResult::err(()) } } @@ -223,7 +220,7 @@ pub extern "C" fn hash_trim_selene( offset: usize, children: SeleneScalarSlice, child_to_grow_back: SeleneScalar, -) -> CResult { +) -> CResult { let hash = hash_trim( selene_generators(), existing_hash, @@ -235,10 +232,7 @@ pub extern "C" fn hash_trim_selene( if let Some(hash) = hash { CResult::ok(hash) } else { - CResult::err( - SelenePoint::identity(), - io::Error::new(io::ErrorKind::Other, "failed to trim hash"), - ) + CResult::err(()) } } diff --git 
a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 5679a310d78..e1ce3f6f9af 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -64,7 +64,7 @@ Helios::Point Helios::hash_grow( } typename Helios::Point res; - memcpy(&res, result.value, sizeof(typename Selene::Point)); + memcpy(&res, result.value, sizeof(typename Helios::Point)); free(result.value); return res; } @@ -75,15 +75,22 @@ Helios::Point Helios::hash_trim( const Helios::Chunk &children, const Helios::Scalar &child_to_grow_back) const { - fcmp_rust::CResult res = fcmp_rust::hash_trim_helios( + auto result = fcmp_rust::hash_trim_helios( existing_hash, offset, children, child_to_grow_back); - if (res.err != 0) { - throw std::runtime_error("failed to hash trim"); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash trim"); } - return res.value; + + typename Helios::Point res; + memcpy(&res, result.value, sizeof(typename Helios::Point)); + free(result.value); + return res; } //---------------------------------------------------------------------------------------------------------------------- Selene::Point Selene::hash_grow( @@ -116,15 +123,22 @@ Selene::Point Selene::hash_trim( const Selene::Chunk &children, const Selene::Scalar &child_to_grow_back) const { - fcmp_rust::CResult res = fcmp_rust::hash_trim_selene( + auto result = fcmp_rust::hash_trim_selene( existing_hash, offset, children, child_to_grow_back); - if (res.err != 0) { - throw std::runtime_error("failed to hash trim"); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash trim"); } - return res.value; + + typename Selene::Point res; + memcpy(&res, result.value, sizeof(typename Selene::Point)); + free(result.value); + return res; } //---------------------------------------------------------------------------------------------------------------------- Helios::Scalar Helios::zero_scalar() const From 
420b4b6a781e3aef6a8e23cec39dc2b9d54eb263 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 26 Jul 2024 12:24:07 -0700 Subject: [PATCH 069/127] Resolve cross-compile errors uint64_t -> size_t where value is expected bounded to small value --- src/blockchain_db/blockchain_db.h | 1 + src/blockchain_db/lmdb/db_lmdb.cpp | 3 +- src/blockchain_db/lmdb/db_lmdb.h | 1 - src/fcmp/curve_trees.cpp | 54 +++++++++++++++--------------- src/fcmp/curve_trees.h | 24 ++++++------- 5 files changed, 42 insertions(+), 41 deletions(-) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 309a366415c..f0ee3b19f0b 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -32,6 +32,7 @@ #include #include +#include #include #include "common/command_line.h" #include "crypto/hash.h" diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 36c89ab5088..cbdb20e0674 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -2035,7 +2035,7 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); // Get the expected leaf chunk hash - const auto leaves = curve_trees.flatten_leaves(leaf_tuples_chunk); + const std::vector leaves = curve_trees.flatten_leaves(leaf_tuples_chunk); const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; // Hash the chunk of leaves @@ -6766,6 +6766,7 @@ void BlockchainLMDB::migrate_5_6() if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get a record from output amounts: ", result).c_str())); + // Read the output data uint64_t amount = *(const uint64_t*)k.mv_data; output_data_t output_data; fcmp::curve_trees::CurveTreesV1::LeafTupleContext tuple_context; diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index d490ce6e6d2..c1a1027ce38 100644 --- 
a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -27,7 +27,6 @@ #pragma once #include -#include #include "blockchain_db/blockchain_db.h" #include "cryptonote_basic/blobdatatype.h" // for type blobdata diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index bcdc85c7cb4..3e93ca3dc9a 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -94,10 +94,10 @@ template static LayerExtension hash_children_chunks(const C &curve, const typename C::Scalar *old_last_child, const typename C::Point *old_last_parent, - const uint64_t start_offset, + const std::size_t start_offset, const uint64_t next_parent_start_index, const std::vector &new_child_scalars, - const uint64_t chunk_width) + const std::size_t chunk_width) { LayerExtension parents_out; parents_out.start_idx = next_parent_start_index; @@ -108,7 +108,7 @@ static LayerExtension hash_children_chunks(const C &curve, CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); // See how many children we need to fill up the existing last chunk - uint64_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); + std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); @@ -150,7 +150,7 @@ static LayerExtension hash_children_chunks(const C &curve, } // Hash chunks of child scalars to create the parent hashes - uint64_t chunk_start_idx = chunk_size; + std::size_t chunk_start_idx = chunk_size; while (chunk_start_idx < new_child_scalars.size()) { chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); @@ -181,7 +181,7 @@ static LayerExtension hash_children_chunks(const C &curve, 
//---------------------------------------------------------------------------------------------------------------------- static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_total_children, const uint64_t new_total_children, - const uint64_t parent_chunk_width, + const std::size_t parent_chunk_width, const bool last_child_will_change) { // 1. Check pre-conditions on total number of children @@ -220,7 +220,7 @@ static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_tota // 4. Set the current offset in the last chunk // - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're // changing that last child - uint64_t offset = old_total_parents > 0 + std::size_t offset = old_total_parents > 0 ? (old_total_children % parent_chunk_width) : 0; @@ -284,8 +284,8 @@ static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_tota //---------------------------------------------------------------------------------------------------------------------- static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old_n_leaf_tuples, const uint64_t new_n_leaf_tuples, - const uint64_t leaf_tuple_size, - const uint64_t leaf_layer_chunk_width) + const std::size_t leaf_tuple_size, + const std::size_t leaf_layer_chunk_width) { // The leaf layer can never be the root layer const bool setting_next_layer_after_old_root = false; @@ -306,7 +306,7 @@ static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old // Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf const bool need_old_last_child = false; - const uint64_t offset = old_total_children % leaf_layer_chunk_width; + const std::size_t offset = old_total_children % leaf_layer_chunk_width; const bool last_chunk_is_full = offset == 0; const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full @@ -356,8 +356,8 @@ 
static LayerExtension get_next_layer_extension(const C_CHILD &c_child, const std::vector &child_last_hashes, const std::vector &parent_last_hashes, const std::vector> child_layer_extensions, - const uint64_t last_updated_child_idx, - const uint64_t last_updated_parent_idx) + const std::size_t last_updated_child_idx, + const std::size_t last_updated_parent_idx) { // TODO: comments const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size()) @@ -414,7 +414,7 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, static TrimLayerInstructions get_trim_layer_instructions( const uint64_t old_total_children, const uint64_t new_total_children, - const uint64_t parent_chunk_width, + const std::size_t parent_chunk_width, const bool last_child_will_change) { CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0"); @@ -430,8 +430,8 @@ static TrimLayerInstructions get_trim_layer_instructions( CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents, "new_total_children must be > new_total_parents"); - const uint64_t old_offset = old_total_children % parent_chunk_width; - const uint64_t new_offset = new_total_children % parent_chunk_width; + const std::size_t old_offset = old_total_children % parent_chunk_width; + const std::size_t new_offset = new_total_children % parent_chunk_width; // Get the number of existing children in what will become the new last chunk after trimming const uint64_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) @@ -444,7 +444,7 @@ static TrimLayerInstructions get_trim_layer_instructions( "unexpected new_last_chunk_old_num_children"); // Get the number of children we'll be trimming from the new last chunk - const uint64_t trim_n_children = new_offset == 0 + const std::size_t trim_n_children = new_offset == 0 ? 
0 // The last chunk wil remain full when the new_offset == 0 : new_last_chunk_old_num_children - new_offset; @@ -466,7 +466,7 @@ static TrimLayerInstructions get_trim_layer_instructions( const bool need_existing_last_hash = update_existing_last_hash && !need_last_chunk_remaining_children; // We need to decrement the offset we use to hash the chunk if the last child is changing - uint64_t hash_offset = new_offset; + std::size_t hash_offset = new_offset; if (last_child_will_change) { hash_offset = hash_offset == 0 @@ -539,8 +539,8 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc const std::vector &parent_last_hashes, const std::vector> &children_to_trim, const std::vector &child_last_hashes, - const uint64_t parent_layer_idx, - const uint64_t child_layer_idx, + const std::size_t parent_layer_idx, + const std::size_t child_layer_idx, const std::vector> &child_reductions) { LayerReduction layer_reduction_out; @@ -580,7 +580,7 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc else if (!trim_layer_instructions.need_last_chunk_children_to_trim) { // Falling to this conditional means we're not trimming at all, just updating the old last child - const uint64_t last_child_layer_idx = child_layer_idx - 1; + const std::size_t last_child_layer_idx = child_layer_idx - 1; CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; @@ -785,8 +785,8 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio // Alternate between hashing c2 children, c1 children, c2, c1, ... 
bool parent_is_c1 = true; - uint64_t c1_last_idx = 0; - uint64_t c2_last_idx = 0; + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; while (grow_layer_instructions.new_total_parents > 1) { MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); @@ -827,7 +827,7 @@ std::vector CurveTrees::get_trim_instructions( const uint64_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; const uint64_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE; - const uint64_t parent_chunk_width = m_leaf_layer_chunk_width; + const std::size_t parent_chunk_width = m_leaf_layer_chunk_width; // Leaf layer's last child never changes since leaf layer is pop-/append-only const bool last_child_will_change = false; @@ -871,8 +871,8 @@ typename CurveTrees::TreeReduction CurveTrees::get_tree_reductio tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples; bool use_c2 = true; - uint64_t c1_idx = 0; - uint64_t c2_idx = 0; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; for (const auto &trim_layer_instructions : trim_instructions) { @@ -928,8 +928,8 @@ GrowLayerInstructions CurveTrees::set_next_layer_extension( const GrowLayerInstructions &prev_layer_instructions, const bool parent_is_c1, const LastHashes &last_hashes, - uint64_t &c1_last_idx_inout, - uint64_t &c2_last_idx_inout, + std::size_t &c1_last_idx_inout, + std::size_t &c2_last_idx_inout, TreeExtension &tree_extension_inout) const { const auto &c1_last_hashes = last_hashes.c1_last_hashes; @@ -938,7 +938,7 @@ GrowLayerInstructions CurveTrees::set_next_layer_extension( auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions; auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions; - const uint64_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width; + const std::size_t parent_chunk_width = parent_is_c1 ? 
m_c1_width : m_c2_width; const auto grow_layer_instructions = get_grow_layer_instructions( prev_layer_instructions.old_total_parents, diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index fcad2a0d9b0..598341213a5 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -70,7 +70,7 @@ struct LayerReduction final struct GrowLayerInstructions final { // The max chunk width of children used to hash into a parent - uint64_t parent_chunk_width; + std::size_t parent_chunk_width; // Total children refers to the total number of elements in a layer uint64_t old_total_children; @@ -90,7 +90,7 @@ struct GrowLayerInstructions final bool need_old_last_parent; // The first chunk that needs to be updated's first child's offset within that chunk - uint64_t start_offset; + std::size_t start_offset; // The parent's starting index in the layer uint64_t next_parent_start_index; }; @@ -99,7 +99,7 @@ struct GrowLayerInstructions final struct TrimLayerInstructions final { // The max chunk width of children used to hash into a parent - uint64_t parent_chunk_width; + std::size_t parent_chunk_width; // Total children refers to the total number of elements in a layer uint64_t old_total_children; @@ -122,7 +122,7 @@ struct TrimLayerInstructions final bool need_new_last_child; // The offset to use when hashing the last chunk - uint64_t hash_offset; + std::size_t hash_offset; // The starting and ending indexes of the children we're going to need to trim the last chunk uint64_t start_trim_idx; @@ -161,7 +161,7 @@ class CurveTrees // Commitment x-coordinate typename C2::Scalar C_x; }; - static const uint64_t LEAF_TUPLE_SIZE = 3; + static const std::size_t LEAF_TUPLE_SIZE = 3; static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); // Contextual wrapper for leaf tuple @@ -261,8 +261,8 @@ class CurveTrees const GrowLayerInstructions &prev_layer_instructions, const bool parent_is_c1, const LastHashes &last_hashes, - 
uint64_t &c1_last_idx_inout, - uint64_t &c2_last_idx_inout, + std::size_t &c1_last_idx_inout, + std::size_t &c2_last_idx_inout, TreeExtension &tree_extension_inout) const; //public member variables @@ -272,11 +272,11 @@ class CurveTrees const C2 &m_c2; // The leaf layer has a distinct chunk width than the other layers - const uint64_t m_leaf_layer_chunk_width; + const std::size_t m_leaf_layer_chunk_width; // The chunk widths of the layers in the tree tied to each curve - const uint64_t m_c1_width; - const uint64_t m_c2_width; + const std::size_t m_c1_width; + const std::size_t m_c2_width; }; //---------------------------------------------------------------------------------------------------------------------- using Helios = tower_cycle::Helios; @@ -285,8 +285,8 @@ using CurveTreesV1 = CurveTrees; // https://github.com/kayabaNerve/fcmp-plus-plus/blob // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 -static const uint64_t HELIOS_CHUNK_WIDTH = 38; -static const uint64_t SELENE_CHUNK_WIDTH = 18; +static const std::size_t HELIOS_CHUNK_WIDTH = 38; +static const std::size_t SELENE_CHUNK_WIDTH = 18; static const Helios HELIOS; static const Selene SELENE; static const CurveTreesV1 curve_trees_v1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); From d36b6fe96fcd99e06f6cbb13db7bd92dba1b778f Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 26 Jul 2024 12:44:10 -0700 Subject: [PATCH 070/127] resolve rebase to master issues --- src/fcmp/fcmp_rust/fcmp++.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index 2648d3c7ff4..f1ef5c17cd0 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -115,12 +115,17 @@ CResult hash_grow_helios(HeliosPoint existing_hash, HeliosScalar existing_child_at_offset, HeliosScalarSlice new_children); +CResult hash_trim_helios(HeliosPoint existing_hash, + uintptr_t offset, + HeliosScalarSlice children, + 
HeliosScalar child_to_grow_back); + CResult hash_grow_selene(SelenePoint existing_hash, uintptr_t offset, SeleneScalar existing_child_at_offset, SeleneScalarSlice new_children); -CResult hash_trim_selene(SelenePoint existing_hash, +CResult hash_trim_selene(SelenePoint existing_hash, uintptr_t offset, SeleneScalarSlice children, SeleneScalar child_to_grow_back); From c383087955bf98ed0e85d41d64653b2f1b9cc3d2 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 26 Jul 2024 15:47:42 -0700 Subject: [PATCH 071/127] Instantiate m_curve_trees on BlockchainLMDB class in c'tor --- src/blockchain_db/blockchain_db.cpp | 5 +- src/blockchain_db/blockchain_db.h | 11 +-- src/blockchain_db/lmdb/db_lmdb.cpp | 83 ++++++++++-------- src/blockchain_db/lmdb/db_lmdb.h | 13 +-- src/blockchain_db/testdb.h | 7 +- src/fcmp/curve_trees.h | 10 +-- tests/unit_tests/curve_trees.cpp | 130 ++++++++++++++-------------- tests/unit_tests/unit_tests_utils.h | 9 +- 8 files changed, 136 insertions(+), 132 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 59ef9564259..b44777955e7 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -317,7 +317,8 @@ uint64_t BlockchainDB::add_block( const std::pair& blck std::multimap leaf_tuples_by_unlock_block; // Get miner tx's leaf tuples - fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + m_curve_trees->tx_outs_to_leaf_tuples( blk.miner_tx, miner_output_ids, prev_height, @@ -327,7 +328,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // Get all other txs' leaf tuples for (std::size_t i = 0; i < txs.size(); ++i) { - fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples( + m_curve_trees->tx_outs_to_leaf_tuples( txs[i].first, output_ids[i], prev_height, diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index f0ee3b19f0b..09036fbb440 
100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -591,12 +591,14 @@ class BlockchainDB HardFork* m_hardfork; + fcmp::curve_trees::CurveTreesV1* m_curve_trees; + public: /** * @brief An empty constructor. */ - BlockchainDB(): m_hardfork(NULL), m_open(false) { } + BlockchainDB(): m_hardfork(NULL), m_open(false), m_curve_trees(NULL) { } /** * @brief An empty destructor. @@ -1780,13 +1782,12 @@ class BlockchainDB virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; // TODO: description and make private - virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - std::vector &&new_leaves) = 0; + virtual void grow_tree(std::vector &&new_leaves) = 0; - virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) = 0; + virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; // TODO: description - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const = 0; + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index cbdb20e0674..0573e1b96d5 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -847,7 +847,7 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l // Grow the tree with outputs that unlock at this block height auto unlocked_leaf_tuples = this->get_leaf_tuples_at_unlock_block_id(m_height); - this->grow_tree(fcmp::curve_trees::curve_trees_v1, std::move(unlocked_leaf_tuples)); + this->grow_tree(std::move(unlocked_leaf_tuples)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table this->del_locked_leaf_tuples_at_block_id(m_height); @@ -1363,8 +1363,7 @@ void BlockchainLMDB::remove_spent_key(const 
crypto::key_image& k_image) } } -void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - std::vector &&new_leaves) +void BlockchainLMDB::grow_tree(std::vector &&new_leaves) { if (new_leaves.empty()) return; @@ -1384,7 +1383,8 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree const auto last_hashes = this->get_tree_last_hashes(); // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree - const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_leaves)); + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + const auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_leaves)); // Insert the leaves // TODO: grow_leaves @@ -1422,7 +1422,7 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree throw0(DB_ERROR(("Growing odd c2 layer, expected even layer idx for c1: " + std::to_string(layer_idx)).c_str())); - this->grow_layer(curve_trees.m_c2, + this->grow_layer(m_curve_trees->m_c2, c2_extensions, c2_idx, layer_idx); @@ -1435,7 +1435,7 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree throw0(DB_ERROR(("Growing even c1 layer, expected odd layer idx for c2: " + std::to_string(layer_idx)).c_str())); - this->grow_layer(curve_trees.m_c1, + this->grow_layer(m_curve_trees->m_c1, c1_extensions, c1_idx, layer_idx); @@ -1500,7 +1500,7 @@ void BlockchainLMDB::grow_layer(const C &curve, } } -void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) +void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) { // TODO: block_wtxn_start like pop_block, then call BlockchainDB::trim_tree LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1515,14 +1515,15 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 
&curve_tree const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); - const auto trim_instructions = curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + const auto trim_instructions = m_curve_trees->get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); // Do initial tree reads - const auto last_chunk_children_to_trim = this->get_last_chunk_children_to_trim(curve_trees, trim_instructions); + const auto last_chunk_children_to_trim = this->get_last_chunk_children_to_trim(trim_instructions); const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); // Get the new hashes, wrapped in a simple struct we can use to trim the tree - const auto tree_reduction = curve_trees.get_tree_reduction( + const auto tree_reduction = m_curve_trees->get_tree_reduction( trim_instructions, last_chunk_children_to_trim, last_hashes_to_trim); @@ -1727,12 +1728,12 @@ std::array BlockchainLMDB::get_tree_root() const if ((layer_idx % 2) == 0) { const auto *lv = (layer_val *)v.mv_data; - root = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(lv->child_chunk_hash); + root = m_curve_trees->m_c2.to_bytes(lv->child_chunk_hash); } else { const auto *lv = (layer_val *)v.mv_data; - root = fcmp::curve_trees::curve_trees_v1.m_c1.to_bytes(lv->child_chunk_hash); + root = m_curve_trees->m_c1.to_bytes(lv->child_chunk_hash); } } else if (result != MDB_NOTFOUND) @@ -1798,12 +1799,13 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes } fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_last_chunk_children_to_trim( - const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &trim_instructions) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); + 
CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + TXN_PREFIX_RDONLY(); RCURSOR(layers) @@ -1885,13 +1887,13 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las if (parent_is_c1) { const auto *lv = (layer_val *)v.mv_data; - auto child_scalar = curve_trees.m_c2.point_to_cycle_scalar(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c2.point_to_cycle_scalar(lv->child_chunk_hash); c1_children.emplace_back(std::move(child_scalar)); } else { const auto *lv = (layer_val *)v.mv_data; - auto child_scalar = curve_trees.m_c1.point_to_cycle_scalar(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c1.point_to_cycle_scalar(lv->child_chunk_hash); c2_children.emplace_back(std::move(child_scalar)); } @@ -1958,8 +1960,7 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t return last_hashes_out; } -bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const uint64_t expected_n_leaf_tuples) const +bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1984,6 +1985,8 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre return true; } + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + // Check chunks of leaves hash into first layer as expected uint64_t layer_idx = 0; uint64_t child_chunk_idx = 0; @@ -1992,7 +1995,7 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre { // Get next leaf chunk std::vector leaf_tuples_chunk; - leaf_tuples_chunk.reserve(curve_trees.m_c2_width); + leaf_tuples_chunk.reserve(m_curve_trees->m_c2_width); // Iterate until chunk is full or we get to the end of all leaves while (1) @@ -2008,7 +2011,7 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre const auto leaf = 
*(fcmp::curve_trees::CurveTreesV1::LeafTuple *)v.mv_data; leaf_tuples_chunk.push_back(leaf); - if (leaf_tuples_chunk.size() == curve_trees.m_c2_width) + if (leaf_tuples_chunk.size() == m_curve_trees->m_c2_width) break; } @@ -2035,23 +2038,23 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); // Get the expected leaf chunk hash - const std::vector leaves = curve_trees.flatten_leaves(leaf_tuples_chunk); + const auto leaves = m_curve_trees->flatten_leaves(leaf_tuples_chunk); const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; // Hash the chunk of leaves for (uint64_t i = 0; i < leaves.size(); ++i) - MDEBUG("Hashing " << curve_trees.m_c2.to_string(leaves[i])); + MDEBUG("Hashing " << m_curve_trees->m_c2.to_string(leaves[i])); - const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve_trees.m_c2, chunk); - MDEBUG("chunk_hash " << curve_trees.m_c2.to_string(chunk_hash) << " , hash init point: " - << curve_trees.m_c2.to_string(curve_trees.m_c2.m_hash_init_point) << " (" << leaves.size() << " leaves)"); + const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(m_curve_trees->m_c2, chunk); + MDEBUG("chunk_hash " << m_curve_trees->m_c2.to_string(chunk_hash) << " , hash init point: " + << m_curve_trees->m_c2.to_string(m_curve_trees->m_c2.m_hash_init_point) << " (" << leaves.size() << " leaves)"); // Now compare to value from the db const auto *lv = (layer_val *)v_parent.mv_data; - MDEBUG("Actual leaf chunk hash " << curve_trees.m_c2.to_string(lv->child_chunk_hash)); + MDEBUG("Actual leaf chunk hash " << m_curve_trees->m_c2.to_string(lv->child_chunk_hash)); - const auto expected_bytes = curve_trees.m_c2.to_bytes(chunk_hash); - const auto actual_bytes = curve_trees.m_c2.to_bytes(lv->child_chunk_hash); + const auto expected_bytes = m_curve_trees->m_c2.to_bytes(chunk_hash); + 
const auto actual_bytes = m_curve_trees->m_c2.to_bytes(lv->child_chunk_hash); CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); ++child_chunk_idx; @@ -2065,12 +2068,12 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre if (parent_is_c1) { if (this->audit_layer( - /*c_child*/ curve_trees.m_c2, - /*c_parent*/ curve_trees.m_c1, + /*c_child*/ m_curve_trees->m_c2, + /*c_parent*/ m_curve_trees->m_c1, layer_idx, /*child_start_idx*/ 0, /*child_chunk_idx*/ 0, - /*chunk_width*/ curve_trees.m_c1_width)) + /*chunk_width*/ m_curve_trees->m_c1_width)) { break; } @@ -2078,12 +2081,12 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre else { if (this->audit_layer( - /*c_child*/ curve_trees.m_c1, - /*c_parent*/ curve_trees.m_c2, + /*c_child*/ m_curve_trees->m_c1, + /*c_parent*/ m_curve_trees->m_c2, layer_idx, /*child_start_idx*/ 0, /*child_chunk_idx*/ 0, - /*chunk_width*/ curve_trees.m_c2_width)) + /*chunk_width*/ m_curve_trees->m_c2_width)) { break; } @@ -2291,7 +2294,7 @@ BlockchainLMDB::~BlockchainLMDB() BlockchainLMDB::close(); } -BlockchainLMDB::BlockchainLMDB(bool batch_transactions): BlockchainDB() +BlockchainLMDB::BlockchainLMDB(bool batch_transactions, fcmp::curve_trees::CurveTreesV1 *curve_trees): BlockchainDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); // initialize folder to something "safe" just in case @@ -2308,6 +2311,8 @@ BlockchainLMDB::BlockchainLMDB(bool batch_transactions): BlockchainDB() // reset may also need changing when initialize things here m_hardfork = nullptr; + + m_curve_trees = curve_trees; } void BlockchainLMDB::open(const std::string& filename, const int db_flags) @@ -2320,6 +2325,9 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) if (m_open) throw0(DB_OPEN_FAILURE("Attempted to open db, but it's already open")); + if (m_curve_trees == nullptr) + throw0(DB_OPEN_FAILURE("curve trees not set yet, must be set 
before opening db")); + boost::filesystem::path direc(filename); if (!boost::filesystem::exists(direc) && !boost::filesystem::create_directories(direc)) { @@ -6727,8 +6735,7 @@ void BlockchainLMDB::migrate_5_6() { LOGIF(el::Level::Info) { - // TODO: total num elems in m_output_amounts - std::cout << i << " / TODO outputs \r" << std::flush; + std::cout << i << " / " << n_outputs << " \r" << std::flush; } txn.commit(); result = mdb_txn_begin(m_env, NULL, 0, txn); @@ -6787,7 +6794,7 @@ void BlockchainLMDB::migrate_5_6() // Convert the output into a leaf tuple try { - tuple_context.leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple( + tuple_context.leaf_tuple = m_curve_trees->output_to_leaf_tuple( output_data.pubkey, rct::rct2pk(output_data.commitment)); } @@ -6888,7 +6895,7 @@ void BlockchainLMDB::migrate_5_6() // Get the leaf tuples that unlock at the given block auto unlocked_leaf_tuples = this->get_leaf_tuples_at_unlock_block_id(i); - this->grow_tree(fcmp::curve_trees::curve_trees_v1, std::move(unlocked_leaf_tuples)); + this->grow_tree(std::move(unlocked_leaf_tuples)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table this->del_locked_leaf_tuples_at_block_id(i); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index c1a1027ce38..86ece765a1a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -193,7 +193,7 @@ struct mdb_txn_safe class BlockchainLMDB : public BlockchainDB { public: - BlockchainLMDB(bool batch_transactions=true); + BlockchainLMDB(bool batch_transactions=true, fcmp::curve_trees::CurveTreesV1 *curve_trees=&fcmp::curve_trees::CURVE_TREES_V1); ~BlockchainLMDB(); virtual void open(const std::string& filename, const int mdb_flags=0); @@ -367,13 +367,11 @@ class BlockchainLMDB : public BlockchainDB static int compare_string(const MDB_val *a, const MDB_val *b); // make private - virtual void grow_tree(const 
fcmp::curve_trees::CurveTreesV1 &curve_trees, - std::vector &&new_leaves); + virtual void grow_tree(std::vector &&new_leaves); - virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples); + virtual void trim_tree(const uint64_t trim_n_leaf_tuples); - virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - const uint64_t expected_n_leaf_tuples) const; + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const; private: void do_resize(uint64_t size_increase=0); @@ -435,7 +433,6 @@ class BlockchainLMDB : public BlockchainDB fcmp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( - const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::vector &trim_instructions) const; fcmp::curve_trees::CurveTreesV1::LastHashes get_last_hashes_to_trim( @@ -547,8 +544,6 @@ class BlockchainLMDB : public BlockchainDB mdb_txn_cursors m_wcursors; mutable boost::thread_specific_ptr m_tinfo; - // TODO: m_curve_trees - #if defined(__arm__) // force a value so it can compile with 32-bit ARM constexpr static uint64_t DEFAULT_MAPSIZE = 1LL << 31; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index bdbd6f2ad80..0a643876d88 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,10 +116,9 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} - virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, - std::vector &&new_leaves) override {}; - virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) override {}; - 
virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const override { return false; }; + virtual void grow_tree(std::vector &&new_leaves) override {}; + virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 598341213a5..2e31bdce0b7 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -285,11 +285,11 @@ using CurveTreesV1 = CurveTrees; // https://github.com/kayabaNerve/fcmp-plus-plus/blob // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 -static const std::size_t HELIOS_CHUNK_WIDTH = 38; -static const std::size_t SELENE_CHUNK_WIDTH = 18; -static const Helios HELIOS; -static const Selene SELENE; -static const CurveTreesV1 curve_trees_v1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); +const std::size_t HELIOS_CHUNK_WIDTH = 38; +const std::size_t SELENE_CHUNK_WIDTH = 18; +const Helios HELIOS; +const Selene SELENE; +static CurveTreesV1 CURVE_TREES_V1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace curve_trees diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 5a3bb555520..04bacf0972c 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -852,7 +852,7 @@ static bool grow_tree_db(const std::size_t init_leaves, CurveTreesV1 &curve_trees, 
unit_test::BlockchainLMDBTest &test_db) { - INIT_BLOCKCHAIN_LMDB_TEST_DB(); + INIT_BLOCKCHAIN_LMDB_TEST_DB(&curve_trees); { cryptonote::db_wtxn_guard guard(test_db.m_db); @@ -861,8 +861,8 @@ static bool grow_tree_db(const std::size_t init_leaves, auto init_leaf_tuples = generate_random_leaves(curve_trees, 0, init_leaves); - test_db.m_db->grow_tree(curve_trees, std::move(init_leaf_tuples)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, + test_db.m_db->grow_tree(std::move(init_leaf_tuples)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, "failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " @@ -870,8 +870,8 @@ static bool grow_tree_db(const std::size_t init_leaves, auto ext_leaf_tuples = generate_random_leaves(curve_trees, init_leaves, ext_leaves); - test_db.m_db->grow_tree(curve_trees, std::move(ext_leaf_tuples)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves + ext_leaves), false, + test_db.m_db->grow_tree(std::move(ext_leaf_tuples)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves + ext_leaves), false, "failed to extend tree in db"); MDEBUG("Successfully extended tree in db by " << ext_leaves << " leaves"); @@ -885,7 +885,7 @@ static bool trim_tree_db(const std::size_t init_leaves, CurveTreesV1 &curve_trees, unit_test::BlockchainLMDBTest &test_db) { - INIT_BLOCKCHAIN_LMDB_TEST_DB(); + INIT_BLOCKCHAIN_LMDB_TEST_DB(&curve_trees); { cryptonote::db_wtxn_guard guard(test_db.m_db); @@ -894,15 +894,15 @@ static bool trim_tree_db(const std::size_t init_leaves, auto init_leaf_tuples = generate_random_leaves(curve_trees, 0, init_leaves); - test_db.m_db->grow_tree(curve_trees, std::move(init_leaf_tuples)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves), false, + test_db.m_db->grow_tree(std::move(init_leaf_tuples)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, 
"failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, trimming by " << trim_leaves << " leaves"); - test_db.m_db->trim_tree(curve_trees, trim_leaves); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(curve_trees, init_leaves - trim_leaves), false, + test_db.m_db->trim_tree(trim_leaves); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves - trim_leaves), false, "failed to trim tree in db"); MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); @@ -1037,29 +1037,29 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 2 scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); // Trim selene_scalar_1 const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; - const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( init_hash, 1, trimmed_children, - fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar()); - const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); + fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar()); + const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0} std::vector remaining_children{selene_scalar_0}; - const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - 
/*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1074,29 +1074,29 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; - const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); // Trim the initial result by 2 children const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; - const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( init_hash, 1, trimmed_children, - fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar()); - const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); + fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar()); + const auto trim_res_bytes = 
fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0} std::vector remaining_children{selene_scalar_0}; - const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1110,31 +1110,31 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 2 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); const auto selene_scalar_2 = generate_random_selene_scalar(); // Trim the 2nd child and grow with new child const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; - const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( + const auto trim_res = 
fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( init_hash, 1, trimmed_children, selene_scalar_2); - const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); + const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_2} std::vector remaining_children{selene_scalar_0, selene_scalar_2}; - const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1149,31 +1149,31 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; - const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); const auto selene_scalar_3 = 
generate_random_selene_scalar(); // Trim the initial result by 2 children+grow by 1 const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; - const auto trim_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_trim( + const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( init_hash, 1, trimmed_children, selene_scalar_3); - const auto trim_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(trim_res); + const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_3} std::vector remaining_children{selene_scalar_0, selene_scalar_3}; - const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1189,30 +1189,30 @@ TEST(curve_trees, hash_grow) // Get the initial hash of the 2 selene scalars std::vector all_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ 
fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); // Extend with a new child const auto selene_scalar_2 = generate_random_selene_scalar(); std::vector new_children{selene_scalar_2}; - const auto ext_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + const auto ext_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( init_hash, all_children.size(), - fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), Selene::Chunk{new_children.data(), new_children.size()}); - const auto ext_hash_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(ext_hash); + const auto ext_hash_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(ext_hash); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2} all_children.push_back(selene_scalar_2); - const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); ASSERT_EQ(ext_hash_bytes, grow_res_bytes); @@ -1220,21 +1220,21 @@ TEST(curve_trees, hash_grow) const auto selene_scalar_3 = generate_random_selene_scalar(); new_children.clear(); new_children = {selene_scalar_3}; - const auto ext_hash2 = 
fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( + const auto ext_hash2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( ext_hash, all_children.size(), - fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), Selene::Chunk{new_children.data(), new_children.size()}); - const auto ext_hash_bytes2 = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(ext_hash2); + const auto ext_hash_bytes2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(ext_hash2); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} all_children.push_back(selene_scalar_3); - const auto grow_res2 = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, + const auto grow_res2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); - const auto grow_res_bytes2 = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res2); + const auto grow_res_bytes2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res2); ASSERT_EQ(ext_hash_bytes2, grow_res_bytes2); } diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 13a3c4b58bc..b6f7d7f460d 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -34,6 +34,7 @@ #include "blockchain_db/blockchain_db.h" #include "blockchain_db/lmdb/db_lmdb.h" +#include "fcmp/curve_trees.h" #include "misc_log_ex.h" #include @@ -83,10 +84,10 @@ namespace unit_test remove_files(); } - void init_new_db() + void init_new_db(fcmp::curve_trees::CurveTreesV1 *curve_trees) { CHECK_AND_ASSERT_THROW_MES(this->m_db 
== nullptr, "expected nullptr m_db"); - this->m_db = new cryptonote::BlockchainLMDB(); + this->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); const auto temp_db_path = boost::filesystem::unique_path(); const std::string dir_path = m_temp_db_dir + temp_db_path.string(); @@ -111,8 +112,8 @@ namespace unit_test }; } -#define INIT_BLOCKCHAIN_LMDB_TEST_DB() \ - test_db.init_new_db(); \ +#define INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees) \ + test_db.init_new_db(curve_trees); \ auto hardfork = cryptonote::HardFork(*test_db.m_db, 1, 0); \ test_db.init_hardfork(&hardfork); \ auto scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ \ From 729e31df743ea36e43cca77e0c7dac9d81a6a57c Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 26 Jul 2024 16:18:13 -0700 Subject: [PATCH 072/127] include fcmp/curve_trees.h in db_lmdb.h --- src/blockchain_db/lmdb/db_lmdb.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 86ece765a1a..5d15ecf59ef 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -30,6 +30,7 @@ #include "blockchain_db/blockchain_db.h" #include "cryptonote_basic/blobdatatype.h" // for type blobdata +#include "fcmp/curve_trees.h" #include "ringct/rctTypes.h" #include From 16a8ce3a4562487acb6cef911c7a74c2aadfe138 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 26 Jul 2024 21:53:19 -0400 Subject: [PATCH 073/127] Add * point from bytes --- src/fcmp/fcmp_rust/src/lib.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 6e48ed5e6dc..60197349f14 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -78,6 +78,22 @@ pub extern "C" fn selene_point_to_bytes(selene_point: SelenePoint) -> *const u8 c_u8_32(selene_point.to_bytes()) } +#[allow(clippy::not_unsafe_ptr_arg_deref)] +#[no_mangle] +pub extern "C" fn 
helios_point_from_bytes(helios_point: *const u8) -> HeliosPoint { + let mut helios_point = unsafe { core::slice::from_raw_parts(helios_point, 32) }; + // TODO: Return an error here (instead of unwrapping) + ::read_G(&mut helios_point).unwrap() +} + +#[allow(clippy::not_unsafe_ptr_arg_deref)] +#[no_mangle] +pub extern "C" fn selene_point_from_bytes(selene_point: *const u8) -> SelenePoint { + let mut selene_point = unsafe { core::slice::from_raw_parts(selene_point, 32) }; + // TODO: Return an error here (instead of unwrapping) + ::read_G(&mut selene_point).unwrap() +} + // Get the x coordinate of the ed25519 point #[allow(clippy::not_unsafe_ptr_arg_deref)] #[no_mangle] From 54d5d0d5c7ce1dd0d84ece841fb0d62d519db21e Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 31 Jul 2024 18:13:23 -0700 Subject: [PATCH 074/127] fcmp++: add support for new fcmp types in cryptonote::transaction - Replace CLSAGs with a single fcmp_pp - fcmp_pp is an opaque vector of bytes. The length of the vector is calculated from the number of inputs on serialization (i.e. 
the length is not serialized, only the raw bytes are serialized) - Includes tests for binary serialization happy path and errors --- src/cryptonote_basic/cryptonote_basic.h | 3 +- .../cryptonote_boost_serialization.h | 18 +- .../cryptonote_format_utils.cpp | 6 +- src/fcmp/proof.h | 45 +++++ src/ringct/rctSigs.cpp | 5 +- src/ringct/rctSigs.h | 4 + src/ringct/rctTypes.h | 40 +++- tests/unit_tests/serialization.cpp | 177 ++++++++++++++++++ 8 files changed, 276 insertions(+), 22 deletions(-) create mode 100644 src/fcmp/proof.h diff --git a/src/cryptonote_basic/cryptonote_basic.h b/src/cryptonote_basic/cryptonote_basic.h index a50ae9c32d8..94624760939 100644 --- a/src/cryptonote_basic/cryptonote_basic.h +++ b/src/cryptonote_basic/cryptonote_basic.h @@ -306,7 +306,8 @@ namespace cryptonote ar.tag("rctsig_prunable"); ar.begin_object(); r = rct_signatures.p.serialize_rctsig_prunable(ar, rct_signatures.type, vin.size(), vout.size(), - vin.size() > 0 && vin[0].type() == typeid(txin_to_key) ? boost::get(vin[0]).key_offsets.size() - 1 : 0); + (vin.empty() || vin[0].type() != typeid(txin_to_key) || rct_signatures.type == rct::RCTTypeFcmpPlusPlus) + ? 
0 : boost::get(vin[0]).key_offsets.size() - 1); if (!r || !ar.good()) return false; ar.end_object(); } diff --git a/src/cryptonote_basic/cryptonote_boost_serialization.h b/src/cryptonote_basic/cryptonote_boost_serialization.h index 8948c650cd4..81da98a7819 100644 --- a/src/cryptonote_basic/cryptonote_boost_serialization.h +++ b/src/cryptonote_basic/cryptonote_boost_serialization.h @@ -330,7 +330,7 @@ namespace boost a & x.type; if (x.type == rct::RCTTypeNull) return; - if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus) + if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus && x.type != rct::RCTTypeFcmpPlusPlus) throw boost::archive::archive_exception(boost::archive::archive_exception::other_exception, "Unsupported rct type"); // a & x.message; message is not serialized, as it can be reconstructed from the tx data // a & x.mixRing; mixRing is not serialized, as it can be reconstructed from the offsets @@ -339,6 +339,8 @@ namespace boost a & x.ecdhInfo; serializeOutPk(a, x.outPk, ver); a & x.txnFee; + if (x.type == rct::RCTTypeFcmpPlusPlus) + a & x.referenceBlock; } template @@ -354,6 +356,8 @@ namespace boost a & x.MGs; if (ver >= 1u) a & x.CLSAGs; + if (ver >= 3u) + a & x.fcmp_pp; if (x.rangeSigs.empty()) a & x.pseudoOuts; } @@ -364,7 +368,7 @@ namespace boost a & x.type; if (x.type == rct::RCTTypeNull) return; - if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus) + if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != 
rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus && x.type != rct::RCTTypeFcmpPlusPlus) throw boost::archive::archive_exception(boost::archive::archive_exception::other_exception, "Unsupported rct type"); // a & x.message; message is not serialized, as it can be reconstructed from the tx data // a & x.mixRing; mixRing is not serialized, as it can be reconstructed from the offsets @@ -373,6 +377,8 @@ namespace boost a & x.ecdhInfo; serializeOutPk(a, x.outPk, ver); a & x.txnFee; + if (x.type == rct::RCTTypeFcmpPlusPlus) + a & x.referenceBlock; //-------------- a & x.p.rangeSigs; if (x.p.rangeSigs.empty()) @@ -384,7 +390,9 @@ namespace boost a & x.p.MGs; if (ver >= 1u) a & x.p.CLSAGs; - if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus) + if (ver >= 3u) + a & x.p.fcmp_pp; + if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus || x.type == rct::RCTTypeFcmpPlusPlus) a & x.p.pseudoOuts; } @@ -425,6 +433,6 @@ namespace boost } } -BOOST_CLASS_VERSION(rct::rctSigPrunable, 2) -BOOST_CLASS_VERSION(rct::rctSig, 2) +BOOST_CLASS_VERSION(rct::rctSigPrunable, 3) +BOOST_CLASS_VERSION(rct::rctSig, 3) BOOST_CLASS_VERSION(rct::multisig_out, 1) diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp b/src/cryptonote_basic/cryptonote_format_utils.cpp index 05dcac4e20d..094cd28a32a 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -106,7 +106,7 @@ namespace cryptonote uint64_t get_transaction_weight_clawback(const transaction &tx, size_t n_padded_outputs) { const rct::rctSig &rv = tx.rct_signatures; - const bool plus = rv.type == rct::RCTTypeBulletproofPlus; + const bool plus = rv.type == rct::RCTTypeBulletproofPlus || rv.type == rct::RCTTypeFcmpPlusPlus; const uint64_t 
bp_base = (32 * ((plus ? 6 : 9) + 7 * 2)) / 2; // notional size of a 2 output proof, normalized to 1 proof (ie, divided by 2) const size_t n_outputs = tx.vout.size(); if (n_padded_outputs <= 2) @@ -484,6 +484,7 @@ namespace cryptonote weight += extra; // calculate deterministic CLSAG/MLSAG data size + // TODO: update for fcmp_pp const size_t ring_size = boost::get(tx.vin[0]).key_offsets.size(); if (rct::is_rct_clsag(tx.rct_signatures.type)) extra = tx.vin.size() * (ring_size + 2) * 32; @@ -1292,7 +1293,8 @@ namespace cryptonote binary_archive ba(ss); const size_t inputs = t.vin.size(); const size_t outputs = t.vout.size(); - const size_t mixin = t.vin.empty() ? 0 : t.vin[0].type() == typeid(txin_to_key) ? boost::get(t.vin[0]).key_offsets.size() - 1 : 0; + const size_t mixin = (t.vin.empty() || t.rct_signatures.type == rct::RCTTypeFcmpPlusPlus || t.vin[0].type() != typeid(txin_to_key)) + ? 0 : boost::get(t.vin[0]).key_offsets.size() - 1; bool r = tt.rct_signatures.p.serialize_rctsig_prunable(ba, t.rct_signatures.type, inputs, outputs, mixin); CHECK_AND_ASSERT_MES(r, false, "Failed to serialize rct signatures prunable"); cryptonote::get_blob_hash(ss.str(), res); diff --git a/src/fcmp/proof.h b/src/fcmp/proof.h new file mode 100644 index 00000000000..89eb90d19a2 --- /dev/null +++ b/src/fcmp/proof.h @@ -0,0 +1,45 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include + +namespace fcmp +{ + +// Byte buffer containing the fcmp++ proof +using FcmpPpProof = std::vector; + +static inline std::size_t get_fcmp_pp_len_from_n_inputs(const std::size_t n_inputs) +{ + // TODO: implement + return n_inputs * 4; +}; + +}//namespace fcmp diff --git a/src/ringct/rctSigs.cpp b/src/ringct/rctSigs.cpp index 2d92ba05d4a..c96bc7bbaad 100644 --- a/src/ringct/rctSigs.cpp +++ b/src/ringct/rctSigs.cpp @@ -47,8 +47,7 @@ using namespace std; #define CHECK_AND_ASSERT_MES_L1(expr, ret, message) {if(!(expr)) {MCERROR("verify", message); return ret;}} -namespace -{ +namespace rct { rct::Bulletproof make_dummy_bulletproof(const std::vector &outamounts, rct::keyV &C, rct::keyV &masks) { const size_t n_outs = outamounts.size(); @@ -117,9 +116,7 @@ namespace const size_t n_scalars = ring_size; return rct::clsag{rct::keyV(n_scalars, I), I, I, I}; } -} -namespace rct { Bulletproof proveRangeBulletproof(keyV &C, keyV &masks, const std::vector &amounts, epee::span 
sk, hw::device &hwdev) { CHECK_AND_ASSERT_THROW_MES(amounts.size() == sk.size(), "Invalid amounts/sk sizes"); diff --git a/src/ringct/rctSigs.h b/src/ringct/rctSigs.h index 035d866d696..af533e49503 100644 --- a/src/ringct/rctSigs.h +++ b/src/ringct/rctSigs.h @@ -64,6 +64,10 @@ namespace hw { namespace rct { + // helpers for mock txs + Bulletproof make_dummy_bulletproof(const std::vector &outamounts, keyV &C, keyV &masks); + BulletproofPlus make_dummy_bulletproof_plus(const std::vector &outamounts, keyV &C, keyV &masks); + clsag make_dummy_clsag(size_t ring_size); boroSig genBorromean(const key64 x, const key64 P1, const key64 P2, const bits indices); bool verifyBorromean(const boroSig &bb, const key64 P1, const key64 P2); diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index 20b952c5e62..8598a4f04c4 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -45,7 +45,7 @@ extern "C" { } #include "crypto/generic-ops.h" #include "crypto/crypto.h" - +#include "fcmp/proof.h" #include "hex.h" #include "span.h" #include "memwipe.h" @@ -304,6 +304,7 @@ namespace rct { RCTTypeBulletproof2 = 4, RCTTypeCLSAG = 5, RCTTypeBulletproofPlus = 6, + RCTTypeFcmpPlusPlus = 7, }; enum RangeProofType { RangeProofBorromean, RangeProofBulletproof, RangeProofMultiOutputBulletproof, RangeProofPaddedBulletproof }; struct RCTConfig { @@ -325,9 +326,10 @@ namespace rct { std::vector ecdhInfo; ctkeyV outPk; xmr_amount txnFee; // contains b + crypto::hash referenceBlock; // block containing the merkle tree root used for fcmp's rctSigBase() : - type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0) + type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0), referenceBlock{} {} template class Archive> @@ -336,7 +338,7 @@ namespace rct { FIELD(type) if (type == RCTTypeNull) return ar.good(); - if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != 
RCTTypeCLSAG && type != RCTTypeBulletproofPlus) + if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus && type != RCTTypeFcmpPlusPlus) return false; VARINT_FIELD(txnFee) // inputs/outputs not saved, only here for serialization help @@ -365,7 +367,7 @@ namespace rct { return false; for (size_t i = 0; i < outputs; ++i) { - if (type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) + if (type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus) { // Since RCTTypeBulletproof2 enote types, we don't serialize the blinding factor, and only serialize the // first 8 bytes of ecdhInfo[i].amount @@ -401,6 +403,8 @@ namespace rct { ar.delimit_array(); } ar.end_array(); + if (type == RCTTypeFcmpPlusPlus) + FIELD(referenceBlock) return ar.good(); } @@ -412,6 +416,7 @@ namespace rct { FIELD(ecdhInfo) FIELD(outPk) VARINT_FIELD(txnFee) + FIELD(referenceBlock) END_SERIALIZE() }; struct rctSigPrunable { @@ -421,6 +426,7 @@ namespace rct { std::vector MGs; // simple rct has N, full has 1 std::vector CLSAGs; keyV pseudoOuts; //C - for simple rct + fcmp::FcmpPpProof fcmp_pp; // when changing this function, update cryptonote::get_pruned_transaction_weight template class Archive> @@ -434,9 +440,9 @@ namespace rct { return false; if (type == RCTTypeNull) return ar.good(); - if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus) + if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus && type != RCTTypeFcmpPlusPlus) return false; - if (type == RCTTypeBulletproofPlus) + if (type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus) { uint32_t nbp = 
bulletproofs_plus.size(); VARINT_FIELD(nbp) @@ -493,7 +499,20 @@ namespace rct { ar.end_array(); } - if (type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) + if (type == RCTTypeFcmpPlusPlus) + { + ar.begin_object(); + ar.tag("fcmp_pp"); + const std::size_t proof_len = fcmp::get_fcmp_pp_len_from_n_inputs(inputs); + PREPARE_CUSTOM_VECTOR_SERIALIZATION(proof_len, fcmp_pp); + if (fcmp_pp.size() != proof_len) + return false; + ar.serialize_blob(fcmp_pp.data(), proof_len); + if (!ar.good()) + return false; + ar.end_object(); + } + else if (type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) { ar.tag("CLSAGs"); ar.begin_array(); @@ -584,7 +603,7 @@ namespace rct { } ar.end_array(); } - if (type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) + if (type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus) { ar.tag("pseudoOuts"); ar.begin_array(); @@ -608,6 +627,7 @@ namespace rct { FIELD(bulletproofs_plus) FIELD(MGs) FIELD(CLSAGs) + FIELD(fcmp_pp) FIELD(pseudoOuts) END_SERIALIZE() }; @@ -616,12 +636,12 @@ namespace rct { keyV& get_pseudo_outs() { - return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus ? p.pseudoOuts : pseudoOuts; + return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus ? p.pseudoOuts : pseudoOuts; } keyV const& get_pseudo_outs() const { - return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus ? p.pseudoOuts : pseudoOuts; + return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus ? 
p.pseudoOuts : pseudoOuts; } BEGIN_SERIALIZE_OBJECT() diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index 9daa44351cb..1e37ee9fb40 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1304,3 +1304,180 @@ TEST(Serialization, tuple_many_tuples) EXPECT_EQ(tupler, tupler_recovered); } + +TEST(Serialization, tx_fcmp_pp) +{ + using namespace cryptonote; + + const std::size_t n_inputs = 2; + const std::size_t n_outputs = 3; + + const auto make_dummy_fcmp_pp_tx = [n_inputs, n_outputs]() -> transaction + { + transaction tx; + + tx.invalidate_hashes(); + tx.set_null(); + + tx.version = 2; + tx.rct_signatures.type = rct::RCTTypeFcmpPlusPlus; + + // Set inputs + txin_to_key txin_to_key1; + txin_to_key1.amount = 1; + memset(&txin_to_key1.k_image, 0x42, sizeof(crypto::key_image)); + txin_to_key1.key_offsets.clear(); + tx.vin.clear(); + for (size_t i = 0; i < n_inputs; ++i) + tx.vin.push_back(txin_to_key1); + + // Set outputs + const uint64_t amount = 1; + std::vector out_amounts; + tx_out vout; + set_tx_out(amount, crypto::public_key{}, true, crypto::view_tag{}, vout); + for (size_t i = 0; i < n_outputs; ++i) + { + tx.vout.push_back(vout); + out_amounts.push_back(amount); + } + + // 1 ecdhTuple for each output + rct::ecdhTuple ecdhInfo; + memset(&ecdhInfo.mask, 0x01, sizeof(rct::key)); + memset(&ecdhInfo.amount, 0x02, sizeof(rct::key)); + for (size_t i = 0; i < n_outputs; ++i) + tx.rct_signatures.ecdhInfo.push_back(ecdhInfo); + + // 1 outPk for each output + rct::ctkey ctkey; + memset(&ctkey.dest, 0x01, sizeof(rct::key)); + memset(&ctkey.mask, 0x02, sizeof(rct::key)); + for (size_t i = 0; i < n_outputs; ++i) + tx.rct_signatures.outPk.push_back(ctkey); + + // 1 bp+ + rct::keyV C, masks; + tx.rct_signatures.p.bulletproofs_plus.push_back(rct::make_dummy_bulletproof_plus(out_amounts, C, masks)); + + // 1 pseudoOut for each input + const rct::key pseudoOut{0x01}; + for (size_t i = 0; i < n_inputs; ++i) + 
tx.rct_signatures.p.pseudoOuts.push_back(pseudoOut); + + // Set the reference block for fcmp++ + const crypto::hash referenceBlock{0x01}; + tx.rct_signatures.referenceBlock = referenceBlock; + + // 1 fcmp++ proof + fcmp::FcmpPpProof fcmp_pp; + const std::size_t proof_len = fcmp::get_fcmp_pp_len_from_n_inputs(n_inputs); + fcmp_pp.reserve(proof_len); + for (std::size_t i = 0; i < proof_len; ++i) + fcmp_pp.push_back(i); + tx.rct_signatures.p.fcmp_pp = std::move(fcmp_pp); + + return tx; + }; + + // 1. Set up a normal tx that includes an fcmp++ proof + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + ASSERT_TRUE(serialization::parse_binary(blob, tx1)); + ASSERT_EQ(tx, tx1); + ASSERT_EQ(tx.rct_signatures.referenceBlock, crypto::hash{0x01}); + ASSERT_EQ(tx.rct_signatures.referenceBlock, tx1.rct_signatures.referenceBlock); + ASSERT_EQ(tx.rct_signatures.p.fcmp_pp, tx1.rct_signatures.p.fcmp_pp); + } + + // 2. fcmp++ proof is longer than expected when serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + string blob; + + // Extend fcmp++ proof + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::get_fcmp_pp_len_from_n_inputs(n_inputs)); + tx.rct_signatures.p.fcmp_pp.push_back(0x01); + + ASSERT_FALSE(serialization::dump_binary(tx, blob)); + } + + // 3. 
fcmp++ proof is shorter than expected when serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + + // Shorten the fcmp++ proof + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::get_fcmp_pp_len_from_n_inputs(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() > 1); + tx.rct_signatures.p.fcmp_pp.pop_back(); + + string blob; + ASSERT_FALSE(serialization::dump_binary(tx, blob)); + } + + const auto fcmp_pp_to_hex_str = [](const transaction &tx) + { + std::string fcmp_pp_str; + for (std::size_t i = 0; i < tx.rct_signatures.p.fcmp_pp.size(); ++i) + { + std::stringstream ss; + ss << std::hex << std::setfill('0') << std::setw(2) << (int)tx.rct_signatures.p.fcmp_pp[i]; + fcmp_pp_str += ss.str(); + } + return fcmp_pp_str; + }; + + // 4. fcmp++ proof is longer than expected when de-serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + + std::string blob_str = epee::string_tools::buff_to_hex_nodelimer(blob); + + // Find the proof within the serialized tx blob + const std::string fcmp_pp_str = fcmp_pp_to_hex_str(tx); + ASSERT_TRUE(!fcmp_pp_str.empty()); + const std::size_t pos = blob_str.find(fcmp_pp_str); + ASSERT_TRUE(pos != std::string::npos); + ASSERT_TRUE(blob_str.find(fcmp_pp_str, pos + 1) == std::string::npos); + + // Insert an extra proof elem + blob_str.insert(pos, "2a"); + std::string larger_blob; + epee::string_tools::parse_hexstr_to_binbuff(blob_str, larger_blob); + + ASSERT_FALSE(serialization::parse_binary(larger_blob, tx1)); + } + + // 5. 
fcmp++ proof is shorter than expected when de-serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + + std::string blob_str = epee::string_tools::buff_to_hex_nodelimer(blob); + + // Find the proof within the serialized tx blob + const std::string fcmp_pp_str = fcmp_pp_to_hex_str(tx); + ASSERT_TRUE(!fcmp_pp_str.empty()); + const std::size_t pos = blob_str.find(fcmp_pp_str); + ASSERT_TRUE(pos != std::string::npos); + ASSERT_TRUE(blob_str.find(fcmp_pp_str, pos + 1) == std::string::npos); + + // Delete a proof elem + blob_str.erase(pos, 2); + std::string smaller_blob; + epee::string_tools::parse_hexstr_to_binbuff(blob_str, smaller_blob); + + ASSERT_FALSE(serialization::parse_binary(smaller_blob, tx1)); + } +} From e40c5bb0fc70e0b710d121cacf6005cb7445d00f Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 31 Jul 2024 22:50:17 -0700 Subject: [PATCH 075/127] fix json tagging in fcmp_pp serialization --- src/ringct/rctTypes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index 8598a4f04c4..ede67f7cd7f 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -501,8 +501,8 @@ namespace rct { if (type == RCTTypeFcmpPlusPlus) { - ar.begin_object(); ar.tag("fcmp_pp"); + ar.begin_object(); const std::size_t proof_len = fcmp::get_fcmp_pp_len_from_n_inputs(inputs); PREPARE_CUSTOM_VECTOR_SERIALIZATION(proof_len, fcmp_pp); if (fcmp_pp.size() != proof_len) From 95114f925306eb264fe75e938e59ea6f031edb2d Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 31 Jul 2024 23:15:08 -0700 Subject: [PATCH 076/127] Use explicit code instead of macro in custom fcmp_pp serialization It's slightly different than other usages of the macro, so figured it makes sense to just write out the code to do what I expect it to do --- src/ringct/rctTypes.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index ede67f7cd7f..1d237c6e2b1 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -504,7 +504,8 @@ namespace rct { ar.tag("fcmp_pp"); ar.begin_object(); const std::size_t proof_len = fcmp::get_fcmp_pp_len_from_n_inputs(inputs); - PREPARE_CUSTOM_VECTOR_SERIALIZATION(proof_len, fcmp_pp); + if (!typename Archive::is_saving()) + fcmp_pp.resize(proof_len); if (fcmp_pp.size() != proof_len) return false; ar.serialize_blob(fcmp_pp.data(), proof_len); From 34eafa85f358cfbd9dd1424989b2f5f5418b7e1a Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 1 Aug 2024 10:04:51 -0700 Subject: [PATCH 077/127] Store points in the tree in compressed encoding (32 bytes) --- src/blockchain_db/lmdb/db_lmdb.cpp | 88 ++++++++++++++---------------- src/blockchain_db/lmdb/db_lmdb.h | 4 +- src/fcmp/curve_trees.cpp | 1 - src/fcmp/curve_trees.h | 1 - src/fcmp/fcmp_rust/fcmp++.h | 4 ++ src/fcmp/tower_cycle.cpp | 10 ++++ src/fcmp/tower_cycle.h | 6 ++ 7 files changed, 65 insertions(+), 49 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 0573e1b96d5..dbb7fd903c8 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -216,6 +216,7 @@ namespace * * spent_keys input hash - * + * TODO: don't store leaf tuples, store reference to outputs * locked_leaves block ID [{output ID, leaf tuple}...] * leaves leaf_idx {leaf tuple} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] 
@@ -392,11 +393,9 @@ typedef struct outtx { uint64_t local_index; } outtx; -template struct layer_val { uint64_t child_chunk_idx; - // TODO: use compressed 32 byte point; also need a from_bytes implemented on rust side - typename C::Point child_chunk_hash; + std::array child_chunk_hash; }; std::atomic mdb_txn_safe::num_active_txns{0}; @@ -1471,9 +1470,9 @@ void BlockchainLMDB::grow_layer(const C &curve, if (ext.update_existing_last_hash) { // We updated the last hash, so update it - layer_val lv; + layer_val lv; lv.child_chunk_idx = ext.start_idx; - lv.child_chunk_hash = ext.hashes.front(); + lv.child_chunk_hash = curve.to_bytes(ext.hashes.front()); MDB_val_set(v, lv); // We expect to overwrite the existing hash @@ -1486,9 +1485,9 @@ void BlockchainLMDB::grow_layer(const C &curve, // Now add all the new hashes found in the extension for (uint64_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i) { - layer_val lv; + layer_val lv; lv.child_chunk_idx = i + ext.start_idx; - lv.child_chunk_hash = ext.hashes[i]; + lv.child_chunk_hash = curve.to_bytes(ext.hashes[i]); MDB_val_set(v, lv); // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. 
@@ -1570,14 +1569,14 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); const auto &c2_reduction = c2_layer_reductions[c2_idx]; - trim_layer(c2_reduction, i); + this->trim_layer(m_curve_trees->m_c2, c2_reduction, i); ++c2_idx; } else { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction"); const auto &c1_reduction = c1_layer_reductions[c1_idx]; - trim_layer(c1_reduction, i); + this->trim_layer(m_curve_trees->m_c1, c1_reduction, i); ++c1_idx; } @@ -1614,7 +1613,8 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) } template -void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, +void BlockchainLMDB::trim_layer(const C &curve, + const fcmp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1639,7 +1639,7 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &laye if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); - const auto *lv = (layer_val *)v.mv_data; + const auto *lv = (layer_val *)v.mv_data; old_n_elems_in_layer = (1 + lv->child_chunk_idx); } @@ -1669,9 +1669,9 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction &laye // Update the last element if needed if (layer_reduction.update_existing_last_hash) { - layer_val lv; + layer_val lv; lv.child_chunk_idx = layer_reduction.new_total_parents - 1; - lv.child_chunk_hash = layer_reduction.new_last_hash; + lv.child_chunk_hash = curve.to_bytes(layer_reduction.new_last_hash); MDB_val_set(v, lv); // We expect to overwrite the existing hash @@ -1724,17 +1724,8 @@ std::array BlockchainLMDB::get_tree_root() const int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); if (result == MDB_SUCCESS) { - const uint64_t layer_idx = 
*(uint64_t*)k.mv_data; - if ((layer_idx % 2) == 0) - { - const auto *lv = (layer_val *)v.mv_data; - root = m_curve_trees->m_c2.to_bytes(lv->child_chunk_hash); - } - else - { - const auto *lv = (layer_val *)v.mv_data; - root = m_curve_trees->m_c1.to_bytes(lv->child_chunk_hash); - } + auto *lv = (layer_val *)v.mv_data; + root = std::move(lv->child_chunk_hash); } else if (result != MDB_NOTFOUND) throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); @@ -1776,18 +1767,19 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get last record in layer: ", result).c_str())); + const auto *lv = (layer_val *)v.mv_data; + MDEBUG("Reading last hash at layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); + const bool use_c2 = (layer_idx % 2) == 0; if (use_c2) { - const auto *lv = (layer_val *)v.mv_data; - MDEBUG("Selene, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - c2_last_hashes.emplace_back(std::move(lv->child_chunk_hash)); + auto point = m_curve_trees->m_c2.from_bytes(lv->child_chunk_hash); + c2_last_hashes.emplace_back(std::move(point)); } else { - const auto *lv = (layer_val *)v.mv_data; - MDEBUG("Helios, layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); - c1_last_hashes.emplace_back(std::move(lv->child_chunk_hash)); + auto point = m_curve_trees->m_c1.from_bytes(lv->child_chunk_hash); + c1_last_hashes.emplace_back(std::move(point)); } ++layer_idx; @@ -1884,16 +1876,17 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + const auto *lv = (layer_val *)v.mv_data; if (parent_is_c1) { - const auto *lv = (layer_val *)v.mv_data; - auto child_scalar = m_curve_trees->m_c2.point_to_cycle_scalar(lv->child_chunk_hash); + const auto point = 
m_curve_trees->m_c2.from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c2.point_to_cycle_scalar(point); c1_children.emplace_back(std::move(child_scalar)); } else { - const auto *lv = (layer_val *)v.mv_data; - auto child_scalar = m_curve_trees->m_c1.point_to_cycle_scalar(lv->child_chunk_hash); + const auto point = m_curve_trees->m_c1.from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c1.point_to_cycle_scalar(point); c2_children.emplace_back(std::move(child_scalar)); } @@ -1941,15 +1934,16 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + const auto *lv = (layer_val *)v.mv_data; if ((layer_idx % 2) == 0) { - const auto *lv = (layer_val *)v.mv_data; - last_hashes_out.c2_last_hashes.push_back(lv->child_chunk_hash); + auto point = m_curve_trees->m_c2.from_bytes(lv->child_chunk_hash); + last_hashes_out.c2_last_hashes.emplace_back(std::move(point)); } else { - const auto *lv = (layer_val *)v.mv_data; - last_hashes_out.c1_last_hashes.push_back(lv->child_chunk_hash); + auto point = m_curve_trees->m_c1.from_bytes(lv->child_chunk_hash); + last_hashes_out.c1_last_hashes.emplace_back(std::move(point)); } ++layer_idx; @@ -2050,11 +2044,11 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const << m_curve_trees->m_c2.to_string(m_curve_trees->m_c2.m_hash_init_point) << " (" << leaves.size() << " leaves)"); // Now compare to value from the db - const auto *lv = (layer_val *)v_parent.mv_data; - MDEBUG("Actual leaf chunk hash " << m_curve_trees->m_c2.to_string(lv->child_chunk_hash)); + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual leaf chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); const auto expected_bytes = m_curve_trees->m_c2.to_bytes(chunk_hash); - const auto actual_bytes = m_curve_trees->m_c2.to_bytes(lv->child_chunk_hash); + const 
auto actual_bytes = lv->child_chunk_hash; CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); ++child_chunk_idx; @@ -2133,8 +2127,10 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); - const auto *child = (layer_val*)v_child.mv_data; - child_chunk.push_back(child->child_chunk_hash); + const auto *lv = (layer_val *)v_child.mv_data; + auto child_point = c_child.from_bytes(lv->child_chunk_hash); + + child_chunk.emplace_back(std::move(child_point)); if (child_chunk.size() == chunk_width) break; @@ -2192,10 +2188,10 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " , hash init point: " << c_parent.to_string(c_parent.m_hash_init_point) << " (" << child_scalars.size() << " children)"); - const auto *lv = (layer_val *)v_parent.mv_data; - MDEBUG("Actual chunk hash " << c_parent.to_string(lv->child_chunk_hash)); + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); - const auto actual_bytes = c_parent.to_bytes(lv->child_chunk_hash); + const auto actual_bytes = lv->child_chunk_hash; const auto expected_bytes = c_parent.to_bytes(chunk_hash); if (actual_bytes != expected_bytes) throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str())); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 5d15ecf59ef..834b2bf1e9b 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -425,7 +425,9 @@ class BlockchainLMDB : public BlockchainDB const uint64_t layer_idx); template - void trim_layer(const fcmp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx); + void trim_layer(const C &curve, + const fcmp::curve_trees::LayerReduction &layer_reduction, + const uint64_t 
layer_idx); uint64_t get_num_leaf_tuples() const; diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 3e93ca3dc9a..90d84d87140 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -913,7 +913,6 @@ typename CurveTrees::TreeReduction CurveTrees::get_tree_reductio ++c1_idx; } - use_c2 = !use_c2; } diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 2e31bdce0b7..8218a9edaca 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -37,7 +37,6 @@ #include - namespace fcmp { namespace curve_trees diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index f1ef5c17cd0..d222dd91072 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -100,6 +100,10 @@ uint8_t *helios_point_to_bytes(HeliosPoint helios_point); uint8_t *selene_point_to_bytes(SelenePoint selene_point); +HeliosPoint helios_point_from_bytes(const uint8_t *helios_point_bytes); + +SelenePoint selene_point_from_bytes(const uint8_t *selene_point_bytes); + SeleneScalar ed25519_point_to_selene_scalar(const uint8_t *ed25519_point); HeliosScalar selene_point_to_helios_scalar(SelenePoint selene_point); diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index e1ce3f6f9af..75c73af1c1c 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -187,6 +187,16 @@ std::array Selene::to_bytes(const Selene::Point &point) const return res; } //---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::from_bytes(const std::array &bytes) const +{ + return fcmp_rust::helios_point_from_bytes(bytes.data()); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::from_bytes(const std::array &bytes) const +{ + return fcmp_rust::selene_point_from_bytes(bytes.data()); +} 
+//---------------------------------------------------------------------------------------------------------------------- std::string Helios::to_string(const typename Helios::Scalar &scalar) const { return epee::string_tools::pod_to_hex(this->to_bytes(scalar)); diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 6e8cb6f507b..6b083638466 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -94,6 +94,8 @@ class Curve virtual std::array to_bytes(const typename C::Scalar &scalar) const = 0; virtual std::array to_bytes(const typename C::Point &point) const = 0; + virtual typename C::Point from_bytes(const std::array &bytes) const = 0; + virtual std::string to_string(const typename C::Scalar &scalar) const = 0; virtual std::string to_string(const typename C::Point &point) const = 0; @@ -139,6 +141,8 @@ class Helios final : public Curve std::array to_bytes(const Scalar &scalar) const override; std::array to_bytes(const Point &point) const override; + Point from_bytes(const std::array &bytes) const override; + std::string to_string(const Scalar &scalar) const override; std::string to_string(const Point &point) const override; }; @@ -179,6 +183,8 @@ class Selene final : public Curve std::array to_bytes(const Scalar &scalar) const override; std::array to_bytes(const Point &point) const override; + Point from_bytes(const std::array &bytes) const override; + std::string to_string(const Scalar &scalar) const override; std::string to_string(const Point &point) const override; }; From b90cee8bab14cb3f1a09462a494608d930c83981 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 2 Aug 2024 10:28:13 -0700 Subject: [PATCH 078/127] Store {O,C} for each leaf tuple instead of {O.x,I.x,C.x} - Can derive {O.x,I.x,C.x} from {O,C} - Note: this slows down tests since they do the derivation both on insertion into the tree, and when auditing the tree - At the hard fork, we don't need to store {O,C} in the output_amounts table anymore since that table will no longer 
be useful --- src/blockchain_db/blockchain_db.cpp | 8 +-- src/blockchain_db/blockchain_db.h | 4 +- src/blockchain_db/lmdb/db_lmdb.cpp | 52 ++++++++------- src/blockchain_db/lmdb/db_lmdb.h | 6 +- src/blockchain_db/testdb.h | 4 +- src/fcmp/curve_trees.cpp | 70 ++++++++++++++------- src/fcmp/curve_trees.h | 42 ++++++++----- tests/block_weight/block_weight.cpp | 2 +- tests/core_tests/chaingen.cpp | 2 +- tests/unit_tests/curve_trees.cpp | 35 +++++++---- tests/unit_tests/hardfork.cpp | 2 +- tests/unit_tests/long_term_block_weight.cpp | 2 +- 12 files changed, 144 insertions(+), 85 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index b44777955e7..5e3d48efbaf 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -231,7 +231,7 @@ std::vector BlockchainDB::add_transaction(const crypto::hash& blk_hash { // miner v2 txes have their coinbase output in one single out to save space, // and we store them as rct outputs with an identity mask - // note: tx_outs_to_leaf_tuples in curve_trees.cpp mirrors this logic + // note: tx_outs_to_leaf_tuple_contexts in curve_trees.cpp mirrors this logic if (miner_tx && tx.version == 2) { cryptonote::tx_out vout = tx.vout[i]; @@ -314,11 +314,11 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // When adding a block, we also need to add all the leaf tuples included in // the block to a table keeping track of locked leaf tuples. Once those leaf // tuples unlock, we use them to grow the tree. 
- std::multimap leaf_tuples_by_unlock_block; + std::multimap leaf_tuples_by_unlock_block; // Get miner tx's leaf tuples CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); - m_curve_trees->tx_outs_to_leaf_tuples( + m_curve_trees->tx_outs_to_leaf_tuple_contexts( blk.miner_tx, miner_output_ids, prev_height, @@ -328,7 +328,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // Get all other txs' leaf tuples for (std::size_t i = 0; i < txs.size(); ++i) { - m_curve_trees->tx_outs_to_leaf_tuples( + m_curve_trees->tx_outs_to_leaf_tuple_contexts( txs[i].first, output_ids[i], prev_height, diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 09036fbb440..5f600e408ba 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -417,7 +417,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) = 0; /** @@ -1782,7 +1782,7 @@ class BlockchainDB virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; // TODO: description and make private - virtual void grow_tree(std::vector &&new_leaves) = 0; + virtual void grow_tree(std::vector &&new_leaves) = 0; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index dbb7fd903c8..561d2e3fd89 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -216,7 +216,6 @@ namespace * * spent_keys input hash - * - * TODO: don't store leaf tuples, store reference to outputs * locked_leaves block ID [{output ID, leaf tuple}...] * leaves leaf_idx {leaf tuple} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] 
@@ -816,7 +815,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_block) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_block) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -845,8 +844,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l } // Grow the tree with outputs that unlock at this block height - auto unlocked_leaf_tuples = this->get_leaf_tuples_at_unlock_block_id(m_height); - this->grow_tree(std::move(unlocked_leaf_tuples)); + auto unlocked_leaves = this->get_leaf_tuples_at_unlock_block_id(m_height); + this->grow_tree(std::move(unlocked_leaves)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table this->del_locked_leaf_tuples_at_block_id(m_height); @@ -1362,7 +1361,7 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } -void BlockchainLMDB::grow_tree(std::vector &&new_leaves) +void BlockchainLMDB::grow_tree(std::vector &&new_leaves) { if (new_leaves.empty()) return; @@ -1815,6 +1814,8 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx) { + leaves_to_trim.reserve(trim_leaf_layer_instructions.end_trim_idx - trim_leaf_layer_instructions.start_trim_idx); + uint64_t idx = trim_leaf_layer_instructions.start_trim_idx; CHECK_AND_ASSERT_THROW_MES(idx % fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisble by leaf tuple size"); @@ -1833,11 +1834,14 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las 
if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); - const auto leaf = *(fcmp::curve_trees::CurveTreesV1::LeafTuple *)v.mv_data; + const auto preprocessed_leaf_tuple = *(fcmp::curve_trees::PreprocessedLeafTuple *)v.mv_data; + + // TODO: parallelize calls to this function + auto leaf = m_curve_trees->leaf_tuple(preprocessed_leaf_tuple); - leaves_to_trim.push_back(leaf.O_x); - leaves_to_trim.push_back(leaf.I_x); - leaves_to_trim.push_back(leaf.C_x); + leaves_to_trim.emplace_back(std::move(leaf.O_x)); + leaves_to_trim.emplace_back(std::move(leaf.I_x)); + leaves_to_trim.emplace_back(std::move(leaf.C_x)); idx += fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; } @@ -2002,8 +2006,10 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - const auto leaf = *(fcmp::curve_trees::CurveTreesV1::LeafTuple *)v.mv_data; - leaf_tuples_chunk.push_back(leaf); + const auto preprocessed_leaf_tuple = *(fcmp::curve_trees::PreprocessedLeafTuple *)v.mv_data; + auto leaf = m_curve_trees->leaf_tuple(preprocessed_leaf_tuple); + + leaf_tuples_chunk.emplace_back(std::move(leaf)); if (leaf_tuples_chunk.size() == m_curve_trees->m_c2_width) break; @@ -2205,7 +2211,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, chunk_width); } -std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( +std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2218,7 +2224,7 @@ std::vector BlockchainLMDB::g MDB_val v_tuple; // Get all the locked outputs at the provided block id - std::vector leaf_tuples; + std::vector leaf_tuples; MDB_cursor_op op = MDB_SET; while (1) @@ -2234,8 +2240,8 @@ std::vector BlockchainLMDB::g if (blk_id != block_id) throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + 
std::to_string(block_id)).c_str())); - const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTupleContext*)v_tuple.mv_data); - const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTupleContext); + const auto range_begin = ((const fcmp::curve_trees::LeafTupleContext*)v_tuple.mv_data); + const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::LeafTupleContext); auto it = range_begin; @@ -6772,25 +6778,27 @@ void BlockchainLMDB::migrate_5_6() // Read the output data uint64_t amount = *(const uint64_t*)k.mv_data; output_data_t output_data; - fcmp::curve_trees::CurveTreesV1::LeafTupleContext tuple_context; + uint64_t output_id; if (amount == 0) { const outkey *okp = (const outkey *)v.mv_data; output_data = okp->data; - tuple_context.output_id = okp->output_id; + output_id = okp->output_id; } else { const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; memcpy(&output_data, &okp->data, sizeof(pre_rct_output_data_t)); output_data.commitment = rct::zeroCommit(amount); - tuple_context.output_id = okp->output_id; + output_id = okp->output_id; } - // Convert the output into a leaf tuple + // Convert the output into a leaf tuple context + fcmp::curve_trees::LeafTupleContext tuple_context; try { - tuple_context.leaf_tuple = m_curve_trees->output_to_leaf_tuple( + tuple_context = m_curve_trees->output_to_leaf_context( + output_id, output_data.pubkey, rct::rct2pk(output_data.commitment)); } @@ -6890,8 +6898,8 @@ void BlockchainLMDB::migrate_5_6() } // Get the leaf tuples that unlock at the given block - auto unlocked_leaf_tuples = this->get_leaf_tuples_at_unlock_block_id(i); - this->grow_tree(std::move(unlocked_leaf_tuples)); + auto unlocked_leaves = this->get_leaf_tuples_at_unlock_block_id(i); + this->grow_tree(std::move(unlocked_leaves)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table 
this->del_locked_leaf_tuples_at_block_id(i); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 834b2bf1e9b..a9263303ab2 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -368,7 +368,7 @@ class BlockchainLMDB : public BlockchainDB static int compare_string(const MDB_val *a, const MDB_val *b); // make private - virtual void grow_tree(std::vector &&new_leaves); + virtual void grow_tree(std::vector &&new_leaves); virtual void trim_tree(const uint64_t trim_n_leaf_tuples); @@ -388,7 +388,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ); virtual void remove_block(); @@ -449,7 +449,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t child_chunk_idx, const uint64_t chunk_width) const; - std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); + std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); void del_locked_leaf_tuples_at_block_id(uint64_t block_id); diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 0a643876d88..d6a805d35a6 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,7 +116,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} - virtual void grow_tree(std::vector &&new_leaves) override {}; + virtual void grow_tree(std::vector &&new_leaves) override {}; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; @@ -147,7 +147,7 @@ 
class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 90d84d87140..837d2faa995 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -153,6 +153,7 @@ static LayerExtension hash_children_chunks(const C &curve, std::size_t chunk_start_idx = chunk_size; while (chunk_start_idx < new_child_scalars.size()) { + // TODO: this loop can be parallelized chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); const auto chunk_start = new_child_scalars.data() + chunk_start_idx; @@ -626,7 +627,8 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc // CurveTrees public member functions //---------------------------------------------------------------------------------------------------------------------- template<> -CurveTrees::LeafTuple CurveTrees::output_to_leaf_tuple( +LeafTupleContext CurveTrees::output_to_leaf_context( + const std::uint64_t output_id, const crypto::public_key &output_pubkey, const crypto::public_key &commitment) const { @@ -646,11 +648,26 @@ CurveTrees::LeafTuple CurveTrees::output_to_leaf }; // Torsion clear the output pub key and commitment - const rct::key rct_O = clear_torsion(output_pubkey, "output pub key"); - const rct::key rct_C = clear_torsion(commitment, "commitment"); + rct::key O = clear_torsion(output_pubkey, "output pub key"); + rct::key C = clear_torsion(commitment, "commitment"); - const crypto::public_key &O = rct::rct2pk(rct_O); - const crypto::public_key &C = rct::rct2pk(rct_C); + PreprocessedLeafTuple o_c{ 
+ .O = std::move(O), + .C = std::move(C) + }; + + return LeafTupleContext{ + .output_id = output_id, + .preprocessed_leaf_tuple = std::move(o_c) + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +template<> +CurveTrees::LeafTuple CurveTrees::leaf_tuple( + const PreprocessedLeafTuple &preprocessed_leaf_tuple) const +{ + const crypto::public_key &O = rct::rct2pk(preprocessed_leaf_tuple.O); + const crypto::public_key &C = rct::rct2pk(preprocessed_leaf_tuple.C); crypto::ec_point I; crypto::derive_key_image_generator(O, I); @@ -680,11 +697,11 @@ std::vector CurveTrees::flatten_leaves(const std::v }; //---------------------------------------------------------------------------------------------------------------------- template <> -void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transaction &tx, +void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, const std::vector &output_ids, const uint64_t tx_height, const bool miner_tx, - std::multimap::LeafTupleContext> &leaf_tuples_by_unlock_block_inout) const + std::multimap &leaf_tuples_by_unlock_block_inout) const { const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, tx_height); @@ -692,6 +709,7 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa for (std::size_t i = 0; i < tx.vout.size(); ++i) { + // TODO: this loop can be parallelized const auto &out = tx.vout[i]; crypto::public_key output_public_key; @@ -708,13 +726,11 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa ? 
rct::zeroCommit(out.amount) : tx.rct_signatures.outPk[i].mask; - CurveTrees::LeafTupleContext tuple_context; - tuple_context.output_id = output_ids[i]; - + LeafTupleContext leaf_tuple_context; try { - // Convert output to leaf tuple; throws if output is invalid - tuple_context.leaf_tuple = output_to_leaf_tuple( + // Convert output to leaf tuple context; throws if output is invalid + leaf_tuple_context = output_to_leaf_context(output_ids[i], output_public_key, rct::rct2pk(commitment)); } @@ -724,7 +740,7 @@ void CurveTrees::tx_outs_to_leaf_tuples(const cryptonote::transa continue; }; - leaf_tuples_by_unlock_block_inout.emplace(unlock_block, std::move(tuple_context)); + leaf_tuples_by_unlock_block_inout.emplace(unlock_block, std::move(leaf_tuple_context)); } } //---------------------------------------------------------------------------------------------------------------------- @@ -751,16 +767,25 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio const auto sort_fn = [](const LeafTupleContext &a, const LeafTupleContext &b) { return a.output_id < b.output_id; }; std::sort(new_leaf_tuples.begin(), new_leaf_tuples.end(), sort_fn); - // Copy the sorted leaves into the tree extension struct - // TODO: don't copy here + // Convert sorted pre-processed tuples into leaf tuples, place each element of each leaf tuple in a flat vector to + // be hashed, and place the pre-processed tuples in tree extension struct for insertion into the db + std::vector flattened_leaves; + flattened_leaves.reserve(new_leaf_tuples.size() * LEAF_TUPLE_SIZE); tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (const auto &leaf : new_leaf_tuples) + for (auto &l : new_leaf_tuples) { - tree_extension.leaves.tuples.emplace_back(LeafTuple{ - .O_x = leaf.leaf_tuple.O_x, - .I_x = leaf.leaf_tuple.I_x, - .C_x = leaf.leaf_tuple.C_x - }); + // TODO: this loop can be parallelized + auto leaf = leaf_tuple(l.preprocessed_leaf_tuple); + + 
flattened_leaves.emplace_back(std::move(leaf.O_x)); + flattened_leaves.emplace_back(std::move(leaf.I_x)); + flattened_leaves.emplace_back(std::move(leaf.C_x)); + + // We only need to store O and C in the db, the leaf tuple can be derived from O and C + tree_extension.leaves.tuples.emplace_back(PreprocessedLeafTuple{ + .O = std::move(l.preprocessed_leaf_tuple.O), + .C = std::move(l.preprocessed_leaf_tuple.C) + }); } if (grow_layer_instructions.need_old_last_parent) @@ -772,7 +797,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio grow_layer_instructions.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr, grow_layer_instructions.start_offset, grow_layer_instructions.next_parent_start_index, - this->flatten_leaves(tree_extension.leaves.tuples), + flattened_leaves, m_leaf_layer_chunk_width ); @@ -865,6 +890,7 @@ typename CurveTrees::TreeReduction CurveTrees::get_tree_reductio { TreeReduction tree_reduction_out; + CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "missing trim instructions"); CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0, "unexpected new total leaves"); const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 8218a9edaca..a337481e1f9 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -128,9 +128,27 @@ struct TrimLayerInstructions final uint64_t end_trim_idx; }; +// Output pub key and commitment, ready to be converted into a leaf tuple (from {O,C} -> {O.x, I.x, C.x}) +struct PreprocessedLeafTuple final +{ + // Output pubkey that has been checked valid and torsion cleared + rct::key O; + // Commitment that has been torsion cleared + rct::key C; +}; +static_assert(sizeof(PreprocessedLeafTuple) == (32+32), "db expects 64 bytes for pre-processed leaf tuples"); + +// Contextual wrapper for a pre-processed leaf tuple +struct LeafTupleContext final 
+{ + // Global output ID useful to order the leaf tuple for insertion into the tree + uint64_t output_id; + PreprocessedLeafTuple preprocessed_leaf_tuple; +}; + //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// This class is useful help update the curve trees tree without needing to keep the entire tree in memory +// This class is useful to help update the curve trees merkle tree without needing to keep the entire tree in memory // - It requires instantiation with the C1 and C2 curve classes and widths, hardening the tree structure // - It ties the C2 curve in the tree to the leaf layer template @@ -163,21 +181,13 @@ class CurveTrees static const std::size_t LEAF_TUPLE_SIZE = 3; static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); - // Contextual wrapper for leaf tuple - struct LeafTupleContext final - { - // Global output ID useful to order the leaf tuple for insertion into the tree - uint64_t output_id; - LeafTuple leaf_tuple; - }; - // Contiguous leaves in the tree, starting a specified start_idx in the leaf layer struct Leaves final { // Starting leaf tuple index in the leaf layer - uint64_t start_leaf_tuple_idx{0}; + uint64_t start_leaf_tuple_idx{0}; // Contiguous leaves in a tree that start at the start_idx - std::vector tuples; + std::vector tuples; }; // A struct useful to extend an existing tree @@ -221,13 +231,17 @@ class CurveTrees //member functions public: // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree - LeafTuple output_to_leaf_tuple(const crypto::public_key &output_pubkey, const crypto::public_key &C) const; + LeafTupleContext output_to_leaf_context(const std::uint64_t output_id, + const crypto::public_key &output_pubkey, + const crypto::public_key &C) 
const; + + LeafTuple leaf_tuple(const PreprocessedLeafTuple &preprocessed_leaf_tuple) const; // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] std::vector flatten_leaves(const std::vector &leaves) const; - // Convert cryptonote tx outs to leaf tuples, grouped by the leaf tuple unlock height - void tx_outs_to_leaf_tuples(const cryptonote::transaction &tx, + // Convert cryptonote tx outs to contexts ready to be converted to leaf tuples, grouped by unlock height + void tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, const std::vector &output_ids, const uint64_t tx_height, const bool miner_tx, diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index 30f94ddfd6f..81caa945dfc 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -65,7 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index 30e9f920fb1..376cdcc6a08 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,7 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({blk, blk_hash}); diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 04bacf0972c..b476a33daba 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -167,12 +167,14 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e "unexpected leaf start idx"); 
m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); - for (const auto &leaf : tree_extension.leaves.tuples) + for (const auto &preprocessed_leaf_tuple : tree_extension.leaves.tuples) { + auto leaf = m_curve_trees.leaf_tuple(preprocessed_leaf_tuple); + m_tree.leaves.emplace_back(CurveTreesV1::LeafTuple{ - .O_x = leaf.O_x, - .I_x = leaf.I_x, - .C_x = leaf.C_x + .O_x = std::move(leaf.O_x), + .I_x = std::move(leaf.I_x), + .C_x = std::move(leaf.C_x) }); } @@ -587,6 +589,9 @@ bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) //---------------------------------------------------------------------------------------------------------------------- void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_hashes) { + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + const auto &c1_last_hashes = last_hashes.c1_last_hashes; const auto &c2_last_hashes = last_hashes.c2_last_hashes; @@ -622,6 +627,9 @@ void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_ //---------------------------------------------------------------------------------------------------------------------- void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) { + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + const auto &c1_extensions = tree_extension.c1_layer_extensions; const auto &c2_extensions = tree_extension.c2_layer_extensions; @@ -631,7 +639,8 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension MDEBUG("Leaf start idx: " << tree_extension.leaves.start_leaf_tuple_idx); for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) { - const auto &leaf = tree_extension.leaves.tuples[i]; + const auto &preprocessed_leaf_tuple = tree_extension.leaves.tuples[i]; + const auto leaf = m_curve_trees.leaf_tuple(preprocessed_leaf_tuple); const auto O_x = 
m_curve_trees.m_c2.to_string(leaf.O_x); const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); @@ -679,6 +688,9 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension //---------------------------------------------------------------------------------------------------------------------- void CurveTreesGlobalTree::log_tree() { + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + MDEBUG("Tree has " << m_tree.leaves.size() << " leaves, " << m_tree.c1_layers.size() << " helios layers, " << m_tree.c2_layers.size() << " selene layers"); @@ -730,27 +742,26 @@ void CurveTreesGlobalTree::log_tree() //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t old_n_leaf_tuples, const std::size_t new_n_leaf_tuples) { - std::vector tuples; + std::vector tuples; tuples.reserve(new_n_leaf_tuples); for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) { + const std::uint64_t output_id = old_n_leaf_tuples + i; + // Generate random output tuple crypto::secret_key o,c; crypto::public_key O,C; crypto::generate_keys(O, o, o, false); crypto::generate_keys(C, c, c, false); - auto leaf_tuple = curve_trees.output_to_leaf_tuple(O, C); + auto tuple_context = curve_trees.output_to_leaf_context(output_id, O, C); - tuples.emplace_back(fcmp::curve_trees::CurveTreesV1::LeafTupleContext{ - .output_id = old_n_leaf_tuples + i, - .leaf_tuple = std::move(leaf_tuple), - }); + tuples.emplace_back(std::move(tuple_context)); } return tuples; diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index 6061b58e2db..c233af0ff17 100644 --- 
a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -54,7 +54,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back(blk); } diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index d405a632529..92862372db3 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,7 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } From cbf6a5d61839227868372e6cbbda1a460d584c03 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 2 Aug 2024 22:32:03 -0700 Subject: [PATCH 079/127] Optimize conversion from output to leaf tuple --- src/blockchain_db/lmdb/db_lmdb.cpp | 2 +- src/crypto/crypto-ops-data.c | 1 + src/crypto/crypto-ops.c | 69 +++++++++++----- src/crypto/crypto-ops.h | 4 + src/fcmp/curve_trees.cpp | 47 +++++------ src/fcmp/curve_trees.h | 2 +- src/fcmp/fcmp_rust/Cargo.lock | 128 +++++++++++++++++++++++++---- src/fcmp/fcmp_rust/Cargo.toml | 5 +- src/fcmp/fcmp_rust/fcmp++.h | 2 +- src/fcmp/fcmp_rust/src/lib.rs | 64 ++++----------- src/fcmp/tower_cycle.cpp | 10 +-- src/fcmp/tower_cycle.h | 4 +- src/ringct/rctOps.cpp | 25 ++++++ src/ringct/rctOps.h | 3 + src/ringct/rctTypes.h | 1 + tests/unit_tests/curve_trees.cpp | 8 +- 16 files changed, 249 insertions(+), 126 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 561d2e3fd89..af7de2ef37a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp 
@@ -6800,7 +6800,7 @@ void BlockchainLMDB::migrate_5_6() tuple_context = m_curve_trees->output_to_leaf_context( output_id, output_data.pubkey, - rct::rct2pk(output_data.commitment)); + output_data.commitment); } catch(...) { diff --git a/src/crypto/crypto-ops-data.c b/src/crypto/crypto-ops-data.c index edaa4644fd2..57304c41dac 100644 --- a/src/crypto/crypto-ops-data.c +++ b/src/crypto/crypto-ops-data.c @@ -870,6 +870,7 @@ const fe fe_fffb1 = {-31702527, -2466483, -26106795, -12203692, -12169197, -3210 const fe fe_fffb2 = {8166131, -6741800, -17040804, 3154616, 21461005, 1466302, -30876704, -6368709, 10503587, -13363080}; /* sqrt(2 * A * (A + 2)) */ const fe fe_fffb3 = {-13620103, 14639558, 4532995, 7679154, 16815101, -15883539, -22863840, -14813421, 13716513, -6477756}; /* sqrt(-sqrt(-1) * A * (A + 2)) */ const fe fe_fffb4 = {-21786234, -12173074, 21573800, 4524538, -4645904, 16204591, 8012863, -8444712, 3212926, 6885324}; /* sqrt(sqrt(-1) * A * (A + 2)) */ +const fe fe_a_inv_3 = {-22207407, 11184811, 22369621, -11184811, -22369621, 11184811, 22369621, -11184811, -22369621, 11184811}; /* A / 3*/ const ge_p3 ge_p3_identity = { {0}, {1, 0}, {1, 0}, {0} }; const ge_p3 ge_p3_H = { {7329926, -15101362, 31411471, 7614783, 27996851, -3197071, -11157635, -6878293, 466949, -7986503}, diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index 314fe448a20..edfc61c3cc6 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -1328,15 +1328,9 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *r3, const unsigned char *a, con } } -/* From ge_frombytes.c, modified */ +/* From fe_frombytes.c */ -int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { - fe u; - fe v; - fe vxx; - fe check; - - /* From fe_frombytes.c */ +int fe_y_frombytes_vartime(fe y, const unsigned char *s) { int64_t h0 = load_4(s); int64_t h1 = load_3(s + 4) << 6; @@ -1378,18 +1372,31 @@ int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { carry6 = (h6 + (int64_t) (1<<25)) >> 
26; h7 += carry6; h6 -= carry6 << 26; carry8 = (h8 + (int64_t) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; - h->Y[0] = h0; - h->Y[1] = h1; - h->Y[2] = h2; - h->Y[3] = h3; - h->Y[4] = h4; - h->Y[5] = h5; - h->Y[6] = h6; - h->Y[7] = h7; - h->Y[8] = h8; - h->Y[9] = h9; + y[0] = h0; + y[1] = h1; + y[2] = h2; + y[3] = h3; + y[4] = h4; + y[5] = h5; + y[6] = h6; + y[7] = h7; + y[8] = h8; + y[9] = h9; - /* End fe_frombytes.c */ + return 0; +} + +/* From ge_frombytes.c, modified */ + +int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { + fe u; + fe v; + fe vxx; + fe check; + + if (fe_y_frombytes_vartime(h->Y, s) != 0) { + return -1; + } fe_1(h->Z); fe_sq(u, h->Y); @@ -3877,3 +3884,27 @@ int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p) { // Y/Z = 0/0 return 0; } + +// https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 +void fe_y_to_wei_x(unsigned char *wei_x, const fe y) +{ + fe one; + fe_1(one); + + // (1+y),(1-y) + fe one_plus_y; + fe_add(one_plus_y, one, y); + fe one_minus_y; + fe_sub(one_minus_y, one, y); + + // (1/(1-y))*(1+y) + fe inv_one_minus_y; + fe_invert(inv_one_minus_y, one_minus_y); + fe inv_one_minus_y_mul_one_plus_y; + fe_mul(inv_one_minus_y_mul_one_plus_y, inv_one_minus_y, one_plus_y); + + // wei x = (1/(1-y))*(1+y) + (A/3) + fe wei_x_fe; + fe_add(wei_x_fe, inv_one_minus_y_mul_one_plus_y, fe_a_inv_3); + fe_tobytes(wei_x, wei_x_fe); +} diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index c103f1f789d..ff4ceaf6058 100644 --- a/src/crypto/crypto-ops.h +++ b/src/crypto/crypto-ops.h @@ -88,6 +88,7 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *, const unsigned char *, const extern const fe fe_sqrtm1; extern const fe fe_d; +int fe_y_frombytes_vartime(fe, const unsigned char *); int ge_frombytes_vartime(ge_p3 *, const unsigned char *); /* From ge_p1p1_to_p2.c */ @@ -143,6 +144,7 @@ extern const fe fe_fffb1; extern const fe fe_fffb2; extern const fe fe_fffb3; extern const fe fe_fffb4; +extern 
const fe fe_a_inv_3; extern const ge_p3 ge_p3_identity; extern const ge_p3 ge_p3_H; void ge_fromfe_frombytes_vartime(ge_p2 *, const unsigned char *); @@ -167,3 +169,5 @@ void fe_mul(fe out, const fe, const fe); void fe_0(fe h); int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p); + +void fe_y_to_wei_x(unsigned char *wei_x, const fe y); diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 837d2faa995..cb2b82f580f 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -630,26 +630,14 @@ template<> LeafTupleContext CurveTrees::output_to_leaf_context( const std::uint64_t output_id, const crypto::public_key &output_pubkey, - const crypto::public_key &commitment) const + const rct::key &commitment) const { - if (!crypto::check_key(output_pubkey)) - throw std::runtime_error("invalid output pub key"); + rct::key O, C; - const auto clear_torsion = [](const crypto::public_key &key, const std::string &s) - { - // TODO: don't need to decompress and recompress points, can be optimized - rct::key torsion_cleared_key = rct::scalarmultKey(rct::pk2rct(key), rct::INV_EIGHT); - torsion_cleared_key = rct::scalarmult8(torsion_cleared_key); - - if (torsion_cleared_key == rct::I) - throw std::runtime_error(s + " cannot equal identity"); - - return torsion_cleared_key; - }; - - // Torsion clear the output pub key and commitment - rct::key O = clear_torsion(output_pubkey, "output pub key"); - rct::key C = clear_torsion(commitment, "commitment"); + if (!rct::clear_torsion(rct::pk2rct(output_pubkey), O)) + throw std::runtime_error("output pub key is invalid, failed to clear torsion"); + if (!rct::clear_torsion(commitment, C)) + throw std::runtime_error("commitment is invalid, failed to clear torsion"); PreprocessedLeafTuple o_c{ .O = std::move(O), @@ -666,16 +654,25 @@ template<> CurveTrees::LeafTuple CurveTrees::leaf_tuple( const PreprocessedLeafTuple &preprocessed_leaf_tuple) const { - const crypto::public_key &O = 
rct::rct2pk(preprocessed_leaf_tuple.O); - const crypto::public_key &C = rct::rct2pk(preprocessed_leaf_tuple.C); + const rct::key &O = preprocessed_leaf_tuple.O; + const rct::key &C = preprocessed_leaf_tuple.C; crypto::ec_point I; - crypto::derive_key_image_generator(O, I); + crypto::derive_key_image_generator(rct::rct2pk(O), I); + + rct::key O_x, I_x, C_x; + + if (!rct::point_to_wei_x(O, O_x)) + throw std::runtime_error("failed to get wei x scalar from O"); + if (!rct::point_to_wei_x(rct::pt2rct(I), I_x)) + throw std::runtime_error("failed to get wei x scalar from I"); + if (!rct::point_to_wei_x(C, C_x)) + throw std::runtime_error("failed to get wei x scalar from C"); return LeafTuple{ - .O_x = tower_cycle::ed_25519_point_to_scalar(O), - .I_x = tower_cycle::ed_25519_point_to_scalar(I), - .C_x = tower_cycle::ed_25519_point_to_scalar(C) + .O_x = tower_cycle::selene_scalar_from_bytes(O_x), + .I_x = tower_cycle::selene_scalar_from_bytes(I_x), + .C_x = tower_cycle::selene_scalar_from_bytes(C_x) }; }; //---------------------------------------------------------------------------------------------------------------------- @@ -732,7 +729,7 @@ void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote // Convert output to leaf tuple context; throws if output is invalid leaf_tuple_context = output_to_leaf_context(output_ids[i], output_public_key, - rct::rct2pk(commitment)); + commitment); } catch (...) 
{ diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index a337481e1f9..8f127055272 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -233,7 +233,7 @@ class CurveTrees // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree LeafTupleContext output_to_leaf_context(const std::uint64_t output_id, const crypto::public_key &output_pubkey, - const crypto::public_key &C) const; + const rct::key &C) const; LeafTuple leaf_tuple(const PreprocessedLeafTuple &preprocessed_leaf_tuple) const; diff --git a/src/fcmp/fcmp_rust/Cargo.lock b/src/fcmp/fcmp_rust/Cargo.lock index 18edc208f94..196a2a39d17 100644 --- a/src/fcmp/fcmp_rust/Cargo.lock +++ b/src/fcmp/fcmp_rust/Cargo.lock @@ -71,7 +71,7 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "ciphersuite" version = "0.4.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "blake2", "dalek-ff-group", @@ -160,7 +160,7 @@ dependencies = [ [[package]] name = "dalek-ff-group" version = "0.4.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "crypto-bigint", "curve25519-dalek", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "ec-divisors" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "dalek-ff-group", "group", @@ -244,11 +244,10 @@ version = "0.0.0" dependencies = [ "ciphersuite", "ec-divisors", - "flexible-transcript", "full-chain-membership-proofs", "generalized-bulletproofs", 
"helioselene", - "rand_core", + "monero-fcmp-plus-plus", "std-shims", ] @@ -272,7 +271,7 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flexible-transcript" version = "0.3.2" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "blake2", "digest", @@ -285,12 +284,15 @@ dependencies = [ [[package]] name = "full-chain-membership-proofs" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ + "blake2", "ciphersuite", "ec-divisors", - "flexible-transcript", "generalized-bulletproofs", + "generalized-bulletproofs-circuit-abstraction", + "generalized-bulletproofs-ec-gadgets", + "generic-array 1.1.0", "multiexp", "rand_core", "zeroize", @@ -305,12 +307,45 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "generalized-bulletproofs" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "ciphersuite", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-circuit-abstraction" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-ec-gadgets" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + 
"ciphersuite", + "generalized-bulletproofs-circuit-abstraction", + "generic-array 1.1.0", +] + +[[package]] +name = "generalized-schnorr" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "ciphersuite", "flexible-transcript", "multiexp", "rand_core", + "std-shims", "zeroize", ] @@ -368,7 +403,7 @@ dependencies = [ [[package]] name = "helioselene" version = "0.1.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "crypto-bigint", "dalek-ff-group", @@ -429,7 +464,7 @@ dependencies = [ [[package]] name = "minimal-ed448" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "crypto-bigint", "ff", @@ -441,10 +476,69 @@ dependencies = [ "zeroize", ] +[[package]] +name = "monero-fcmp-plus-plus" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "dalek-ff-group", + "ec-divisors", + "flexible-transcript", + "full-chain-membership-proofs", + "generalized-bulletproofs", + "generalized-bulletproofs-ec-gadgets", + "generalized-schnorr", + "generic-array 1.1.0", + "monero-generators", + "monero-io", + "monero-primitives", + "multiexp", + "rand_core", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-generators" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "dalek-ff-group", + "group", + "monero-io", + "sha3", + "std-shims", + "subtle", +] + +[[package]] +name = "monero-io" +version = "0.1.0" +source = 
"git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "std-shims", +] + +[[package]] +name = "monero-primitives" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "sha3", + "std-shims", + "zeroize", +] + [[package]] name = "multiexp" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "ff", "group", @@ -607,7 +701,7 @@ dependencies = [ [[package]] name = "std-shims" version = "0.1.1" -source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" dependencies = [ "hashbrown", "spin", @@ -621,9 +715,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.70" +version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", @@ -650,9 +744,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "wasi" diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp/fcmp_rust/Cargo.toml index 
871a6cb664a..21573b82426 100644 --- a/src/fcmp/fcmp_rust/Cargo.toml +++ b/src/fcmp/fcmp_rust/Cargo.toml @@ -10,9 +10,6 @@ crate-type = ["staticlib"] [dependencies] std-shims = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } -rand_core = { version = "0.6", features = ["getrandom"] } - -transcript = { package = "flexible-transcript", git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["recommended"] } helioselene = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } ciphersuite = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519", "helioselene"] } @@ -21,6 +18,8 @@ generalized-bulletproofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plu ec-divisors = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519"] } full-chain-membership-proofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } +monero-fcmp-plus-plus = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + [patch.crates-io] crypto-bigint = { git = "https://github.com/kayabaNerve/crypto-bigint", branch = "c-repr" } diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index d222dd91072..184737cd557 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -104,7 +104,7 @@ HeliosPoint helios_point_from_bytes(const uint8_t *helios_point_bytes); SelenePoint selene_point_from_bytes(const uint8_t *selene_point_bytes); -SeleneScalar ed25519_point_to_selene_scalar(const uint8_t *ed25519_point); +SeleneScalar selene_scalar_from_bytes(const uint8_t *selene_scalar_bytes); HeliosScalar selene_point_to_helios_scalar(SelenePoint selene_point); diff --git a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp/fcmp_rust/src/lib.rs index 60197349f14..cb256f14fa7 100644 --- a/src/fcmp/fcmp_rust/src/lib.rs +++ b/src/fcmp/fcmp_rust/src/lib.rs @@ -1,56 +1,29 @@ -use std_shims::sync::OnceLock; - -use rand_core::OsRng; - use ciphersuite::{ group::{ ff::{Field, PrimeField}, - Group, GroupEncoding, + 
GroupEncoding, }, - Ciphersuite, Ed25519, Helios, Selene, + Ciphersuite, Helios, Selene, }; use helioselene::{ Field25519 as SeleneScalar, HeliosPoint, HelioseleneField as HeliosScalar, SelenePoint, }; -use transcript::RecommendedTranscript; - -use generalized_bulletproofs::Generators; use ec_divisors::DivisorCurve; use full_chain_membership_proofs::tree::{hash_grow, hash_trim}; -// TODO: Use a macro to de-duplicate some of of this code - -const HELIOS_GENERATORS_LENGTH: usize = 128; -const SELENE_GENERATORS_LENGTH: usize = 256; - -static HELIOS_GENERATORS: OnceLock> = OnceLock::new(); -static SELENE_GENERATORS: OnceLock> = OnceLock::new(); +use monero_fcmp_plus_plus::{HELIOS_HASH_INIT, SELENE_HASH_INIT, HELIOS_GENERATORS, SELENE_GENERATORS}; -static HELIOS_HASH_INIT: OnceLock = OnceLock::new(); -static SELENE_HASH_INIT: OnceLock = OnceLock::new(); - -// TODO: Don't use random generators -fn helios_generators() -> &'static Generators { - HELIOS_GENERATORS.get_or_init(|| { - generalized_bulletproofs::tests::generators::(HELIOS_GENERATORS_LENGTH) - }) -} - -fn selene_generators() -> &'static Generators { - SELENE_GENERATORS.get_or_init(|| { - generalized_bulletproofs::tests::generators::(SELENE_GENERATORS_LENGTH) - }) -} +// TODO: Use a macro to de-duplicate some of of this code #[no_mangle] pub extern "C" fn helios_hash_init_point() -> HeliosPoint { - *HELIOS_HASH_INIT.get_or_init(|| HeliosPoint::random(&mut OsRng)) + HELIOS_HASH_INIT() } #[no_mangle] pub extern "C" fn selene_hash_init_point() -> SelenePoint { - *SELENE_HASH_INIT.get_or_init(|| SelenePoint::random(&mut OsRng)) + SELENE_HASH_INIT() } fn c_u8_32(bytes: [u8; 32]) -> *const u8 { @@ -94,30 +67,27 @@ pub extern "C" fn selene_point_from_bytes(selene_point: *const u8) -> SelenePoin ::read_G(&mut selene_point).unwrap() } -// Get the x coordinate of the ed25519 point #[allow(clippy::not_unsafe_ptr_arg_deref)] #[no_mangle] -pub extern "C" fn ed25519_point_to_selene_scalar(ed25519_point: *const u8) -> 
SeleneScalar { - let mut ed25519_point = unsafe { core::slice::from_raw_parts(ed25519_point, 32) }; +pub extern "C" fn selene_scalar_from_bytes(selene_scalar: *const u8) -> SeleneScalar { + let mut selene_scalar = unsafe { core::slice::from_raw_parts(selene_scalar, 32) }; // TODO: Return an error here (instead of unwrapping) - let ed25519_point = ::read_G(&mut ed25519_point).unwrap(); - - let xy_coords = ::G::to_xy(ed25519_point); - let x: SeleneScalar = xy_coords.0; - x + ::read_F(&mut selene_scalar).unwrap() } #[no_mangle] pub extern "C" fn selene_point_to_helios_scalar(selene_point: SelenePoint) -> HeliosScalar { let xy_coords = SelenePoint::to_xy(selene_point); - let x: HeliosScalar = xy_coords.0; + // TODO: Return an error here (instead of unwrapping) + let x: HeliosScalar = xy_coords.unwrap().0; x } #[no_mangle] pub extern "C" fn helios_point_to_selene_scalar(helios_point: HeliosPoint) -> SeleneScalar { let xy_coords = HeliosPoint::to_xy(helios_point); - let x: SeleneScalar = xy_coords.0; + // TODO: Return an error here (instead of unwrapping) + let x: SeleneScalar = xy_coords.unwrap().0; x } @@ -172,7 +142,7 @@ pub extern "C" fn hash_grow_helios( new_children: HeliosScalarSlice, ) -> CResult { let hash = hash_grow( - helios_generators(), + HELIOS_GENERATORS(), existing_hash, offset, existing_child_at_offset, @@ -194,7 +164,7 @@ pub extern "C" fn hash_trim_helios( child_to_grow_back: HeliosScalar, ) -> CResult { let hash = hash_trim( - helios_generators(), + HELIOS_GENERATORS(), existing_hash, offset, children.into(), @@ -216,7 +186,7 @@ pub extern "C" fn hash_grow_selene( new_children: SeleneScalarSlice, ) -> CResult { let hash = hash_grow( - selene_generators(), + SELENE_GENERATORS(), existing_hash, offset, existing_child_at_offset, @@ -238,7 +208,7 @@ pub extern "C" fn hash_trim_selene( child_to_grow_back: SeleneScalar, ) -> CResult { let hash = hash_trim( - selene_generators(), + SELENE_GENERATORS(), existing_hash, offset, children.into(), diff --git 
a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 75c73af1c1c..691afc3f8e6 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -220,15 +220,9 @@ std::string Selene::to_string(const typename Selene::Point &point) const //---------------------------------------------------------------------------------------------------------------------- // Exposed helper functions //---------------------------------------------------------------------------------------------------------------------- -SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point) +SeleneScalar selene_scalar_from_bytes(const rct::key &scalar) { - static_assert(sizeof(SeleneScalar) == sizeof(point), "size of selene scalar != size of ed25519 point"); - - // If this function receives the ec_point, this is fine - // If this function can receive a decompressed point, it'd be notably faster - // to extract the Wei25519 x coordinate from the C side of things and then - // pass that - return fcmp_rust::ed25519_point_to_selene_scalar((uint8_t*) &point.data); + return fcmp_rust::selene_scalar_from_bytes(scalar.bytes); } //---------------------------------------------------------------------------------------------------------------------- template diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index 6b083638466..a0454e5a922 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -30,6 +30,7 @@ #include "crypto/crypto.h" #include "fcmp_rust/fcmp++.h" +#include "ringct/rctTypes.h" #include @@ -190,8 +191,7 @@ class Selene final : public Curve }; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// Ed25519 point x-coordinates are Selene scalars -SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point); +SeleneScalar selene_scalar_from_bytes(const 
rct::key &scalar); //---------------------------------------------------------------------------------------------------------------------- template void extend_zeroes(const C &curve, diff --git a/src/ringct/rctOps.cpp b/src/ringct/rctOps.cpp index 0e18cb461bb..4d9cefb75d3 100644 --- a/src/ringct/rctOps.cpp +++ b/src/ringct/rctOps.cpp @@ -725,4 +725,29 @@ namespace rct { sc_sub(masked.amount.bytes, masked.amount.bytes, sharedSec2.bytes); } } + + bool clear_torsion(const key &k, key &k_out) { + ge_p3 point; + if (ge_frombytes_vartime(&point, k.bytes) != 0) + return false; + // mul by inv 8, then mul by 8 + ge_p2 point_inv_8; + ge_scalarmult(&point_inv_8, INV_EIGHT.bytes, &point); + ge_p1p1 point_inv_8_mul_8; + ge_mul8(&point_inv_8_mul_8, &point_inv_8); + ge_p3 torsion_cleared_point; + ge_p1p1_to_p3(&torsion_cleared_point, &point_inv_8_mul_8); + ge_p3_tobytes(k_out.bytes, &torsion_cleared_point); + if (k_out == I) + return false; + return true; + } + + bool point_to_wei_x(const key &pub, key &wei_x) { + fe y; + if (fe_y_frombytes_vartime(y, pub.bytes) != 0) + return false; + fe_y_to_wei_x(wei_x.bytes, y); + return true; + } } diff --git a/src/ringct/rctOps.h b/src/ringct/rctOps.h index 0edd0308c46..84db3e08909 100644 --- a/src/ringct/rctOps.h +++ b/src/ringct/rctOps.h @@ -188,5 +188,8 @@ namespace rct { key genCommitmentMask(const key &sk); void ecdhEncode(ecdhTuple & unmasked, const key & sharedSec, bool v2); void ecdhDecode(ecdhTuple & masked, const key & sharedSec, bool v2); + + bool clear_torsion(const key &k, key &k_out); + bool point_to_wei_x(const key &pub, key &wei_x); } #endif /* RCTOPS_H */ diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index 1d237c6e2b1..c7138ed9bab 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -762,6 +762,7 @@ namespace rct { static inline const rct::key &sk2rct(const crypto::secret_key &sk) { return (const rct::key&)sk; } static inline const rct::key &ki2rct(const crypto::key_image &ki) { return (const 
rct::key&)ki; } static inline const rct::key &hash2rct(const crypto::hash &h) { return (const rct::key&)h; } + static inline const rct::key &pt2rct(const crypto::ec_point &pt) { return (const rct::key&)pt; } static inline const crypto::public_key &rct2pk(const rct::key &k) { return (const crypto::public_key&)k; } static inline const crypto::secret_key &rct2sk(const rct::key &k) { return (const crypto::secret_key&)k; } static inline const crypto::key_image &rct2ki(const rct::key &k) { return (const crypto::key_image&)k; } diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index b476a33daba..a71d0d239bd 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -31,6 +31,7 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "curve_trees.h" #include "misc_log_ex.h" +#include "ringct/rctOps.h" #include "unit_tests_utils.h" #include @@ -759,7 +760,7 @@ static const std::vector generate_random_le crypto::generate_keys(O, o, o, false); crypto::generate_keys(C, c, c, false); - auto tuple_context = curve_trees.output_to_leaf_context(output_id, O, C); + auto tuple_context = curve_trees.output_to_leaf_context(output_id, O, rct::pk2rct(C)); tuples.emplace_back(std::move(tuple_context)); } @@ -773,7 +774,10 @@ static const Selene::Scalar generate_random_selene_scalar() crypto::public_key S; crypto::generate_keys(S, s, s, false); - return fcmp::tower_cycle::ed_25519_point_to_scalar(S); + + rct::key S_x; + CHECK_AND_ASSERT_THROW_MES(rct::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); + return fcmp::tower_cycle::selene_scalar_from_bytes(S_x); } //---------------------------------------------------------------------------------------------------------------------- static bool grow_tree(CurveTreesV1 &curve_trees, From 30fc80b33ef036d79041fbd2a7b97aff4b237471 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 2 Aug 2024 22:34:06 -0700 Subject: [PATCH 080/127] Don't copy when flattening 
leaves --- src/blockchain_db/lmdb/db_lmdb.cpp | 2 +- src/fcmp/curve_trees.cpp | 11 +++++------ src/fcmp/curve_trees.h | 4 ++-- tests/unit_tests/curve_trees.cpp | 4 ++-- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index af7de2ef37a..b498bc659be 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -2038,7 +2038,7 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); // Get the expected leaf chunk hash - const auto leaves = m_curve_trees->flatten_leaves(leaf_tuples_chunk); + const auto leaves = m_curve_trees->flatten_leaves(std::move(leaf_tuples_chunk)); const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; // Hash the chunk of leaves diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index cb2b82f580f..d0309931991 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -677,17 +677,16 @@ CurveTrees::LeafTuple CurveTrees::leaf_tuple( }; //---------------------------------------------------------------------------------------------------------------------- template -std::vector CurveTrees::flatten_leaves(const std::vector &leaves) const +std::vector CurveTrees::flatten_leaves(std::vector &&leaves) const { std::vector flattened_leaves; flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - for (const auto &l : leaves) + for (auto &l : leaves) { - // TODO: implement without cloning - flattened_leaves.emplace_back(l.O_x); - flattened_leaves.emplace_back(l.I_x); - flattened_leaves.emplace_back(l.C_x); + flattened_leaves.emplace_back(std::move(l.O_x)); + flattened_leaves.emplace_back(std::move(l.I_x)); + flattened_leaves.emplace_back(std::move(l.C_x)); } return flattened_leaves; diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 8f127055272..8678942119b 100644 
--- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -237,8 +237,8 @@ class CurveTrees LeafTuple leaf_tuple(const PreprocessedLeafTuple &preprocessed_leaf_tuple) const; - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] - std::vector flatten_leaves(const std::vector &leaves) const; + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...] + std::vector flatten_leaves(std::vector &&leaves) const; // Convert cryptonote tx outs to contexts ready to be converted to leaf tuples, grouped by unlock height void tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index a71d0d239bd..45d75625595 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -495,7 +495,7 @@ bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) { MDEBUG("Auditing global tree"); - const auto &leaves = m_tree.leaves; + auto leaves = m_tree.leaves; const auto &c1_layers = m_tree.c1_layers; const auto &c2_layers = m_tree.c2_layers; @@ -582,7 +582,7 @@ bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) // Now validate leaves return validate_layer(m_curve_trees.m_c2, c2_layers[0], - m_curve_trees.flatten_leaves(leaves), + m_curve_trees.flatten_leaves(std::move(leaves)), m_curve_trees.m_leaf_layer_chunk_width); } //---------------------------------------------------------------------------------------------------------------------- From 5e76191afe911a5a0ffc62896324952b98110616 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 2 Aug 2024 23:22:39 -0700 Subject: [PATCH 081/127] cleaner crypto for converting output to leaf tuple --- src/crypto/crypto-ops.c | 10 +++++----- src/crypto/crypto-ops.h | 4 ++-- src/fcmp/curve_trees.cpp | 9 +++++++-- src/fcmp/curve_trees.h | 2 +- src/ringct/rctOps.cpp | 8 ++++---- 5 files changed, 19 
insertions(+), 14 deletions(-) diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index edfc61c3cc6..b8dbbc799ce 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -1330,7 +1330,7 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *r3, const unsigned char *a, con /* From fe_frombytes.c */ -int fe_y_frombytes_vartime(fe y, const unsigned char *s) { +int fe_frombytes_vartime(fe y, const unsigned char *s) { int64_t h0 = load_4(s); int64_t h1 = load_3(s + 4) << 6; @@ -1394,7 +1394,7 @@ int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { fe vxx; fe check; - if (fe_y_frombytes_vartime(h->Y, s) != 0) { + if (fe_frombytes_vartime(h->Y, s) != 0) { return -1; } @@ -3886,16 +3886,16 @@ int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p) { } // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 -void fe_y_to_wei_x(unsigned char *wei_x, const fe y) +void fe_ed_y_to_wei_x(unsigned char *wei_x, const fe ed_y) { fe one; fe_1(one); // (1+y),(1-y) fe one_plus_y; - fe_add(one_plus_y, one, y); + fe_add(one_plus_y, one, ed_y); fe one_minus_y; - fe_sub(one_minus_y, one, y); + fe_sub(one_minus_y, one, ed_y); // (1/(1-y))*(1+y) fe inv_one_minus_y; diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index ff4ceaf6058..7ab73887278 100644 --- a/src/crypto/crypto-ops.h +++ b/src/crypto/crypto-ops.h @@ -88,7 +88,7 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *, const unsigned char *, const extern const fe fe_sqrtm1; extern const fe fe_d; -int fe_y_frombytes_vartime(fe, const unsigned char *); +int fe_frombytes_vartime(fe, const unsigned char *); int ge_frombytes_vartime(ge_p3 *, const unsigned char *); /* From ge_p1p1_to_p2.c */ @@ -170,4 +170,4 @@ void fe_0(fe h); int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p); -void fe_y_to_wei_x(unsigned char *wei_x, const fe y); +void fe_ed_y_to_wei_x(unsigned char *wei_x, const fe ed_y); diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 
d0309931991..891243406c3 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -635,9 +635,14 @@ LeafTupleContext CurveTrees::output_to_leaf_context( rct::key O, C; if (!rct::clear_torsion(rct::pk2rct(output_pubkey), O)) - throw std::runtime_error("output pub key is invalid, failed to clear torsion"); + throw std::runtime_error("output pub key is invalid"); if (!rct::clear_torsion(commitment, C)) - throw std::runtime_error("commitment is invalid, failed to clear torsion"); + throw std::runtime_error("commitment is invalid"); + + if (O == rct::I) + throw std::runtime_error("O cannot equal identity"); + if (C == rct::I) + throw std::runtime_error("C cannot equal identity"); PreprocessedLeafTuple o_c{ .O = std::move(O), diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 8678942119b..96abb1c7a1d 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -133,7 +133,7 @@ struct PreprocessedLeafTuple final { // Output pubkey that has been checked valid and torsion cleared rct::key O; - // Commitment that has been torsion cleared + // Commitment that has been checked valid and torsion cleared rct::key C; }; static_assert(sizeof(PreprocessedLeafTuple) == (32+32), "db expects 64 bytes for pre-processed leaf tuples"); diff --git a/src/ringct/rctOps.cpp b/src/ringct/rctOps.cpp index 4d9cefb75d3..e865f4398de 100644 --- a/src/ringct/rctOps.cpp +++ b/src/ringct/rctOps.cpp @@ -738,16 +738,16 @@ namespace rct { ge_p3 torsion_cleared_point; ge_p1p1_to_p3(&torsion_cleared_point, &point_inv_8_mul_8); ge_p3_tobytes(k_out.bytes, &torsion_cleared_point); - if (k_out == I) - return false; return true; } bool point_to_wei_x(const key &pub, key &wei_x) { + if (pub == I) + return false; fe y; - if (fe_y_frombytes_vartime(y, pub.bytes) != 0) + if (fe_frombytes_vartime(y, pub.bytes) != 0) return false; - fe_y_to_wei_x(wei_x.bytes, y); + fe_ed_y_to_wei_x(wei_x.bytes, y); return true; } } From edded7e6e344fb3b463b0813cd0c9cef24af83fe Mon Sep 17 
00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 13:01:04 -0700 Subject: [PATCH 082/127] fcmp++: Restart migration from where it leaves off --- src/blockchain_db/lmdb/db_lmdb.cpp | 184 +++++++++++++++++++---------- src/fcmp/curve_trees.cpp | 1 + 2 files changed, 123 insertions(+), 62 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index b498bc659be..602a37116e8 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -217,7 +217,7 @@ namespace * spent_keys input hash - * * locked_leaves block ID [{output ID, leaf tuple}...] - * leaves leaf_idx {leaf tuple} + * leaves leaf_idx leaf tuple * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * * txpool_meta txn hash txn metadata @@ -6687,58 +6687,58 @@ void BlockchainLMDB::migrate_5_6() MGINFO_YELLOW("Migrating blockchain from DB version 5 to 6 - this may take a while:"); - // Reset all updated tables from migration since not sure of a simple and efficient way to continue if the migration - // stops before it's finished (outputs aren't inserted in order) - MDB_dbi dbi; - DELETE_DB("locked_leaves"); - DELETE_DB("leaves"); - DELETE_DB("layers"); - DELETE_DB("block_infn"); - - // TODO: if I instead iterate over every block's outputs and go in order that way, I'd know where to leave off based on - // the new block_infn table. Problem is that's less efficient (read block tx hashes, use tx hashes to read output ID's, read outputs) - // ... Could also require outputs be inserted all-or-nothing first, and then can pick up where left off for the tree - // if any of leaves, layers, or block_infn tables exist, then locked_leaves migration should be complete - - // TODO: I can keep track of the contiguous output_id inserted in a separate table used strictly for this migration - // On next run, read all outputs until we reach the highest contiguous output_id, then continue from there - + MDB_dbi m_tmp_last_output; do { - // 1. 
Set up locked outputs table + // 1. Prepare all valid outputs to be inserted into the merkle tree and + // place them in a locked leaves table. The key to this new table is the + // block id in which the outputs unlock. { - LOG_PRINT_L1("Setting up a locked outputs table (step 1/2 of full-chain membership proof migration)"); + MINFO("Setting up a locked outputs table (step 1/2 of full-chain membership proof migration)"); result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves"); mdb_set_dupsort(txn, m_locked_leaves, compare_uint64); + lmdb_db_open(txn, "tmp_last_output", MDB_INTEGERKEY | MDB_CREATE, m_tmp_last_output, "Failed to open db handle for m_tmp_last_output"); txn.commit(); if (!m_batch_transactions) set_batch_transactions(true); - batch_start(1000); + const std::size_t BATCH_SIZE = 1000; + batch_start(BATCH_SIZE); txn.m_txn = m_write_txn->m_txn; - MDB_cursor *c_output_amounts, *c_locked_leaves; - MDB_val k, v; + // Use this cache to know how to restart the migration if the process is killed + struct tmp_output_cache { uint64_t n_outputs_read; uint64_t amount; outkey ok; }; + tmp_output_cache last_output; - MDB_cursor_op op = MDB_FIRST; - - const uint64_t n_outputs = this->num_outputs(); + MDB_cursor *c_output_amounts, *c_locked_leaves, *c_tmp_last_output; + MDB_val k, v; i = 0; + const uint64_t n_outputs = this->num_outputs(); + MDB_cursor_op op = MDB_FIRST; while (1) { - if (!(i % 1000)) + if (!(i % BATCH_SIZE)) { if (i) { LOGIF(el::Level::Info) { - std::cout << i << " / " << n_outputs << " \r" << std::flush; + const uint64_t percent = std::min((i * 100) / n_outputs, (uint64_t)99); + std::cout << i << " / " << n_outputs << " outputs (" << percent << "% of step 1/2) \r" << std::flush; } + + // Update last 
output read + MDB_val_set(v_last_output, last_output); + result = mdb_cursor_put(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); + + // Commit and start a new txn txn.commit(); result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) @@ -6748,16 +6748,47 @@ void BlockchainLMDB::migrate_5_6() memset(&m_wcursors, 0, sizeof(m_wcursors)); } + // Open all cursors result = mdb_cursor_open(txn, m_output_amounts, &c_output_amounts); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str())); - result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + result = mdb_cursor_open(txn, m_tmp_last_output, &c_tmp_last_output); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for temp last output: ", result).c_str())); - // Advance the output_amounts cursor to the current - if (i) + // Get the cached last output from the db + bool found_cached_output = false; + tmp_output_cache cached_last_o; + if (i == 0) + { + MDB_val v_last_output; + result = mdb_cursor_get(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, MDB_SET); + if (result != MDB_SUCCESS && result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get max output id: ", result).c_str())); + if (result != MDB_NOTFOUND) + { + cached_last_o = *(const tmp_output_cache*)v_last_output.mv_data; + MDEBUG("Found cached output " << cached_last_o.ok.output_id); + found_cached_output = true; + + // Set k and v so we can continue the migration from that output + k = {sizeof(cached_last_o.amount), (void *)&cached_last_o.amount}; + + const std::size_t outkey_size = (cached_last_o.amount == 0) ? 
sizeof(outkey) : sizeof(pre_rct_outkey); + v = {outkey_size, (void *)&cached_last_o.ok}; + + if (n_outputs < cached_last_o.n_outputs_read) + throw0(DB_ERROR("Unexpected n_outputs_read on cached last output")); + i = cached_last_o.n_outputs_read; + op = MDB_NEXT; + } + } + + // Advance the output_amounts cursor to the last output read + if (i || found_cached_output) { result = mdb_cursor_get(c_output_amounts, &k, &v, MDB_GET_BOTH); if (result) @@ -6765,6 +6796,7 @@ void BlockchainLMDB::migrate_5_6() } } + // Get the next output from the db result = mdb_cursor_get(c_output_amounts, &k, &v, op); op = MDB_NEXT; if (result == MDB_NOTFOUND) @@ -6775,6 +6807,9 @@ void BlockchainLMDB::migrate_5_6() if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get a record from output amounts: ", result).c_str())); + ++i; + const bool commit_next_iter = i && !(i % BATCH_SIZE); + // Read the output data uint64_t amount = *(const uint64_t*)k.mv_data; output_data_t output_data; @@ -6784,6 +6819,8 @@ void BlockchainLMDB::migrate_5_6() const outkey *okp = (const outkey *)v.mv_data; output_data = okp->data; output_id = okp->output_id; + if (commit_next_iter) + memcpy(&last_output.ok, okp, sizeof(outkey)); } else { @@ -6791,6 +6828,15 @@ void BlockchainLMDB::migrate_5_6() memcpy(&output_data, &okp->data, sizeof(pre_rct_output_data_t)); output_data.commitment = rct::zeroCommit(amount); output_id = okp->output_id; + if (commit_next_iter) + memcpy(&last_output.ok, okp, sizeof(pre_rct_outkey)); + } + + if (commit_next_iter) + { + // Set last output metadata + last_output.amount = amount; + last_output.n_outputs_read = i; } // Convert the output into a leaf tuple context @@ -6821,19 +6867,19 @@ void BlockchainLMDB::migrate_5_6() if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); if (result == MDB_KEYEXIST) - MDEBUG("Duplicate output pub key encountered: " << output_data.pubkey); - - ++i; + 
MDEBUG("Duplicate output pub key encountered: " << output_data.pubkey << " , output_id: " << output_id); } } - // 2. Set up the curve trees merkle tree + // 2. Set up the curve trees merkle tree by growing the tree block by block, + // with leaves that unlock in each respective block { - LOG_PRINT_L1("Setting up a merkle tree using existing cryptonote outputs (step 2/2 of full-chain membership proof migration)"); + MINFO("Setting up a merkle tree using existing cryptonote outputs (step 2/2 of full-chain membership proof migration)"); if (!m_batch_transactions) set_batch_transactions(true); - batch_start(1000); + const std::size_t BATCH_SIZE = 50; + batch_start(BATCH_SIZE); txn.m_txn = m_write_txn->m_txn; /* the block_info table name is the same but the old version and new version @@ -6844,30 +6890,23 @@ void BlockchainLMDB::migrate_5_6() lmdb_db_open(txn, "block_infn", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); mdb_set_dupsort(txn, m_block_info, compare_uint64); - // Open new leaves and layers tables - lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); - lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); - - mdb_set_dupsort(txn, m_leaves, compare_uint64); - mdb_set_dupsort(txn, m_layers, compare_uint64); - MDB_cursor *c_locked_leaves, *c_new_block_info, *c_old_block_info; - MDB_val k_blk, v_blk; - const uint64_t n_blocks = height(); - i = 0; + const uint64_t n_blocks = height(); while (i < n_blocks) { - if (!(i % 1000)) + if (!(i % BATCH_SIZE)) { if (i) { LOGIF(el::Level::Info) { - std::cout << i << " / " << n_blocks << " blocks \r" << std::flush; + const uint64_t percent = std::min((i * 100) / n_blocks, (uint64_t)99); + std::cout << i << " / " << n_blocks << " blocks (" << percent << "% of step 2/2) \r" << 
std::flush; } + txn.commit(); result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) @@ -6877,10 +6916,10 @@ void BlockchainLMDB::migrate_5_6() memset(&m_wcursors, 0, sizeof(m_wcursors)); } + // Open all cursors result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); - result = mdb_cursor_open(txn, m_block_info, &c_new_block_info); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_infn: ", result).c_str())); @@ -6888,12 +6927,16 @@ void BlockchainLMDB::migrate_5_6() if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_info: ", result).c_str())); - // Advance the c_old_block_info cursor to the current - if (i) + // See what the last block inserted into the new table was + if (i == 0) { - result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_GET_BOTH); + MDB_stat db_stats; + result = mdb_stat(txn, m_block_info, &db_stats); if (result) - throw0(DB_ERROR(lmdb_error("Failed to advance cursor for old block infos: ", result).c_str())); + throw0(DB_ERROR(lmdb_error("Failed to query m_block_info: ", result).c_str())); + i = db_stats.ms_entries; + if (i == n_blocks) + break; } } @@ -6901,7 +6944,7 @@ void BlockchainLMDB::migrate_5_6() auto unlocked_leaves = this->get_leaf_tuples_at_unlock_block_id(i); this->grow_tree(std::move(unlocked_leaves)); - // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table + // Now that we've used the unlocked leaves to grow the tree, we delete them from the locked leaves table this->del_locked_leaf_tuples_at_block_id(i); // Get old block_info and use it to set the new one with new values @@ -6909,6 +6952,8 @@ void BlockchainLMDB::migrate_5_6() if (result) throw0(DB_ERROR(lmdb_error("Failed to get a record from block_info: ", result).c_str())); const mdb_block_info_4 *bi_old = (const mdb_block_info_4*)v_blk.mv_data; + if (i 
!= bi_old->bi_height) + throw0(DB_ERROR(std::string("Unexpected block retrieved, retrieved: " + std::to_string(bi_old->bi_height) + " , expected: " + std::to_string(i)).c_str())); mdb_block_info_5 bi; bi.bi_height = bi_old->bi_height; bi.bi_timestamp = bi_old->bi_timestamp; @@ -6922,19 +6967,28 @@ void BlockchainLMDB::migrate_5_6() bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); bi.bi_tree_root = this->get_tree_root(); + LOGIF(el::Level::Info) + { + if ((bi.bi_height % 1000) == 0) + { + const std::string tree_root = epee::string_tools::pod_to_hex(bi.bi_tree_root); + MINFO("Height: " << i << ", block: " << bi.bi_hash << ", tree root: " << tree_root << ", leaves: " << bi.bi_n_leaf_tuples); + } + } + MDB_val_set(nv, bi); result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP); if (result) throw0(DB_ERROR(lmdb_error("Failed to put a record into block_infn: ", result).c_str())); - // TODO: delete old block info records - // /* we delete the old records immediately, so the overall DB and mapsize should not grow. - // * This is a little slower than just letting mdb_drop() delete it all at the end, but - // * it saves a significant amount of disk space. - // */ - // result = mdb_cursor_del(c_old_block_info, 0); - // if (result) - // throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_info: ", result).c_str())); + /* we delete the old records immediately, so the overall DB and mapsize should not be + * larger than it needs to be. + * This is a little slower than just letting mdb_drop() delete it all at the end, but + * it saves a significant amount of disk space. 
+ */ + result = mdb_cursor_del(c_old_block_info, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_info: ", result).c_str())); ++i; } @@ -6969,6 +7023,12 @@ void BlockchainLMDB::migrate_5_6() result = mdb_put(txn, m_properties, &vk, &v, 0); if (result) throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str())); + + // We only needed the temp last output table for this migration, drop it + result = mdb_drop(txn, m_tmp_last_output, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to drop temp last output table: ", result).c_str())); + txn.commit(); } diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index 891243406c3..f15f017750c 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -719,6 +719,7 @@ void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote static_assert(CURRENT_TRANSACTION_VERSION == 2, "This section of code was written with 2 tx versions in mind. " "Revisit this section and update for the new tx version."); + CHECK_AND_ASSERT_THROW_MES(tx.version == 1 || tx.version == 2, "encountered unexpected tx version"); if (!miner_tx && tx.version == 2) CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); From 10c6c12b187f683b0f659e0f2fb4c8e4a770685c Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 13:31:21 -0700 Subject: [PATCH 083/127] fcmp++: compilation fixes + misc. 
cleanup - Removed call to hash_init_point in constructor - Replaced global static CURVE_TREES_V1 with a smart pointer - Don't need to link Rust static lib when including curve_trees.h - leaves table doesn't need dupsort flags, all leaves should be unique by key - rename fcmp -> fcmp_pp - return when 0 leaves passed into trim_tree --- src/blockchain_db/CMakeLists.txt | 2 +- src/blockchain_db/blockchain_db.cpp | 1 + src/blockchain_db/blockchain_db.h | 7 +- src/blockchain_db/lmdb/db_lmdb.cpp | 68 +++++----- src/blockchain_db/lmdb/db_lmdb.h | 14 +- src/blockchain_db/testdb.h | 2 + src/fcmp/CMakeLists.txt | 12 +- src/fcmp/curve_trees.cpp | 172 +++++++++++++----------- src/fcmp/curve_trees.h | 41 +++--- src/fcmp/fcmp_rust/CMakeLists.txt | 26 ++-- src/fcmp/fcmp_rust/fcmp++.h | 2 +- src/fcmp/proof.h | 2 +- src/fcmp/tower_cycle.cpp | 56 ++++---- src/fcmp/tower_cycle.h | 45 ++----- src/ringct/rctOps.h | 1 + src/ringct/rctTypes.h | 4 +- tests/block_weight/CMakeLists.txt | 1 - tests/unit_tests/CMakeLists.txt | 2 +- tests/unit_tests/curve_trees.cpp | 195 +++++++++++++--------------- tests/unit_tests/hardfork.cpp | 1 + tests/unit_tests/serialization.cpp | 6 +- tests/unit_tests/unit_tests_utils.h | 2 +- 22 files changed, 335 insertions(+), 327 deletions(-) diff --git a/src/blockchain_db/CMakeLists.txt b/src/blockchain_db/CMakeLists.txt index 9c55cebaa5f..5bcb16bc879 100644 --- a/src/blockchain_db/CMakeLists.txt +++ b/src/blockchain_db/CMakeLists.txt @@ -45,7 +45,7 @@ target_link_libraries(blockchain_db PUBLIC common cncrypto - fcmp + fcmp_pp ringct ${LMDB_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 5e3d48efbaf..2a3e307ee81 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -328,6 +328,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // Get all other txs' leaf tuples for (std::size_t i = 0; i < txs.size(); ++i) { + // TODO: this loop can be 
parallelized m_curve_trees->tx_outs_to_leaf_tuple_contexts( txs[i].first, output_ids[i], diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 5f600e408ba..41fbc1e23d1 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "common/command_line.h" #include "crypto/hash.h" @@ -591,14 +592,14 @@ class BlockchainDB HardFork* m_hardfork; - fcmp::curve_trees::CurveTreesV1* m_curve_trees; + std::shared_ptr m_curve_trees; public: /** * @brief An empty constructor. */ - BlockchainDB(): m_hardfork(NULL), m_open(false), m_curve_trees(NULL) { } + BlockchainDB(): m_hardfork(NULL), m_open(false), m_curve_trees() { } /** * @brief An empty destructor. @@ -1788,6 +1789,8 @@ class BlockchainDB // TODO: description virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const = 0; + virtual uint64_t get_num_leaf_tuples() const = 0; + virtual std::array get_tree_root() const = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 602a37116e8..3a40e6d5cad 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1363,10 +1363,10 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) void BlockchainLMDB::grow_tree(std::vector &&new_leaves) { + LOG_PRINT_L3("BlockchainLMDB::" << __func__); if (new_leaves.empty()) return; - LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; @@ -1377,7 +1377,7 @@ void BlockchainLMDB::grow_tree(std::vector // Get the number of leaf tuples that exist in the tree const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); - // Read every layer's last hashes + // Read every layer's last hash const auto last_hashes = this->get_tree_last_hashes(); // Use the number of leaf tuples and the existing last hashes to get a struct we can use to 
extend the tree @@ -1392,10 +1392,10 @@ void BlockchainLMDB::grow_tree(std::vector MDB_val_copy k(i + leaves.start_leaf_tuple_idx); MDB_val_set(v, leaves.tuples[i]); - // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. + // TODO: according to the docs, MDB_APPEND isn't supposed to perform any key comparisons to maximize efficiency. // Adding MDB_NOOVERWRITE I assume re-introduces a key comparison. Benchmark NOOVERWRITE here // MDB_NOOVERWRITE makes sure key doesn't already exist - int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPENDDUP | MDB_NOOVERWRITE); + int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPEND | MDB_NOOVERWRITE); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); } @@ -1446,7 +1446,7 @@ void BlockchainLMDB::grow_tree(std::vector } template -void BlockchainLMDB::grow_layer(const C &curve, +void BlockchainLMDB::grow_layer(const std::unique_ptr &curve, const std::vector> &layer_extensions, const uint64_t ext_idx, const uint64_t layer_idx) @@ -1471,7 +1471,7 @@ void BlockchainLMDB::grow_layer(const C &curve, // We updated the last hash, so update it layer_val lv; lv.child_chunk_idx = ext.start_idx; - lv.child_chunk_hash = curve.to_bytes(ext.hashes.front()); + lv.child_chunk_hash = curve->to_bytes(ext.hashes.front()); MDB_val_set(v, lv); // We expect to overwrite the existing hash @@ -1486,7 +1486,7 @@ void BlockchainLMDB::grow_layer(const C &curve, { layer_val lv; lv.child_chunk_idx = i + ext.start_idx; - lv.child_chunk_hash = curve.to_bytes(ext.hashes[i]); + lv.child_chunk_hash = curve->to_bytes(ext.hashes[i]); MDB_val_set(v, lv); // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. 
@@ -1500,16 +1500,16 @@ void BlockchainLMDB::grow_layer(const C &curve, void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) { - // TODO: block_wtxn_start like pop_block, then call BlockchainDB::trim_tree LOG_PRINT_L3("BlockchainLMDB::" << __func__); + if (trim_n_leaf_tuples == 0) + return; + check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; CURSOR(leaves) CURSOR(layers) - CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); - const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); @@ -1612,7 +1612,7 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) } template -void BlockchainLMDB::trim_layer(const C &curve, +void BlockchainLMDB::trim_layer(const std::unique_ptr &curve, const fcmp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx) { @@ -1670,7 +1670,7 @@ void BlockchainLMDB::trim_layer(const C &curve, { layer_val lv; lv.child_chunk_idx = layer_reduction.new_total_parents - 1; - lv.child_chunk_hash = curve.to_bytes(layer_reduction.new_last_hash); + lv.child_chunk_hash = curve->to_bytes(layer_reduction.new_last_hash); MDB_val_set(v, lv); // We expect to overwrite the existing hash @@ -1772,12 +1772,12 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes const bool use_c2 = (layer_idx % 2) == 0; if (use_c2) { - auto point = m_curve_trees->m_c2.from_bytes(lv->child_chunk_hash); + auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); c2_last_hashes.emplace_back(std::move(point)); } else { - auto point = m_curve_trees->m_c1.from_bytes(lv->child_chunk_hash); + auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); c1_last_hashes.emplace_back(std::move(point)); } @@ -1883,14 +1883,14 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las const auto *lv = (layer_val *)v.mv_data; if 
(parent_is_c1) { - const auto point = m_curve_trees->m_c2.from_bytes(lv->child_chunk_hash); - auto child_scalar = m_curve_trees->m_c2.point_to_cycle_scalar(point); + const auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c2->point_to_cycle_scalar(point); c1_children.emplace_back(std::move(child_scalar)); } else { - const auto point = m_curve_trees->m_c1.from_bytes(lv->child_chunk_hash); - auto child_scalar = m_curve_trees->m_c1.point_to_cycle_scalar(point); + const auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c1->point_to_cycle_scalar(point); c2_children.emplace_back(std::move(child_scalar)); } @@ -1941,12 +1941,12 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t const auto *lv = (layer_val *)v.mv_data; if ((layer_idx % 2) == 0) { - auto point = m_curve_trees->m_c2.from_bytes(lv->child_chunk_hash); + auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); last_hashes_out.c2_last_hashes.emplace_back(std::move(point)); } else { - auto point = m_curve_trees->m_c1.from_bytes(lv->child_chunk_hash); + auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); last_hashes_out.c1_last_hashes.emplace_back(std::move(point)); } @@ -2043,17 +2043,17 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const // Hash the chunk of leaves for (uint64_t i = 0; i < leaves.size(); ++i) - MDEBUG("Hashing " << m_curve_trees->m_c2.to_string(leaves[i])); + MDEBUG("Hashing " << m_curve_trees->m_c2->to_string(leaves[i])); const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(m_curve_trees->m_c2, chunk); - MDEBUG("chunk_hash " << m_curve_trees->m_c2.to_string(chunk_hash) << " , hash init point: " - << m_curve_trees->m_c2.to_string(m_curve_trees->m_c2.m_hash_init_point) << " (" << leaves.size() << " leaves)"); + MDEBUG("chunk_hash " << 
m_curve_trees->m_c2->to_string(chunk_hash) << " , hash init point: " + << m_curve_trees->m_c2->to_string(m_curve_trees->m_c2->hash_init_point()) << " (" << leaves.size() << " leaves)"); // Now compare to value from the db const auto *lv = (layer_val *)v_parent.mv_data; MDEBUG("Actual leaf chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); - const auto expected_bytes = m_curve_trees->m_c2.to_bytes(chunk_hash); + const auto expected_bytes = m_curve_trees->m_c2->to_bytes(chunk_hash); const auto actual_bytes = lv->child_chunk_hash; CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); @@ -2101,8 +2101,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const } template -bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, - const C_PARENT &c_parent, +bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, const uint64_t layer_idx, const uint64_t child_start_idx, const uint64_t child_chunk_idx, @@ -2134,7 +2134,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); const auto *lv = (layer_val *)v_child.mv_data; - auto child_point = c_child.from_bytes(lv->child_chunk_hash); + auto child_point = c_child->from_bytes(lv->child_chunk_hash); child_chunk.emplace_back(std::move(child_point)); @@ -2184,21 +2184,21 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child, std::vector child_scalars; child_scalars.reserve(child_chunk.size()); for (const auto &child : child_chunk) - child_scalars.emplace_back(c_child.point_to_cycle_scalar(child)); + child_scalars.emplace_back(c_child->point_to_cycle_scalar(child)); const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; for (uint64_t i = 0; i < child_scalars.size(); ++i) - MDEBUG("Hashing " << c_parent.to_string(child_scalars[i])); + MDEBUG("Hashing " << c_parent->to_string(child_scalars[i])); const 
auto chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk); - MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " , hash init point: " - << c_parent.to_string(c_parent.m_hash_init_point) << " (" << child_scalars.size() << " children)"); + MDEBUG("chunk_hash " << c_parent->to_string(chunk_hash) << " , hash init point: " + << c_parent->to_string(c_parent->hash_init_point()) << " (" << child_scalars.size() << " children)"); const auto *lv = (layer_val *)v_parent.mv_data; MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); const auto actual_bytes = lv->child_chunk_hash; - const auto expected_bytes = c_parent.to_bytes(chunk_hash); + const auto expected_bytes = c_parent->to_bytes(chunk_hash); if (actual_bytes != expected_bytes) throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str())); @@ -2296,7 +2296,7 @@ BlockchainLMDB::~BlockchainLMDB() BlockchainLMDB::close(); } -BlockchainLMDB::BlockchainLMDB(bool batch_transactions, fcmp::curve_trees::CurveTreesV1 *curve_trees): BlockchainDB() +BlockchainLMDB::BlockchainLMDB(bool batch_transactions, std::shared_ptr curve_trees): BlockchainDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); // initialize folder to something "safe" just in case @@ -2437,7 +2437,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves"); - lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); + lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); lmdb_db_open(txn, LMDB_LAYERS, 
MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta"); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index a9263303ab2..2098cdc4f5f 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -194,7 +194,7 @@ struct mdb_txn_safe class BlockchainLMDB : public BlockchainDB { public: - BlockchainLMDB(bool batch_transactions=true, fcmp::curve_trees::CurveTreesV1 *curve_trees=&fcmp::curve_trees::CURVE_TREES_V1); + BlockchainLMDB(bool batch_transactions=true, std::shared_ptr curve_trees = fcmp::curve_trees::curve_trees_v1()); ~BlockchainLMDB(); virtual void open(const std::string& filename, const int mdb_flags=0); @@ -419,19 +419,19 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_spent_key(const crypto::key_image& k_image); template - void grow_layer(const C &curve, + void grow_layer(const std::unique_ptr &curve, const std::vector> &layer_extensions, const uint64_t c_idx, const uint64_t layer_idx); template - void trim_layer(const C &curve, + void trim_layer(const std::unique_ptr &curve, const fcmp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx); - uint64_t get_num_leaf_tuples() const; + virtual uint64_t get_num_leaf_tuples() const; - std::array get_tree_root() const; + virtual std::array get_tree_root() const; fcmp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; @@ -442,8 +442,8 @@ class BlockchainLMDB : public BlockchainDB const std::vector &trim_instructions) const; template - bool audit_layer(const C_CHILD &c_child, - const C_PARENT &c_parent, + bool audit_layer(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, const uint64_t layer_idx, const uint64_t child_start_idx, const uint64_t child_chunk_idx, diff --git a/src/blockchain_db/testdb.h 
b/src/blockchain_db/testdb.h index d6a805d35a6..33635a04b65 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -119,6 +119,8 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void grow_tree(std::vector &&new_leaves) override {}; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; + virtual std::array get_tree_root() const override { return {}; }; + virtual uint64_t get_num_leaf_tuples() const override { return 0; }; virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp/CMakeLists.txt index 5204931db80..8f37e9b7490 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp/CMakeLists.txt @@ -26,20 +26,20 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-set(fcmp_sources +set(fcmp_pp_sources curve_trees.cpp tower_cycle.cpp) -monero_find_all_headers(fcmp_headers "${CMAKE_CURRENT_SOURCE_DIR}") +monero_find_all_headers(fcmp_pp_headers "${CMAKE_CURRENT_SOURCE_DIR}") add_subdirectory(fcmp_rust) monero_add_library_with_deps( - NAME fcmp + NAME fcmp_pp DEPENDS fcmp_rust SOURCES - ${fcmp_sources} - ${fcmp_headers}) + ${fcmp_pp_sources} + ${fcmp_pp_headers}) if(WIN32) set(EXTRA_RUST_LIBRARIES ws2_32 ntdll userenv) @@ -47,7 +47,7 @@ else() set(EXTRA_RUST_LIBRARIES ) endif() -target_link_libraries(fcmp +target_link_libraries(fcmp_pp PUBLIC crypto cryptonote_basic diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp/curve_trees.cpp index f15f017750c..b2e8b3ebc86 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp/curve_trees.cpp @@ -44,17 +44,33 @@ template class CurveTrees; // Public helper functions //---------------------------------------------------------------------------------------------------------------------- template -typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children) +typename C::Point get_new_parent(const std::unique_ptr &curve, const typename C::Chunk &new_children) { - return curve.hash_grow( - curve.m_hash_init_point, + return curve->hash_grow( + curve->hash_init_point(), 0,/*offset*/ - curve.zero_scalar(), + curve->zero_scalar(), new_children ); }; -template Helios::Point get_new_parent(const Helios &curve, const typename Helios::Chunk &new_children); -template Selene::Point get_new_parent(const Selene &curve, const typename Selene::Chunk &new_children); +template Helios::Point get_new_parent(const std::unique_ptr &curve, + const typename Helios::Chunk &new_children); +template Selene::Point get_new_parent(const std::unique_ptr &curve, + const typename Selene::Chunk &new_children); +//---------------------------------------------------------------------------------------------------------------------- +std::shared_ptr curve_trees_v1(const std::size_t helios_chunk_width, 
const std::size_t selene_chunk_width) +{ + std::unique_ptr helios(new Helios()); + std::unique_ptr selene(new Selene()); + return std::shared_ptr( + new CurveTreesV1( + std::move(helios), + std::move(selene), + helios_chunk_width, + selene_chunk_width + ) + ); +}; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // Static functions @@ -62,7 +78,7 @@ template Selene::Point get_new_parent(const Selene &curve, const typenam // After hashing a layer of children points, convert those children x-coordinates into their respective cycle // scalars, and prepare them to be hashed for the next layer template -static std::vector next_child_scalars_from_children(const C_CHILD &c_child, +static std::vector next_child_scalars_from_children(const std::unique_ptr &c_child, const typename C_CHILD::Point *last_root, const LayerExtension &children) { @@ -79,7 +95,7 @@ static std::vector next_child_scalars_from_children(c if (children.start_idx > 0) { MDEBUG("Updating root layer and including the existing root in next children"); - child_scalars_out.emplace_back(c_child.point_to_cycle_scalar(*last_root)); + child_scalars_out.emplace_back(c_child->point_to_cycle_scalar(*last_root)); } } @@ -91,7 +107,7 @@ static std::vector next_child_scalars_from_children(c //---------------------------------------------------------------------------------------------------------------------- // Hash chunks of a layer of new children, outputting the next layer's parents template -static LayerExtension hash_children_chunks(const C &curve, +static LayerExtension hash_children_chunks(const std::unique_ptr &curve, const typename C::Scalar *old_last_child, const typename C::Point *old_last_parent, const std::size_t start_offset, @@ -119,30 +135,30 @@ static LayerExtension hash_children_chunks(const C &curve, // 
Prepare to hash const auto &existing_hash = old_last_parent != nullptr ? *old_last_parent - : curve.m_hash_init_point; + : curve->hash_init_point(); const auto &prior_child_after_offset = old_last_child != nullptr ? *old_last_child - : curve.zero_scalar(); + : curve->zero_scalar(); const auto chunk_start = new_child_scalars.data(); const typename C::Chunk chunk{chunk_start, chunk_size}; - MDEBUG("existing_hash: " << curve.to_string(existing_hash) << " , start_offset: " << start_offset - << " , prior_child_after_offset: " << curve.to_string(prior_child_after_offset)); + MDEBUG("existing_hash: " << curve->to_string(existing_hash) << " , start_offset: " << start_offset + << " , prior_child_after_offset: " << curve->to_string(prior_child_after_offset)); for (std::size_t i = 0; i < chunk_size; ++i) - MDEBUG("Hashing child " << curve.to_string(chunk_start[i])); + MDEBUG("Hashing child " << curve->to_string(chunk_start[i])); // Do the hash - auto chunk_hash = curve.hash_grow( + auto chunk_hash = curve->hash_grow( existing_hash, start_offset, prior_child_after_offset, chunk ); - MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve.to_string(chunk_hash) + MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve->to_string(chunk_hash) << " , chunk_size: " << chunk_size); // We've got our hash @@ -154,17 +170,18 @@ static LayerExtension hash_children_chunks(const C &curve, while (chunk_start_idx < new_child_scalars.size()) { // TODO: this loop can be parallelized + // Fill a complete chunk, or add the remaining new children to the last chunk chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); const auto chunk_start = new_child_scalars.data() + chunk_start_idx; const typename C::Chunk chunk{chunk_start, chunk_size}; for (std::size_t i = 0; i < chunk_size; ++i) - MDEBUG("Hashing child " << curve.to_string(chunk_start[i])); + MDEBUG("Hashing child " << curve->to_string(chunk_start[i])); auto chunk_hash = get_new_parent(curve, chunk); - 
MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash) + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve->to_string(chunk_hash) << " , chunk_size: " << chunk_size); // We've got our hash @@ -173,7 +190,6 @@ static LayerExtension hash_children_chunks(const C &curve, // Advance to the next chunk chunk_start_idx += chunk_size; - // Fill a complete chunk, or add the remaining new children to the last chunk CHECK_AND_ASSERT_THROW_MES(chunk_start_idx <= new_child_scalars.size(), "unexpected chunk start idx"); } @@ -351,8 +367,8 @@ static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent // layer of the leaf layer template -static LayerExtension get_next_layer_extension(const C_CHILD &c_child, - const C_PARENT &c_parent, +static LayerExtension get_next_layer_extension(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, const GrowLayerInstructions &grow_layer_instructions, const std::vector &child_last_hashes, const std::vector &parent_last_hashes, @@ -391,7 +407,7 @@ static LayerExtension get_next_layer_extension(const C_CHILD &c_child, if (grow_layer_instructions.need_old_last_child) { CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child"); - last_child_scalar = c_child.point_to_cycle_scalar(*child_last_hash); + last_child_scalar = c_child->point_to_cycle_scalar(*child_last_hash); } // Do the hashing @@ -534,8 +550,8 @@ static TrimLayerInstructions get_trim_layer_instructions( //---------------------------------------------------------------------------------------------------------------------- template static typename fcmp::curve_trees::LayerReduction get_next_layer_reduction( - const C_CHILD &c_child, - const C_PARENT &c_parent, + const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, const TrimLayerInstructions 
&trim_layer_instructions, const std::vector &parent_last_hashes, const std::vector> &children_to_trim, @@ -554,7 +570,7 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc const typename C_PARENT::Point &existing_hash = trim_layer_instructions.need_existing_last_hash ? parent_last_hashes[parent_layer_idx] - : c_parent.m_hash_init_point; + : c_parent->hash_init_point(); std::vector child_scalars; if (trim_layer_instructions.need_last_chunk_children_to_trim @@ -564,7 +580,7 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc child_scalars = children_to_trim[parent_layer_idx]; } - typename C_PARENT::Scalar new_last_child_scalar = c_parent.zero_scalar(); + typename C_PARENT::Scalar new_last_child_scalar = c_parent->zero_scalar(); if (trim_layer_instructions.need_new_last_child) { CHECK_AND_ASSERT_THROW_MES(child_layer_idx > 0, "child index cannot be 0 here"); @@ -572,7 +588,7 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc CHECK_AND_ASSERT_THROW_MES(child_reductions.back().update_existing_last_hash, "expected new last child"); const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash; - new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child); + new_last_child_scalar = c_child->point_to_cycle_scalar(new_last_child); if (trim_layer_instructions.need_last_chunk_remaining_children) { @@ -585,40 +601,40 @@ static typename fcmp::curve_trees::LayerReduction get_next_layer_reduc CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; - auto old_last_child_scalar = c_child.point_to_cycle_scalar(old_last_child); + auto old_last_child_scalar = c_child->point_to_cycle_scalar(old_last_child); child_scalars.emplace_back(std::move(old_last_child_scalar)); } } for (std::size_t i = 0; i < child_scalars.size(); ++i) - MDEBUG("Hashing child " 
<< c_parent.to_string(child_scalars[i])); + MDEBUG("Hashing child " << c_parent->to_string(child_scalars[i])); if (trim_layer_instructions.need_last_chunk_remaining_children) { - MDEBUG("hash_grow: existing_hash: " << c_parent.to_string(existing_hash) + MDEBUG("hash_grow: existing_hash: " << c_parent->to_string(existing_hash) << " , hash_offset: " << trim_layer_instructions.hash_offset); - layer_reduction_out.new_last_hash = c_parent.hash_grow( + layer_reduction_out.new_last_hash = c_parent->hash_grow( existing_hash, trim_layer_instructions.hash_offset, - c_parent.zero_scalar(), + c_parent->zero_scalar(), typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}); } else { - MDEBUG("hash_trim: existing_hash: " << c_parent.to_string(existing_hash) + MDEBUG("hash_trim: existing_hash: " << c_parent->to_string(existing_hash) << " , hash_offset: " << trim_layer_instructions.hash_offset - << " , child_to_grow_back: " << c_parent.to_string(new_last_child_scalar)); + << " , child_to_grow_back: " << c_parent->to_string(new_last_child_scalar)); - layer_reduction_out.new_last_hash = c_parent.hash_trim( + layer_reduction_out.new_last_hash = c_parent->hash_trim( existing_hash, trim_layer_instructions.hash_offset, typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}, new_last_child_scalar); } - MDEBUG("Result hash: " << c_parent.to_string(layer_reduction_out.new_last_hash)); + MDEBUG("Result hash: " << c_parent->to_string(layer_reduction_out.new_last_hash)); return layer_reduction_out; } @@ -655,48 +671,6 @@ LeafTupleContext CurveTrees::output_to_leaf_context( }; }; //---------------------------------------------------------------------------------------------------------------------- -template<> -CurveTrees::LeafTuple CurveTrees::leaf_tuple( - const PreprocessedLeafTuple &preprocessed_leaf_tuple) const -{ - const rct::key &O = preprocessed_leaf_tuple.O; - const rct::key &C = preprocessed_leaf_tuple.C; - - crypto::ec_point I; - 
crypto::derive_key_image_generator(rct::rct2pk(O), I); - - rct::key O_x, I_x, C_x; - - if (!rct::point_to_wei_x(O, O_x)) - throw std::runtime_error("failed to get wei x scalar from O"); - if (!rct::point_to_wei_x(rct::pt2rct(I), I_x)) - throw std::runtime_error("failed to get wei x scalar from I"); - if (!rct::point_to_wei_x(C, C_x)) - throw std::runtime_error("failed to get wei x scalar from C"); - - return LeafTuple{ - .O_x = tower_cycle::selene_scalar_from_bytes(O_x), - .I_x = tower_cycle::selene_scalar_from_bytes(I_x), - .C_x = tower_cycle::selene_scalar_from_bytes(C_x) - }; -}; -//---------------------------------------------------------------------------------------------------------------------- -template -std::vector CurveTrees::flatten_leaves(std::vector &&leaves) const -{ - std::vector flattened_leaves; - flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); - - for (auto &l : leaves) - { - flattened_leaves.emplace_back(std::move(l.O_x)); - flattened_leaves.emplace_back(std::move(l.I_x)); - flattened_leaves.emplace_back(std::move(l.C_x)); - } - - return flattened_leaves; -}; -//---------------------------------------------------------------------------------------------------------------------- template <> void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, const std::vector &output_ids, @@ -746,6 +720,48 @@ void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote } } //---------------------------------------------------------------------------------------------------------------------- +template<> +CurveTrees::LeafTuple CurveTrees::leaf_tuple( + const PreprocessedLeafTuple &preprocessed_leaf_tuple) const +{ + const rct::key &O = preprocessed_leaf_tuple.O; + const rct::key &C = preprocessed_leaf_tuple.C; + + crypto::ec_point I; + crypto::derive_key_image_generator(rct::rct2pk(O), I); + + rct::key O_x, I_x, C_x; + + if (!rct::point_to_wei_x(O, O_x)) + throw std::runtime_error("failed to get wei x scalar from O"); 
+ if (!rct::point_to_wei_x(rct::pt2rct(I), I_x)) + throw std::runtime_error("failed to get wei x scalar from I"); + if (!rct::point_to_wei_x(C, C_x)) + throw std::runtime_error("failed to get wei x scalar from C"); + + return LeafTuple{ + .O_x = tower_cycle::selene_scalar_from_bytes(O_x), + .I_x = tower_cycle::selene_scalar_from_bytes(I_x), + .C_x = tower_cycle::selene_scalar_from_bytes(C_x) + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +template +std::vector CurveTrees::flatten_leaves(std::vector &&leaves) const +{ + std::vector flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (auto &l : leaves) + { + flattened_leaves.emplace_back(std::move(l.O_x)); + flattened_leaves.emplace_back(std::move(l.I_x)); + flattened_leaves.emplace_back(std::move(l.C_x)); + } + + return flattened_leaves; +}; +//---------------------------------------------------------------------------------------------------------------------- template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, diff --git a/src/fcmp/curve_trees.h b/src/fcmp/curve_trees.h index 96abb1c7a1d..e70d5127779 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp/curve_trees.h @@ -28,12 +28,13 @@ #pragma once -#include "cryptonote_basic/cryptonote_basic.h" #include "crypto/crypto.h" +#include "cryptonote_basic/cryptonote_basic.h" #include "misc_log_ex.h" #include "tower_cycle.h" #include +#include #include @@ -45,7 +46,7 @@ namespace curve_trees //---------------------------------------------------------------------------------------------------------------------- // Hash a chunk of new children template -typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_children); +typename C::Point get_new_parent(const std::unique_ptr &curve, const typename C::Chunk &new_children); 
//---------------------------------------------------------------------------------------------------------------------- // A layer of contiguous hashes starting from a specific start_idx in the tree template @@ -150,14 +151,14 @@ struct LeafTupleContext final //---------------------------------------------------------------------------------------------------------------------- // This class is useful to help update the curve trees merkle tree without needing to keep the entire tree in memory // - It requires instantiation with the C1 and C2 curve classes and widths, hardening the tree structure -// - It ties the C2 curve in the tree to the leaf layer +// - It ties the C2 curve in the tree to the leaf layer (the leaf layer is composed of C2 scalars) template class CurveTrees { public: - CurveTrees(const C1 &c1, const C2 &c2, const uint64_t c1_width, const uint64_t c2_width): - m_c1{c1}, - m_c2{c2}, + CurveTrees(std::unique_ptr &&c1, std::unique_ptr &&c2, const uint64_t c1_width, const uint64_t c2_width): + m_c1{std::move(c1)}, + m_c2{std::move(c2)}, m_c1_width{c1_width}, m_c2_width{c2_width}, m_leaf_layer_chunk_width{LEAF_TUPLE_SIZE * c2_width} @@ -230,16 +231,11 @@ class CurveTrees //member functions public: - // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree + // Convert cryptonote output pub key and commitment to a pre-processed leaf tuple ready for insertion to the tree LeafTupleContext output_to_leaf_context(const std::uint64_t output_id, const crypto::public_key &output_pubkey, const rct::key &C) const; - LeafTuple leaf_tuple(const PreprocessedLeafTuple &preprocessed_leaf_tuple) const; - - // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...] 
- std::vector flatten_leaves(std::vector &&leaves) const; - // Convert cryptonote tx outs to contexts ready to be converted to leaf tuples, grouped by unlock height void tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, const std::vector &output_ids, @@ -247,7 +243,13 @@ class CurveTrees const bool miner_tx, std::multimap &leaf_tuples_by_unlock_block_inout) const; - // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new + // Derive a leaf tuple from a pre-processed leaf tuple {O,C} -> {O.x,I.x,C.x} + LeafTuple leaf_tuple(const PreprocessedLeafTuple &preprocessed_leaf_tuple) const; + + // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...] + std::vector flatten_leaves(std::vector &&leaves) const; + + // Take in the existing number of leaf tuples and the existing last hash in each layer in the tree, as well as new // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, @@ -259,7 +261,7 @@ class CurveTrees const uint64_t trim_n_leaf_tuples) const; // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from - // each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return + // each last chunk, and the existing last hash in what will become the new last parent of each layer, and return // a tree reduction struct that can be used to trim a tree TreeReduction get_tree_reduction( const std::vector &trim_instructions, @@ -281,8 +283,8 @@ class CurveTrees //public member variables public: // The curve interfaces - const C1 &m_c1; - const C2 &m_c2; + const std::unique_ptr m_c1; + const std::unique_ptr m_c2; // The leaf layer has a distinct chunk width than the other layers const std::size_t 
m_leaf_layer_chunk_width; @@ -300,9 +302,10 @@ using CurveTreesV1 = CurveTrees; // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 const std::size_t HELIOS_CHUNK_WIDTH = 38; const std::size_t SELENE_CHUNK_WIDTH = 18; -const Helios HELIOS; -const Selene SELENE; -static CurveTreesV1 CURVE_TREES_V1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); + +std::shared_ptr curve_trees_v1( + const std::size_t helios_chunk_width = HELIOS_CHUNK_WIDTH, + const std::size_t selene_chunk_width = SELENE_CHUNK_WIDTH); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace curve_trees diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp/fcmp_rust/CMakeLists.txt index e1401d76eb2..457f44740da 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp/fcmp_rust/CMakeLists.txt @@ -93,26 +93,26 @@ else () set(TARGET_DIR "release") endif () -set(FCMP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_rust") -set(FCMP_RUST_HEADER "${FCMP_RUST_HEADER_DIR}/fcmp++.h") -set(FCMP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_rust.a") +set(FCMP_PP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_rust") +set(FCMP_PP_RUST_HEADER "${FCMP_PP_RUST_HEADER_DIR}/fcmp++.h") +set(FCMP_PP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_rust.a") # Removing OUTPUT files makes sure custom command runs every time -file(REMOVE_RECURSE "${FCMP_RUST_HEADER_DIR}") -file(MAKE_DIRECTORY "${FCMP_RUST_HEADER_DIR}") +file(REMOVE_RECURSE "${FCMP_PP_RUST_HEADER_DIR}") +file(MAKE_DIRECTORY "${FCMP_PP_RUST_HEADER_DIR}") -file(REMOVE "${FCMP_RUST_LIB}") +file(REMOVE "${FCMP_PP_RUST_LIB}") add_custom_command( - COMMENT "Building rust fcmp lib" - OUTPUT ${FCMP_RUST_HEADER} - OUTPUT ${FCMP_RUST_LIB} + COMMENT "Building fcmp++ rust lib" + OUTPUT ${FCMP_PP_RUST_HEADER} + OUTPUT 
${FCMP_PP_RUST_LIB} COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_RUST_HEADER} - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_rust.a ${FCMP_RUST_LIB} - COMMAND echo "Finished copying fcmp rust targets" + COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_PP_RUST_HEADER} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_rust.a ${FCMP_PP_RUST_LIB} + COMMAND echo "Finished copying fcmp++ rust targets" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} VERBATIM ) -add_custom_target(fcmp_rust DEPENDS ${FCMP_RUST_LIB}) +add_custom_target(fcmp_rust DEPENDS ${FCMP_PP_RUST_LIB}) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index 184737cd557..5fb89d64185 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -1,4 +1,4 @@ -namespace fcmp_rust { +namespace fcmp_pp_rust { #include #include #include diff --git a/src/fcmp/proof.h b/src/fcmp/proof.h index 89eb90d19a2..63e7cfa10e6 100644 --- a/src/fcmp/proof.h +++ b/src/fcmp/proof.h @@ -36,7 +36,7 @@ namespace fcmp // Byte buffer containing the fcmp++ proof using FcmpPpProof = std::vector; -static inline std::size_t get_fcmp_pp_len_from_n_inputs(const std::size_t n_inputs) +static inline std::size_t fcmp_pp_len(const std::size_t n_inputs) { // TODO: implement return n_inputs * 4; diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp/tower_cycle.cpp index 691afc3f8e6..bb2e207e823 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp/tower_cycle.cpp @@ -35,14 +35,24 @@ namespace tower_cycle { //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_init_point() const +{ + return fcmp_pp_rust::helios_hash_init_point(); +} 
+//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::hash_init_point() const +{ + return fcmp_pp_rust::selene_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- Helios::CycleScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const { - return fcmp_rust::helios_point_to_selene_scalar(point); + return fcmp_pp_rust::helios_point_to_selene_scalar(point); } //---------------------------------------------------------------------------------------------------------------------- Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const { - return fcmp_rust::selene_point_to_helios_scalar(point); + return fcmp_pp_rust::selene_point_to_helios_scalar(point); } //---------------------------------------------------------------------------------------------------------------------- Helios::Point Helios::hash_grow( @@ -51,7 +61,7 @@ Helios::Point Helios::hash_grow( const Helios::Scalar &existing_child_at_offset, const Helios::Chunk &new_children) const { - auto result = fcmp_rust::hash_grow_helios( + auto result = fcmp_pp_rust::hash_grow_helios( existing_hash, offset, existing_child_at_offset, @@ -75,7 +85,7 @@ Helios::Point Helios::hash_trim( const Helios::Chunk &children, const Helios::Scalar &child_to_grow_back) const { - auto result = fcmp_rust::hash_trim_helios( + auto result = fcmp_pp_rust::hash_trim_helios( existing_hash, offset, children, @@ -99,7 +109,7 @@ Selene::Point Selene::hash_grow( const Selene::Scalar &existing_child_at_offset, const Selene::Chunk &new_children) const { - auto result = fcmp_rust::hash_grow_selene( + auto result = fcmp_pp_rust::hash_grow_selene( existing_hash, offset, existing_child_at_offset, @@ -123,7 +133,7 @@ Selene::Point Selene::hash_trim( const Selene::Chunk &children, const Selene::Scalar &child_to_grow_back) const { - 
auto result = fcmp_rust::hash_trim_selene( + auto result = fcmp_pp_rust::hash_trim_selene( existing_hash, offset, children, @@ -143,17 +153,17 @@ Selene::Point Selene::hash_trim( //---------------------------------------------------------------------------------------------------------------------- Helios::Scalar Helios::zero_scalar() const { - return fcmp_rust::helios_zero_scalar(); + return fcmp_pp_rust::helios_zero_scalar(); } //---------------------------------------------------------------------------------------------------------------------- Selene::Scalar Selene::zero_scalar() const { - return fcmp_rust::selene_zero_scalar(); + return fcmp_pp_rust::selene_zero_scalar(); } //---------------------------------------------------------------------------------------------------------------------- std::array Helios::to_bytes(const Helios::Scalar &scalar) const { - auto bytes = fcmp_rust::helios_scalar_to_bytes(scalar); + auto bytes = fcmp_pp_rust::helios_scalar_to_bytes(scalar); std::array res; memcpy(&res, bytes, 32); free(bytes); @@ -162,7 +172,7 @@ std::array Helios::to_bytes(const Helios::Scalar &scalar) const //---------------------------------------------------------------------------------------------------------------------- std::array Selene::to_bytes(const Selene::Scalar &scalar) const { - auto bytes = fcmp_rust::selene_scalar_to_bytes(scalar); + auto bytes = fcmp_pp_rust::selene_scalar_to_bytes(scalar); std::array res; memcpy(&res, bytes, 32); free(bytes); @@ -171,7 +181,7 @@ std::array Selene::to_bytes(const Selene::Scalar &scalar) const //---------------------------------------------------------------------------------------------------------------------- std::array Helios::to_bytes(const Helios::Point &point) const { - auto bytes = fcmp_rust::helios_point_to_bytes(point); + auto bytes = fcmp_pp_rust::helios_point_to_bytes(point); std::array res; memcpy(&res, bytes, 32); free(bytes); @@ -180,7 +190,7 @@ std::array Helios::to_bytes(const Helios::Point 
&point) const //---------------------------------------------------------------------------------------------------------------------- std::array Selene::to_bytes(const Selene::Point &point) const { - auto bytes = fcmp_rust::selene_point_to_bytes(point); + auto bytes = fcmp_pp_rust::selene_point_to_bytes(point); std::array res; memcpy(&res, bytes, 32); free(bytes); @@ -189,12 +199,12 @@ std::array Selene::to_bytes(const Selene::Point &point) const //---------------------------------------------------------------------------------------------------------------------- Helios::Point Helios::from_bytes(const std::array &bytes) const { - return fcmp_rust::helios_point_from_bytes(bytes.data()); + return fcmp_pp_rust::helios_point_from_bytes(bytes.data()); } //---------------------------------------------------------------------------------------------------------------------- Selene::Point Selene::from_bytes(const std::array &bytes) const { - return fcmp_rust::selene_point_from_bytes(bytes.data()); + return fcmp_pp_rust::selene_point_from_bytes(bytes.data()); } //---------------------------------------------------------------------------------------------------------------------- std::string Helios::to_string(const typename Helios::Scalar &scalar) const @@ -222,31 +232,31 @@ std::string Selene::to_string(const typename Selene::Point &point) const //---------------------------------------------------------------------------------------------------------------------- SeleneScalar selene_scalar_from_bytes(const rct::key &scalar) { - return fcmp_rust::selene_scalar_from_bytes(scalar.bytes); + return fcmp_pp_rust::selene_scalar_from_bytes(scalar.bytes); } //---------------------------------------------------------------------------------------------------------------------- template -void extend_zeroes(const C &curve, +void extend_zeroes(const std::unique_ptr &curve, const std::size_t num_zeroes, std::vector &zeroes_inout) { zeroes_inout.reserve(zeroes_inout.size() + 
num_zeroes); for (std::size_t i = 0; i < num_zeroes; ++i) - zeroes_inout.emplace_back(curve.zero_scalar()); + zeroes_inout.emplace_back(curve->zero_scalar()); } // Explicit instantiations -template void extend_zeroes(const Helios &curve, +template void extend_zeroes(const std::unique_ptr &curve, const std::size_t num_zeroes, std::vector &zeroes_inout); -template void extend_zeroes(const Selene &curve, +template void extend_zeroes(const std::unique_ptr &curve, const std::size_t num_zeroes, std::vector &zeroes_inout); //---------------------------------------------------------------------------------------------------------------------- template -void extend_scalars_from_cycle_points(const C_POINTS &curve, +void extend_scalars_from_cycle_points(const std::unique_ptr &curve, const std::vector &points, std::vector &scalars_out) { @@ -254,17 +264,17 @@ void extend_scalars_from_cycle_points(const C_POINTS &curve, for (const auto &point : points) { - typename C_SCALARS::Scalar scalar = curve.point_to_cycle_scalar(point); + typename C_SCALARS::Scalar scalar = curve->point_to_cycle_scalar(point); scalars_out.push_back(std::move(scalar)); } } // Explicit instantiations -template void extend_scalars_from_cycle_points(const Helios &curve, +template void extend_scalars_from_cycle_points(const std::unique_ptr &curve, const std::vector &points, std::vector &scalars_out); -template void extend_scalars_from_cycle_points(const Selene &curve, +template void extend_scalars_from_cycle_points(const std::unique_ptr &curve, const std::vector &points, std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- diff --git a/src/fcmp/tower_cycle.h b/src/fcmp/tower_cycle.h index a0454e5a922..e7b9fb70119 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp/tower_cycle.h @@ -43,22 +43,22 @@ namespace tower_cycle // Rust types 
//---------------------------------------------------------------------------------------------------------------------- // Need to forward declare Scalar types for point_to_cycle_scalar below -using SeleneScalar = fcmp_rust::SeleneScalar; -using HeliosScalar = fcmp_rust::HeliosScalar; +using SeleneScalar = fcmp_pp_rust::SeleneScalar; +using HeliosScalar = fcmp_pp_rust::HeliosScalar; //---------------------------------------------------------------------------------------------------------------------- struct HeliosT final { using Scalar = HeliosScalar; - using Point = fcmp_rust::HeliosPoint; - using Chunk = fcmp_rust::HeliosScalarSlice; + using Point = fcmp_pp_rust::HeliosPoint; + using Chunk = fcmp_pp_rust::HeliosScalarSlice; using CycleScalar = SeleneScalar; }; //---------------------------------------------------------------------------------------------------------------------- struct SeleneT final { using Scalar = SeleneScalar; - using Point = fcmp_rust::SelenePoint; - using Chunk = fcmp_rust::SeleneScalarSlice; + using Point = fcmp_pp_rust::SelenePoint; + using Chunk = fcmp_pp_rust::SeleneScalarSlice; using CycleScalar = HeliosScalar; }; //---------------------------------------------------------------------------------------------------------------------- @@ -67,14 +67,10 @@ struct SeleneT final template class Curve { -//constructor -public: - Curve(const typename C::Point &hash_init_point): - m_hash_init_point{hash_init_point} - {}; - //member functions public: + virtual typename C::Point hash_init_point() const = 0; + // Read the x-coordinate from this curve's point to get this curve's cycle scalar virtual typename C::CycleScalar point_to_cycle_scalar(const typename C::Point &point) const = 0; @@ -99,11 +95,6 @@ class Curve virtual std::string to_string(const typename C::Scalar &scalar) const = 0; virtual std::string to_string(const typename C::Point &point) const = 0; - -//member variables -public: - // kayabaNerve: this doesn't have a reference as doing 
so delays initialization and borks it - const typename C::Point m_hash_init_point; }; //---------------------------------------------------------------------------------------------------------------------- class Helios final : public Curve @@ -115,14 +106,10 @@ class Helios final : public Curve using Chunk = HeliosT::Chunk; using CycleScalar = HeliosT::CycleScalar; -//constructor -public: - Helios() - : Curve(fcmp_rust::helios_hash_init_point()) - {}; - //member functions public: + Point hash_init_point() const override; + CycleScalar point_to_cycle_scalar(const Point &point) const override; Point hash_grow( @@ -157,14 +144,10 @@ class Selene final : public Curve using Chunk = SeleneT::Chunk; using CycleScalar = SeleneT::CycleScalar; -//constructor -public: - Selene() - : Curve(fcmp_rust::selene_hash_init_point()) - {}; - //member functions public: + Point hash_init_point() const override; + CycleScalar point_to_cycle_scalar(const Point &point) const override; Point hash_grow( @@ -194,12 +177,12 @@ class Selene final : public Curve SeleneScalar selene_scalar_from_bytes(const rct::key &scalar); //---------------------------------------------------------------------------------------------------------------------- template -void extend_zeroes(const C &curve, +void extend_zeroes(const std::unique_ptr &curve, const std::size_t num_zeroes, std::vector &zeroes_inout); //---------------------------------------------------------------------------------------------------------------------- template -void extend_scalars_from_cycle_points(const C_POINTS &curve, +void extend_scalars_from_cycle_points(const std::unique_ptr &curve, const std::vector &points, std::vector &scalars_out); //---------------------------------------------------------------------------------------------------------------------- diff --git a/src/ringct/rctOps.h b/src/ringct/rctOps.h index 84db3e08909..2a3c1f678d6 100644 --- a/src/ringct/rctOps.h +++ b/src/ringct/rctOps.h @@ -189,6 +189,7 @@ namespace 
rct { void ecdhEncode(ecdhTuple & unmasked, const key & sharedSec, bool v2); void ecdhDecode(ecdhTuple & masked, const key & sharedSec, bool v2); + // TODO: tests for these functions specifically bool clear_torsion(const key &k, key &k_out); bool point_to_wei_x(const key &pub, key &wei_x); } diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index c7138ed9bab..45d52f95019 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -326,7 +326,7 @@ namespace rct { std::vector ecdhInfo; ctkeyV outPk; xmr_amount txnFee; // contains b - crypto::hash referenceBlock; // block containing the merkle tree root used for fcmp's + crypto::hash referenceBlock; // block containing the merkle tree root used for fcmp++ rctSigBase() : type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0), referenceBlock{} @@ -503,7 +503,7 @@ namespace rct { { ar.tag("fcmp_pp"); ar.begin_object(); - const std::size_t proof_len = fcmp::get_fcmp_pp_len_from_n_inputs(inputs); + const std::size_t proof_len = fcmp::fcmp_pp_len(inputs); if (!typename Archive::is_saving()) fcmp_pp.resize(proof_len); if (fcmp_pp.size() != proof_len) diff --git a/tests/block_weight/CMakeLists.txt b/tests/block_weight/CMakeLists.txt index be6b12350d1..f622d5a3d30 100644 --- a/tests/block_weight/CMakeLists.txt +++ b/tests/block_weight/CMakeLists.txt @@ -38,7 +38,6 @@ target_link_libraries(block_weight PRIVATE cryptonote_core blockchain_db - fcmp ${EXTRA_LIBRARIES}) add_test( diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index 67ce66a1666..b48ef48524c 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -120,7 +120,7 @@ target_link_libraries(unit_tests daemon_messages daemon_rpc_server blockchain_db - fcmp + fcmp_pp lmdb_lib rpc net diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 45d75625595..c2e9e40cfc7 100644 --- a/tests/unit_tests/curve_trees.cpp +++ 
b/tests/unit_tests/curve_trees.cpp @@ -41,7 +41,7 @@ // CurveTreesGlobalTree helpers //---------------------------------------------------------------------------------------------------------------------- template -static bool validate_layer(const C &curve, +static bool validate_layer(const std::unique_ptr &curve, const CurveTreesGlobalTree::Layer &parents, const std::vector &child_scalars, const std::size_t max_chunk_size) @@ -60,14 +60,14 @@ static bool validate_layer(const C &curve, const typename C::Chunk chunk{chunk_start, chunk_size}; for (std::size_t i = 0; i < chunk_size; ++i) - MDEBUG("Hashing " << curve.to_string(chunk_start[i])); + MDEBUG("Hashing " << curve->to_string(chunk_start[i])); const typename C::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve, chunk); - MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve.to_string(chunk_hash)); + MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve->to_string(chunk_hash)); - const auto actual_bytes = curve.to_bytes(parent); - const auto expected_bytes = curve.to_bytes(chunk_hash); + const auto actual_bytes = curve->to_bytes(parent); + const auto expected_bytes = curve->to_bytes(chunk_hash); CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); chunk_start_idx += chunk_size; @@ -79,7 +79,7 @@ static bool validate_layer(const C &curve, } //---------------------------------------------------------------------------------------------------------------------- template -static std::vector get_last_chunk_children_to_trim(const C_CHILD &c_child, +static std::vector get_last_chunk_children_to_trim(const std::unique_ptr &c_child, const CurveTreesGlobalTree::Layer &child_layer, const bool need_last_chunk_children_to_trim, const bool need_last_chunk_remaining_children, @@ -96,7 +96,7 @@ static std::vector get_last_chunk_children_to_trim(co 
CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); const auto &child_point = child_layer[idx]; - auto child_scalar = c_child.point_to_cycle_scalar(child_point); + auto child_scalar = c_child->point_to_cycle_scalar(child_point); children_to_trim_out.push_back(std::move(child_scalar)); ++idx; @@ -608,7 +608,7 @@ void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_ CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_hashes.size(), "unexpected c2 layer"); const auto &last_hash = c2_last_hashes[c2_idx]; - MDEBUG("c2_idx: " << c2_idx << " , last_hash: " << m_curve_trees.m_c2.to_string(last_hash)); + MDEBUG("c2_idx: " << c2_idx << " , last_hash: " << m_curve_trees.m_c2->to_string(last_hash)); ++c2_idx; } @@ -617,7 +617,7 @@ void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_ CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_hashes.size(), "unexpected c1 layer"); const auto &last_hash = c1_last_hashes[c1_idx]; - MDEBUG("c1_idx: " << c1_idx << " , last_hash: " << m_curve_trees.m_c1.to_string(last_hash)); + MDEBUG("c1_idx: " << c1_idx << " , last_hash: " << m_curve_trees.m_c1->to_string(last_hash)); ++c1_idx; } @@ -643,9 +643,9 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension const auto &preprocessed_leaf_tuple = tree_extension.leaves.tuples[i]; const auto leaf = m_curve_trees.leaf_tuple(preprocessed_leaf_tuple); - const auto O_x = m_curve_trees.m_c2.to_string(leaf.O_x); - const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); - const auto C_x = m_curve_trees.m_c2.to_string(leaf.C_x); + const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); MDEBUG("Leaf tuple idx " << (tree_extension.leaves.start_leaf_tuple_idx) << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); @@ -665,7 +665,7 @@ void CurveTreesGlobalTree::log_tree_extension(const 
CurveTreesV1::TreeExtension for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) MDEBUG("Child chunk start idx: " << (j + c2_layer.start_idx) << " , hash: " - << m_curve_trees.m_c2.to_string(c2_layer.hashes[j])); + << m_curve_trees.m_c2->to_string(c2_layer.hashes[j])); ++c2_idx; } @@ -678,7 +678,7 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) MDEBUG("Child chunk start idx: " << (j + c1_layer.start_idx) << " , hash: " - << m_curve_trees.m_c1.to_string(c1_layer.hashes[j])); + << m_curve_trees.m_c1->to_string(c1_layer.hashes[j])); ++c1_idx; } @@ -699,9 +699,9 @@ void CurveTreesGlobalTree::log_tree() { const auto &leaf = m_tree.leaves[i]; - const auto O_x = m_curve_trees.m_c2.to_string(leaf.O_x); - const auto I_x = m_curve_trees.m_c2.to_string(leaf.I_x); - const auto C_x = m_curve_trees.m_c2.to_string(leaf.C_x); + const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); MDEBUG("Leaf idx " << i << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); } @@ -719,7 +719,7 @@ void CurveTreesGlobalTree::log_tree() MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c2_layer.size(); ++j) - MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c2.to_string(c2_layer[j])); + MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c2->to_string(c2_layer[j])); ++c2_idx; } @@ -731,7 +731,7 @@ void CurveTreesGlobalTree::log_tree() MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); for (std::size_t j = 0; j < c1_layer.size(); ++j) - MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer[j])); + MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c1->to_string(c1_layer[j])); ++c1_idx; 
} @@ -864,17 +864,17 @@ static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples, //---------------------------------------------------------------------------------------------------------------------- static bool grow_tree_db(const std::size_t init_leaves, const std::size_t ext_leaves, - CurveTreesV1 &curve_trees, + std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { - INIT_BLOCKCHAIN_LMDB_TEST_DB(&curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); { cryptonote::db_wtxn_guard guard(test_db.m_db); LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - auto init_leaf_tuples = generate_random_leaves(curve_trees, 0, init_leaves); + auto init_leaf_tuples = generate_random_leaves(*curve_trees, 0, init_leaves); test_db.m_db->grow_tree(std::move(init_leaf_tuples)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, @@ -883,7 +883,7 @@ static bool grow_tree_db(const std::size_t init_leaves, MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " << ext_leaves << " leaves"); - auto ext_leaf_tuples = generate_random_leaves(curve_trees, init_leaves, ext_leaves); + auto ext_leaf_tuples = generate_random_leaves(*curve_trees, init_leaves, ext_leaves); test_db.m_db->grow_tree(std::move(ext_leaf_tuples)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves + ext_leaves), false, @@ -897,17 +897,17 @@ static bool grow_tree_db(const std::size_t init_leaves, //---------------------------------------------------------------------------------------------------------------------- static bool trim_tree_db(const std::size_t init_leaves, const std::size_t trim_leaves, - CurveTreesV1 &curve_trees, + std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { - INIT_BLOCKCHAIN_LMDB_TEST_DB(&curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); { cryptonote::db_wtxn_guard guard(test_db.m_db); LOG_PRINT_L1("Adding " << init_leaves << " 
leaves to db, then trimming by " << trim_leaves << " leaves"); - auto init_leaf_tuples = generate_random_leaves(curve_trees, 0, init_leaves); + auto init_leaf_tuples = generate_random_leaves(*curve_trees, 0, init_leaves); test_db.m_db->grow_tree(std::move(init_leaf_tuples)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, @@ -931,9 +931,6 @@ static bool trim_tree_db(const std::size_t init_leaves, //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, grow_tree) { - Helios helios; - Selene selene; - // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree static const std::size_t helios_chunk_width = 3; static const std::size_t selene_chunk_width = 2; @@ -944,6 +941,8 @@ TEST(curve_trees, grow_tree) LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width << ", selene chunk width " << selene_chunk_width); + const auto curve_trees = fcmp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); + // Constant for how deep we want the tree static const std::size_t TEST_N_LAYERS = 4; @@ -955,12 +954,6 @@ TEST(curve_trees, grow_tree) leaves_needed_for_n_layers *= width; } - auto curve_trees = CurveTreesV1( - helios, - selene, - helios_chunk_width, - selene_chunk_width); - unit_test::BlockchainLMDBTest test_db; // Increment to test for off-by-1 @@ -973,7 +966,7 @@ TEST(curve_trees, grow_tree) // Then extend the tree with ext_leaves for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= leaves_needed_for_n_layers; ++ext_leaves) { - ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees)); + ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, *curve_trees)); ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); } } @@ -981,10 +974,6 @@ TEST(curve_trees, grow_tree) 
//---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, trim_tree) { - // TODO: consolidate code from grow_tree test - Helios helios; - Selene selene; - // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree static const std::size_t helios_chunk_width = 3; static const std::size_t selene_chunk_width = 3; @@ -995,6 +984,8 @@ TEST(curve_trees, trim_tree) LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width << ", selene chunk width " << selene_chunk_width); + const auto curve_trees = fcmp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); + // Constant for how deep we want the tree static const std::size_t TEST_N_LAYERS = 4; @@ -1006,12 +997,6 @@ TEST(curve_trees, trim_tree) leaves_needed_for_n_layers *= width; } - auto curve_trees = CurveTreesV1( - helios, - selene, - helios_chunk_width, - selene_chunk_width); - unit_test::BlockchainLMDBTest test_db; // Increment to test for off-by-1 @@ -1021,9 +1006,9 @@ TEST(curve_trees, trim_tree) for (std::size_t init_leaves = 2; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); - CurveTreesGlobalTree global_tree(curve_trees); + CurveTreesGlobalTree global_tree(*curve_trees); - ASSERT_TRUE(grow_tree(curve_trees, global_tree, init_leaves)); + ASSERT_TRUE(grow_tree(*curve_trees, global_tree, init_leaves)); // Then extend the tree with ext_leaves for (std::size_t trim_leaves = 1; trim_leaves < leaves_needed_for_n_layers; ++trim_leaves) @@ -1043,6 +1028,8 @@ TEST(curve_trees, trim_tree) // Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children TEST(curve_trees, hash_trim) { + const auto curve_trees = fcmp::curve_trees::curve_trees_v1(); + // 1. 
Trim 1 { // Start by hashing: {selene_scalar_0, selene_scalar_1} @@ -1052,29 +1039,29 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 2 scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); // Trim selene_scalar_1 const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; - const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( + const auto trim_res = curve_trees->m_c2->hash_trim( init_hash, 1, trimmed_children, - fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar()); - const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0} std::vector remaining_children{selene_scalar_0}; - const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = 
curve_trees->m_c2->to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1089,29 +1076,29 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; - const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); // Trim the initial result by 2 children const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; - const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( + const auto trim_res = curve_trees->m_c2->hash_trim( init_hash, 1, trimmed_children, - fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar()); - const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0} std::vector remaining_children{selene_scalar_0}; - const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); + 
const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1125,31 +1112,31 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 2 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); const auto selene_scalar_2 = generate_random_selene_scalar(); // Trim the 2nd child and grow with new child const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; - const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( + const auto trim_res = curve_trees->m_c2->hash_trim( init_hash, 1, trimmed_children, selene_scalar_2); - const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_2} std::vector remaining_children{selene_scalar_0, selene_scalar_2}; - const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); - const auto grow_res_bytes = 
fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1164,31 +1151,31 @@ TEST(curve_trees, hash_trim) // Get the initial hash of the 3 selene scalars std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; - const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); const auto selene_scalar_3 = generate_random_selene_scalar(); // Trim the initial result by 2 children+grow by 1 const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; - const auto trim_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_trim( + const auto trim_res = curve_trees->m_c2->hash_trim( init_hash, 1, trimmed_children, selene_scalar_3); - const auto trim_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(trim_res); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_3} std::vector remaining_children{selene_scalar_0, selene_scalar_3}; - const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{remaining_children.data(), 
remaining_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); ASSERT_EQ(trim_res_bytes, grow_res_bytes); } @@ -1196,6 +1183,8 @@ TEST(curve_trees, hash_trim) //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, hash_grow) { + const auto curve_trees = fcmp::curve_trees::curve_trees_v1(); + // Start by hashing: {selene_scalar_0, selene_scalar_1} // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2} // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} @@ -1204,30 +1193,30 @@ TEST(curve_trees, hash_grow) // Get the initial hash of the 2 selene scalars std::vector all_children{selene_scalar_0, selene_scalar_1}; - const auto init_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); // Extend with a new child const auto selene_scalar_2 = generate_random_selene_scalar(); std::vector new_children{selene_scalar_2}; - const auto ext_hash = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + const auto ext_hash = curve_trees->m_c2->hash_grow( init_hash, all_children.size(), - fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + curve_trees->m_c2->zero_scalar(), Selene::Chunk{new_children.data(), new_children.size()}); - const auto ext_hash_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(ext_hash); + const auto ext_hash_bytes = curve_trees->m_c2->to_bytes(ext_hash); // Now compare to calling 
hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2} all_children.push_back(selene_scalar_2); - const auto grow_res = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); - const auto grow_res_bytes = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); ASSERT_EQ(ext_hash_bytes, grow_res_bytes); @@ -1235,21 +1224,21 @@ TEST(curve_trees, hash_grow) const auto selene_scalar_3 = generate_random_selene_scalar(); new_children.clear(); new_children = {selene_scalar_3}; - const auto ext_hash2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( + const auto ext_hash2 = curve_trees->m_c2->hash_grow( ext_hash, all_children.size(), - fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + curve_trees->m_c2->zero_scalar(), Selene::Chunk{new_children.data(), new_children.size()}); - const auto ext_hash_bytes2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(ext_hash2); + const auto ext_hash_bytes2 = curve_trees->m_c2->to_bytes(ext_hash2); // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} all_children.push_back(selene_scalar_3); - const auto grow_res2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.hash_grow( - /*existing_hash*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.m_hash_init_point, + const auto grow_res2 = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), /*offset*/ 0, - /*existing_child_at_offset*/ fcmp::curve_trees::CURVE_TREES_V1.m_c2.zero_scalar(), + /*existing_child_at_offset*/ 
curve_trees->m_c2->zero_scalar(), /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); - const auto grow_res_bytes2 = fcmp::curve_trees::CURVE_TREES_V1.m_c2.to_bytes(grow_res2); + const auto grow_res_bytes2 = curve_trees->m_c2->to_bytes(grow_res2); ASSERT_EQ(ext_hash_bytes2, grow_res_bytes2); } diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index c233af0ff17..fc58c7cfbba 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -35,6 +35,7 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "cryptonote_basic/hardfork.h" #include "blockchain_db/testdb.h" +#include "fcmp/curve_trees.h" using namespace cryptonote; diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index 1e37ee9fb40..92825e42bd6 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1371,7 +1371,7 @@ TEST(Serialization, tx_fcmp_pp) // 1 fcmp++ proof fcmp::FcmpPpProof fcmp_pp; - const std::size_t proof_len = fcmp::get_fcmp_pp_len_from_n_inputs(n_inputs); + const std::size_t proof_len = fcmp::fcmp_pp_len(n_inputs); fcmp_pp.reserve(proof_len); for (std::size_t i = 0; i < proof_len; ++i) fcmp_pp.push_back(i); @@ -1400,7 +1400,7 @@ TEST(Serialization, tx_fcmp_pp) string blob; // Extend fcmp++ proof - ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::get_fcmp_pp_len_from_n_inputs(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::fcmp_pp_len(n_inputs)); tx.rct_signatures.p.fcmp_pp.push_back(0x01); ASSERT_FALSE(serialization::dump_binary(tx, blob)); @@ -1411,7 +1411,7 @@ TEST(Serialization, tx_fcmp_pp) transaction tx = make_dummy_fcmp_pp_tx(); // Shorten the fcmp++ proof - ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::get_fcmp_pp_len_from_n_inputs(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::fcmp_pp_len(n_inputs)); ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() > 1); 
tx.rct_signatures.p.fcmp_pp.pop_back(); diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index b6f7d7f460d..1f153fa6bda 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -84,7 +84,7 @@ namespace unit_test remove_files(); } - void init_new_db(fcmp::curve_trees::CurveTreesV1 *curve_trees) + void init_new_db(std::shared_ptr curve_trees) { CHECK_AND_ASSERT_THROW_MES(this->m_db == nullptr, "expected nullptr m_db"); this->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); From 3a5cf70066876f2392f59a59137af482fb745631 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 13:38:45 -0700 Subject: [PATCH 084/127] 32-bit platform compatibility in Rust FFI (untested) --- src/fcmp/fcmp_rust/fcmp++.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp/fcmp_rust/fcmp++.h index 5fb89d64185..34d019fc9bd 100644 --- a/src/fcmp/fcmp_rust/fcmp++.h +++ b/src/fcmp/fcmp_rust/fcmp++.h @@ -8,8 +8,8 @@ namespace fcmp_pp_rust { // ----- deps C bindings ----- /// Inner integer type that the [`Limb`] newtype wraps. -// TODO: This is only valid for 64-bit platforms -using Word = uint64_t; +// TODO: test 32-bit platforms +using Word = uintptr_t; /// Big integers are represented as an array of smaller CPU word-size integers /// called "limbs". @@ -49,14 +49,16 @@ struct Residue { /// A constant-time implementation of the Ed25519 field. struct SeleneScalar { - Residue<4> _0; + Residue<32 / sizeof(uintptr_t)> _0; }; +static_assert(sizeof(SeleneScalar) == 32, "unexpected size of selene scalar"); /// The field novel to Helios/Selene. 
struct HeliosScalar { - Residue<4> _0; + Residue<32 / sizeof(uintptr_t)> _0; }; +static_assert(sizeof(HeliosScalar) == 32, "unexpected size of helios scalar"); struct HeliosPoint { SeleneScalar x; From d72f405ceeb3e73bdf78408873be7d260d99191a Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 13:43:54 -0700 Subject: [PATCH 085/127] Revert DELETE_DB macro to original spot --- src/blockchain_db/lmdb/db_lmdb.cpp | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 3a40e6d5cad..e5808a29c77 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -89,23 +89,6 @@ inline void throw1(const T &e) #define MDB_val_str(var, val) MDB_val var = {strlen(val) + 1, (void *)val} -#define DELETE_DB(x) do { \ - result = mdb_txn_begin(m_env, NULL, 0, txn); \ - if (result) \ - throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); \ - result = mdb_dbi_open(txn, x, 0, &dbi); \ - if (!result) { \ - result = mdb_drop(txn, dbi, 1); \ - if (result) \ - throw0(DB_ERROR(lmdb_error("Failed to delete " x ": ", result).c_str())); \ - txn.commit(); \ - } \ - else \ - { \ - txn.abort(); \ - }; \ - } while(0) - template struct MDB_val_copy: public MDB_val { @@ -5942,6 +5925,19 @@ void BlockchainLMDB::migrate_0_1() } txn.abort(); +#define DELETE_DB(x) do { \ + LOG_PRINT_L1(" " x ":"); \ + result = mdb_txn_begin(m_env, NULL, 0, txn); \ + if (result) \ + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); \ + result = mdb_dbi_open(txn, x, 0, &dbi); \ + if (!result) { \ + result = mdb_drop(txn, dbi, 1); \ + if (result) \ + throw0(DB_ERROR(lmdb_error("Failed to delete " x ": ", result).c_str())); \ + txn.commit(); \ + } } while(0) + DELETE_DB("tx_heights"); DELETE_DB("output_txs"); DELETE_DB("output_indices"); From d4847f649efe0f411d8f54bc124dac9adf94ba39 Mon 
Sep 17 00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 14:10:34 -0700 Subject: [PATCH 086/127] Rename everything from fcmp* to fcmp_pp --- src/CMakeLists.txt | 2 +- src/blockchain_db/blockchain_db.cpp | 2 +- src/blockchain_db/blockchain_db.h | 8 +-- src/blockchain_db/lmdb/db_lmdb.cpp | 64 +++++++++---------- src/blockchain_db/lmdb/db_lmdb.h | 24 +++---- src/blockchain_db/testdb.h | 4 +- src/blockchain_utilities/CMakeLists.txt | 1 + src/{fcmp => fcmp_pp}/CMakeLists.txt | 6 +- src/{fcmp => fcmp_pp}/curve_trees.cpp | 6 +- src/{fcmp => fcmp_pp}/curve_trees.h | 4 +- .../fcmp_pp_rust}/.gitignore | 0 .../fcmp_pp_rust}/CMakeLists.txt | 8 +-- .../fcmp_pp_rust}/Cargo.lock | 2 +- .../fcmp_pp_rust}/Cargo.toml | 4 +- .../fcmp_pp_rust}/fcmp++.h | 0 .../fcmp_pp_rust}/src/lib.rs | 0 src/{fcmp => fcmp_pp}/proof.h | 6 +- src/{fcmp => fcmp_pp}/tower_cycle.cpp | 4 +- src/{fcmp => fcmp_pp}/tower_cycle.h | 6 +- src/ringct/rctTypes.h | 6 +- tests/block_weight/block_weight.cpp | 4 +- tests/core_tests/chaingen.cpp | 2 +- tests/unit_tests/curve_trees.cpp | 32 +++++----- tests/unit_tests/curve_trees.h | 14 ++-- tests/unit_tests/hardfork.cpp | 4 +- tests/unit_tests/long_term_block_weight.cpp | 2 +- tests/unit_tests/serialization.cpp | 8 +-- tests/unit_tests/unit_tests_utils.h | 4 +- 28 files changed, 114 insertions(+), 113 deletions(-) rename src/{fcmp => fcmp_pp}/CMakeLists.txt (94%) rename src/{fcmp => fcmp_pp}/curve_trees.cpp (99%) rename src/{fcmp => fcmp_pp}/curve_trees.h (99%) rename src/{fcmp/fcmp_rust => fcmp_pp/fcmp_pp_rust}/.gitignore (100%) rename src/{fcmp/fcmp_rust => fcmp_pp/fcmp_pp_rust}/CMakeLists.txt (95%) rename src/{fcmp/fcmp_rust => fcmp_pp/fcmp_pp_rust}/Cargo.lock (99%) rename src/{fcmp/fcmp_rust => fcmp_pp/fcmp_pp_rust}/Cargo.toml (95%) rename src/{fcmp/fcmp_rust => fcmp_pp/fcmp_pp_rust}/fcmp++.h (100%) rename src/{fcmp/fcmp_rust => fcmp_pp/fcmp_pp_rust}/src/lib.rs (100%) rename src/{fcmp => fcmp_pp}/proof.h (94%) rename src/{fcmp => fcmp_pp}/tower_cycle.cpp (99%) 
rename src/{fcmp => fcmp_pp}/tower_cycle.h (99%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 357fac0cb04..ddada45bf0b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -89,7 +89,7 @@ add_subdirectory(ringct) add_subdirectory(checkpoints) add_subdirectory(cryptonote_basic) add_subdirectory(cryptonote_core) -add_subdirectory(fcmp) +add_subdirectory(fcmp_pp) add_subdirectory(lmdb) add_subdirectory(multisig) add_subdirectory(net) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 2a3e307ee81..4779f680d31 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -314,7 +314,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck // When adding a block, we also need to add all the leaf tuples included in // the block to a table keeping track of locked leaf tuples. Once those leaf // tuples unlock, we use them to grow the tree. - std::multimap leaf_tuples_by_unlock_block; + std::multimap leaf_tuples_by_unlock_block; // Get miner tx's leaf tuples CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 41fbc1e23d1..af68071417c 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -42,7 +42,7 @@ #include "cryptonote_basic/difficulty.h" #include "cryptonote_basic/hardfork.h" #include "cryptonote_protocol/enums.h" -#include "fcmp/curve_trees.h" +#include "fcmp_pp/curve_trees.h" /** \file * Cryptonote Blockchain Database Interface @@ -418,7 +418,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) = 0; /** @@ -592,7 +592,7 @@ class BlockchainDB HardFork* m_hardfork; - std::shared_ptr m_curve_trees; + std::shared_ptr m_curve_trees; public: @@ -1783,7 +1783,7 @@ 
class BlockchainDB virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; // TODO: description and make private - virtual void grow_tree(std::vector &&new_leaves) = 0; + virtual void grow_tree(std::vector &&new_leaves) = 0; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index e5808a29c77..0642c7d783a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -798,7 +798,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_block) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_block) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1344,7 +1344,7 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } -void BlockchainLMDB::grow_tree(std::vector &&new_leaves) +void BlockchainLMDB::grow_tree(std::vector &&new_leaves) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); if (new_leaves.empty()) @@ -1403,7 +1403,7 @@ void BlockchainLMDB::grow_tree(std::vector throw0(DB_ERROR(("Growing odd c2 layer, expected even layer idx for c1: " + std::to_string(layer_idx)).c_str())); - this->grow_layer(m_curve_trees->m_c2, + this->grow_layer(m_curve_trees->m_c2, c2_extensions, c2_idx, layer_idx); @@ -1416,7 +1416,7 @@ void BlockchainLMDB::grow_tree(std::vector throw0(DB_ERROR(("Growing even c1 layer, expected odd layer idx for c2: " + std::to_string(layer_idx)).c_str())); - this->grow_layer(m_curve_trees->m_c1, + this->grow_layer(m_curve_trees->m_c1, c1_extensions, c1_idx, layer_idx); @@ -1430,7 +1430,7 @@ void 
BlockchainLMDB::grow_tree(std::vector template void BlockchainLMDB::grow_layer(const std::unique_ptr &curve, - const std::vector> &layer_extensions, + const std::vector> &layer_extensions, const uint64_t ext_idx, const uint64_t layer_idx) { @@ -1596,7 +1596,7 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) template void BlockchainLMDB::trim_layer(const std::unique_ptr &curve, - const fcmp::curve_trees::LayerReduction &layer_reduction, + const fcmp_pp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1718,7 +1718,7 @@ std::array BlockchainLMDB::get_tree_root() const return root; } -fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes() const +fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes() const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1726,7 +1726,7 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes TXN_PREFIX_RDONLY(); RCURSOR(layers) - fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes; + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes; auto &c1_last_hashes = last_hashes.c1_last_hashes; auto &c2_last_hashes = last_hashes.c2_last_hashes; @@ -1772,8 +1772,8 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes return last_hashes; } -fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_last_chunk_children_to_trim( - const std::vector &trim_instructions) const +fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_last_chunk_children_to_trim( + const std::vector &trim_instructions) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1783,7 +1783,7 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las TXN_PREFIX_RDONLY(); RCURSOR(layers) - fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim 
last_chunk_children_to_trim; + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim last_chunk_children_to_trim; auto &c1_last_children_out = last_chunk_children_to_trim.c1_children; auto &c2_last_children_out = last_chunk_children_to_trim.c2_children; @@ -1793,17 +1793,17 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); const auto &trim_leaf_layer_instructions = trim_instructions[0]; - std::vector leaves_to_trim; + std::vector leaves_to_trim; if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx) { leaves_to_trim.reserve(trim_leaf_layer_instructions.end_trim_idx - trim_leaf_layer_instructions.start_trim_idx); uint64_t idx = trim_leaf_layer_instructions.start_trim_idx; - CHECK_AND_ASSERT_THROW_MES(idx % fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, + CHECK_AND_ASSERT_THROW_MES(idx % fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisble by leaf tuple size"); - const uint64_t leaf_tuple_idx = idx / fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + const uint64_t leaf_tuple_idx = idx / fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; MDB_val_copy k(leaf_tuple_idx); MDB_cursor_op leaf_op = MDB_SET; @@ -1817,7 +1817,7 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); - const auto preprocessed_leaf_tuple = *(fcmp::curve_trees::PreprocessedLeafTuple *)v.mv_data; + const auto preprocessed_leaf_tuple = *(fcmp_pp::curve_trees::PreprocessedLeafTuple *)v.mv_data; // TODO: parallelize calls to this function auto leaf = m_curve_trees->leaf_tuple(preprocessed_leaf_tuple); @@ -1826,7 +1826,7 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las leaves_to_trim.emplace_back(std::move(leaf.I_x)); 
leaves_to_trim.emplace_back(std::move(leaf.C_x)); - idx += fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + idx += fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; } while (idx < trim_leaf_layer_instructions.end_trim_idx); } @@ -1841,8 +1841,8 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las { const auto &trim_layer_instructions = trim_instructions[i]; - std::vector c1_children; - std::vector c2_children; + std::vector c1_children; + std::vector c2_children; if (trim_layer_instructions.end_trim_idx > trim_layer_instructions.start_trim_idx) { @@ -1895,8 +1895,8 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las return last_chunk_children_to_trim; } -fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_trim( - const std::vector &trim_instructions) const +fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_trim( + const std::vector &trim_instructions) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -1904,7 +1904,7 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t TXN_PREFIX_RDONLY(); RCURSOR(layers) - fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; // Traverse the tree layer-by-layer starting at the layer closest to leaf layer uint64_t layer_idx = 0; @@ -1975,7 +1975,7 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const while (1) { // Get next leaf chunk - std::vector leaf_tuples_chunk; + std::vector leaf_tuples_chunk; leaf_tuples_chunk.reserve(m_curve_trees->m_c2_width); // Iterate until chunk is full or we get to the end of all leaves @@ -1989,7 +1989,7 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - const auto preprocessed_leaf_tuple = 
*(fcmp::curve_trees::PreprocessedLeafTuple *)v.mv_data; + const auto preprocessed_leaf_tuple = *(fcmp_pp::curve_trees::PreprocessedLeafTuple *)v.mv_data; auto leaf = m_curve_trees->leaf_tuple(preprocessed_leaf_tuple); leaf_tuples_chunk.emplace_back(std::move(leaf)); @@ -2022,13 +2022,13 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const // Get the expected leaf chunk hash const auto leaves = m_curve_trees->flatten_leaves(std::move(leaf_tuples_chunk)); - const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; + const fcmp_pp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; // Hash the chunk of leaves for (uint64_t i = 0; i < leaves.size(); ++i) MDEBUG("Hashing " << m_curve_trees->m_c2->to_string(leaves[i])); - const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(m_curve_trees->m_c2, chunk); + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(m_curve_trees->m_c2, chunk); MDEBUG("chunk_hash " << m_curve_trees->m_c2->to_string(chunk_hash) << " , hash init point: " << m_curve_trees->m_c2->to_string(m_curve_trees->m_c2->hash_init_point()) << " (" << leaves.size() << " leaves)"); @@ -2173,7 +2173,7 @@ bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, for (uint64_t i = 0; i < child_scalars.size(); ++i) MDEBUG("Hashing " << c_parent->to_string(child_scalars[i])); - const auto chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk); + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk); MDEBUG("chunk_hash " << c_parent->to_string(chunk_hash) << " , hash init point: " << c_parent->to_string(c_parent->hash_init_point()) << " (" << child_scalars.size() << " children)"); @@ -2194,7 +2194,7 @@ bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, chunk_width); } -std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( +std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( uint64_t block_id) { 
LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2207,7 +2207,7 @@ std::vector BlockchainLMDB::get_leaf_tuples MDB_val v_tuple; // Get all the locked outputs at the provided block id - std::vector leaf_tuples; + std::vector leaf_tuples; MDB_cursor_op op = MDB_SET; while (1) @@ -2223,8 +2223,8 @@ std::vector BlockchainLMDB::get_leaf_tuples if (blk_id != block_id) throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); - const auto range_begin = ((const fcmp::curve_trees::LeafTupleContext*)v_tuple.mv_data); - const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::LeafTupleContext); + const auto range_begin = ((const fcmp_pp::curve_trees::LeafTupleContext*)v_tuple.mv_data); + const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp_pp::curve_trees::LeafTupleContext); auto it = range_begin; @@ -2279,7 +2279,7 @@ BlockchainLMDB::~BlockchainLMDB() BlockchainLMDB::close(); } -BlockchainLMDB::BlockchainLMDB(bool batch_transactions, std::shared_ptr curve_trees): BlockchainDB() +BlockchainLMDB::BlockchainLMDB(bool batch_transactions, std::shared_ptr curve_trees): BlockchainDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); // initialize folder to something "safe" just in case @@ -6836,7 +6836,7 @@ void BlockchainLMDB::migrate_5_6() } // Convert the output into a leaf tuple context - fcmp::curve_trees::LeafTupleContext tuple_context; + fcmp_pp::curve_trees::LeafTupleContext tuple_context; try { tuple_context = m_curve_trees->output_to_leaf_context( diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 2098cdc4f5f..30167f1a016 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -30,7 +30,7 @@ #include "blockchain_db/blockchain_db.h" #include "cryptonote_basic/blobdatatype.h" // for type blobdata -#include "fcmp/curve_trees.h" +#include "fcmp_pp/curve_trees.h" #include "ringct/rctTypes.h" #include @@ -194,7 +194,7 
@@ struct mdb_txn_safe class BlockchainLMDB : public BlockchainDB { public: - BlockchainLMDB(bool batch_transactions=true, std::shared_ptr curve_trees = fcmp::curve_trees::curve_trees_v1()); + BlockchainLMDB(bool batch_transactions=true, std::shared_ptr curve_trees = fcmp_pp::curve_trees::curve_trees_v1()); ~BlockchainLMDB(); virtual void open(const std::string& filename, const int mdb_flags=0); @@ -368,7 +368,7 @@ class BlockchainLMDB : public BlockchainDB static int compare_string(const MDB_val *a, const MDB_val *b); // make private - virtual void grow_tree(std::vector &&new_leaves); + virtual void grow_tree(std::vector &&new_leaves); virtual void trim_tree(const uint64_t trim_n_leaf_tuples); @@ -388,7 +388,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ); virtual void remove_block(); @@ -420,26 +420,26 @@ class BlockchainLMDB : public BlockchainDB template void grow_layer(const std::unique_ptr &curve, - const std::vector> &layer_extensions, + const std::vector> &layer_extensions, const uint64_t c_idx, const uint64_t layer_idx); template void trim_layer(const std::unique_ptr &curve, - const fcmp::curve_trees::LayerReduction &layer_reduction, + const fcmp_pp::curve_trees::LayerReduction &layer_reduction, const uint64_t layer_idx); virtual uint64_t get_num_leaf_tuples() const; virtual std::array get_tree_root() const; - fcmp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; + fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; - fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( - const std::vector &trim_instructions) const; + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( + const std::vector &trim_instructions) const; - 
fcmp::curve_trees::CurveTreesV1::LastHashes get_last_hashes_to_trim( - const std::vector &trim_instructions) const; + fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_last_hashes_to_trim( + const std::vector &trim_instructions) const; template bool audit_layer(const std::unique_ptr &c_child, @@ -449,7 +449,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t child_chunk_idx, const uint64_t chunk_width) const; - std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); + std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); void del_locked_leaf_tuples_at_block_id(uint64_t block_id); diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 33635a04b65..81317b6a2d6 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,7 +116,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} - virtual void grow_tree(std::vector &&new_leaves) override {}; + virtual void grow_tree(std::vector &&new_leaves) override {}; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; virtual std::array get_tree_root() const override { return {}; }; @@ -149,7 +149,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git 
a/src/blockchain_utilities/CMakeLists.txt b/src/blockchain_utilities/CMakeLists.txt index a41cd1e53f2..d6257ef991e 100644 --- a/src/blockchain_utilities/CMakeLists.txt +++ b/src/blockchain_utilities/CMakeLists.txt @@ -141,6 +141,7 @@ target_link_libraries(blockchain_import PRIVATE cryptonote_core blockchain_db + fcmp_pp version epee ${Boost_FILESYSTEM_LIBRARY} diff --git a/src/fcmp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt similarity index 94% rename from src/fcmp/CMakeLists.txt rename to src/fcmp_pp/CMakeLists.txt index 8f37e9b7490..33d428f827c 100644 --- a/src/fcmp/CMakeLists.txt +++ b/src/fcmp_pp/CMakeLists.txt @@ -32,11 +32,11 @@ set(fcmp_pp_sources monero_find_all_headers(fcmp_pp_headers "${CMAKE_CURRENT_SOURCE_DIR}") -add_subdirectory(fcmp_rust) +add_subdirectory(fcmp_pp_rust) monero_add_library_with_deps( NAME fcmp_pp - DEPENDS fcmp_rust + DEPENDS fcmp_pp_rust SOURCES ${fcmp_pp_sources} ${fcmp_pp_headers}) @@ -54,6 +54,6 @@ target_link_libraries(fcmp_pp epee ringct PRIVATE - ${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust/libfcmp_rust.a + ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a ${EXTRA_LIBRARIES} ${EXTRA_RUST_LIBRARIES}) diff --git a/src/fcmp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp similarity index 99% rename from src/fcmp/curve_trees.cpp rename to src/fcmp_pp/curve_trees.cpp index b2e8b3ebc86..50f354a9e4c 100644 --- a/src/fcmp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -31,7 +31,7 @@ #include "ringct/rctOps.h" -namespace fcmp +namespace fcmp_pp { namespace curve_trees { @@ -549,7 +549,7 @@ static TrimLayerInstructions get_trim_layer_instructions( } //---------------------------------------------------------------------------------------------------------------------- template -static typename fcmp::curve_trees::LayerReduction get_next_layer_reduction( +static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_reduction( const std::unique_ptr &c_child, const std::unique_ptr &c_parent, const TrimLayerInstructions 
&trim_layer_instructions, @@ -1028,4 +1028,4 @@ GrowLayerInstructions CurveTrees::set_next_layer_extension( //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace curve_trees -} //namespace fcmp +} //namespace fcmp_pp diff --git a/src/fcmp/curve_trees.h b/src/fcmp_pp/curve_trees.h similarity index 99% rename from src/fcmp/curve_trees.h rename to src/fcmp_pp/curve_trees.h index e70d5127779..3ca9e332ae2 100644 --- a/src/fcmp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -38,7 +38,7 @@ #include -namespace fcmp +namespace fcmp_pp { namespace curve_trees { @@ -309,4 +309,4 @@ std::shared_ptr curve_trees_v1( //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace curve_trees -} //namespace fcmp +} //namespace fcmp_pp diff --git a/src/fcmp/fcmp_rust/.gitignore b/src/fcmp_pp/fcmp_pp_rust/.gitignore similarity index 100% rename from src/fcmp/fcmp_rust/.gitignore rename to src/fcmp_pp/fcmp_pp_rust/.gitignore diff --git a/src/fcmp/fcmp_rust/CMakeLists.txt b/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt similarity index 95% rename from src/fcmp/fcmp_rust/CMakeLists.txt rename to src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt index 457f44740da..646fc6981d3 100644 --- a/src/fcmp/fcmp_rust/CMakeLists.txt +++ b/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt @@ -93,9 +93,9 @@ else () set(TARGET_DIR "release") endif () -set(FCMP_PP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_rust") +set(FCMP_PP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_pp_rust") set(FCMP_PP_RUST_HEADER "${FCMP_PP_RUST_HEADER_DIR}/fcmp++.h") -set(FCMP_PP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_rust.a") 
+set(FCMP_PP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_pp_rust.a") # Removing OUTPUT files makes sure custom command runs every time file(REMOVE_RECURSE "${FCMP_PP_RUST_HEADER_DIR}") @@ -109,10 +109,10 @@ add_custom_command( OUTPUT ${FCMP_PP_RUST_LIB} COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD} COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_PP_RUST_HEADER} - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_rust.a ${FCMP_PP_RUST_LIB} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_pp_rust.a ${FCMP_PP_RUST_LIB} COMMAND echo "Finished copying fcmp++ rust targets" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} VERBATIM ) -add_custom_target(fcmp_rust DEPENDS ${FCMP_PP_RUST_LIB}) +add_custom_target(fcmp_pp_rust DEPENDS ${FCMP_PP_RUST_LIB}) diff --git a/src/fcmp/fcmp_rust/Cargo.lock b/src/fcmp_pp/fcmp_pp_rust/Cargo.lock similarity index 99% rename from src/fcmp/fcmp_rust/Cargo.lock rename to src/fcmp_pp/fcmp_pp_rust/Cargo.lock index 196a2a39d17..742b6570e8e 100644 --- a/src/fcmp/fcmp_rust/Cargo.lock +++ b/src/fcmp_pp/fcmp_pp_rust/Cargo.lock @@ -239,7 +239,7 @@ dependencies = [ ] [[package]] -name = "fcmp_rust" +name = "fcmp_pp_rust" version = "0.0.0" dependencies = [ "ciphersuite", diff --git a/src/fcmp/fcmp_rust/Cargo.toml b/src/fcmp_pp/fcmp_pp_rust/Cargo.toml similarity index 95% rename from src/fcmp/fcmp_rust/Cargo.toml rename to src/fcmp_pp/fcmp_pp_rust/Cargo.toml index 21573b82426..65a487a0c72 100644 --- a/src/fcmp/fcmp_rust/Cargo.toml +++ b/src/fcmp_pp/fcmp_pp_rust/Cargo.toml @@ -1,10 +1,10 @@ [package] -name = "fcmp_rust" +name = "fcmp_pp_rust" version = "0.0.0" edition = "2021" [lib] -name = "fcmp_rust" +name = "fcmp_pp_rust" crate-type = ["staticlib"] [dependencies] diff --git a/src/fcmp/fcmp_rust/fcmp++.h b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h similarity index 100% rename from src/fcmp/fcmp_rust/fcmp++.h rename to src/fcmp_pp/fcmp_pp_rust/fcmp++.h diff --git 
a/src/fcmp/fcmp_rust/src/lib.rs b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs similarity index 100% rename from src/fcmp/fcmp_rust/src/lib.rs rename to src/fcmp_pp/fcmp_pp_rust/src/lib.rs diff --git a/src/fcmp/proof.h b/src/fcmp_pp/proof.h similarity index 94% rename from src/fcmp/proof.h rename to src/fcmp_pp/proof.h index 63e7cfa10e6..f01cdb267b5 100644 --- a/src/fcmp/proof.h +++ b/src/fcmp_pp/proof.h @@ -30,16 +30,16 @@ #include -namespace fcmp +namespace fcmp_pp { // Byte buffer containing the fcmp++ proof using FcmpPpProof = std::vector; -static inline std::size_t fcmp_pp_len(const std::size_t n_inputs) +static inline std::size_t proof_len(const std::size_t n_inputs) { // TODO: implement return n_inputs * 4; }; -}//namespace fcmp +}//namespace fcmp_pp diff --git a/src/fcmp/tower_cycle.cpp b/src/fcmp_pp/tower_cycle.cpp similarity index 99% rename from src/fcmp/tower_cycle.cpp rename to src/fcmp_pp/tower_cycle.cpp index bb2e207e823..9cb35af1f4b 100644 --- a/src/fcmp/tower_cycle.cpp +++ b/src/fcmp_pp/tower_cycle.cpp @@ -29,7 +29,7 @@ #include "string_tools.h" #include "tower_cycle.h" -namespace fcmp +namespace fcmp_pp { namespace tower_cycle { @@ -280,4 +280,4 @@ template void extend_scalars_from_cycle_points(const std::unique //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace tower_cycle -} //namespace fcmp +} //namespace fcmp_pp diff --git a/src/fcmp/tower_cycle.h b/src/fcmp_pp/tower_cycle.h similarity index 99% rename from src/fcmp/tower_cycle.h rename to src/fcmp_pp/tower_cycle.h index e7b9fb70119..8ab69f902b9 100644 --- a/src/fcmp/tower_cycle.h +++ b/src/fcmp_pp/tower_cycle.h @@ -29,12 +29,12 @@ #pragma once #include "crypto/crypto.h" -#include "fcmp_rust/fcmp++.h" +#include "fcmp_pp_rust/fcmp++.h" #include "ringct/rctTypes.h" #include -namespace fcmp +namespace 
fcmp_pp { namespace tower_cycle { @@ -188,4 +188,4 @@ void extend_scalars_from_cycle_points(const std::unique_ptr &curve, //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- }//namespace tower_cycle -}//namespace fcmp +}//namespace fcmp_pp diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index 45d52f95019..d0090251243 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -45,7 +45,7 @@ extern "C" { } #include "crypto/generic-ops.h" #include "crypto/crypto.h" -#include "fcmp/proof.h" +#include "fcmp_pp/proof.h" #include "hex.h" #include "span.h" #include "memwipe.h" @@ -426,7 +426,7 @@ namespace rct { std::vector MGs; // simple rct has N, full has 1 std::vector CLSAGs; keyV pseudoOuts; //C - for simple rct - fcmp::FcmpPpProof fcmp_pp; + fcmp_pp::FcmpPpProof fcmp_pp; // when changing this function, update cryptonote::get_pruned_transaction_weight template class Archive> @@ -503,7 +503,7 @@ namespace rct { { ar.tag("fcmp_pp"); ar.begin_object(); - const std::size_t proof_len = fcmp::fcmp_pp_len(inputs); + const std::size_t proof_len = fcmp_pp::proof_len(inputs); if (!typename Archive::is_saving()) fcmp_pp.resize(proof_len); if (fcmp_pp.size() != proof_len) diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index 81caa945dfc..dfcc4580c6c 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -32,7 +32,7 @@ #include #include "cryptonote_core/cryptonote_core.h" #include "blockchain_db/testdb.h" -#include "fcmp/curve_trees.h" +#include "fcmp_pp/curve_trees.h" #define LONG_TERM_BLOCK_WEIGHT_WINDOW 5000 @@ -65,7 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& 
leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index 376cdcc6a08..dfab14252bb 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,7 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({blk, blk_hash}); diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index c2e9e40cfc7..650c75bcb26 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -62,7 +62,7 @@ static bool validate_layer(const std::unique_ptr &curve, for (std::size_t i = 0; i < chunk_size; ++i) MDEBUG("Hashing " << curve->to_string(chunk_start[i])); - const typename C::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve, chunk); + const typename C::Point chunk_hash = fcmp_pp::curve_trees::get_new_parent(curve, chunk); MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve->to_string(chunk_hash)); @@ -193,7 +193,7 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e if (use_c2) { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); - const fcmp::curve_trees::LayerExtension &c2_ext = c2_extensions[c2_idx]; + const fcmp_pp::curve_trees::LayerExtension &c2_ext = c2_extensions[c2_idx]; CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); @@ -226,7 +226,7 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e else { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); - const fcmp::curve_trees::LayerExtension &c1_ext = 
c1_extensions[c1_idx]; + const fcmp_pp::curve_trees::LayerExtension &c1_ext = c1_extensions[c1_idx]; CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); @@ -338,7 +338,7 @@ void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_r //---------------------------------------------------------------------------------------------------------------------- // TODO: template CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_children_to_trim( - const std::vector &trim_instructions) + const std::vector &trim_instructions) { CurveTreesV1::LastChunkChildrenToTrim all_children_to_trim; @@ -424,7 +424,7 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c } //---------------------------------------------------------------------------------------------------------------------- CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim( - const std::vector &trim_instructions) const + const std::vector &trim_instructions) const { CurveTreesV1::LastHashes last_hashes; CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); @@ -532,7 +532,7 @@ bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); std::vector child_scalars; - fcmp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c1, + fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c1, children, child_scalars); @@ -559,7 +559,7 @@ bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); std::vector child_scalars; - fcmp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c2, + fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c2, children, child_scalars); @@ -660,7 +660,7 @@ void 
CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension { CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer"); - const fcmp::curve_trees::LayerExtension &c2_layer = c2_extensions[c2_idx]; + const fcmp_pp::curve_trees::LayerExtension &c2_layer = c2_extensions[c2_idx]; MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) @@ -673,7 +673,7 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension { CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer"); - const fcmp::curve_trees::LayerExtension &c1_layer = c1_extensions[c1_idx]; + const fcmp_pp::curve_trees::LayerExtension &c1_layer = c1_extensions[c1_idx]; MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) @@ -743,11 +743,11 @@ void CurveTreesGlobalTree::log_tree() //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t old_n_leaf_tuples, const std::size_t new_n_leaf_tuples) { - std::vector tuples; + std::vector tuples; tuples.reserve(new_n_leaf_tuples); for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) @@ -777,7 +777,7 @@ static const Selene::Scalar generate_random_selene_scalar() rct::key S_x; CHECK_AND_ASSERT_THROW_MES(rct::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); - return fcmp::tower_cycle::selene_scalar_from_bytes(S_x); + return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x); } 
//---------------------------------------------------------------------------------------------------------------------- static bool grow_tree(CurveTreesV1 &curve_trees, @@ -941,7 +941,7 @@ TEST(curve_trees, grow_tree) LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width << ", selene chunk width " << selene_chunk_width); - const auto curve_trees = fcmp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); // Constant for how deep we want the tree static const std::size_t TEST_N_LAYERS = 4; @@ -984,7 +984,7 @@ TEST(curve_trees, trim_tree) LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width << ", selene chunk width " << selene_chunk_width); - const auto curve_trees = fcmp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); // Constant for how deep we want the tree static const std::size_t TEST_N_LAYERS = 4; @@ -1028,7 +1028,7 @@ TEST(curve_trees, trim_tree) // Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children TEST(curve_trees, hash_trim) { - const auto curve_trees = fcmp::curve_trees::curve_trees_v1(); + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); // 1. 
Trim 1 { @@ -1183,7 +1183,7 @@ TEST(curve_trees, hash_trim) //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, hash_grow) { - const auto curve_trees = fcmp::curve_trees::curve_trees_v1(); + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); // Start by hashing: {selene_scalar_0, selene_scalar_1} // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2} diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index b0a0f173c0a..2cdf07ae3eb 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -28,12 +28,12 @@ #pragma once -#include "fcmp/curve_trees.h" -#include "fcmp/tower_cycle.h" +#include "fcmp_pp/curve_trees.h" +#include "fcmp_pp/tower_cycle.h" -using Helios = fcmp::curve_trees::Helios; -using Selene = fcmp::curve_trees::Selene; -using CurveTreesV1 = fcmp::curve_trees::CurveTreesV1; +using Helios = fcmp_pp::curve_trees::Helios; +using Selene = fcmp_pp::curve_trees::Selene; +using CurveTreesV1 = fcmp_pp::curve_trees::CurveTreesV1; // Helper class to read/write a global tree in memory. 
It's only used in testing because normally the tree isn't kept // in memory (it's stored in the db) @@ -85,10 +85,10 @@ class CurveTreesGlobalTree // - This function is useful to collect all tree data necessary to perform the actual trim operation // - This function can return elems from each last chunk that will need to be trimmed CurveTreesV1::LastHashes get_last_hashes_to_trim( - const std::vector &trim_instructions) const; + const std::vector &trim_instructions) const; CurveTreesV1::LastChunkChildrenToTrim get_all_last_chunk_children_to_trim( - const std::vector &trim_instructions); + const std::vector &trim_instructions); private: CurveTreesV1 &m_curve_trees; diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index fc58c7cfbba..dcc0d4d93c2 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -35,7 +35,7 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "cryptonote_basic/hardfork.h" #include "blockchain_db/testdb.h" -#include "fcmp/curve_trees.h" +#include "fcmp_pp/curve_trees.h" using namespace cryptonote; @@ -55,7 +55,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back(blk); } diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index 92862372db3..a77e431ea86 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,7 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const std::multimap& leaf_tuples_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git 
a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index 92825e42bd6..66fc4177b58 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1370,8 +1370,8 @@ TEST(Serialization, tx_fcmp_pp) tx.rct_signatures.referenceBlock = referenceBlock; // 1 fcmp++ proof - fcmp::FcmpPpProof fcmp_pp; - const std::size_t proof_len = fcmp::fcmp_pp_len(n_inputs); + fcmp_pp::FcmpPpProof fcmp_pp; + const std::size_t proof_len = fcmp_pp::proof_len(n_inputs); fcmp_pp.reserve(proof_len); for (std::size_t i = 0; i < proof_len; ++i) fcmp_pp.push_back(i); @@ -1400,7 +1400,7 @@ TEST(Serialization, tx_fcmp_pp) string blob; // Extend fcmp++ proof - ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::fcmp_pp_len(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp_pp::proof_len(n_inputs)); tx.rct_signatures.p.fcmp_pp.push_back(0x01); ASSERT_FALSE(serialization::dump_binary(tx, blob)); @@ -1411,7 +1411,7 @@ TEST(Serialization, tx_fcmp_pp) transaction tx = make_dummy_fcmp_pp_tx(); // Shorten the fcmp++ proof - ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp::fcmp_pp_len(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp_pp::proof_len(n_inputs)); ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() > 1); tx.rct_signatures.p.fcmp_pp.pop_back(); diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 1f153fa6bda..11bdfcf7448 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -34,7 +34,7 @@ #include "blockchain_db/blockchain_db.h" #include "blockchain_db/lmdb/db_lmdb.h" -#include "fcmp/curve_trees.h" +#include "fcmp_pp/curve_trees.h" #include "misc_log_ex.h" #include @@ -84,7 +84,7 @@ namespace unit_test remove_files(); } - void init_new_db(std::shared_ptr curve_trees) + void init_new_db(std::shared_ptr curve_trees) { CHECK_AND_ASSERT_THROW_MES(this->m_db == nullptr, "expected nullptr m_db"); this->m_db = new 
cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); From 9f0dd859e635bce785b9f20b999dc0db063a1475 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 14:55:49 -0700 Subject: [PATCH 087/127] fix clang compile errors --- src/blockchain_utilities/CMakeLists.txt | 1 - src/fcmp_pp/curve_trees.cpp | 21 +++++++++++++++++++++ tests/unit_tests/serialization.cpp | 2 +- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/blockchain_utilities/CMakeLists.txt b/src/blockchain_utilities/CMakeLists.txt index d6257ef991e..a41cd1e53f2 100644 --- a/src/blockchain_utilities/CMakeLists.txt +++ b/src/blockchain_utilities/CMakeLists.txt @@ -141,7 +141,6 @@ target_link_libraries(blockchain_import PRIVATE cryptonote_core blockchain_db - fcmp_pp version epee ${Boost_FILESYSTEM_LIBRARY} diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 50f354a9e4c..366462f3998 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -761,6 +761,10 @@ std::vector CurveTrees::flatten_leaves(std::vector< return flattened_leaves; }; + +// Explicit instantiation +template std::vector CurveTrees::flatten_leaves( + std::vector &&leaves) const; //---------------------------------------------------------------------------------------------------------------------- template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( @@ -854,6 +858,12 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio return tree_extension; }; + +// Explicit instantiation +template CurveTrees::TreeExtension CurveTrees::get_tree_extension( + const uint64_t old_n_leaf_tuples, + const LastHashes &existing_last_hashes, + std::vector &&new_leaf_tuples) const; //---------------------------------------------------------------------------------------------------------------------- template std::vector CurveTrees::get_trim_instructions( @@ -899,6 +909,11 @@ std::vector CurveTrees::get_trim_instructions( return trim_instructions; } + 
+// Explicit instantiation +template std::vector CurveTrees::get_trim_instructions( + const uint64_t old_n_leaf_tuples, + const uint64_t trim_n_leaf_tuples) const; //---------------------------------------------------------------------------------------------------------------------- template typename CurveTrees::TreeReduction CurveTrees::get_tree_reduction( @@ -962,6 +977,12 @@ typename CurveTrees::TreeReduction CurveTrees::get_tree_reductio return tree_reduction_out; }; + +// Explicit instantiation +template CurveTrees::TreeReduction CurveTrees::get_tree_reduction( + const std::vector &trim_instructions, + const LastChunkChildrenToTrim &children_to_trim, + const LastHashes &last_hashes) const; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTrees private member functions diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index 66fc4177b58..e9520c7c819 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1312,7 +1312,7 @@ TEST(Serialization, tx_fcmp_pp) const std::size_t n_inputs = 2; const std::size_t n_outputs = 3; - const auto make_dummy_fcmp_pp_tx = [n_inputs, n_outputs]() -> transaction + const auto make_dummy_fcmp_pp_tx = []() -> transaction { transaction tx; From 8b12a335c6d1d358e43261cb929754f75848d906 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 8 Aug 2024 19:26:08 -0700 Subject: [PATCH 088/127] fcmp++: implement iterative audit_tree function - Recursion goes too deep --- src/blockchain_db/lmdb/db_lmdb.cpp | 230 ++++++++++++++++------------- src/blockchain_db/lmdb/db_lmdb.h | 2 - tests/unit_tests/serialization.cpp | 2 +- 3 files changed, 132 insertions(+), 102 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 
0642c7d783a..800fd933869 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1972,12 +1972,16 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const uint64_t layer_idx = 0; uint64_t child_chunk_idx = 0; MDB_cursor_op leaf_op = MDB_FIRST; + MDB_cursor_op parent_op = MDB_FIRST; while (1) { // Get next leaf chunk std::vector leaf_tuples_chunk; leaf_tuples_chunk.reserve(m_curve_trees->m_c2_width); + if (child_chunk_idx && child_chunk_idx % 1000 == 0) + MINFO("Auditing layer " << layer_idx << ", child_chunk_idx " << child_chunk_idx); + // Iterate until chunk is full or we get to the end of all leaves while (1) { @@ -2003,7 +2007,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const MDB_val_set(v_parent, child_chunk_idx); MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx); - int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, MDB_GET_BOTH); + int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, parent_op); + parent_op = MDB_NEXT_DUP; // Check end condition: no more leaf tuples in the leaf layer if (leaf_tuples_chunk.empty()) @@ -2019,6 +2024,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); + if (layer_idx != *(uint64_t*)k_parent.mv_data) + throw0(DB_ERROR("unexpected parent encountered")); // Get the expected leaf chunk hash const auto leaves = m_curve_trees->flatten_leaves(std::move(leaf_tuples_chunk)); @@ -2039,40 +2046,36 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const const auto expected_bytes = m_curve_trees->m_c2->to_bytes(chunk_hash); const auto actual_bytes = lv->child_chunk_hash; CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); + CHECK_AND_ASSERT_MES(lv->child_chunk_idx == child_chunk_idx, false, 
"unexpected child chunk idx"); ++child_chunk_idx; } + MDEBUG("Successfully audited leaf layer"); + // Traverse up the tree auditing each layer until we've audited every layer in the tree - while (1) + bool audit_complete = false; + while (!audit_complete) { + MDEBUG("Auditing layer " << layer_idx); + // Alternate starting with c1 as parent (we already audited c2 leaf parents), then c2 as parent, then c1, etc. const bool parent_is_c1 = layer_idx % 2 == 0; if (parent_is_c1) { - if (this->audit_layer( + audit_complete = this->audit_layer( /*c_child*/ m_curve_trees->m_c2, /*c_parent*/ m_curve_trees->m_c1, layer_idx, - /*child_start_idx*/ 0, - /*child_chunk_idx*/ 0, - /*chunk_width*/ m_curve_trees->m_c1_width)) - { - break; - } + /*chunk_width*/ m_curve_trees->m_c1_width); } else { - if (this->audit_layer( + audit_complete = this->audit_layer( /*c_child*/ m_curve_trees->m_c1, /*c_parent*/ m_curve_trees->m_c2, layer_idx, - /*child_start_idx*/ 0, - /*child_chunk_idx*/ 0, - /*chunk_width*/ m_curve_trees->m_c2_width)) - { - break; - } + /*chunk_width*/ m_curve_trees->m_c2_width); } ++layer_idx; @@ -2086,112 +2089,141 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const template bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, const std::unique_ptr &c_parent, - const uint64_t layer_idx, - const uint64_t child_start_idx, - const uint64_t child_chunk_idx, + const uint64_t child_layer_idx, const uint64_t chunk_width) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); TXN_PREFIX_RDONLY(); - RCURSOR(layers) - MDEBUG("Auditing layer " << layer_idx << " at child_start_idx " << child_start_idx - << " and child_chunk_idx " << child_chunk_idx); + // Open separate cursors for child and parent layer + MDB_cursor *child_layer_cursor, *parent_layer_cursor; + + int c_result = mdb_cursor_open(m_txn, m_layers, &child_layer_cursor); + if (c_result) + throw0(DB_ERROR(lmdb_error("Failed to open child cursor: ", c_result).c_str())); + 
int p_result = mdb_cursor_open(m_txn, m_layers, &parent_layer_cursor); + if (p_result) + throw0(DB_ERROR(lmdb_error("Failed to open parent cursor: ", p_result).c_str())); + + // Set the cursors to the start of each layer + const uint64_t parent_layer_idx = child_layer_idx + 1; + + MDB_val_set(k_child, child_layer_idx); + MDB_val_set(k_parent, parent_layer_idx); - // Get next child chunk - std::vector child_chunk; - child_chunk.reserve(chunk_width); + MDB_val v_child, v_parent; - MDB_val_copy k_child(layer_idx); - MDB_val_set(v_child, child_start_idx); - MDB_cursor_op op_child = MDB_GET_BOTH; + c_result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, MDB_SET); + p_result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, MDB_SET); + + if (c_result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", c_result).c_str())); + if (p_result != MDB_SUCCESS && p_result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", p_result).c_str())); + + // Begin to audit the layer + MDB_cursor_op op_child = MDB_FIRST_DUP; + MDB_cursor_op op_parent = MDB_FIRST_DUP; + bool audit_complete = false; + uint64_t child_chunk_idx = 0; while (1) { - int result = mdb_cursor_get(m_cur_layers, &k_child, &v_child, op_child); - op_child = MDB_NEXT_DUP; - if (result == MDB_NOTFOUND) - break; - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); + if (child_chunk_idx && child_chunk_idx % 1000 == 0) + MINFO("Auditing layer " << parent_layer_idx << ", child_chunk_idx " << child_chunk_idx); + + // Get next child chunk + std::vector child_chunk; + child_chunk.reserve(chunk_width); + while (1) + { + int result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, op_child); + op_child = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); - const auto *lv = (layer_val *)v_child.mv_data; - auto 
child_point = c_child->from_bytes(lv->child_chunk_hash); + const auto *lv = (layer_val *)v_child.mv_data; + auto child_point = c_child->from_bytes(lv->child_chunk_hash); - child_chunk.emplace_back(std::move(child_point)); + child_chunk.emplace_back(std::move(child_point)); - if (child_chunk.size() == chunk_width) + if (child_chunk.size() == chunk_width) + break; + } + + // Get the actual chunk hash from the db + int result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, op_parent); + op_parent = MDB_NEXT_DUP; + + // Check for end conditions + // End condition A (audit_complete=false): finished auditing layer and ready to move up a layer + // End condition B (audit_complete=true ): finished auditing the tree, no more layers remaining + + // End condition A: check if finished auditing this layer + if (child_chunk.empty()) + { + // No more children, expect to be done auditing layer and ready to move up a layer + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + + MDEBUG("Finished auditing layer " << child_layer_idx); + audit_complete = false; break; - } + } - // Get the actual chunk hash from the db - const uint64_t parent_layer_idx = layer_idx + 1; - MDB_val_copy k_parent(parent_layer_idx); - MDB_val_set(v_parent, child_chunk_idx); + // End condition B: check if finished auditing the tree + if (child_chunk_idx == 0 && child_chunk.size() == 1) + { + if (p_result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); - // Check for end conditions - // End condition A (return false): finished auditing layer and ready to move up a layer - // End condition B (return true): finished auditing the tree, no more layers 
remaining - int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, MDB_GET_BOTH); + MDEBUG("Encountered root at layer_idx " << child_layer_idx); + audit_complete = true; + break; + } - // End condition A: check if finished auditing this layer - if (child_chunk.empty()) - { - // No more children, expect to be done auditing layer and ready to move up a layer - if (result != MDB_NOTFOUND) - throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx) - + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str())); - MDEBUG("Finished auditing layer " << layer_idx); - TXN_POSTFIX_RDONLY(); - return false; - } + if (child_layer_idx != *(uint64_t*)k_child.mv_data) + throw0(DB_ERROR("unexpected child encountered")); + if (parent_layer_idx != *(uint64_t*)k_parent.mv_data) + throw0(DB_ERROR("unexpected parent encountered")); - // End condition B: check if finished auditing the tree - if (child_chunk_idx == 0 && child_chunk.size() == 1) - { - if (result != MDB_NOTFOUND) - throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx) - + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + // Get the expected chunk hash + std::vector child_scalars; + child_scalars.reserve(child_chunk.size()); + for (const auto &child : child_chunk) + child_scalars.emplace_back(c_child->point_to_cycle_scalar(child)); + const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; - MDEBUG("Encountered root at layer_idx " << layer_idx); - TXN_POSTFIX_RDONLY(); - return true; + for (uint64_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Hashing " << c_parent->to_string(child_scalars[i])); + + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk); + MDEBUG("Expected chunk_hash " << 
c_parent->to_string(chunk_hash) << " (" << child_scalars.size() << " children)"); + + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); + + const auto actual_bytes = lv->child_chunk_hash; + const auto expected_bytes = c_parent->to_bytes(chunk_hash); + if (actual_bytes != expected_bytes) + throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str())); + if (lv->child_chunk_idx != child_chunk_idx) + throw0(DB_ERROR(("unexpected child_chunk_idx, epxected " + std::to_string(child_chunk_idx)).c_str())); + + ++child_chunk_idx; } - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str())); - - // Get the expected chunk hash - std::vector child_scalars; - child_scalars.reserve(child_chunk.size()); - for (const auto &child : child_chunk) - child_scalars.emplace_back(c_child->point_to_cycle_scalar(child)); - const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; - - for (uint64_t i = 0; i < child_scalars.size(); ++i) - MDEBUG("Hashing " << c_parent->to_string(child_scalars[i])); - - const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk); - MDEBUG("chunk_hash " << c_parent->to_string(chunk_hash) << " , hash init point: " - << c_parent->to_string(c_parent->hash_init_point()) << " (" << child_scalars.size() << " children)"); - - const auto *lv = (layer_val *)v_parent.mv_data; - MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); - - const auto actual_bytes = lv->child_chunk_hash; - const auto expected_bytes = c_parent->to_bytes(chunk_hash); - if (actual_bytes != expected_bytes) - throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str())); - - // TODO: use while (1) for iterative pattern, don't use recursion - return this->audit_layer(c_child, - c_parent, - layer_idx, - child_start_idx + 
child_chunk.size(), - child_chunk_idx + 1, - chunk_width); + TXN_POSTFIX_RDONLY(); + + return audit_complete; } std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 30167f1a016..35ad4c5d4ee 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -445,8 +445,6 @@ class BlockchainLMDB : public BlockchainDB bool audit_layer(const std::unique_ptr &c_child, const std::unique_ptr &c_parent, const uint64_t layer_idx, - const uint64_t child_start_idx, - const uint64_t child_chunk_idx, const uint64_t chunk_width) const; std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index e9520c7c819..b4ce6c46b45 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1397,12 +1397,12 @@ TEST(Serialization, tx_fcmp_pp) // 2. fcmp++ proof is longer than expected when serializing { transaction tx = make_dummy_fcmp_pp_tx(); - string blob; // Extend fcmp++ proof ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp_pp::proof_len(n_inputs)); tx.rct_signatures.p.fcmp_pp.push_back(0x01); + string blob; ASSERT_FALSE(serialization::dump_binary(tx, blob)); } From f17db01250ccd64ddc603d9cbcdc8a8e3e49d714 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 9 Aug 2024 15:43:18 -0700 Subject: [PATCH 089/127] fcmp++: store {output pubkey, commitment} in db, pre-torsion clear - We must use the output pubkey to calculate key image generator I - Since torsion cleared outputs can be spent via ring sig today, if we torsion clear outputs **before** calculating I, then the key image of torsioned outputs will be different when constructing fcmp's, effectively enabling a double spend of torsioned outputs via ring sig before fcmp's and again via fcmp. - Storing {output pubkey, commitment} instead of {O.x,I.x,C.x} to save 32 bytes per output. 
--- src/blockchain_db/lmdb/db_lmdb.cpp | 119 +++++++++++++---------------- src/blockchain_db/lmdb/db_lmdb.h | 14 ++-- src/fcmp_pp/curve_trees.cpp | 111 +++++++++++++-------------- src/fcmp_pp/curve_trees.h | 36 ++++----- tests/unit_tests/curve_trees.cpp | 11 +-- 5 files changed, 140 insertions(+), 151 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 800fd933869..775c545010d 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -199,8 +199,8 @@ namespace * * spent_keys input hash - * - * locked_leaves block ID [{output ID, leaf tuple}...] - * leaves leaf_idx leaf tuple + * locked_outputs block ID [{output ID, output pubkey, commitment}...] + * leaves leaf_idx {output pubkey, commitment} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * * txpool_meta txn hash txn metadata @@ -233,7 +233,7 @@ const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts"; const char* const LMDB_SPENT_KEYS = "spent_keys"; // Curve trees merkle tree tables -const char* const LMDB_LOCKED_LEAVES = "locked_leaves"; +const char* const LMDB_LOCKED_OUTPUTS = "locked_outputs"; const char* const LMDB_LEAVES = "leaves"; const char* const LMDB_LAYERS = "layers"; @@ -827,11 +827,11 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l } // Grow the tree with outputs that unlock at this block height - auto unlocked_leaves = this->get_leaf_tuples_at_unlock_block_id(m_height); - this->grow_tree(std::move(unlocked_leaves)); + auto unlocked_outputs = this->get_outs_at_unlock_block_id(m_height); + this->grow_tree(std::move(unlocked_outputs)); - // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked leaves table - this->del_locked_leaf_tuples_at_block_id(m_height); + // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked outputs table + this->del_locked_outs_at_block_id(m_height); int result = 0; 
@@ -878,17 +878,17 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l if (result) throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str())); - CURSOR(locked_leaves) + CURSOR(locked_outputs) - // Add the locked leaf tuples from this block to the locked leaves table - for (const auto &locked_tuple : leaf_tuples_by_unlock_block) + // Add the locked outputs from this block to the locked outputs table + for (const auto &locked_output : leaf_tuples_by_unlock_block) { - MDB_val_set(k_block_id, locked_tuple.first); - MDB_val_set(v_tuple, locked_tuple.second); + MDB_val_set(k_block_id, locked_output.first); + MDB_val_set(v_output, locked_output.second); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(m_cur_locked_leaves, &k_block_id, &v_tuple, MDB_NODUPDATA); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); } @@ -1387,7 +1387,6 @@ void BlockchainLMDB::grow_tree(std::vectorgrow_layer(m_curve_trees->m_c2, @@ -1413,7 +1412,7 @@ void BlockchainLMDB::grow_tree(std::vectorgrow_layer(m_curve_trees->m_c1, @@ -1817,10 +1816,10 @@ fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_ if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); - const auto preprocessed_leaf_tuple = *(fcmp_pp::curve_trees::PreprocessedLeafTuple *)v.mv_data; + const auto output_pair = *(fcmp_pp::curve_trees::OutputPair *)v.mv_data; // TODO: parallelize calls to this function - auto leaf = m_curve_trees->leaf_tuple(preprocessed_leaf_tuple); + auto leaf = m_curve_trees->leaf_tuple(output_pair); 
leaves_to_trim.emplace_back(std::move(leaf.O_x)); leaves_to_trim.emplace_back(std::move(leaf.I_x)); @@ -1993,8 +1992,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - const auto preprocessed_leaf_tuple = *(fcmp_pp::curve_trees::PreprocessedLeafTuple *)v.mv_data; - auto leaf = m_curve_trees->leaf_tuple(preprocessed_leaf_tuple); + const auto output_pair = *(fcmp_pp::curve_trees::OutputPair *)v.mv_data; + auto leaf = m_curve_trees->leaf_tuple(output_pair); leaf_tuples_chunk.emplace_back(std::move(leaf)); @@ -2226,17 +2225,17 @@ bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, return audit_complete; } -std::vector BlockchainLMDB::get_leaf_tuples_at_unlock_block_id( +std::vector BlockchainLMDB::get_outs_at_unlock_block_id( uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); TXN_PREFIX_RDONLY(); - RCURSOR(locked_leaves) + RCURSOR(locked_outputs) MDB_val_set(k_block_id, block_id); - MDB_val v_tuple; + MDB_val v_output; // Get all the locked outputs at the provided block id std::vector leaf_tuples; @@ -2244,7 +2243,7 @@ std::vector BlockchainLMDB::get_leaf_tup MDB_cursor_op op = MDB_SET; while (1) { - int result = mdb_cursor_get(m_cur_locked_leaves, &k_block_id, &v_tuple, op); + int result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, op); if (result == MDB_NOTFOUND) break; if (result != MDB_SUCCESS) @@ -2255,8 +2254,8 @@ std::vector BlockchainLMDB::get_leaf_tup if (blk_id != block_id) throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); - const auto range_begin = ((const fcmp_pp::curve_trees::LeafTupleContext*)v_tuple.mv_data); - const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp_pp::curve_trees::LeafTupleContext); + const auto range_begin = ((const fcmp_pp::curve_trees::LeafTupleContext*)v_output.mv_data); 
+ const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::LeafTupleContext); auto it = range_begin; @@ -2276,25 +2275,25 @@ std::vector BlockchainLMDB::get_leaf_tup return leaf_tuples; } -void BlockchainLMDB::del_locked_leaf_tuples_at_block_id(uint64_t block_id) +void BlockchainLMDB::del_locked_outs_at_block_id(uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; - CURSOR(locked_leaves) + CURSOR(locked_outputs) MDB_val_set(k_block_id, block_id); - int result = mdb_cursor_get(m_cur_locked_leaves, &k_block_id, NULL, MDB_SET); + int result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, NULL, MDB_SET); if (result == MDB_NOTFOUND) return; if (result != MDB_SUCCESS) - throw1(DB_ERROR(lmdb_error("Error finding locked leaf tuples to remove: ", result).c_str())); + throw1(DB_ERROR(lmdb_error("Error finding locked outputs to remove: ", result).c_str())); - result = mdb_cursor_del(m_cur_locked_leaves, MDB_NODUPDATA); + result = mdb_cursor_del(m_cur_locked_outputs, MDB_NODUPDATA); if (result) - throw1(DB_ERROR(lmdb_error("Error removing locked leaf tuples: ", result).c_str())); + throw1(DB_ERROR(lmdb_error("Error removing locked outputs: ", result).c_str())); } BlockchainLMDB::~BlockchainLMDB() @@ -2451,7 +2450,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); - lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves"); + lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); 
lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); @@ -2474,7 +2473,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_block_heights, compare_hash32); mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); - mdb_set_dupsort(txn, m_locked_leaves, compare_uint64); + mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); mdb_set_dupsort(txn, m_leaves, compare_uint64); mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); @@ -2654,8 +2653,8 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str())); if (auto result = mdb_drop(txn, m_spent_keys, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str())); - if (auto result = mdb_drop(txn, m_locked_leaves, 0)) - throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_leaves: ", result).c_str())); + if (auto result = mdb_drop(txn, m_locked_outputs, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_outputs: ", result).c_str())); if (auto result = mdb_drop(txn, m_leaves, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); if (auto result = mdb_drop(txn, m_layers, 0)) @@ -6719,7 +6718,7 @@ void BlockchainLMDB::migrate_5_6() do { // 1. Prepare all valid outputs to be inserted into the merkle tree and - // place them in a locked leaves table. The key to this new table is the + // place them in a locked outputs table. The key to this new table is the // block id in which the outputs unlock. 
{ MINFO("Setting up a locked outputs table (step 1/2 of full-chain membership proof migration)"); @@ -6727,8 +6726,8 @@ void BlockchainLMDB::migrate_5_6() result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); - lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves"); - mdb_set_dupsort(txn, m_locked_leaves, compare_uint64); + lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); + mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); lmdb_db_open(txn, "tmp_last_output", MDB_INTEGERKEY | MDB_CREATE, m_tmp_last_output, "Failed to open db handle for m_tmp_last_output"); txn.commit(); @@ -6742,7 +6741,7 @@ void BlockchainLMDB::migrate_5_6() struct tmp_output_cache { uint64_t n_outputs_read; uint64_t amount; outkey ok; }; tmp_output_cache last_output; - MDB_cursor *c_output_amounts, *c_locked_leaves, *c_tmp_last_output; + MDB_cursor *c_output_amounts, *c_locked_outputs, *c_tmp_last_output; MDB_val k, v; i = 0; @@ -6780,7 +6779,7 @@ void BlockchainLMDB::migrate_5_6() result = mdb_cursor_open(txn, m_output_amounts, &c_output_amounts); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str())); - result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves); + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); result = mdb_cursor_open(txn, m_tmp_last_output, &c_tmp_last_output); @@ -6867,35 +6866,25 @@ void BlockchainLMDB::migrate_5_6() last_output.n_outputs_read = i; } - // Convert the output into a leaf tuple context - fcmp_pp::curve_trees::LeafTupleContext tuple_context; - try - { - 
tuple_context = m_curve_trees->output_to_leaf_context( - output_id, - output_data.pubkey, - output_data.commitment); - } - catch(...) - { - // Invalid outputs can't be added to the tree - continue; - } + // Prepare the output for insertion to the tree + const auto tuple_context = m_curve_trees->output_to_leaf_context(output_id, + std::move(output_data.pubkey), + std::move(output_data.commitment)); // Get the block in which the output will unlock const uint64_t unlock_block = cryptonote::get_unlock_block_index(output_data.unlock_time, output_data.height); - // Now add the leaf tuple to the locked leaves table + // Now add the output to the locked outputs table MDB_val_set(k_block_id, unlock_block); - MDB_val_set(v_tuple, tuple_context); + MDB_val_set(v_output, tuple_context); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(c_locked_leaves, &k_block_id, &v_tuple, MDB_NODUPDATA); + result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); if (result == MDB_KEYEXIST) - MDEBUG("Duplicate output pub key encountered: " << output_data.pubkey << " , output_id: " << output_id); + MDEBUG("Dup output pubkey: " << tuple_context.output_pair.output_pubkey << " , output_id: " << output_id); } } @@ -6918,7 +6907,7 @@ void BlockchainLMDB::migrate_5_6() lmdb_db_open(txn, "block_infn", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); mdb_set_dupsort(txn, m_block_info, compare_uint64); - MDB_cursor *c_locked_leaves, *c_new_block_info, *c_old_block_info; + MDB_cursor *c_locked_outputs, *c_new_block_info, *c_old_block_info; MDB_val k_blk, v_blk; i = 0; @@ -6945,7 +6934,7 @@ void 
BlockchainLMDB::migrate_5_6() } // Open all cursors - result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves); + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); if (result) throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); result = mdb_cursor_open(txn, m_block_info, &c_new_block_info); @@ -6969,11 +6958,11 @@ void BlockchainLMDB::migrate_5_6() } // Get the leaf tuples that unlock at the given block - auto unlocked_leaves = this->get_leaf_tuples_at_unlock_block_id(i); - this->grow_tree(std::move(unlocked_leaves)); + auto unlocked_outputs = this->get_outs_at_unlock_block_id(i); + this->grow_tree(std::move(unlocked_outputs)); - // Now that we've used the unlocked leaves to grow the tree, we delete them from the locked leaves table - this->del_locked_leaf_tuples_at_block_id(i); + // Now that we've used the unlocked leaves to grow the tree, we delete them from the locked outputs table + this->del_locked_outs_at_block_id(i); // Get old block_info and use it to set the new one with new values result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 35ad4c5d4ee..d5f4e5ad139 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -65,7 +65,7 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; - MDB_cursor *m_txc_locked_leaves; + MDB_cursor *m_txc_locked_outputs; MDB_cursor *m_txc_leaves; MDB_cursor *m_txc_layers; @@ -92,7 +92,7 @@ typedef struct mdb_txn_cursors #define m_cur_tx_indices m_cursors->m_txc_tx_indices #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys -#define m_cur_locked_leaves m_cursors->m_txc_locked_leaves +#define m_cur_locked_outputs m_cursors->m_txc_locked_outputs #define m_cur_leaves m_cursors->m_txc_leaves #define m_cur_layers m_cursors->m_txc_layers #define m_cur_txpool_meta 
m_cursors->m_txc_txpool_meta @@ -117,7 +117,7 @@ typedef struct mdb_rflags bool m_rf_tx_indices; bool m_rf_tx_outputs; bool m_rf_spent_keys; - bool m_rf_locked_leaves; + bool m_rf_locked_outputs; bool m_rf_leaves; bool m_rf_layers; bool m_rf_txpool_meta; @@ -444,12 +444,12 @@ class BlockchainLMDB : public BlockchainDB template bool audit_layer(const std::unique_ptr &c_child, const std::unique_ptr &c_parent, - const uint64_t layer_idx, + const uint64_t child_layer_idx, const uint64_t chunk_width) const; - std::vector get_leaf_tuples_at_unlock_block_id(uint64_t block_id); + std::vector get_outs_at_unlock_block_id(uint64_t block_id); - void del_locked_leaf_tuples_at_block_id(uint64_t block_id); + void del_locked_outs_at_block_id(uint64_t block_id); uint64_t num_outputs() const; @@ -518,7 +518,7 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; - MDB_dbi m_locked_leaves; + MDB_dbi m_locked_outputs; MDB_dbi m_leaves; MDB_dbi m_layers; diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 366462f3998..3873b0cbc45 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -643,31 +643,18 @@ static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_re // CurveTrees public member functions //---------------------------------------------------------------------------------------------------------------------- template<> -LeafTupleContext CurveTrees::output_to_leaf_context( - const std::uint64_t output_id, - const crypto::public_key &output_pubkey, - const rct::key &commitment) const +LeafTupleContext CurveTrees::output_to_leaf_context(const std::uint64_t output_id, + crypto::public_key &&output_pubkey, + rct::key &&commitment) const { - rct::key O, C; - - if (!rct::clear_torsion(rct::pk2rct(output_pubkey), O)) - throw std::runtime_error("output pub key is invalid"); - if (!rct::clear_torsion(commitment, C)) - throw std::runtime_error("commitment is invalid"); - - if (O == rct::I) - throw 
std::runtime_error("O cannot equal identity"); - if (C == rct::I) - throw std::runtime_error("C cannot equal identity"); - - PreprocessedLeafTuple o_c{ - .O = std::move(O), - .C = std::move(C) + auto output_pair = OutputPair{ + .output_pubkey = std::move(output_pubkey), + .commitment = std::move(commitment) }; return LeafTupleContext{ - .output_id = output_id, - .preprocessed_leaf_tuple = std::move(o_c) + .output_id = output_id, + .output_pair = std::move(output_pair) }; }; //---------------------------------------------------------------------------------------------------------------------- @@ -698,40 +685,43 @@ void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote if (!miner_tx && tx.version == 2) CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); - const rct::key commitment = (miner_tx || tx.version != 2) + rct::key commitment = (miner_tx || tx.version != 2) ? rct::zeroCommit(out.amount) : tx.rct_signatures.outPk[i].mask; - LeafTupleContext leaf_tuple_context; - try - { - // Convert output to leaf tuple context; throws if output is invalid - leaf_tuple_context = output_to_leaf_context(output_ids[i], - output_public_key, - commitment); - } - catch (...) 
- { - // We don't want leaf tuples from invalid outputs in the tree - continue; - }; + auto tuple_context = output_to_leaf_context(output_ids[i], + std::move(output_public_key), + std::move(commitment)); - leaf_tuples_by_unlock_block_inout.emplace(unlock_block, std::move(leaf_tuple_context)); + leaf_tuples_by_unlock_block_inout.emplace(unlock_block, std::move(tuple_context)); } } //---------------------------------------------------------------------------------------------------------------------- template<> CurveTrees::LeafTuple CurveTrees::leaf_tuple( - const PreprocessedLeafTuple &preprocessed_leaf_tuple) const + const OutputPair &output_pair) const { - const rct::key &O = preprocessed_leaf_tuple.O; - const rct::key &C = preprocessed_leaf_tuple.C; + const crypto::public_key &output_pubkey = output_pair.output_pubkey; + const rct::key &commitment = output_pair.commitment; + + rct::key O, C; + if (!rct::clear_torsion(rct::pk2rct(output_pubkey), O)) + throw std::runtime_error("output pubkey is invalid"); + if (!rct::clear_torsion(commitment, C)) + throw std::runtime_error("commitment is invalid"); + if (O == rct::I) + throw std::runtime_error("O cannot equal identity"); + if (C == rct::I) + throw std::runtime_error("C cannot equal identity"); + + // Must use the original output pubkey to derive I to prevent double spends, since torsioned outputs yield a + // a distinct I and key image from their respective torsion cleared output (and torsioned outputs are spendable + // before fcmp++) crypto::ec_point I; - crypto::derive_key_image_generator(rct::rct2pk(O), I); + crypto::derive_key_image_generator(output_pubkey, I); rct::key O_x, I_x, C_x; - if (!rct::point_to_wei_x(O, O_x)) throw std::runtime_error("failed to get wei x scalar from O"); if (!rct::point_to_wei_x(rct::pt2rct(I), I_x)) @@ -773,43 +763,50 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio std::vector &&new_leaf_tuples) const { TreeExtension tree_extension; + 
tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples; if (new_leaf_tuples.empty()) return tree_extension; - auto grow_layer_instructions = get_leaf_layer_grow_instructions( - old_n_leaf_tuples, - new_leaf_tuples.size(), - LEAF_TUPLE_SIZE, - m_leaf_layer_chunk_width); - - tree_extension.leaves.start_leaf_tuple_idx = grow_layer_instructions.old_total_children / LEAF_TUPLE_SIZE; - // Sort the leaves by order they appear in the chain const auto sort_fn = [](const LeafTupleContext &a, const LeafTupleContext &b) { return a.output_id < b.output_id; }; std::sort(new_leaf_tuples.begin(), new_leaf_tuples.end(), sort_fn); - // Convert sorted pre-processed tuples into leaf tuples, place each element of each leaf tuple in a flat vector to - // be hashed, and place the pre-processed tuples in tree extension struct for insertion into the db + // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, + // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since + // they cannot be inserted to the tree. std::vector flattened_leaves; flattened_leaves.reserve(new_leaf_tuples.size() * LEAF_TUPLE_SIZE); tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); for (auto &l : new_leaf_tuples) { // TODO: this loop can be parallelized - auto leaf = leaf_tuple(l.preprocessed_leaf_tuple); + LeafTuple leaf; + try { leaf = leaf_tuple(l.output_pair); } + catch(...) 
+ { + // Invalid outputs can't be added to the tree + continue; + } + // We use O.x, I.x, C.x to grow the tree flattened_leaves.emplace_back(std::move(leaf.O_x)); flattened_leaves.emplace_back(std::move(leaf.I_x)); flattened_leaves.emplace_back(std::move(leaf.C_x)); - // We only need to store O and C in the db, the leaf tuple can be derived from O and C - tree_extension.leaves.tuples.emplace_back(PreprocessedLeafTuple{ - .O = std::move(l.preprocessed_leaf_tuple.O), - .C = std::move(l.preprocessed_leaf_tuple.C) - }); + // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output pair in the db to save 32 bytes + tree_extension.leaves.tuples.emplace_back(std::move(l.output_pair)); } + if (flattened_leaves.empty()) + return tree_extension; + + auto grow_layer_instructions = get_leaf_layer_grow_instructions( + old_n_leaf_tuples, + tree_extension.leaves.tuples.size(), + LEAF_TUPLE_SIZE, + m_leaf_layer_chunk_width); + if (grow_layer_instructions.need_old_last_parent) CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent"); diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 3ca9e332ae2..0c1ce347826 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -129,22 +129,24 @@ struct TrimLayerInstructions final uint64_t end_trim_idx; }; -// Output pub key and commitment, ready to be converted into a leaf tuple (from {O,C} -> {O.x, I.x, C.x}) -struct PreprocessedLeafTuple final +// Output pub key and commitment, ready to be converted to a leaf tuple +// - From {output_pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x} +// - Output pairs do NOT necessarily have torsion cleared. We need the output pubkey as it exists in the chain in order +// to derive the correct I (when deriving {O.x, I.x, C.x}). Torsion clearing O before deriving I from O would enable +// spending a torsioned output once before the fcmp++ fork and again with a different key image via fcmp++. 
+struct OutputPair final { - // Output pubkey that has been checked valid and torsion cleared - rct::key O; - // Commitment that has been checked valid and torsion cleared - rct::key C; + crypto::public_key output_pubkey; + rct::key commitment; }; -static_assert(sizeof(PreprocessedLeafTuple) == (32+32), "db expects 64 bytes for pre-processed leaf tuples"); +static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs"); -// Contextual wrapper for a pre-processed leaf tuple +// Contextual wrapper for output pairs, ready to be converted into leaf tuples struct LeafTupleContext final { // Global output ID useful to order the leaf tuple for insertion into the tree - uint64_t output_id; - PreprocessedLeafTuple preprocessed_leaf_tuple; + uint64_t output_id; + OutputPair output_pair; }; //---------------------------------------------------------------------------------------------------------------------- @@ -186,9 +188,9 @@ class CurveTrees struct Leaves final { // Starting leaf tuple index in the leaf layer - uint64_t start_leaf_tuple_idx{0}; + uint64_t start_leaf_tuple_idx{0}; // Contiguous leaves in a tree that start at the start_idx - std::vector tuples; + std::vector tuples; }; // A struct useful to extend an existing tree @@ -231,10 +233,10 @@ class CurveTrees //member functions public: - // Convert cryptonote output pub key and commitment to a pre-processed leaf tuple ready for insertion to the tree + // Prepare output pubkey and commitment for insertion into the tree LeafTupleContext output_to_leaf_context(const std::uint64_t output_id, - const crypto::public_key &output_pubkey, - const rct::key &C) const; + crypto::public_key &&output_pubkey, + rct::key &&commitment) const; // Convert cryptonote tx outs to contexts ready to be converted to leaf tuples, grouped by unlock height void tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, @@ -243,8 +245,8 @@ class CurveTrees const bool miner_tx, std::multimap
&leaf_tuples_by_unlock_block_inout) const; - // Derive a leaf tuple from a pre-processed leaf tuple {O,C} -> {O.x,I.x,C.x} - LeafTuple leaf_tuple(const PreprocessedLeafTuple &preprocessed_leaf_tuple) const; + // Convert output pairs into leaf tuples, from {output pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x} + LeafTuple leaf_tuple(const OutputPair &output_pair) const; // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...] std::vector flatten_leaves(std::vector &&leaves) const; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 650c75bcb26..2e45758591c 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -168,9 +168,9 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e "unexpected leaf start idx"); m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); - for (const auto &preprocessed_leaf_tuple : tree_extension.leaves.tuples) + for (const auto &output_pair : tree_extension.leaves.tuples) { - auto leaf = m_curve_trees.leaf_tuple(preprocessed_leaf_tuple); + auto leaf = m_curve_trees.leaf_tuple(output_pair); m_tree.leaves.emplace_back(CurveTreesV1::LeafTuple{ .O_x = std::move(leaf.O_x), @@ -640,8 +640,8 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension MDEBUG("Leaf start idx: " << tree_extension.leaves.start_leaf_tuple_idx); for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) { - const auto &preprocessed_leaf_tuple = tree_extension.leaves.tuples[i]; - const auto leaf = m_curve_trees.leaf_tuple(preprocessed_leaf_tuple); + const auto &output_pair = tree_extension.leaves.tuples[i]; + const auto leaf = m_curve_trees.leaf_tuple(output_pair); const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); @@ -760,7 +760,8 @@ static const std::vector generate_random crypto::generate_keys(O, o, o, false);
crypto::generate_keys(C, c, c, false); - auto tuple_context = curve_trees.output_to_leaf_context(output_id, O, rct::pk2rct(C)); + rct::key C_key = rct::pk2rct(C); + auto tuple_context = curve_trees.output_to_leaf_context(output_id, std::move(O), std::move(C_key)); tuples.emplace_back(std::move(tuple_context)); } From 9ad49189bbba52712889250aa8181b8984c22b28 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 9 Aug 2024 15:48:51 -0700 Subject: [PATCH 090/127] link correct cncrypto and ringct_basic libs --- src/fcmp_pp/CMakeLists.txt | 4 ++-- tests/unit_tests/CMakeLists.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/fcmp_pp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt index 33d428f827c..50dbb567f81 100644 --- a/src/fcmp_pp/CMakeLists.txt +++ b/src/fcmp_pp/CMakeLists.txt @@ -49,10 +49,10 @@ endif() target_link_libraries(fcmp_pp PUBLIC - crypto + cncrypto cryptonote_basic epee - ringct + ringct_basic PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a ${EXTRA_LIBRARIES} diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index b48ef48524c..41406d9664c 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -114,7 +114,7 @@ monero_add_minimal_executable(unit_tests target_link_libraries(unit_tests PRIVATE ringct - crypto + cncrypto cryptonote_protocol cryptonote_core daemon_messages From b6bcca989986242883f6f96e34495c7eef928597 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 9 Aug 2024 16:29:31 -0700 Subject: [PATCH 091/127] Remove ringct dep in fcmp_pp, impl in fcmp_pp_crypto --- src/fcmp_pp/CMakeLists.txt | 2 +- src/fcmp_pp/curve_trees.cpp | 14 ++++++++------ src/ringct/rctOps.cpp | 25 ------------------------- src/ringct/rctOps.h | 4 ---- tests/unit_tests/curve_trees.cpp | 3 ++- 5 files changed, 11 insertions(+), 37 deletions(-) diff --git a/src/fcmp_pp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt index 50dbb567f81..849b09593ab 100644 --- a/src/fcmp_pp/CMakeLists.txt 
+++ b/src/fcmp_pp/CMakeLists.txt @@ -28,6 +28,7 @@ set(fcmp_pp_sources curve_trees.cpp + fcmp_pp_crypto.cpp tower_cycle.cpp) monero_find_all_headers(fcmp_pp_headers "${CMAKE_CURRENT_SOURCE_DIR}") @@ -52,7 +53,6 @@ target_link_libraries(fcmp_pp cncrypto cryptonote_basic epee - ringct_basic PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a ${EXTRA_LIBRARIES} diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 3873b0cbc45..85c9a42a5ad 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -26,8 +26,10 @@ // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "cryptonote_basic/cryptonote_format_utils.h" #include "curve_trees.h" + +#include "cryptonote_basic/cryptonote_format_utils.h" +#include "fcmp_pp_crypto.h" #include "ringct/rctOps.h" @@ -705,9 +707,9 @@ CurveTrees::LeafTuple CurveTrees::leaf_tuple( const rct::key &commitment = output_pair.commitment; rct::key O, C; - if (!rct::clear_torsion(rct::pk2rct(output_pubkey), O)) + if (!fcmp_pp::clear_torsion(rct::pk2rct(output_pubkey), O)) throw std::runtime_error("output pubkey is invalid"); - if (!rct::clear_torsion(commitment, C)) + if (!fcmp_pp::clear_torsion(commitment, C)) throw std::runtime_error("commitment is invalid"); if (O == rct::I) @@ -722,11 +724,11 @@ CurveTrees::LeafTuple CurveTrees::leaf_tuple( crypto::derive_key_image_generator(output_pubkey, I); rct::key O_x, I_x, C_x; - if (!rct::point_to_wei_x(O, O_x)) + if (!fcmp_pp::point_to_wei_x(O, O_x)) throw std::runtime_error("failed to get wei x scalar from O"); - if (!rct::point_to_wei_x(rct::pt2rct(I), I_x)) + if (!fcmp_pp::point_to_wei_x(rct::pt2rct(I), I_x)) throw std::runtime_error("failed to get wei x scalar from I"); - if (!rct::point_to_wei_x(C, C_x)) + if (!fcmp_pp::point_to_wei_x(C, C_x)) throw std::runtime_error("failed to get wei x scalar from C"); return 
LeafTuple{ diff --git a/src/ringct/rctOps.cpp b/src/ringct/rctOps.cpp index e865f4398de..0e18cb461bb 100644 --- a/src/ringct/rctOps.cpp +++ b/src/ringct/rctOps.cpp @@ -725,29 +725,4 @@ namespace rct { sc_sub(masked.amount.bytes, masked.amount.bytes, sharedSec2.bytes); } } - - bool clear_torsion(const key &k, key &k_out) { - ge_p3 point; - if (ge_frombytes_vartime(&point, k.bytes) != 0) - return false; - // mul by inv 8, then mul by 8 - ge_p2 point_inv_8; - ge_scalarmult(&point_inv_8, INV_EIGHT.bytes, &point); - ge_p1p1 point_inv_8_mul_8; - ge_mul8(&point_inv_8_mul_8, &point_inv_8); - ge_p3 torsion_cleared_point; - ge_p1p1_to_p3(&torsion_cleared_point, &point_inv_8_mul_8); - ge_p3_tobytes(k_out.bytes, &torsion_cleared_point); - return true; - } - - bool point_to_wei_x(const key &pub, key &wei_x) { - if (pub == I) - return false; - fe y; - if (fe_frombytes_vartime(y, pub.bytes) != 0) - return false; - fe_ed_y_to_wei_x(wei_x.bytes, y); - return true; - } } diff --git a/src/ringct/rctOps.h b/src/ringct/rctOps.h index 2a3c1f678d6..0edd0308c46 100644 --- a/src/ringct/rctOps.h +++ b/src/ringct/rctOps.h @@ -188,9 +188,5 @@ namespace rct { key genCommitmentMask(const key &sk); void ecdhEncode(ecdhTuple & unmasked, const key & sharedSec, bool v2); void ecdhDecode(ecdhTuple & masked, const key & sharedSec, bool v2); - - // TODO: tests for these functions specifically - bool clear_torsion(const key &k, key &k_out); - bool point_to_wei_x(const key &pub, key &wei_x); } #endif /* RCTOPS_H */ diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 2e45758591c..86769b60fcd 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -30,6 +30,7 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "curve_trees.h" +#include "fcmp_pp/fcmp_pp_crypto.h" #include "misc_log_ex.h" #include "ringct/rctOps.h" #include "unit_tests_utils.h" @@ -777,7 +778,7 @@ static const Selene::Scalar generate_random_selene_scalar() 
crypto::generate_keys(S, s, s, false); rct::key S_x; - CHECK_AND_ASSERT_THROW_MES(rct::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); + CHECK_AND_ASSERT_THROW_MES(fcmp_pp::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x); } //---------------------------------------------------------------------------------------------------------------------- From 7389cb6bee794933dc409a8682850b75029daec7 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 9 Aug 2024 16:35:25 -0700 Subject: [PATCH 092/127] add missing files --- src/fcmp_pp/fcmp_pp_crypto.cpp | 66 ++++++++++++++++++++++++++++++++++ src/fcmp_pp/fcmp_pp_crypto.h | 41 +++++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 src/fcmp_pp/fcmp_pp_crypto.cpp create mode 100644 src/fcmp_pp/fcmp_pp_crypto.h diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp new file mode 100644 index 00000000000..0bb70590b6b --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_crypto.cpp @@ -0,0 +1,66 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "fcmp_pp_crypto.h" + +extern "C" +{ +#include "crypto/crypto-ops.h" +} +#include "ringct/rctOps.h" + +namespace fcmp_pp +{ +//---------------------------------------------------------------------------------------------------------------------- +bool clear_torsion(const rct::key &k, rct::key &k_out) { + ge_p3 point; + if (ge_frombytes_vartime(&point, k.bytes) != 0) + return false; + // mul by inv 8, then mul by 8 + ge_p2 point_inv_8; + ge_scalarmult(&point_inv_8, rct::INV_EIGHT.bytes, &point); + ge_p1p1 point_inv_8_mul_8; + ge_mul8(&point_inv_8_mul_8, &point_inv_8); + ge_p3 torsion_cleared_point; + ge_p1p1_to_p3(&torsion_cleared_point, &point_inv_8_mul_8); + ge_p3_tobytes(k_out.bytes, &torsion_cleared_point); + return true; +} + +bool point_to_wei_x(const rct::key &pub, rct::key &wei_x) { + if (pub == rct::I) + return false; + fe y; + if (fe_frombytes_vartime(y, pub.bytes) != 0) + return false; + fe_ed_y_to_wei_x(wei_x.bytes, y); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- 
+//---------------------------------------------------------------------------------------------------------------------- +}//namespace fcmp_pp diff --git a/src/fcmp_pp/fcmp_pp_crypto.h b/src/fcmp_pp/fcmp_pp_crypto.h new file mode 100644 index 00000000000..75356934d1f --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_crypto.h @@ -0,0 +1,41 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include "ringct/rctTypes.h" + +namespace fcmp_pp +{ +//---------------------------------------------------------------------------------------------------------------------- +// TODO: tests for these functions +bool clear_torsion(const rct::key &k, rct::key &k_out); +bool point_to_wei_x(const rct::key &pub, rct::key &wei_x); +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +}//namespace fcmp_pp From 6525df113ced650cd7a730143c8ddcac8fd4ba30 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 9 Aug 2024 18:31:18 -0700 Subject: [PATCH 093/127] Don't store output_id in locked_outputs table, table stays ordered --- src/blockchain_db/blockchain_db.cpp | 70 ++++++++++++++--- src/blockchain_db/blockchain_db.h | 6 +- src/blockchain_db/lmdb/db_lmdb.cpp | 39 +++++----- src/blockchain_db/lmdb/db_lmdb.h | 6 +- src/blockchain_db/testdb.h | 4 +- .../cryptonote_format_utils.h | 1 + src/fcmp_pp/CMakeLists.txt | 1 - src/fcmp_pp/curve_trees.cpp | 76 ++----------------- src/fcmp_pp/curve_trees.h | 22 +----- tests/block_weight/block_weight.cpp | 2 +- tests/core_tests/chaingen.cpp | 2 +- tests/unit_tests/curve_trees.cpp | 17 +++-- tests/unit_tests/hardfork.cpp | 2 +- tests/unit_tests/long_term_block_weight.cpp | 2 +- 14 files changed, 110 insertions(+), 140 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 4779f680d31..5a4ab837850 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -41,6 +41,55 @@ using epee::string_tools::pod_to_hex; +//--------------------------------------------------------------- +// Helper function to group outputs by unlock block +static void get_outs_by_unlock_block(const cryptonote::transaction &tx, + const std::vector &output_ids, + const uint64_t
tx_height, + const bool miner_tx, + fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block_inout) +{ + const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, tx_height); + + CHECK_AND_ASSERT_THROW_MES(tx.vout.size() == output_ids.size(), "unexpected size of output ids"); + + for (std::size_t i = 0; i < tx.vout.size(); ++i) + { + const auto &out = tx.vout[i]; + + crypto::public_key output_public_key; + if (!cryptonote::get_output_public_key(out, output_public_key)) + throw std::runtime_error("Could not get an output public key from a tx output."); + + static_assert(CURRENT_TRANSACTION_VERSION == 2, "This section of code was written with 2 tx versions in mind. " + "Revisit this section and update for the new tx version."); + CHECK_AND_ASSERT_THROW_MES(tx.version == 1 || tx.version == 2, "encountered unexpected tx version"); + + if (!miner_tx && tx.version == 2) + CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); + + rct::key commitment = (miner_tx || tx.version != 2) + ? 
rct::zeroCommit(out.amount) // Needs ringct + : tx.rct_signatures.outPk[i].mask; + + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(output_public_key), + .commitment = std::move(commitment) + }; + + if (outs_by_unlock_block_inout.find(unlock_block) == outs_by_unlock_block_inout.end()) + { + auto new_vec = std::vector{std::move(output_pair)}; + outs_by_unlock_block_inout[unlock_block] = std::move(new_vec); + } + else + { + outs_by_unlock_block_inout[unlock_block].emplace_back(std::move(output_pair)); + } + } +} +//--------------------------------------------------------------- + namespace cryptonote { @@ -231,7 +280,7 @@ std::vector BlockchainDB::add_transaction(const crypto::hash& blk_hash { // miner v2 txes have their coinbase output in one single out to save space, // and we store them as rct outputs with an identity mask - // note: tx_outs_to_leaf_tuple_contexts in curve_trees.cpp mirrors this logic + // note: get_outs_by_unlock_block mirrors this logic if (miner_tx && tx.version == 2) { cryptonote::tx_out vout = tx.vout[i]; @@ -311,35 +360,32 @@ uint64_t BlockchainDB::add_block( const std::pair& blck TIME_MEASURE_FINISH(time1); time_add_transaction += time1; - // When adding a block, we also need to add all the leaf tuples included in - // the block to a table keeping track of locked leaf tuples. Once those leaf - // tuples unlock, we use them to grow the tree. - std::multimap leaf_tuples_by_unlock_block; + // When adding a block, we also need to keep track of when outputs unlock, so + // we can use them to grow the merkle tree used in fcmp's at that point. 
+ fcmp_pp::curve_trees::OutputsByUnlockBlock outs_by_unlock_block; // Get miner tx's leaf tuples - CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); - m_curve_trees->tx_outs_to_leaf_tuple_contexts( + get_outs_by_unlock_block( blk.miner_tx, miner_output_ids, prev_height, true/*miner_tx*/, - leaf_tuples_by_unlock_block); + outs_by_unlock_block); // Get all other txs' leaf tuples for (std::size_t i = 0; i < txs.size(); ++i) { - // TODO: this loop can be parallelized - m_curve_trees->tx_outs_to_leaf_tuple_contexts( + get_outs_by_unlock_block( txs[i].first, output_ids[i], prev_height, false/*miner_tx*/, - leaf_tuples_by_unlock_block); + outs_by_unlock_block); } // call out to subclass implementation to add the block & metadata time1 = epee::misc_utils::get_tick_count(); - add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, leaf_tuples_by_unlock_block); + add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, outs_by_unlock_block); TIME_MEASURE_FINISH(time1); time_add_block1 += time1; diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index af68071417c..6ff97809fb1 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -409,7 +409,7 @@ class BlockchainDB * @param cumulative_difficulty the accumulated difficulty after this block * @param coins_generated the number of coins generated total after this block * @param blk_hash the hash of the block - * @param leaf_tuples_by_unlock_block the leaves from this block to add to the merkle tree + * @param outs_by_unlock_block the outputs from this block to add to the merkle tree */ virtual void add_block( const block& blk , size_t block_weight @@ -418,7 +418,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& 
leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) = 0; /** @@ -1783,7 +1783,7 @@ class BlockchainDB virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; // TODO: description and make private - virtual void grow_tree(std::vector &&new_leaves) = 0; + virtual void grow_tree(std::vector &&new_leaves) = 0; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 775c545010d..f7637dad8a8 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -199,7 +199,7 @@ namespace * * spent_keys input hash - * - * locked_outputs block ID [{output ID, output pubkey, commitment}...] + * locked_outputs block ID [{output pubkey, commitment}...] * leaves leaf_idx {output pubkey, commitment} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * @@ -798,7 +798,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap &leaf_tuples_by_unlock_block) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -828,7 +828,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l // Grow the tree with outputs that unlock at this block height auto unlocked_outputs = this->get_outs_at_unlock_block_id(m_height); - this->grow_tree(std::move(unlocked_outputs)); + if (!unlocked_outputs.empty()) + this->grow_tree(std::move(unlocked_outputs)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked 
outputs table this->del_locked_outs_at_block_id(m_height); @@ -881,14 +882,13 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l CURSOR(locked_outputs) // Add the locked outputs from this block to the locked outputs table - for (const auto &locked_output : leaf_tuples_by_unlock_block) + for (const auto &locked_output : outs_by_unlock_block) { MDB_val_set(k_block_id, locked_output.first); MDB_val_set(v_output, locked_output.second); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent - // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP | MDB_NODUPDATA); if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); } @@ -1344,7 +1344,7 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } -void BlockchainLMDB::grow_tree(std::vector &&new_leaves) +void BlockchainLMDB::grow_tree(std::vector &&new_leaves) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); if (new_leaves.empty()) @@ -2225,7 +2225,7 @@ bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, return audit_complete; } -std::vector BlockchainLMDB::get_outs_at_unlock_block_id( +std::vector BlockchainLMDB::get_outs_at_unlock_block_id( uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2238,7 +2238,7 @@ std::vector BlockchainLMDB::get_outs_at_ MDB_val v_output; // Get all the locked outputs at the provided block id - std::vector leaf_tuples; + std::vector outs; MDB_cursor_op op = MDB_SET; while (1) @@ -2254,25 +2254,25 @@ std::vector BlockchainLMDB::get_outs_at_ if (blk_id != block_id) throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); - const 
auto range_begin = ((const fcmp_pp::curve_trees::LeafTupleContext*)v_output.mv_data); - const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::LeafTupleContext); + const auto range_begin = ((const fcmp_pp::curve_trees::OutputPair*)v_output.mv_data); + const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::OutputPair); auto it = range_begin; // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it - if (leaf_tuples.size() == 1) + if (outs.size() == 1) ++it; while (it < range_end) { - leaf_tuples.push_back(*it); + outs.push_back(*it); ++it; } } TXN_POSTFIX_RDONLY(); - return leaf_tuples; + return outs; } void BlockchainLMDB::del_locked_outs_at_block_id(uint64_t block_id) @@ -6867,16 +6867,17 @@ void BlockchainLMDB::migrate_5_6() } // Prepare the output for insertion to the tree - const auto tuple_context = m_curve_trees->output_to_leaf_context(output_id, - std::move(output_data.pubkey), - std::move(output_data.commitment)); + const auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(output_data.pubkey), + .commitment = std::move(output_data.commitment) + }; // Get the block in which the output will unlock const uint64_t unlock_block = cryptonote::get_unlock_block_index(output_data.unlock_time, output_data.height); // Now add the output to the locked outputs table MDB_val_set(k_block_id, unlock_block); - MDB_val_set(v_output, tuple_context); + MDB_val_set(v_output, output_pair); // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height @@ -6884,7 +6885,7 @@ void BlockchainLMDB::migrate_5_6() if (result != MDB_SUCCESS && result != MDB_KEYEXIST) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); if (result == MDB_KEYEXIST) - MDEBUG("Dup output pubkey: " << tuple_context.output_pair.output_pubkey << " , 
output_id: " << output_id); + MDEBUG("Duplicate output pubkey: " << output_pair.output_pubkey << " , output_id: " << output_id); } } diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index d5f4e5ad139..9fb19abd6a2 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -368,7 +368,7 @@ class BlockchainLMDB : public BlockchainDB static int compare_string(const MDB_val *a, const MDB_val *b); // make private - virtual void grow_tree(std::vector &&new_leaves); + virtual void grow_tree(std::vector &&new_leaves); virtual void trim_tree(const uint64_t trim_n_leaf_tuples); @@ -388,7 +388,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ); virtual void remove_block(); @@ -447,7 +447,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t child_layer_idx, const uint64_t chunk_width) const; - std::vector get_outs_at_unlock_block_id(uint64_t block_id); + std::vector get_outs_at_unlock_block_id(uint64_t block_id); void del_locked_outs_at_block_id(uint64_t block_id); diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 81317b6a2d6..98d4260b2df 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,7 +116,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} - virtual void grow_tree(std::vector &&new_leaves) override {}; + virtual void grow_tree(std::vector &&new_leaves) override {}; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const 
uint64_t expected_n_leaf_tuples) const override { return false; }; virtual std::array get_tree_root() const override { return {}; }; @@ -149,7 +149,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/cryptonote_basic/cryptonote_format_utils.h b/src/cryptonote_basic/cryptonote_format_utils.h index c1757c7a702..e3a4644030c 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.h +++ b/src/cryptonote_basic/cryptonote_format_utils.h @@ -37,6 +37,7 @@ #include "include_base_utils.h" #include "crypto/crypto.h" #include "crypto/hash.h" +#include "fcmp_pp/curve_trees.h" #include #include diff --git a/src/fcmp_pp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt index 849b09593ab..554d1a8dcfd 100644 --- a/src/fcmp_pp/CMakeLists.txt +++ b/src/fcmp_pp/CMakeLists.txt @@ -51,7 +51,6 @@ endif() target_link_libraries(fcmp_pp PUBLIC cncrypto - cryptonote_basic epee PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 85c9a42a5ad..c688c4e5a11 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -28,7 +28,6 @@ #include "curve_trees.h" -#include "cryptonote_basic/cryptonote_format_utils.h" #include "fcmp_pp_crypto.h" #include "ringct/rctOps.h" @@ -645,61 +644,6 @@ static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_re // CurveTrees public member functions //---------------------------------------------------------------------------------------------------------------------- template<> -LeafTupleContext 
CurveTrees::output_to_leaf_context(const std::uint64_t output_id, - crypto::public_key &&output_pubkey, - rct::key &&commitment) const -{ - auto output_pair = OutputPair{ - .output_pubkey = std::move(output_pubkey), - .commitment = std::move(commitment) - }; - - return LeafTupleContext{ - .output_id = output_id, - .output_pair = std::move(output_pair) - }; -}; -//---------------------------------------------------------------------------------------------------------------------- -template <> -void CurveTrees::tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, - const std::vector &output_ids, - const uint64_t tx_height, - const bool miner_tx, - std::multimap &leaf_tuples_by_unlock_block_inout) const -{ - const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, tx_height); - - CHECK_AND_ASSERT_THROW_MES(tx.vout.size() == output_ids.size(), "unexpected size of output ids"); - - for (std::size_t i = 0; i < tx.vout.size(); ++i) - { - // TODO: this loop can be parallelized - const auto &out = tx.vout[i]; - - crypto::public_key output_public_key; - if (!cryptonote::get_output_public_key(out, output_public_key)) - throw std::runtime_error("Could not get an output public key from a tx output."); - - static_assert(CURRENT_TRANSACTION_VERSION == 2, "This section of code was written with 2 tx versions in mind. " - "Revisit this section and update for the new tx version."); - CHECK_AND_ASSERT_THROW_MES(tx.version == 1 || tx.version == 2, "encountered unexpected tx version"); - - if (!miner_tx && tx.version == 2) - CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); - - rct::key commitment = (miner_tx || tx.version != 2) - ? 
rct::zeroCommit(out.amount) - : tx.rct_signatures.outPk[i].mask; - - auto tuple_context = output_to_leaf_context(output_ids[i], - std::move(output_public_key), - std::move(commitment)); - - leaf_tuples_by_unlock_block_inout.emplace(unlock_block, std::move(tuple_context)); - } -} -//---------------------------------------------------------------------------------------------------------------------- -template<> CurveTrees::LeafTuple CurveTrees::leaf_tuple( const OutputPair &output_pair) const { @@ -762,7 +706,7 @@ template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const + std::vector &&new_leaf_tuples) const { TreeExtension tree_extension; tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples; @@ -770,21 +714,17 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio if (new_leaf_tuples.empty()) return tree_extension; - // Sort the leaves by order they appear in the chain - const auto sort_fn = [](const LeafTupleContext &a, const LeafTupleContext &b) { return a.output_id < b.output_id; }; - std::sort(new_leaf_tuples.begin(), new_leaf_tuples.end(), sort_fn); - - // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, - // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since - // they cannot be inserted to the tree. + // Convert outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, and place + // the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since they cannot be + // inserted to the tree. 
std::vector flattened_leaves; flattened_leaves.reserve(new_leaf_tuples.size() * LEAF_TUPLE_SIZE); tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (auto &l : new_leaf_tuples) + for (auto &o : new_leaf_tuples) { // TODO: this loop can be parallelized LeafTuple leaf; - try { leaf = leaf_tuple(l.output_pair); } + try { leaf = leaf_tuple(o); } catch(...) { // Invalid outputs can't be added to the tree @@ -797,7 +737,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio flattened_leaves.emplace_back(std::move(leaf.C_x)); // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output pair in the db to save 32 bytes - tree_extension.leaves.tuples.emplace_back(std::move(l.output_pair)); + tree_extension.leaves.tuples.emplace_back(std::move(o)); } if (flattened_leaves.empty()) @@ -862,7 +802,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio template CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const; + std::vector &&new_leaf_tuples) const; //---------------------------------------------------------------------------------------------------------------------- template std::vector CurveTrees::get_trim_instructions( diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 0c1ce347826..6c7d2b45075 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -141,13 +141,7 @@ struct OutputPair final }; static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs"); -// Contextual wrapper for output pairs, ready to be conerted into leaf tuples -struct LeafTupleContext final -{ - // Global output ID useful to order the leaf tuple for insertion into the tree - uint64_t output_id; - OutputPair output_pair; -}; +using OutputsByUnlockBlock = std::map>; 
//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- @@ -233,18 +227,6 @@ class CurveTrees //member functions public: - // Prepare output pubkey and commitment for insertion into the tree - LeafTupleContext output_to_leaf_context(const std::uint64_t output_id, - crypto::public_key &&output_pubkey, - rct::key &&commitment) const; - - // Convert cryptonote tx outs to contexts ready to be converted to leaf tuples, grouped by unlock height - void tx_outs_to_leaf_tuple_contexts(const cryptonote::transaction &tx, - const std::vector &output_ids, - const uint64_t tx_height, - const bool miner_tx, - std::multimap &leaf_tuples_by_unlock_block_inout) const; - // Convert output pairs into leaf tuples, from {output pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x} LeafTuple leaf_tuple(const OutputPair &outpout_pair) const; @@ -255,7 +237,7 @@ class CurveTrees // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const; + std::vector &&new_leaf_tuples) const; // Get instructions useful for trimming all existing layers in the tree std::vector get_trim_instructions( diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index dfcc4580c6c..31f5ad10462 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -65,7 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } 
diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index dfab14252bb..78c09271595 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,7 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({blk, blk_hash}); diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 86769b60fcd..fa204509914 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -744,17 +744,15 @@ void CurveTreesGlobalTree::log_tree() //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t old_n_leaf_tuples, const std::size_t new_n_leaf_tuples) { - std::vector tuples; - tuples.reserve(new_n_leaf_tuples); + std::vector output_pairs; + output_pairs.reserve(new_n_leaf_tuples); for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) { - const std::uint64_t output_id = old_n_leaf_tuples + i; - // Generate random output tuple crypto::secret_key o,c; crypto::public_key O,C; @@ -762,12 +760,15 @@ static const std::vector generate_random crypto::generate_keys(C, c, c, false); rct::key C_key = rct::pk2rct(C); - auto tuple_context = curve_trees.output_to_leaf_context(output_id, std::move(O), std::move(C_key)); + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(O), + .commitment = std::move(C_key) + }; - tuples.emplace_back(std::move(tuple_context)); + 
output_pairs.emplace_back(std::move(output_pair)); } - return tuples; + return output_pairs; } //---------------------------------------------------------------------------------------------------------------------- static const Selene::Scalar generate_random_selene_scalar() diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index dcc0d4d93c2..7e8ba1434b4 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -55,7 +55,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block ) override { blocks.push_back(blk); } diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index a77e431ea86..07d33fb7233 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,7 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const std::multimap& leaf_tuples_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } From 83d56597e20a0e44d3d49ee7fbadad684d0a39eb Mon Sep 17 00:00:00 2001 From: j-berman Date: Sat, 10 Aug 2024 02:20:55 -0700 Subject: [PATCH 094/127] Clean lmbd impl - Reverted back to storing output_id in locked_outputs table; it's required to make sure outputs enter the tree in chain order I see no other simple way. 
- Removed unnecessary comments and db flags (MDB_APPENDDUP already makes sure key/value doesn't already exist, and when inserting, every global output id should be unique, so should never get that error) --- src/blockchain_db/blockchain_db.cpp | 11 +++++-- src/blockchain_db/blockchain_db.h | 2 +- src/blockchain_db/lmdb/db_lmdb.cpp | 46 +++++++++++++---------------- src/blockchain_db/lmdb/db_lmdb.h | 4 +-- src/blockchain_db/testdb.h | 2 +- src/fcmp_pp/curve_trees.cpp | 18 ++++++----- src/fcmp_pp/curve_trees.h | 18 +++++++++-- tests/unit_tests/curve_trees.cpp | 17 +++++++---- 8 files changed, 70 insertions(+), 48 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 5a4ab837850..c3e58e4ee8e 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -74,17 +74,22 @@ static void get_outs_by_unlock_block(const cryptonote::transaction &tx, auto output_pair = fcmp_pp::curve_trees::OutputPair{ .output_pubkey = std::move(output_public_key), - .commitment = std::move(commitment) + .commitment = std::move(commitment) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_ids[i], + .output_pair = std::move(output_pair) }; if (outs_by_unlock_block_inout.find(unlock_block) == outs_by_unlock_block_inout.end()) { - auto new_vec = std::vector{std::move(output_pair)}; + auto new_vec = std::vector{std::move(output_context)}; outs_by_unlock_block_inout[unlock_block] = std::move(new_vec); } else { - outs_by_unlock_block_inout[unlock_block].emplace_back(std::move(output_pair)); + outs_by_unlock_block_inout[unlock_block].emplace_back(std::move(output_context)); } } } diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 6ff97809fb1..8b4fac9b9fe 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1783,7 +1783,7 @@ class BlockchainDB virtual bool for_all_alt_blocks(std::function f, bool 
include_blob = false) const = 0; // TODO: description and make private - virtual void grow_tree(std::vector &&new_leaves) = 0; + virtual void grow_tree(std::vector &&new_leaves) = 0; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index f7637dad8a8..5efed60dd45 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -199,7 +199,7 @@ namespace * * spent_keys input hash - * - * locked_outputs block ID [{output pubkey, commitment}...] + * locked_outputs block ID [{output ID, output pubkey, commitment}...] * leaves leaf_idx {output pubkey, commitment} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * @@ -887,9 +887,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l MDB_val_set(k_block_id, locked_output.first); MDB_val_set(v_output, locked_output.second); - // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent - result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP | MDB_NODUPDATA); - if (result != MDB_SUCCESS && result != MDB_KEYEXIST) + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); } @@ -1344,7 +1343,7 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } -void BlockchainLMDB::grow_tree(std::vector &&new_leaves) +void BlockchainLMDB::grow_tree(std::vector &&new_leaves) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); if (new_leaves.empty()) @@ -1375,10 +1374,7 @@ void BlockchainLMDB::grow_tree(std::vector &&n MDB_val_copy k(i + leaves.start_leaf_tuple_idx); MDB_val_set(v, leaves.tuples[i]); - // TODO: according to the docs, MDB_APPEND isn't supposed to perform any key comparisons to maximize efficiency. 
- // Adding MDB_NOOVERWRITE I assume re-introduces a key comparison. Benchmark NOOVERWRITE here - // MDB_NOOVERWRITE makes sure key doesn't already exist - int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPEND | MDB_NOOVERWRITE); + int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPEND); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); } @@ -1471,10 +1467,7 @@ void BlockchainLMDB::grow_layer(const std::unique_ptr &curve, lv.child_chunk_hash = curve->to_bytes(ext.hashes[i]); MDB_val_set(v, lv); - // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency. - // Adding MDB_NODUPDATA I assume re-introduces a key comparison. Benchmark MDB_NODUPDATA here - // MDB_NODUPDATA makes sure key/data pair doesn't already exist - int result = mdb_cursor_put(m_cur_layers, &k, &v, MDB_APPENDDUP | MDB_NODUPDATA); + int result = mdb_cursor_put(m_cur_layers, &k, &v, MDB_APPENDDUP); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add hash: ", result).c_str())); } @@ -2225,7 +2218,7 @@ bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, return audit_complete; } -std::vector BlockchainLMDB::get_outs_at_unlock_block_id( +std::vector BlockchainLMDB::get_outs_at_unlock_block_id( uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -2238,7 +2231,7 @@ std::vector BlockchainLMDB::get_outs_at_unlock MDB_val v_output; // Get all the locked outputs at the provided block id - std::vector outs; + std::vector outs; MDB_cursor_op op = MDB_SET; while (1) @@ -2254,8 +2247,8 @@ std::vector BlockchainLMDB::get_outs_at_unlock if (blk_id != block_id) throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); - const auto range_begin = ((const fcmp_pp::curve_trees::OutputPair*)v_output.mv_data); - const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::OutputPair); 
+ const auto range_begin = ((const fcmp_pp::curve_trees::OutputContext*)v_output.mv_data); + const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::OutputContext); auto it = range_begin; @@ -2474,7 +2467,6 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); - mdb_set_dupsort(txn, m_leaves, compare_uint64); mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); mdb_set_dupsort(txn, m_block_info, compare_uint64); @@ -6867,9 +6859,14 @@ void BlockchainLMDB::migrate_5_6() } // Prepare the output for insertion to the tree - const auto output_pair = fcmp_pp::curve_trees::OutputPair{ + auto output_pair = fcmp_pp::curve_trees::OutputPair{ .output_pubkey = std::move(output_data.pubkey), - .commitment = std::move(output_data.commitment) + .commitment = std::move(output_data.commitment) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_id, + .output_pair = std::move(output_pair) }; // Get the block in which the output will unlock @@ -6877,15 +6874,12 @@ void BlockchainLMDB::migrate_5_6() // Now add the output to the locked outputs table MDB_val_set(k_block_id, unlock_block); - MDB_val_set(v_output, output_pair); + MDB_val_set(v_output, output_context); - // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); - if (result != MDB_SUCCESS && result != MDB_KEYEXIST) + result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, 0); + if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); - if (result == 
MDB_KEYEXIST) - MDEBUG("Duplicate output pubkey: " << output_pair.output_pubkey << " , output_id: " << output_id); } } diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 9fb19abd6a2..2e108d554fd 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -368,7 +368,7 @@ class BlockchainLMDB : public BlockchainDB static int compare_string(const MDB_val *a, const MDB_val *b); // make private - virtual void grow_tree(std::vector &&new_leaves); + virtual void grow_tree(std::vector &&new_leaves); virtual void trim_tree(const uint64_t trim_n_leaf_tuples); @@ -447,7 +447,7 @@ class BlockchainLMDB : public BlockchainDB const uint64_t child_layer_idx, const uint64_t chunk_width) const; - std::vector get_outs_at_unlock_block_id(uint64_t block_id); + std::vector get_outs_at_unlock_block_id(uint64_t block_id); void del_locked_outs_at_block_id(uint64_t block_id); diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 98d4260b2df..91ebf8a1f7a 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,7 +116,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} - virtual void grow_tree(std::vector &&new_leaves) override {}; + virtual void grow_tree(std::vector &&new_leaves) override {}; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; virtual std::array get_tree_root() const override { return {}; }; diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index c688c4e5a11..e12df47edef 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -706,7 +706,7 
@@ template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const + std::vector &&new_leaf_tuples) const { TreeExtension tree_extension; tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples; @@ -714,9 +714,13 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio if (new_leaf_tuples.empty()) return tree_extension; - // Convert outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, and place - // the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since they cannot be - // inserted to the tree. + // Sort the leaves by order they appear in the chain + const auto sort_fn = [](const OutputContext &a, const OutputContext &b) { return a.output_id < b.output_id; }; + std::sort(new_leaf_tuples.begin(), new_leaf_tuples.end(), sort_fn); + + // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, + // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since + // they cannot be inserted to the tree. std::vector flattened_leaves; flattened_leaves.reserve(new_leaf_tuples.size() * LEAF_TUPLE_SIZE); tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); @@ -724,7 +728,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio { // TODO: this loop can be parallelized LeafTuple leaf; - try { leaf = leaf_tuple(o); } + try { leaf = leaf_tuple(o.output_pair); } catch(...) 
{ // Invalid outputs can't be added to the tree @@ -737,7 +741,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio flattened_leaves.emplace_back(std::move(leaf.C_x)); // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output pair in the db to save 32 bytes - tree_extension.leaves.tuples.emplace_back(std::move(o)); + tree_extension.leaves.tuples.emplace_back(std::move(o.output_pair)); } if (flattened_leaves.empty()) @@ -802,7 +806,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio template CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const; + std::vector &&new_leaf_tuples) const; //---------------------------------------------------------------------------------------------------------------------- template std::vector CurveTrees::get_trim_instructions( diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 6c7d2b45075..4d6f48d867f 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -134,14 +134,26 @@ struct TrimLayerInstructions final // - Output pairs do NOT necessarily have torsion cleared. We need the output pubkey as it exists in the chain in order // to derive the correct I (when deriving {O.x, I.x, C.x}). Torsion clearing O before deriving I from O would enable // spending a torsioned output once before the fcmp++ fork and again with a different key image via fcmp++. 
+#pragma pack(push, 1) struct OutputPair final { crypto::public_key output_pubkey; rct::key commitment; }; -static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs"); -using OutputsByUnlockBlock = std::map>; +// Contextual wrapper for the output +struct OutputContext final +{ + // Output's global id in the chain, used to insert the output in the tree in the order it entered the chain + uint64_t output_id; + OutputPair output_pair; +}; +#pragma pack(pop) + +static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs"); +static_assert(sizeof(OutputContext) == (8+32+32), "db expects 72 bytes for output context"); + +using OutputsByUnlockBlock = std::map>; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- @@ -237,7 +249,7 @@ class CurveTrees // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const; + std::vector &&new_leaf_tuples) const; // Get instructions useful for trimming all existing layers in the tree std::vector get_trim_instructions( diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index fa204509914..a16ffebc945 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -744,15 +744,17 @@ void CurveTreesGlobalTree::log_tree() //---------------------------------------------------------------------------------------------------------------------- // Test helpers //---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, +static const 
std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, const std::size_t old_n_leaf_tuples, const std::size_t new_n_leaf_tuples) { - std::vector output_pairs; - output_pairs.reserve(new_n_leaf_tuples); + std::vector outs; + outs.reserve(new_n_leaf_tuples); for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) { + const std::uint64_t output_id = old_n_leaf_tuples + i; + // Generate random output tuple crypto::secret_key o,c; crypto::public_key O,C; @@ -765,10 +767,15 @@ static const std::vector generate_random_leave .commitment = std::move(C_key) }; - output_pairs.emplace_back(std::move(output_pair)); + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_id, + .output_pair = std::move(output_pair) + }; + + outs.emplace_back(std::move(output_context)); } - return output_pairs; + return outs; } //---------------------------------------------------------------------------------------------------------------------- static const Selene::Scalar generate_random_selene_scalar() From 67f5546d1098cd073acd985071ed2467cb176b4b Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 13 Aug 2024 09:24:43 -0700 Subject: [PATCH 095/127] lmdb touchup && OutputsByUnlockBlock map -> unordered_map --- src/blockchain_db/lmdb/db_lmdb.cpp | 28 +++++++++++++++------------- src/fcmp_pp/curve_trees.h | 2 +- tests/unit_tests/hardfork.cpp | 2 +- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 5efed60dd45..decf32d909b 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -882,14 +882,16 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l CURSOR(locked_outputs) // Add the locked outputs from this block to the locked outputs table - for (const auto &locked_output : outs_by_unlock_block) + for (const auto &unlock_block : outs_by_unlock_block) { - MDB_val_set(k_block_id, locked_output.first); - 
MDB_val_set(v_output, locked_output.second); - - result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + MDB_val_set(k_block_id, unlock_block.first); + for (const auto &locked_output : unlock_block.second) + { + MDB_val_set(v_output, locked_output); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + } } // we use weight as a proxy for size, since we don't have size but weight is >= size @@ -6718,14 +6720,12 @@ void BlockchainLMDB::migrate_5_6() result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); - lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); - mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); lmdb_db_open(txn, "tmp_last_output", MDB_INTEGERKEY | MDB_CREATE, m_tmp_last_output, "Failed to open db handle for m_tmp_last_output"); txn.commit(); if (!m_batch_transactions) set_batch_transactions(true); - const std::size_t BATCH_SIZE = 1000; + const std::size_t BATCH_SIZE = 10000; batch_start(BATCH_SIZE); txn.m_txn = m_write_txn->m_txn; @@ -6790,7 +6790,8 @@ void BlockchainLMDB::migrate_5_6() if (result != MDB_NOTFOUND) { cached_last_o = *(const tmp_output_cache*)v_last_output.mv_data; - MDEBUG("Found cached output " << cached_last_o.ok.output_id); + MDEBUG("Found cached output " << cached_last_o.ok.output_id + << ", migrated " << cached_last_o.n_outputs_read << " outputs already"); found_cached_output = true; // Set k and v so we can continue the migration from that output @@ -6876,8 +6877,9 @@ void BlockchainLMDB::migrate_5_6() MDB_val_set(k_block_id, unlock_block); 
MDB_val_set(v_output, output_context); - // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height - result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, 0); + // MDB_NODUPDATA because all output id's should be unique + // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by output_id + result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); } diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 4d6f48d867f..39c949d5d1c 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -153,7 +153,7 @@ struct OutputContext final static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs"); static_assert(sizeof(OutputContext) == (8+32+32), "db expects 72 bytes for output context"); -using OutputsByUnlockBlock = std::map>; +using OutputsByUnlockBlock = std::unordered_map>; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index 7e8ba1434b4..bd97784aabc 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -55,7 +55,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash - , const fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back(blk); } From 918befb0f5acfa4949b53152c9f768c678cf4be3 Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 13 Aug 2024 09:25:43 -0700 Subject: [PATCH 096/127] new_leaf_tuples -> new_outputs --- 
src/blockchain_db/blockchain_db.cpp | 2 +- src/blockchain_db/blockchain_db.h | 2 +- src/blockchain_db/lmdb/db_lmdb.cpp | 6 +++--- src/blockchain_db/lmdb/db_lmdb.h | 2 +- src/blockchain_db/testdb.h | 2 +- src/fcmp_pp/curve_trees.cpp | 16 ++++++++-------- src/fcmp_pp/curve_trees.h | 2 +- tests/unit_tests/curve_trees.cpp | 16 ++++++++-------- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index c3e58e4ee8e..2c0cb503318 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -69,7 +69,7 @@ static void get_outs_by_unlock_block(const cryptonote::transaction &tx, CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); rct::key commitment = (miner_tx || tx.version != 2) - ? rct::zeroCommit(out.amount) // Needs ringct + ? rct::zeroCommit(out.amount) : tx.rct_signatures.outPk[i].mask; auto output_pair = fcmp_pp::curve_trees::OutputPair{ diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 8b4fac9b9fe..aa973fb6cba 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1783,7 +1783,7 @@ class BlockchainDB virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; // TODO: description and make private - virtual void grow_tree(std::vector &&new_leaves) = 0; + virtual void grow_tree(std::vector &&new_outputs) = 0; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index decf32d909b..0011bebba2d 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1345,10 +1345,10 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) } } -void BlockchainLMDB::grow_tree(std::vector &&new_leaves) +void BlockchainLMDB::grow_tree(std::vector &&new_outputs) { 
LOG_PRINT_L3("BlockchainLMDB::" << __func__); - if (new_leaves.empty()) + if (new_outputs.empty()) return; check_open(); @@ -1366,7 +1366,7 @@ void BlockchainLMDB::grow_tree(std::vector // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); - const auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_leaves)); + const auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_outputs)); // Insert the leaves // TODO: grow_leaves diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 2e108d554fd..6276aebd222 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -368,7 +368,7 @@ class BlockchainLMDB : public BlockchainDB static int compare_string(const MDB_val *a, const MDB_val *b); // make private - virtual void grow_tree(std::vector &&new_leaves); + virtual void grow_tree(std::vector &&new_outputs); virtual void trim_tree(const uint64_t trim_n_leaf_tuples); diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 91ebf8a1f7a..ee268bb937e 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -116,7 +116,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} - virtual void grow_tree(std::vector &&new_leaves) override {}; + virtual void grow_tree(std::vector &&new_outputs) override {}; virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; virtual std::array 
get_tree_root() const override { return {}; }; diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index e12df47edef..0bae506967a 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -706,25 +706,25 @@ template typename CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const + std::vector &&new_outputs) const { TreeExtension tree_extension; tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples; - if (new_leaf_tuples.empty()) + if (new_outputs.empty()) return tree_extension; - // Sort the leaves by order they appear in the chain + // Sort the outputs by order they appear in the chain const auto sort_fn = [](const OutputContext &a, const OutputContext &b) { return a.output_id < b.output_id; }; - std::sort(new_leaf_tuples.begin(), new_leaf_tuples.end(), sort_fn); + std::sort(new_outputs.begin(), new_outputs.end(), sort_fn); // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since // they cannot be inserted to the tree. 
std::vector flattened_leaves; - flattened_leaves.reserve(new_leaf_tuples.size() * LEAF_TUPLE_SIZE); - tree_extension.leaves.tuples.reserve(new_leaf_tuples.size()); - for (auto &o : new_leaf_tuples) + flattened_leaves.reserve(new_outputs.size() * LEAF_TUPLE_SIZE); + tree_extension.leaves.tuples.reserve(new_outputs.size()); + for (auto &o : new_outputs) { // TODO: this loop can be parallelized LeafTuple leaf; @@ -806,7 +806,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio template CurveTrees::TreeExtension CurveTrees::get_tree_extension( const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, - std::vector &&new_leaf_tuples) const; + std::vector &&new_outputs) const; //---------------------------------------------------------------------------------------------------------------------- template std::vector CurveTrees::get_trim_instructions( diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 39c949d5d1c..1c551e22ea7 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -246,7 +246,7 @@ class CurveTrees std::vector flatten_leaves(std::vector &&leaves) const; // Take in the existing number of leaf tuples and the existing last hash in each layer in the tree, as well as new - // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree + // outputs to add to the tree, and return a tree extension struct that can be used to extend a tree TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples, const LastHashes &existing_last_hashes, std::vector &&new_leaf_tuples) const; diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index a16ffebc945..b5b1dc206e9 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -800,13 +800,13 @@ static bool grow_tree(CurveTreesV1 &curve_trees, global_tree.log_last_hashes(last_hashes); - auto new_leaf_tuples = generate_random_leaves(curve_trees, 
old_n_leaf_tuples, new_n_leaf_tuples); + auto new_outputs = generate_random_leaves(curve_trees, old_n_leaf_tuples, new_n_leaf_tuples); // Get a tree extension object to the existing tree using randomly generated leaves // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, last_hashes, - std::move(new_leaf_tuples)); + std::move(new_outputs)); global_tree.log_tree_extension(tree_extension); @@ -884,18 +884,18 @@ static bool grow_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - auto init_leaf_tuples = generate_random_leaves(*curve_trees, 0, init_leaves); + auto init_outputs = generate_random_leaves(*curve_trees, 0, init_leaves); - test_db.m_db->grow_tree(std::move(init_leaf_tuples)); + test_db.m_db->grow_tree(std::move(init_outputs)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, "failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " << ext_leaves << " leaves"); - auto ext_leaf_tuples = generate_random_leaves(*curve_trees, init_leaves, ext_leaves); + auto ext_outputs = generate_random_leaves(*curve_trees, init_leaves, ext_leaves); - test_db.m_db->grow_tree(std::move(ext_leaf_tuples)); + test_db.m_db->grow_tree(std::move(ext_outputs)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves + ext_leaves), false, "failed to extend tree in db"); @@ -917,9 +917,9 @@ static bool trim_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); - auto init_leaf_tuples = generate_random_leaves(*curve_trees, 0, init_leaves); + auto init_outputs = generate_random_leaves(*curve_trees, 0, init_leaves); - test_db.m_db->grow_tree(std::move(init_leaf_tuples)); + 
test_db.m_db->grow_tree(std::move(init_outputs)); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, "failed to add initial leaves to db"); From ee19361ea0b518794820756ad54351a44fcbd9a9 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 14 Aug 2024 08:58:57 -0700 Subject: [PATCH 097/127] Remove extra gcc install fixes windows build --- .github/workflows/build.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 317534c0b29..73741297794 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -74,8 +74,6 @@ jobs: curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst pacman --noconfirm -U mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst mingw-w64-x86_64-protobuf-21.9-1-any.pkg.tar.zst - # Update binutils if MinGW due to https://github.com/rust-lang/rust/issues/112368 - pacman -Syu --needed mingw-w64-x86_64-gcc --noconfirm - name: build run: | ${{env.CCACHE_SETTINGS}} From 47d47bdd20af498129ef46f55c7095ffd5511a93 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 14 Aug 2024 11:42:30 -0700 Subject: [PATCH 098/127] fcmp++: proof len from inputs *AND merkle tree depth --- .../cryptonote_boost_serialization.h | 6 ++++++ src/fcmp_pp/proof.h | 5 +++-- src/ringct/rctTypes.h | 5 ++++- tests/unit_tests/serialization.cpp | 13 +++++++++---- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/src/cryptonote_basic/cryptonote_boost_serialization.h b/src/cryptonote_basic/cryptonote_boost_serialization.h index 81da98a7819..cbdaf507bd0 100644 --- a/src/cryptonote_basic/cryptonote_boost_serialization.h +++ b/src/cryptonote_basic/cryptonote_boost_serialization.h @@ -357,7 +357,10 @@ namespace boost if (ver >= 1u) a & x.CLSAGs; if (ver >= 3u) + { + a & x.curve_trees_tree_depth; a & x.fcmp_pp; + } if (x.rangeSigs.empty()) a & 
x.pseudoOuts; } @@ -391,7 +394,10 @@ namespace boost if (ver >= 1u) a & x.p.CLSAGs; if (ver >= 3u) + { + a & x.p.curve_trees_tree_depth; a & x.p.fcmp_pp; + } if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus || x.type == rct::RCTTypeFcmpPlusPlus) a & x.p.pseudoOuts; } diff --git a/src/fcmp_pp/proof.h b/src/fcmp_pp/proof.h index f01cdb267b5..24f91fae9e7 100644 --- a/src/fcmp_pp/proof.h +++ b/src/fcmp_pp/proof.h @@ -36,10 +36,11 @@ namespace fcmp_pp // Byte buffer containing the fcmp++ proof using FcmpPpProof = std::vector; -static inline std::size_t proof_len(const std::size_t n_inputs) +static inline std::size_t proof_len(const std::size_t n_inputs, const uint8_t curve_trees_tree_depth) { // TODO: implement - return n_inputs * 4; + static_assert(sizeof(std::size_t) >= sizeof(uint8_t), "unexpected size of size_t"); + return n_inputs * (std::size_t)curve_trees_tree_depth * 2; }; }//namespace fcmp_pp diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index d0090251243..946f520a2ca 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -426,6 +426,7 @@ namespace rct { std::vector MGs; // simple rct has N, full has 1 std::vector CLSAGs; keyV pseudoOuts; //C - for simple rct + uint8_t curve_trees_tree_depth; // for fcmp++ fcmp_pp::FcmpPpProof fcmp_pp; // when changing this function, update cryptonote::get_pruned_transaction_weight @@ -501,9 +502,10 @@ namespace rct { if (type == RCTTypeFcmpPlusPlus) { + FIELD(curve_trees_tree_depth) ar.tag("fcmp_pp"); ar.begin_object(); - const std::size_t proof_len = fcmp_pp::proof_len(inputs); + const std::size_t proof_len = fcmp_pp::proof_len(inputs, curve_trees_tree_depth); if (!typename Archive::is_saving()) fcmp_pp.resize(proof_len); if (fcmp_pp.size() != proof_len) @@ -628,6 +630,7 @@ namespace rct { FIELD(bulletproofs_plus) FIELD(MGs) FIELD(CLSAGs) + FIELD(curve_trees_tree_depth) FIELD(fcmp_pp) FIELD(pseudoOuts) 
END_SERIALIZE() diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index b4ce6c46b45..05dd44da1ba 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1311,8 +1311,11 @@ TEST(Serialization, tx_fcmp_pp) const std::size_t n_inputs = 2; const std::size_t n_outputs = 3; + const uint8_t curve_trees_tree_depth = 3; - const auto make_dummy_fcmp_pp_tx = []() -> transaction + const std::size_t proof_len = fcmp_pp::proof_len(n_inputs, curve_trees_tree_depth); + + const auto make_dummy_fcmp_pp_tx = [curve_trees_tree_depth, proof_len]() -> transaction { transaction tx; @@ -1369,9 +1372,11 @@ TEST(Serialization, tx_fcmp_pp) const crypto::hash referenceBlock{0x01}; tx.rct_signatures.referenceBlock = referenceBlock; + // Set the curve trees merkle tree depth + tx.rct_signatures.p.curve_trees_tree_depth = curve_trees_tree_depth; + // 1 fcmp++ proof fcmp_pp::FcmpPpProof fcmp_pp; - const std::size_t proof_len = fcmp_pp::proof_len(n_inputs); fcmp_pp.reserve(proof_len); for (std::size_t i = 0; i < proof_len; ++i) fcmp_pp.push_back(i); @@ -1399,7 +1404,7 @@ TEST(Serialization, tx_fcmp_pp) transaction tx = make_dummy_fcmp_pp_tx(); // Extend fcmp++ proof - ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp_pp::proof_len(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == proof_len); tx.rct_signatures.p.fcmp_pp.push_back(0x01); string blob; @@ -1411,7 +1416,7 @@ TEST(Serialization, tx_fcmp_pp) transaction tx = make_dummy_fcmp_pp_tx(); // Shorten the fcmp++ proof - ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp_pp::proof_len(n_inputs)); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == proof_len); ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() > 1); tx.rct_signatures.p.fcmp_pp.pop_back(); From 41b1985f6331c251a2cad1816b97a771b6596686 Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 3 Sep 2024 13:38:44 -0700 Subject: [PATCH 099/127] Fix compile on arm-linux-androideabi (32-bit) using a 
newer NDK - @tobtoht reported err with size_t -> uint64_t - Also address some PR comments (@vtnerd namespace comment + @boog900 freeing unallocated rust type) - Some cleaning --- src/fcmp_pp/curve_trees.cpp | 3 +-- src/fcmp_pp/curve_trees.h | 17 ++++++++++------- src/fcmp_pp/fcmp_pp_rust/fcmp++.h | 7 ++++--- src/fcmp_pp/fcmp_pp_rust/src/lib.rs | 4 ++++ 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 0bae506967a..fd249db51ea 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -644,8 +644,7 @@ static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_re // CurveTrees public member functions //---------------------------------------------------------------------------------------------------------------------- template<> -CurveTrees::LeafTuple CurveTrees::leaf_tuple( - const OutputPair &output_pair) const +CurveTrees::LeafTuple CurveTrees::leaf_tuple(const OutputPair &output_pair) const { const crypto::public_key &output_pubkey = output_pair.output_pubkey; const rct::key &commitment = output_pair.commitment; diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 1c551e22ea7..9214740b462 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -164,12 +164,15 @@ template class CurveTrees { public: - CurveTrees(std::unique_ptr &&c1, std::unique_ptr &&c2, const uint64_t c1_width, const uint64_t c2_width): - m_c1{std::move(c1)}, - m_c2{std::move(c2)}, - m_c1_width{c1_width}, - m_c2_width{c2_width}, - m_leaf_layer_chunk_width{LEAF_TUPLE_SIZE * c2_width} + CurveTrees(std::unique_ptr &&c1, + std::unique_ptr &&c2, + const std::size_t c1_width, + const std::size_t c2_width): + m_c1{std::move(c1)}, + m_c2{std::move(c2)}, + m_c1_width{c1_width}, + m_c2_width{c2_width}, + m_leaf_layer_chunk_width{LEAF_TUPLE_SIZE * c2_width} { assert(c1_width > 0); assert(c2_width > 0); @@ -240,7 +243,7 @@ class CurveTrees //member functions public: 
// Convert output pairs into leaf tuples, from {output pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x} - LeafTuple leaf_tuple(const OutputPair &outpout_pair) const; + LeafTuple leaf_tuple(const OutputPair &output_pair) const; // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...] std::vector flatten_leaves(std::vector &&leaves) const; diff --git a/src/fcmp_pp/fcmp_pp_rust/fcmp++.h b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h index 34d019fc9bd..1e767f9d090 100644 --- a/src/fcmp_pp/fcmp_pp_rust/fcmp++.h +++ b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h @@ -1,10 +1,12 @@ -namespace fcmp_pp_rust { #include #include #include #include #include + +namespace fcmp_pp_rust +{ // ----- deps C bindings ----- /// Inner integer type that the [`Limb`] newtype wraps. @@ -137,5 +139,4 @@ CResult hash_trim_selene(SelenePoint existing_hash, SeleneScalar child_to_grow_back); } // extern "C" - -} +}//namespace fcmp_pp diff --git a/src/fcmp_pp/fcmp_pp_rust/src/lib.rs b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs index cb256f14fa7..4d9c37c75f1 100644 --- a/src/fcmp_pp/fcmp_pp_rust/src/lib.rs +++ b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs @@ -152,6 +152,7 @@ pub extern "C" fn hash_grow_helios( if let Some(hash) = hash { CResult::ok(hash) } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 CResult::err(()) } } @@ -174,6 +175,7 @@ pub extern "C" fn hash_trim_helios( if let Some(hash) = hash { CResult::ok(hash) } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 CResult::err(()) } } @@ -196,6 +198,7 @@ pub extern "C" fn hash_grow_selene( if let Some(hash) = hash { CResult::ok(hash) } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 CResult::err(()) } } @@ -218,6 +221,7 @@ pub extern "C" fn hash_trim_selene( if let Some(hash) = hash { CResult::ok(hash) } else { + // TODO: return 
defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 CResult::err(()) } } From 16ff6a9e68d49c30b8ed9f50ad4683a061c35147 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 4 Sep 2024 15:37:50 -0700 Subject: [PATCH 100/127] fcmp++: trim tree when removing a block - trim_tree now re-adds trimmed outputs back to the locked outputs table. remove_output then deletes from the locked output table. - Since outputs added to the tree in a specific block may have originated from distinct younger blocks (thanks to distinct unlock times), we need to store the 8 byte output_id in the leaves table as well, so that in the event of a reorg, upon removing outputs from the tree we can add them back to the locked outputs table in the correct order. --- src/blockchain_db/blockchain_db.h | 2 +- src/blockchain_db/lmdb/db_lmdb.cpp | 94 ++++++++++++++++++++++++++---- src/blockchain_db/lmdb/db_lmdb.h | 4 +- src/blockchain_db/testdb.h | 2 +- src/fcmp_pp/curve_trees.cpp | 2 +- src/fcmp_pp/curve_trees.h | 4 +- src/fcmp_pp/fcmp_pp_rust/fcmp++.h | 2 +- tests/unit_tests/curve_trees.cpp | 11 ++-- 8 files changed, 98 insertions(+), 23 deletions(-) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index aa973fb6cba..595587d192e 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1785,7 +1785,7 @@ class BlockchainDB // TODO: description and make private virtual void grow_tree(std::vector &&new_outputs) = 0; - virtual void trim_tree(const uint64_t trim_n_leaf_tuples) = 0; + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) = 0; // TODO: description virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const = 0; diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 0011bebba2d..95eb7d2855f 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -200,7 +200,7 @@ namespace * 
spent_keys input hash - * * locked_outputs block ID [{output ID, output pubkey, commitment}...] - * leaves leaf_idx {output pubkey, commitment} + * leaves leaf_idx {output ID, output pubkey, commitment} * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] * * txpool_meta txn hash txn metadata @@ -828,8 +828,7 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l // Grow the tree with outputs that unlock at this block height auto unlocked_outputs = this->get_outs_at_unlock_block_id(m_height); - if (!unlocked_outputs.empty()) - this->grow_tree(std::move(unlocked_outputs)); + this->grow_tree(std::move(unlocked_outputs)); // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked outputs table this->del_locked_outs_at_block_id(m_height); @@ -935,6 +934,15 @@ void BlockchainLMDB::remove_block() if ((result = mdb_cursor_del(m_cur_block_info, 0))) throw1(DB_ERROR(lmdb_error("Failed to add removal of block info to db transaction: ", result).c_str())); + + // Get n_leaf_tuples from the new tip so we can trim the curve trees tree to the new tip + const uint64_t new_n_leaf_tuples = get_top_block_n_leaf_tuples(); + const uint64_t old_n_leaf_tuples = bi->bi_n_leaf_tuples; + + if (new_n_leaf_tuples > old_n_leaf_tuples) + throw1(DB_ERROR("Unexpected: more leaf tuples are in prev block, tree is expected to only grow")); + + this->trim_tree(old_n_leaf_tuples - new_n_leaf_tuples, bi->bi_height/*trim_block_id*/); } uint64_t BlockchainLMDB::add_transaction_data(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) @@ -1244,6 +1252,32 @@ void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_in { throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db transaction", result).c_str())); } + + // Remove output from locked outputs table. 
We expect the output to be in the + // locked outputs table because remove_output is called when removing the + // top block from the chain, and all outputs from the top block are expected + // to be locked until they are at least 10 blocks old. + CURSOR(locked_outputs); + + const uint64_t unlock_block = cryptonote::get_unlock_block_index(ok->data.unlock_time, ok->data.height); + + MDB_val_set(k_block_id, unlock_block); + MDB_val_set(v_output, ok->output_id); + + result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + { + throw0(DB_ERROR("Unexpected: output not found in m_cur_locked_outputs")); + } + else if (result) + { + throw1(DB_ERROR(lmdb_error("Error adding removal of locked output to db transaction", result).c_str())); + } + + result = mdb_cursor_del(m_cur_locked_outputs, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting locked output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + result = mdb_cursor_del(m_cur_output_txs, 0); if (result) throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); @@ -1475,7 +1509,7 @@ void BlockchainLMDB::grow_layer(const std::unique_ptr &curve, } } -void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) +void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); if (trim_n_leaf_tuples == 0) @@ -1485,6 +1519,7 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) mdb_txn_cursors *m_cursors = &m_wcursors; CURSOR(leaves) + CURSOR(locked_outputs) CURSOR(layers) const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); @@ -1511,11 +1546,12 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) // Trim the leaves // TODO: trim_leaves + MDB_val_set(k_block_id, trim_block_id); for 
(uint64_t i = 0; i < trim_n_leaf_tuples; ++i) { - uint64_t last_leaf_tuple_idx = (old_n_leaf_tuples - 1 - i); + uint64_t leaf_tuple_idx = (old_n_leaf_tuples - trim_n_leaf_tuples + i); - MDB_val_copy k(last_leaf_tuple_idx); + MDB_val_copy k(leaf_tuple_idx); MDB_val v; int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_SET); if (result == MDB_NOTFOUND) @@ -1523,11 +1559,20 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples) if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + // Re-add the output to the locked output table in order. The output should + // be in the outputs tables. + const auto o = *(fcmp_pp::curve_trees::OutputContext *)v.mv_data; + MDB_val_set(v_output, o); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to re-add locked output: ", result).c_str())); + + // Delete the leaf result = mdb_cursor_del(m_cur_leaves, 0); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Error removing leaf: ", result).c_str())); - MDEBUG("Successfully removed leaf at last_leaf_tuple_idx: " << last_leaf_tuple_idx); + MDEBUG("Successfully removed leaf at leaf_tuple_idx: " << leaf_tuple_idx); } // Trim the layers @@ -1599,6 +1644,7 @@ void BlockchainLMDB::trim_layer(const std::unique_ptr &curve, CURSOR(layers) + MDEBUG("Trimming layer " << layer_idx); MDB_val_copy k(layer_idx); // Get the number of existing elements in the layer @@ -1685,6 +1731,32 @@ uint64_t BlockchainLMDB::get_num_leaf_tuples() const return n_leaf_tuples; } +uint64_t BlockchainLMDB::get_top_block_n_leaf_tuples() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(block_info); + + // if no blocks, return 0 + uint64_t m_height = height(); + if (m_height == 0) + { + return 0; + } + + MDB_val_copy k(m_height - 1); + MDB_val h = k; + int result = 0; + if ((result = 
mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &h, MDB_GET_BOTH))) + throw1(BLOCK_DNE(lmdb_error("Failed to get top block: ", result).c_str())); + + const uint64_t n_leaf_tuples = ((mdb_block_info *)h.mv_data)->bi_n_leaf_tuples; + TXN_POSTFIX_RDONLY(); + return n_leaf_tuples; +} + std::array BlockchainLMDB::get_tree_root() const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); @@ -1811,10 +1883,10 @@ fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_ if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); - const auto output_pair = *(fcmp_pp::curve_trees::OutputPair *)v.mv_data; + const auto o = *(fcmp_pp::curve_trees::OutputContext *)v.mv_data; // TODO: parallelize calls to this function - auto leaf = m_curve_trees->leaf_tuple(output_pair); + auto leaf = m_curve_trees->leaf_tuple(o.output_pair); leaves_to_trim.emplace_back(std::move(leaf.O_x)); leaves_to_trim.emplace_back(std::move(leaf.I_x)); @@ -1987,8 +2059,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - const auto output_pair = *(fcmp_pp::curve_trees::OutputPair *)v.mv_data; - auto leaf = m_curve_trees->leaf_tuple(output_pair); + const auto o = *(fcmp_pp::curve_trees::OutputContext *)v.mv_data; + auto leaf = m_curve_trees->leaf_tuple(o.output_pair); leaf_tuples_chunk.emplace_back(std::move(leaf)); diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 6276aebd222..508c6f14568 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -370,7 +370,7 @@ class BlockchainLMDB : public BlockchainDB // make private virtual void grow_tree(std::vector &&new_outputs); - virtual void trim_tree(const uint64_t trim_n_leaf_tuples); + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id); virtual bool audit_tree(const 
uint64_t expected_n_leaf_tuples) const; @@ -431,6 +431,8 @@ class BlockchainLMDB : public BlockchainDB virtual uint64_t get_num_leaf_tuples() const; + uint64_t get_top_block_n_leaf_tuples() const; + virtual std::array get_tree_root() const; fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index ee268bb937e..5129e226007 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -117,7 +117,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} virtual void grow_tree(std::vector &&new_outputs) override {}; - virtual void trim_tree(const uint64_t trim_n_leaf_tuples) override {}; + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) override {}; virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; virtual std::array get_tree_root() const override { return {}; }; virtual uint64_t get_num_leaf_tuples() const override { return 0; }; diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index fd249db51ea..3ed54a35ca2 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -740,7 +740,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio flattened_leaves.emplace_back(std::move(leaf.C_x)); // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output pair in the db to save 32 bytes - tree_extension.leaves.tuples.emplace_back(std::move(o.output_pair)); + tree_extension.leaves.tuples.emplace_back(std::move(o)); } if (flattened_leaves.empty()) diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 9214740b462..c5dfe7d1829 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -197,9 +197,9 @@ class CurveTrees struct Leaves final { // 
Starting leaf tuple index in the leaf layer - uint64_t start_leaf_tuple_idx{0}; + uint64_t start_leaf_tuple_idx{0}; // Contiguous leaves in a tree that start at the start_idx - std::vector tuples; + std::vector tuples; }; // A struct useful to extend an existing tree diff --git a/src/fcmp_pp/fcmp_pp_rust/fcmp++.h b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h index 1e767f9d090..81f7d02829f 100644 --- a/src/fcmp_pp/fcmp_pp_rust/fcmp++.h +++ b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h @@ -139,4 +139,4 @@ CResult hash_trim_selene(SelenePoint existing_hash, SeleneScalar child_to_grow_back); } // extern "C" -}//namespace fcmp_pp +}//namespace fcmp_pp_rust diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index b5b1dc206e9..baedc1a246e 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -169,9 +169,9 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e "unexpected leaf start idx"); m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); - for (const auto &output_pair : tree_extension.leaves.tuples) + for (const auto &o : tree_extension.leaves.tuples) { - auto leaf = m_curve_trees.leaf_tuple(output_pair); + auto leaf = m_curve_trees.leaf_tuple(o.output_pair); m_tree.leaves.emplace_back(CurveTreesV1::LeafTuple{ .O_x = std::move(leaf.O_x), @@ -641,14 +641,14 @@ void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension MDEBUG("Leaf start idx: " << tree_extension.leaves.start_leaf_tuple_idx); for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) { - const auto &output_pair = tree_extension.leaves.tuples[i]; + const auto &output_pair = tree_extension.leaves.tuples[i].output_pair; const auto leaf = m_curve_trees.leaf_tuple(output_pair); const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); - MDEBUG("Leaf tuple idx " << 
(tree_extension.leaves.start_leaf_tuple_idx) + MDEBUG("Leaf tuple idx " << (tree_extension.leaves.start_leaf_tuple_idx + (i * CurveTreesV1::LEAF_TUPLE_SIZE)) << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); } @@ -926,7 +926,8 @@ static bool trim_tree_db(const std::size_t init_leaves, MDEBUG("Successfully added initial " << init_leaves << " leaves to db, trimming by " << trim_leaves << " leaves"); - test_db.m_db->trim_tree(trim_leaves); + // Can use 0 from trim_block_id since it's unused in tests + test_db.m_db->trim_tree(trim_leaves, 0); CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves - trim_leaves), false, "failed to trim tree in db"); From 0a604a9786a1228b3d8fbfa846b31f81c7da473d Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 4 Sep 2024 21:13:48 -0700 Subject: [PATCH 101/127] fcmp++: Enable trimming to empty tree --- src/blockchain_db/lmdb/db_lmdb.cpp | 87 ++++++++++++++++++++---------- src/fcmp_pp/curve_trees.cpp | 12 ++++- 2 files changed, 68 insertions(+), 31 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 95eb7d2855f..70b719e1926 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -921,6 +921,8 @@ void BlockchainLMDB::remove_block() // must use h now; deleting from m_block_info will invalidate it mdb_block_info *bi = (mdb_block_info *)h.mv_data; + const uint64_t block_id = bi->bi_height; + const uint64_t old_n_leaf_tuples = bi->bi_n_leaf_tuples; blk_height bh = {bi->bi_hash, 0}; h.mv_data = (void *)&bh; h.mv_size = sizeof(bh); @@ -937,12 +939,10 @@ void BlockchainLMDB::remove_block() // Get n_leaf_tuples from the new tip so we can trim the curve trees tree to the new tip const uint64_t new_n_leaf_tuples = get_top_block_n_leaf_tuples(); - const uint64_t old_n_leaf_tuples = bi->bi_n_leaf_tuples; - if (new_n_leaf_tuples > old_n_leaf_tuples) throw1(DB_ERROR("Unexpected: more leaf tuples are in prev block, tree is expected to only 
grow")); - - this->trim_tree(old_n_leaf_tuples - new_n_leaf_tuples, bi->bi_height/*trim_block_id*/); + const uint64_t trim_n_leaf_tuples = old_n_leaf_tuples - new_n_leaf_tuples; + this->trim_tree(trim_n_leaf_tuples, block_id/*trim_block_id*/); } uint64_t BlockchainLMDB::add_transaction_data(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) @@ -1523,7 +1523,7 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t CURSOR(layers) const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); - CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); const auto trim_instructions = m_curve_trees->get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); @@ -1542,8 +1542,6 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, "unexpected new total leaves"); - MDEBUG("Trimming " << trim_n_leaf_tuples << " leaf tuples"); - // Trim the leaves // TODO: trim_leaves MDB_val_set(k_block_id, trim_block_id); @@ -1579,12 +1577,13 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t // TODO: trim_layers const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; - CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions"); + + const std::size_t n_layers = c2_layer_reductions.size() + c1_layer_reductions.size(); bool use_c2 = true; uint64_t c2_idx = 0; uint64_t c1_idx = 0; - for (uint64_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) + for 
(uint64_t i = 0; i < n_layers; ++i) { if (use_c2) { @@ -1606,29 +1605,51 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t // Trim any remaining layers in layers after the root // TODO: trim_leftovers_after_root - const uint64_t expected_root_idx = c2_layer_reductions.size() + c1_layer_reductions.size() - 1; - while (1) + if (n_layers > 0) { - MDB_val k, v; - int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + const uint64_t expected_root_idx = n_layers - 1; + while (1) + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); - const uint64_t last_layer_idx = *(uint64_t *)k.mv_data; + const uint64_t last_layer_idx = *(uint64_t *)k.mv_data; - if (last_layer_idx > expected_root_idx) - { - // Delete all elements in layers after the root - result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); - } - else if (last_layer_idx < expected_root_idx) - { - throw0(DB_ERROR("Encountered unexpected last elem in tree before the root")); + if (last_layer_idx > expected_root_idx) + { + // Delete all elements in layers after the root + result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elems after root: ", result).c_str())); + } + else if (last_layer_idx < expected_root_idx) + { + throw0(DB_ERROR("Encountered unexpected last elem in tree before the root")); + } + else // last_layer_idx == expected_root_idx + { + // We've trimmed all layers past the root, we're done + break; + } } - else // last_layer_idx == expected_root_idx + } + else // n_layers == 0 + { + // We're removing everything + while (1) { - // We've trimmed all layers past the root, we're done - break; + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, 
MDB_LAST); + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elems after root: ", result).c_str())); } } } @@ -1842,6 +1863,11 @@ fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_ const std::vector &trim_instructions) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); + + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim last_chunk_children_to_trim; + if (trim_instructions.empty()) + return last_chunk_children_to_trim; + check_open(); CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); @@ -1849,7 +1875,6 @@ fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_ TXN_PREFIX_RDONLY(); RCURSOR(layers) - fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim last_chunk_children_to_trim; auto &c1_last_children_out = last_chunk_children_to_trim.c1_children; auto &c2_last_children_out = last_chunk_children_to_trim.c2_children; @@ -1965,12 +1990,16 @@ fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_t const std::vector &trim_instructions) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; + if (trim_instructions.empty()) + return last_hashes_out; + check_open(); TXN_PREFIX_RDONLY(); RCURSOR(layers) - fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; // Traverse the tree layer-by-layer starting at the layer closest to leaf layer uint64_t layer_idx = 0; diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 3ed54a35ca2..98d18cf6115 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -812,11 +812,14 @@ std::vector CurveTrees::get_trim_instructions( const uint64_t old_n_leaf_tuples, const 
uint64_t trim_n_leaf_tuples) const { - CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); std::vector trim_instructions; + if (old_n_leaf_tuples == trim_n_leaf_tuples) + return trim_instructions; + // Get trim instructions for the leaf layer { const uint64_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; @@ -865,7 +868,12 @@ typename CurveTrees::TreeReduction CurveTrees::get_tree_reductio { TreeReduction tree_reduction_out; - CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "missing trim instructions"); + if (trim_instructions.empty()) + { + tree_reduction_out.new_total_leaf_tuples = 0; + return tree_reduction_out; + } + CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0, "unexpected new total leaves"); const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; From acc7d05885e35241cfa16cad6297ede0fbca6b5e Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 5 Sep 2024 12:27:45 -0700 Subject: [PATCH 102/127] fcmp++ LMDB: use dummy key optimization on leaves table - Save 8 bytes per leaf by using DUPFIXED table and dummy "zerokval" key and attaching leaf_idx as prefix to data to serve as DUPSORT key --- src/blockchain_db/lmdb/db_lmdb.cpp | 51 ++++++++++++++++++------------ 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 70b719e1926..4ee7adb4273 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -214,7 +214,8 @@ namespace * attached as a prefix on the Data to serve as the DUPSORT key. * (DUPFIXED saves 8 bytes per record.) 
* - * The output_amounts and layers tables don't use a dummy key, but use DUPSORT + * The output_amounts, locked_outputs, and layers tables don't use a + * dummy key, but use DUPSORT. */ const char* const LMDB_BLOCKS = "blocks"; const char* const LMDB_BLOCK_HEIGHTS = "block_heights"; @@ -375,10 +376,15 @@ typedef struct outtx { uint64_t local_index; } outtx; -struct layer_val { +typedef struct mdb_leaf { + uint64_t leaf_idx; + fcmp_pp::curve_trees::OutputContext output_context; +} mdb_leaf; + +typedef struct layer_val { uint64_t child_chunk_idx; std::array child_chunk_hash; -}; +} layer_val; std::atomic mdb_txn_safe::num_active_txns{0}; std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT; @@ -1400,17 +1406,18 @@ void BlockchainLMDB::grow_tree(std::vector // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); - const auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_outputs)); + auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_outputs)); // Insert the leaves // TODO: grow_leaves - const auto &leaves = tree_extension.leaves; + auto &leaves = tree_extension.leaves; for (uint64_t i = 0; i < leaves.tuples.size(); ++i) { - MDB_val_copy k(i + leaves.start_leaf_tuple_idx); - MDB_val_set(v, leaves.tuples[i]); + const uint64_t leaf_idx = i + leaves.start_leaf_tuple_idx; + mdb_leaf val{.leaf_idx = leaf_idx, .output_context = std::move(leaves.tuples[i])}; + MDB_val_set(v, val); - int result = mdb_cursor_put(m_cur_leaves, &k, &v, MDB_APPEND); + int result = mdb_cursor_put(m_cur_leaves, (MDB_val *)&zerokval, &v, MDB_APPENDDUP); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); } @@ -1550,8 +1557,8 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t 
uint64_t leaf_tuple_idx = (old_n_leaf_tuples - trim_n_leaf_tuples + i); MDB_val_copy k(leaf_tuple_idx); - MDB_val v; - int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_SET); + MDB_val v = k; + int result = mdb_cursor_get(m_cur_leaves, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); if (result == MDB_NOTFOUND) throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR if (result != MDB_SUCCESS) @@ -1559,8 +1566,9 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t // Re-add the output to the locked output table in order. The output should // be in the outputs tables. - const auto o = *(fcmp_pp::curve_trees::OutputContext *)v.mv_data; - MDB_val_set(v_output, o); + const auto *o = (mdb_leaf *)v.mv_data; + MDB_val_set(v_output, o->output_context); + MDEBUG("Re-adding locked output_id: " << o->output_context.output_id << " , unlock block: " << trim_block_id); result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to re-add locked output: ", result).c_str())); @@ -1742,7 +1750,7 @@ uint64_t BlockchainLMDB::get_num_leaf_tuples() const if (result == MDB_NOTFOUND) n_leaf_tuples = 0; else if (result == MDB_SUCCESS) - n_leaf_tuples = (1 + (*(const uint64_t*)k.mv_data)); + n_leaf_tuples = 1 + ((const mdb_leaf*)v.mv_data)->leaf_idx; else throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); } @@ -1896,22 +1904,22 @@ fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_ const uint64_t leaf_tuple_idx = idx / fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; MDB_val_copy k(leaf_tuple_idx); + MDB_val v = k; - MDB_cursor_op leaf_op = MDB_SET; + MDB_cursor_op leaf_op = MDB_GET_BOTH; do { - MDB_val v; - int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); + int result = mdb_cursor_get(m_cur_leaves, (MDB_val *)&zerokval, &v, leaf_op); leaf_op = MDB_NEXT; if (result == MDB_NOTFOUND) 
throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); - const auto o = *(fcmp_pp::curve_trees::OutputContext *)v.mv_data; + const auto *db_leaf = (mdb_leaf *)v.mv_data; // TODO: parallelize calls to this function - auto leaf = m_curve_trees->leaf_tuple(o.output_pair); + auto leaf = m_curve_trees->leaf_tuple(db_leaf->output_context.output_pair); leaves_to_trim.emplace_back(std::move(leaf.O_x)); leaves_to_trim.emplace_back(std::move(leaf.I_x)); @@ -2088,8 +2096,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - const auto o = *(fcmp_pp::curve_trees::OutputContext *)v.mv_data; - auto leaf = m_curve_trees->leaf_tuple(o.output_pair); + const auto *o = (mdb_leaf *)v.mv_data; + auto leaf = m_curve_trees->leaf_tuple(o->output_context.output_pair); leaf_tuples_chunk.emplace_back(std::move(leaf)); @@ -2547,7 +2555,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); - lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); + lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta"); @@ -2570,6 +2578,7 
@@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); + mdb_set_dupsort(txn, m_leaves, compare_uint64); mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); mdb_set_dupsort(txn, m_block_info, compare_uint64); From 2890e8cf3806264b92f18e8b86b53ce6699520cb Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 5 Sep 2024 18:45:21 -0700 Subject: [PATCH 103/127] fcmp++: LMDB touchups - fixes usage of MDB_NEXT and MDB_NEXT_DUP, allowing the db call to set key and value --- src/blockchain_db/lmdb/db_lmdb.cpp | 43 +++++++++++++----------------- 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 4ee7adb4273..44d8a69927a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1529,6 +1529,8 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t CURSOR(locked_outputs) CURSOR(layers) + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to trim tree"); + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); @@ -1645,20 +1647,10 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t } else // n_layers == 0 { - // We're removing everything - while (1) - { - MDB_val k, v; - int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); - if (result == MDB_NOTFOUND) - break; - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); - - result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); - if (result != MDB_SUCCESS) - throw0(DB_ERROR(lmdb_error("Error removing elems after root: ", 
result).c_str())); - } + // Empty the layers table, no elems should remain + int result = mdb_drop(*m_write_txn, m_layers, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error emptying layers table: ", result).c_str())); } } @@ -1903,13 +1895,13 @@ fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_ "expected divisble by leaf tuple size"); const uint64_t leaf_tuple_idx = idx / fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; - MDB_val_copy k(leaf_tuple_idx); - MDB_val v = k; + MDB_val k = zerokval; + MDB_val_copy v(leaf_tuple_idx); MDB_cursor_op leaf_op = MDB_GET_BOTH; do { - int result = mdb_cursor_get(m_cur_leaves, (MDB_val *)&zerokval, &v, leaf_op); + int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); leaf_op = MDB_NEXT; if (result == MDB_NOTFOUND) throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR @@ -2076,6 +2068,10 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const uint64_t child_chunk_idx = 0; MDB_cursor_op leaf_op = MDB_FIRST; MDB_cursor_op parent_op = MDB_FIRST; + + MDB_val_copy k_parent(layer_idx); + MDB_val_set(v_parent, child_chunk_idx); + while (1) { // Get next leaf chunk @@ -2086,17 +2082,17 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const MINFO("Auditing layer " << layer_idx << ", child_chunk_idx " << child_chunk_idx); // Iterate until chunk is full or we get to the end of all leaves + MDB_val k_leaf, v_leaf; while (1) { - MDB_val k, v; - int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); + int result = mdb_cursor_get(m_cur_leaves, &k_leaf, &v_leaf, leaf_op); leaf_op = MDB_NEXT; if (result == MDB_NOTFOUND) break; if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); - const auto *o = (mdb_leaf *)v.mv_data; + const auto *o = (mdb_leaf *)v_leaf.mv_data; auto leaf = m_curve_trees->leaf_tuple(o->output_context.output_pair); 
leaf_tuples_chunk.emplace_back(std::move(leaf)); @@ -2106,9 +2102,6 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const } // Get the actual leaf chunk hash from the db - MDB_val_copy k_parent(layer_idx); - MDB_val_set(v_parent, child_chunk_idx); - MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx); int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, parent_op); parent_op = MDB_NEXT_DUP; @@ -2127,7 +2120,7 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const if (result != MDB_SUCCESS) throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); - if (layer_idx != *(uint64_t*)k_parent.mv_data) + if (layer_idx != *(uint64_t*)k_parent.mv_data || child_chunk_idx != ((layer_val *)v_parent.mv_data)->child_chunk_idx) throw0(DB_ERROR("unexpected parent encountered")); // Get the expected leaf chunk hash @@ -2200,7 +2193,7 @@ bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, TXN_PREFIX_RDONLY(); - // Open separate cursors for child and parent layer + // Open two separate cursors for child and parent layer MDB_cursor *child_layer_cursor, *parent_layer_cursor; int c_result = mdb_cursor_open(m_txn, m_layers, &child_layer_cursor); From 072a82dd3f62625c5b929329a36030cc3745b965 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 5 Sep 2024 18:46:48 -0700 Subject: [PATCH 104/127] fcmp++: tests now test trimming to empty tree --- tests/unit_tests/curve_trees.cpp | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index baedc1a246e..0e9236d0e26 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -273,7 +273,9 @@ void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_r // Trim the layers const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; const auto 
&c1_layer_reductions = tree_reduction.c1_layer_reductions; - CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions"); + CHECK_AND_ASSERT_THROW_MES(c2_layer_reductions.size() == c1_layer_reductions.size() + || c2_layer_reductions.size() == (c1_layer_reductions.size() + 1), + "unexpected mismatch of c2 and c1 layer reductions"); bool use_c2 = true; std::size_t c2_idx = 0; @@ -343,8 +345,10 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c { CurveTreesV1::LastChunkChildrenToTrim all_children_to_trim; + if (trim_instructions.empty()) + return all_children_to_trim; + // Leaf layer - CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); const auto &trim_leaf_layer_instructions = trim_instructions[0]; std::vector leaves_to_trim; @@ -428,7 +432,9 @@ CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim( const std::vector &trim_instructions) const { CurveTreesV1::LastHashes last_hashes; - CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); + + if (trim_instructions.empty()) + return last_hashes; bool parent_is_c2 = true; std::size_t c1_idx = 0; @@ -500,9 +506,14 @@ bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) const auto &c1_layers = m_tree.c1_layers; const auto &c2_layers = m_tree.c2_layers; - CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree"); CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves"); + if (leaves.empty()) + { + CHECK_AND_ASSERT_MES(c2_layers.empty() && c1_layers.empty(), false, "expected empty tree"); + return true; + } + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), false, "unexpected mismatch of c2 and c1 layers"); @@ -851,7 +862,7 @@ static bool trim_tree_in_memory(const std::size_t 
trim_n_leaf_tuples, CurveTreesGlobalTree &&global_tree) { const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); - CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); // Trim the global tree by `trim_n_leaf_tuples` @@ -1014,17 +1025,17 @@ TEST(curve_trees, trim_tree) ++leaves_needed_for_n_layers; // First initialize the tree with init_leaves - for (std::size_t init_leaves = 2; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) + for (std::size_t init_leaves = 1; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); CurveTreesGlobalTree global_tree(*curve_trees); ASSERT_TRUE(grow_tree(*curve_trees, global_tree, init_leaves)); - // Then extend the tree with ext_leaves + // Then trim by trim_leaves for (std::size_t trim_leaves = 1; trim_leaves < leaves_needed_for_n_layers; ++trim_leaves) { - if (trim_leaves >= init_leaves) + if (trim_leaves > init_leaves) continue; // Copy the already existing global tree From 0688538110cd188db9172da0c52b6de022c52e35 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 5 Sep 2024 23:22:51 -0700 Subject: [PATCH 105/127] fcmp++: add test to trim the tree then grow again after trimming --- tests/unit_tests/curve_trees.cpp | 142 ++++++++++++++++++++++--------- 1 file changed, 104 insertions(+), 38 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 0e9236d0e26..738fe392215 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -859,7 +859,7 @@ static bool grow_tree_in_memory(const std::size_t init_leaves, } 
//---------------------------------------------------------------------------------------------------------------------- static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples, - CurveTreesGlobalTree &&global_tree) + CurveTreesGlobalTree &global_tree) { const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); @@ -883,35 +883,41 @@ static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples, return true; } //---------------------------------------------------------------------------------------------------------------------- -static bool grow_tree_db(const std::size_t init_leaves, - const std::size_t ext_leaves, +static bool grow_tree_db(const std::size_t n_leaves, + const std::size_t expected_total_n_leaves, std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { - INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); + CHECK_AND_ASSERT_MES(expected_total_n_leaves >= n_leaves, false, "unexpected n_leaves provideded"); + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_total_n_leaves - n_leaves), + false, "unexpected starting n leaf tuples in db"); - { - cryptonote::db_wtxn_guard guard(test_db.m_db); + auto leaves = generate_random_leaves(*curve_trees, 0, n_leaves); - LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); + test_db.m_db->grow_tree(std::move(leaves)); - auto init_outputs = generate_random_leaves(*curve_trees, 0, init_leaves); + return test_db.m_db->audit_tree(expected_total_n_leaves); +} +//---------------------------------------------------------------------------------------------------------------------- +static bool grow_and_extend_tree_db(const std::size_t init_leaves, + const std::size_t ext_leaves, + std::shared_ptr curve_trees, + unit_test::BlockchainLMDBTest &test_db) +{ + cryptonote::db_wtxn_guard 
guard(test_db.m_db); - test_db.m_db->grow_tree(std::move(init_outputs)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, - "failed to add initial leaves to db"); + LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " - << ext_leaves << " leaves"); + CHECK_AND_ASSERT_MES(grow_tree_db(init_leaves, init_leaves, curve_trees, test_db), false, + "failed to add initial leaves to db"); - auto ext_outputs = generate_random_leaves(*curve_trees, init_leaves, ext_leaves); + MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " + << ext_leaves << " leaves"); - test_db.m_db->grow_tree(std::move(ext_outputs)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves + ext_leaves), false, - "failed to extend tree in db"); + CHECK_AND_ASSERT_MES(grow_tree_db(ext_leaves, init_leaves + ext_leaves, curve_trees, test_db), false, + "failed to extend tree in db"); - MDEBUG("Successfully extended tree in db by " << ext_leaves << " leaves"); - } + MDEBUG("Successfully extended tree in db by " << ext_leaves << " leaves"); return true; } @@ -921,29 +927,25 @@ static bool trim_tree_db(const std::size_t init_leaves, std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { - INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); - - { - cryptonote::db_wtxn_guard guard(test_db.m_db); + cryptonote::db_wtxn_guard guard(test_db.m_db); - LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); + LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); - auto init_outputs = generate_random_leaves(*curve_trees, 0, init_leaves); + auto init_outputs = generate_random_leaves(*curve_trees, 0, init_leaves); - test_db.m_db->grow_tree(std::move(init_outputs)); - 
CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, - "failed to add initial leaves to db"); + test_db.m_db->grow_tree(std::move(init_outputs)); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, + "failed to add initial leaves to db"); - MDEBUG("Successfully added initial " << init_leaves << " leaves to db, trimming by " - << trim_leaves << " leaves"); + MDEBUG("Successfully added initial " << init_leaves << " leaves to db, trimming by " + << trim_leaves << " leaves"); - // Can use 0 from trim_block_id since it's unused in tests - test_db.m_db->trim_tree(trim_leaves, 0); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves - trim_leaves), false, - "failed to trim tree in db"); + // Can use 0 from trim_block_id since it's unused in tests + test_db.m_db->trim_tree(trim_leaves, 0); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves - trim_leaves), false, + "failed to trim tree in db"); - MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); - } + MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); return true; } @@ -989,7 +991,9 @@ TEST(curve_trees, grow_tree) for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= leaves_needed_for_n_layers; ++ext_leaves) { ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, *curve_trees)); - ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); + + INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); + ASSERT_TRUE(grow_and_extend_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); } } } @@ -1040,9 +1044,71 @@ TEST(curve_trees, trim_tree) // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(trim_tree_in_memory(trim_leaves, tree_copy)); + + INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); + } + } +} 
+//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, trim_tree_then_grow) +{ + static const std::size_t grow_after_trim = 1; + + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 3; + + static_assert(helios_chunk_width > 1, "helios width must be > 1"); + static_assert(selene_chunk_width > 1, "selene width must be > 1"); + + LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width); + + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); + + // Constant for how deep we want the tree + static const std::size_t TEST_N_LAYERS = 2; + + // Number of leaves for which x number of layers is required + std::size_t leaves_needed_for_n_layers = selene_chunk_width; + for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) + { + const std::size_t width = i % 2 == 0 ? 
selene_chunk_width : helios_chunk_width; + leaves_needed_for_n_layers *= width; + } + + unit_test::BlockchainLMDBTest test_db; + + // Increment to test for off-by-1 + ++leaves_needed_for_n_layers; + + // First initialize the tree with init_leaves + for (std::size_t init_leaves = 1; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) + { + LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); + CurveTreesGlobalTree global_tree(*curve_trees); + + ASSERT_TRUE(grow_tree(*curve_trees, global_tree, init_leaves)); + + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves < leaves_needed_for_n_layers; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + + ASSERT_TRUE(trim_tree_in_memory(trim_leaves, tree_copy)); + ASSERT_TRUE(grow_tree(*curve_trees, tree_copy, grow_after_trim)); - ASSERT_TRUE(trim_tree_in_memory(trim_leaves, std::move(tree_copy))); + INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); + cryptonote::db_wtxn_guard guard(test_db.m_db); + const std::size_t expected_n_leaves = init_leaves - trim_leaves + grow_after_trim; + ASSERT_TRUE(grow_tree_db(grow_after_trim, expected_n_leaves, curve_trees, test_db)); } } } From b71f2440bc5eae7903adec880046bfb2e5e2bdfe Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 5 Sep 2024 23:25:34 -0700 Subject: [PATCH 106/127] fcmp++: better test names --- tests/unit_tests/curve_trees.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 738fe392215..55e153016eb 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -831,7 +831,7 @@ static bool grow_tree(CurveTreesV1 &curve_trees, return global_tree.audit_tree(expected_n_leaf_tuples); } 
//---------------------------------------------------------------------------------------------------------------------- -static bool grow_tree_in_memory(const std::size_t init_leaves, +static bool grow_and_extend_tree_in_memory(const std::size_t init_leaves, const std::size_t ext_leaves, CurveTreesV1 &curve_trees) { @@ -990,7 +990,7 @@ TEST(curve_trees, grow_tree) // Then extend the tree with ext_leaves for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= leaves_needed_for_n_layers; ++ext_leaves) { - ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, *curve_trees)); + ASSERT_TRUE(grow_and_extend_tree_in_memory(init_leaves, ext_leaves, *curve_trees)); INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); ASSERT_TRUE(grow_and_extend_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); From 0ff1eaf9485595df5871251f39ffb6a08e49e01f Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 6 Sep 2024 11:16:26 -0700 Subject: [PATCH 107/127] Use macro to de-dupe curve trees test logic --- src/blockchain_db/lmdb/db_lmdb.cpp | 3 +- tests/unit_tests/curve_trees.cpp | 120 +++++++++++------------------ 2 files changed, 44 insertions(+), 79 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 44d8a69927a..ac16cb3413a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1262,7 +1262,7 @@ void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_in // Remove output from locked outputs table. We expect the output to be in the // locked outputs table because remove_output is called when removing the // top block from the chain, and all outputs from the top block are expected - // to be locked until they are at least 10 blocks old. + // to be locked until they are at least 10 blocks old (10 is the lower bound). 
CURSOR(locked_outputs); const uint64_t unlock_block = cryptonote::get_unlock_block_index(ok->data.unlock_time, ok->data.height); @@ -1626,7 +1626,6 @@ void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); const uint64_t last_layer_idx = *(uint64_t *)k.mv_data; - if (last_layer_idx > expected_root_idx) { // Delete all elements in layers after the root diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 55e153016eb..9105fff51e5 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -801,7 +801,7 @@ static const Selene::Scalar generate_random_selene_scalar() return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x); } //---------------------------------------------------------------------------------------------------------------------- -static bool grow_tree(CurveTreesV1 &curve_trees, +static bool grow_tree_in_memory(CurveTreesV1 &curve_trees, CurveTreesGlobalTree &global_tree, const std::size_t new_n_leaf_tuples) { @@ -843,7 +843,7 @@ static bool grow_and_extend_tree_in_memory(const std::size_t init_leaves, // Initialize global tree with `init_leaves` MDEBUG("Adding " << init_leaves << " leaves to tree"); - bool res = grow_tree(curve_trees, global_tree, init_leaves); + bool res = grow_tree_in_memory(curve_trees, global_tree, init_leaves); CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); @@ -851,7 +851,7 @@ static bool grow_and_extend_tree_in_memory(const std::size_t init_leaves, // Then extend the global tree by `ext_leaves` MDEBUG("Extending tree by " << ext_leaves << " leaves"); - res = grow_tree(curve_trees, global_tree, ext_leaves); + res = grow_tree_in_memory(curve_trees, global_tree, ext_leaves); CHECK_AND_ASSERT_MES(res, false, "failed to extend tree in memory"); 
MDEBUG("Successfully extended by " << ext_leaves << " leaves in memory"); @@ -950,6 +950,24 @@ static bool trim_tree_db(const std::size_t init_leaves, return true; } //---------------------------------------------------------------------------------------------------------------------- +#define INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth) \ + static_assert(helios_chunk_width > 1, "helios width must be > 1"); \ + static_assert(selene_chunk_width > 1, "selene width must be > 1"); \ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); \ + \ + /* Number of leaves required for tree to reach given depth */ \ + std::size_t min_leaves_needed_for_tree_depth = selene_chunk_width; \ + for (std::size_t i = 1; i < tree_depth; ++i) \ + { \ + const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; \ + min_leaves_needed_for_tree_depth *= width; \ + } \ + \ + /* Increment to test for off-by-1 */ \ + ++min_leaves_needed_for_tree_depth; \ + \ + unit_test::BlockchainLMDBTest test_db; \ +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // Test //---------------------------------------------------------------------------------------------------------------------- @@ -959,36 +977,19 @@ TEST(curve_trees, grow_tree) static const std::size_t helios_chunk_width = 3; static const std::size_t selene_chunk_width = 2; - static_assert(helios_chunk_width > 1, "helios width must be > 1"); - static_assert(selene_chunk_width > 1, "selene width must be > 1"); + static const std::size_t tree_depth = 4; LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width - << ", selene chunk width " << selene_chunk_width); - - const auto curve_trees = 
fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); - - // Constant for how deep we want the tree - static const std::size_t TEST_N_LAYERS = 4; - - // Number of leaves for which x number of layers is required - std::size_t leaves_needed_for_n_layers = selene_chunk_width; - for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) - { - const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; - leaves_needed_for_n_layers *= width; - } - - unit_test::BlockchainLMDBTest test_db; + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); - // Increment to test for off-by-1 - ++leaves_needed_for_n_layers; + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); // First initialize the tree with init_leaves - for (std::size_t init_leaves = 1; init_leaves < leaves_needed_for_n_layers; ++init_leaves) + for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) { // TODO: init tree once, then extend a copy of that tree // Then extend the tree with ext_leaves - for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= leaves_needed_for_n_layers; ++ext_leaves) + for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= min_leaves_needed_for_tree_depth; ++ext_leaves) { ASSERT_TRUE(grow_and_extend_tree_in_memory(init_leaves, ext_leaves, *curve_trees)); @@ -1004,40 +1005,22 @@ TEST(curve_trees, trim_tree) static const std::size_t helios_chunk_width = 3; static const std::size_t selene_chunk_width = 3; - static_assert(helios_chunk_width > 1, "helios width must be > 1"); - static_assert(selene_chunk_width > 1, "selene width must be > 1"); + static const std::size_t tree_depth = 4; LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width - << ", selene chunk width " << selene_chunk_width); - - const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); - - // Constant for how deep we 
want the tree - static const std::size_t TEST_N_LAYERS = 4; - - // Number of leaves for which x number of layers is required - std::size_t leaves_needed_for_n_layers = selene_chunk_width; - for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) - { - const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; - leaves_needed_for_n_layers *= width; - } - - unit_test::BlockchainLMDBTest test_db; + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); - // Increment to test for off-by-1 - ++leaves_needed_for_n_layers; + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); // First initialize the tree with init_leaves - for (std::size_t init_leaves = 1; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) + for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); CurveTreesGlobalTree global_tree(*curve_trees); - - ASSERT_TRUE(grow_tree(*curve_trees, global_tree, init_leaves)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, init_leaves)); // Then trim by trim_leaves - for (std::size_t trim_leaves = 1; trim_leaves < leaves_needed_for_n_layers; ++trim_leaves) + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) { if (trim_leaves > init_leaves) continue; @@ -1054,46 +1037,29 @@ TEST(curve_trees, trim_tree) //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, trim_tree_then_grow) { - static const std::size_t grow_after_trim = 1; - // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree static const std::size_t helios_chunk_width = 3; static const std::size_t selene_chunk_width = 3; - static_assert(helios_chunk_width > 1, "helios width must be > 1"); - static_assert(selene_chunk_width > 1, "selene 
width must be > 1"); - - LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width - << ", selene chunk width " << selene_chunk_width); - - const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); + static const std::size_t tree_depth = 2; - // Constant for how deep we want the tree - static const std::size_t TEST_N_LAYERS = 2; - - // Number of leaves for which x number of layers is required - std::size_t leaves_needed_for_n_layers = selene_chunk_width; - for (std::size_t i = 1; i < TEST_N_LAYERS; ++i) - { - const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; - leaves_needed_for_n_layers *= width; - } + static const std::size_t grow_after_trim = 1; - unit_test::BlockchainLMDBTest test_db; + LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth + << ", then grow " << grow_after_trim << " leaf/leaves"); - // Increment to test for off-by-1 - ++leaves_needed_for_n_layers; + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); // First initialize the tree with init_leaves - for (std::size_t init_leaves = 1; init_leaves <= leaves_needed_for_n_layers; ++init_leaves) + for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); CurveTreesGlobalTree global_tree(*curve_trees); - - ASSERT_TRUE(grow_tree(*curve_trees, global_tree, init_leaves)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, init_leaves)); // Then trim by trim_leaves - for (std::size_t trim_leaves = 1; trim_leaves < leaves_needed_for_n_layers; ++trim_leaves) + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) { if (trim_leaves > init_leaves) continue; @@ -1102,12 +1068,12 @@ TEST(curve_trees, trim_tree_then_grow) 
CurveTreesGlobalTree tree_copy(global_tree); ASSERT_TRUE(trim_tree_in_memory(trim_leaves, tree_copy)); - ASSERT_TRUE(grow_tree(*curve_trees, tree_copy, grow_after_trim)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, grow_after_trim)); INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); cryptonote::db_wtxn_guard guard(test_db.m_db); - const std::size_t expected_n_leaves = init_leaves - trim_leaves + grow_after_trim; + const std::size_t expected_n_leaves = grow_after_trim + init_leaves - trim_leaves; ASSERT_TRUE(grow_tree_db(grow_after_trim, expected_n_leaves, curve_trees, test_db)); } } From 9784ced3db06f4c151f04f6ae7fed5ed831284bc Mon Sep 17 00:00:00 2001 From: j-berman Date: Sat, 7 Sep 2024 17:15:06 -0700 Subject: [PATCH 108/127] fcmp++ tests: init tree once in memory and grow copy --- tests/unit_tests/curve_trees.cpp | 75 +++++++++++++------------------- 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 9105fff51e5..40f0505b42c 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -803,10 +803,12 @@ static const Selene::Scalar generate_random_selene_scalar() //---------------------------------------------------------------------------------------------------------------------- static bool grow_tree_in_memory(CurveTreesV1 &curve_trees, CurveTreesGlobalTree &global_tree, + const std::size_t expected_old_n_leaf_tuples, const std::size_t new_n_leaf_tuples) { // Do initial tree reads const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); const CurveTreesV1::LastHashes last_hashes = global_tree.get_last_hashes(); global_tree.log_last_hashes(last_hashes); @@ -831,37 +833,12 @@ static bool grow_tree_in_memory(CurveTreesV1 &curve_trees, return 
global_tree.audit_tree(expected_n_leaf_tuples); } //---------------------------------------------------------------------------------------------------------------------- -static bool grow_and_extend_tree_in_memory(const std::size_t init_leaves, - const std::size_t ext_leaves, - CurveTreesV1 &curve_trees) -{ - LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree in memory, then extending by " - << ext_leaves << " leaves"); - - CurveTreesGlobalTree global_tree(curve_trees); - - // Initialize global tree with `init_leaves` - MDEBUG("Adding " << init_leaves << " leaves to tree"); - - bool res = grow_tree_in_memory(curve_trees, global_tree, init_leaves); - CHECK_AND_ASSERT_MES(res, false, "failed to add inital leaves to tree in memory"); - - MDEBUG("Successfully added initial " << init_leaves << " leaves to tree in memory"); - - // Then extend the global tree by `ext_leaves` - MDEBUG("Extending tree by " << ext_leaves << " leaves"); - - res = grow_tree_in_memory(curve_trees, global_tree, ext_leaves); - CHECK_AND_ASSERT_MES(res, false, "failed to extend tree in memory"); - - MDEBUG("Successfully extended by " << ext_leaves << " leaves in memory"); - return true; -} -//---------------------------------------------------------------------------------------------------------------------- -static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples, +static bool trim_tree_in_memory(const std::size_t expected_old_n_leaf_tuples, + const std::size_t trim_n_leaf_tuples, CurveTreesGlobalTree &global_tree) { const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); @@ -883,20 +860,19 @@ static bool trim_tree_in_memory(const std::size_t 
trim_n_leaf_tuples, return true; } //---------------------------------------------------------------------------------------------------------------------- -static bool grow_tree_db(const std::size_t n_leaves, - const std::size_t expected_total_n_leaves, +static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples, + const std::size_t n_leaves, std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { - CHECK_AND_ASSERT_MES(expected_total_n_leaves >= n_leaves, false, "unexpected n_leaves provideded"); - CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_total_n_leaves - n_leaves), + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), false, "unexpected starting n leaf tuples in db"); auto leaves = generate_random_leaves(*curve_trees, 0, n_leaves); test_db.m_db->grow_tree(std::move(leaves)); - return test_db.m_db->audit_tree(expected_total_n_leaves); + return test_db.m_db->audit_tree(expected_old_n_leaf_tuples + n_leaves); } //---------------------------------------------------------------------------------------------------------------------- static bool grow_and_extend_tree_db(const std::size_t init_leaves, @@ -908,13 +884,13 @@ static bool grow_and_extend_tree_db(const std::size_t init_leaves, LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - CHECK_AND_ASSERT_MES(grow_tree_db(init_leaves, init_leaves, curve_trees, test_db), false, + CHECK_AND_ASSERT_MES(grow_tree_db(0, init_leaves, curve_trees, test_db), false, "failed to add initial leaves to db"); MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " << ext_leaves << " leaves"); - CHECK_AND_ASSERT_MES(grow_tree_db(ext_leaves, init_leaves + ext_leaves, curve_trees, test_db), false, + CHECK_AND_ASSERT_MES(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db), false, "failed to extend tree in db"); MDEBUG("Successfully 
extended tree in db by " << ext_leaves << " leaves"); @@ -971,6 +947,7 @@ static bool trim_tree_db(const std::size_t init_leaves, //---------------------------------------------------------------------------------------------------------------------- // Test //---------------------------------------------------------------------------------------------------------------------- +// TODO: init tree in db once, then extend a copy of that db TEST(curve_trees, grow_tree) { // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree @@ -987,12 +964,19 @@ TEST(curve_trees, grow_tree) // First initialize the tree with init_leaves for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) { - // TODO: init tree once, then extend a copy of that tree + LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); + CurveTreesGlobalTree global_tree(*curve_trees); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); + // Then extend the tree with ext_leaves for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= min_leaves_needed_for_tree_depth; ++ext_leaves) { - ASSERT_TRUE(grow_and_extend_tree_in_memory(init_leaves, ext_leaves, *curve_trees)); + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves, ext_leaves)); + // Tree in db INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); ASSERT_TRUE(grow_and_extend_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); } @@ -1017,7 +1001,7 @@ TEST(curve_trees, trim_tree) { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); CurveTreesGlobalTree global_tree(*curve_trees); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, init_leaves)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); // Then trim by trim_leaves for (std::size_t 
trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) @@ -1025,10 +1009,12 @@ TEST(curve_trees, trim_tree) if (trim_leaves > init_leaves) continue; + // Tree in memory // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(trim_tree_in_memory(trim_leaves, tree_copy)); + ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); + // Tree in db INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); } @@ -1056,7 +1042,7 @@ TEST(curve_trees, trim_tree_then_grow) { LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); CurveTreesGlobalTree global_tree(*curve_trees); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, init_leaves)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); // Then trim by trim_leaves for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) @@ -1064,16 +1050,17 @@ TEST(curve_trees, trim_tree_then_grow) if (trim_leaves > init_leaves) continue; + // Tree in memory // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves - trim_leaves, grow_after_trim)); - ASSERT_TRUE(trim_tree_in_memory(trim_leaves, tree_copy)); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, grow_after_trim)); - + // Tree in db INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); cryptonote::db_wtxn_guard guard(test_db.m_db); - const std::size_t expected_n_leaves = grow_after_trim + init_leaves - trim_leaves; + const std::size_t expected_n_leaves = init_leaves - trim_leaves + grow_after_trim; ASSERT_TRUE(grow_tree_db(grow_after_trim, expected_n_leaves, curve_trees, test_db)); } } From 
f47c60ec6323dd8e44f09bcd137e96b97527d5e3 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 9 Sep 2024 14:44:17 -0700 Subject: [PATCH 109/127] fcmp++ tests: init tree in db once, then copy db for tests - speeds up trim_tree test by 60%+ --- tests/unit_tests/curve_trees.cpp | 180 +++++++++++++--------------- tests/unit_tests/unit_tests_utils.h | 48 +++++++- 2 files changed, 125 insertions(+), 103 deletions(-) diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 40f0505b42c..649277e50cc 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -865,6 +865,8 @@ static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples, std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { + cryptonote::db_wtxn_guard guard(test_db.m_db); + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), false, "unexpected starting n leaf tuples in db"); @@ -875,50 +877,24 @@ static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples, return test_db.m_db->audit_tree(expected_old_n_leaf_tuples + n_leaves); } //---------------------------------------------------------------------------------------------------------------------- -static bool grow_and_extend_tree_db(const std::size_t init_leaves, - const std::size_t ext_leaves, - std::shared_ptr curve_trees, - unit_test::BlockchainLMDBTest &test_db) -{ - cryptonote::db_wtxn_guard guard(test_db.m_db); - - LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then extending by " << ext_leaves << " leaves"); - - CHECK_AND_ASSERT_MES(grow_tree_db(0, init_leaves, curve_trees, test_db), false, - "failed to add initial leaves to db"); - - MDEBUG("Successfully added initial " << init_leaves << " leaves to db, extending by " - << ext_leaves << " leaves"); - - CHECK_AND_ASSERT_MES(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db), false, - "failed to extend tree in db"); - - MDEBUG("Successfully 
extended tree in db by " << ext_leaves << " leaves"); - - return true; -} -//---------------------------------------------------------------------------------------------------------------------- -static bool trim_tree_db(const std::size_t init_leaves, +static bool trim_tree_db(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_leaves, - std::shared_ptr curve_trees, unit_test::BlockchainLMDBTest &test_db) { cryptonote::db_wtxn_guard guard(test_db.m_db); - LOG_PRINT_L1("Adding " << init_leaves << " leaves to db, then trimming by " << trim_leaves << " leaves"); - - auto init_outputs = generate_random_leaves(*curve_trees, 0, init_leaves); + CHECK_AND_ASSERT_THROW_MES(expected_old_n_leaf_tuples >= trim_leaves, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_leaves > 0, "must be trimming some leaves"); - test_db.m_db->grow_tree(std::move(init_outputs)); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves), false, - "failed to add initial leaves to db"); + LOG_PRINT_L1("Trimming " << trim_leaves << " leaf tuples from tree with " + << expected_old_n_leaf_tuples << " leaves in db"); - MDEBUG("Successfully added initial " << init_leaves << " leaves to db, trimming by " - << trim_leaves << " leaves"); + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), + false, "trimming unexpected starting n leaf tuples in db"); - // Can use 0 from trim_block_id since it's unused in tests + // Can use 0 for trim_block_id since it's unused in tests test_db.m_db->trim_tree(trim_leaves, 0); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(init_leaves - trim_leaves), false, + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(expected_old_n_leaf_tuples - trim_leaves), false, "failed to trim tree in db"); MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); @@ -944,10 +920,25 @@ static bool trim_tree_db(const std::size_t init_leaves, \ unit_test::BlockchainLMDBTest test_db; \ 
//---------------------------------------------------------------------------------------------------------------------- +#define BEGIN_INIT_TREE_ITER(curve_trees) \ + for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) \ + { \ + LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves"); \ + \ + /* Init tree in memory */ \ + CurveTreesGlobalTree global_tree(*curve_trees); \ + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); \ + \ + /* Init tree in db */ \ + INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees); \ + ASSERT_TRUE(grow_tree_db(0, init_leaves, curve_trees, test_db)); \ +//---------------------------------------------------------------------------------------------------------------------- +#define END_INIT_TREE_ITER(curve_trees) \ + }; \ +//---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // Test //---------------------------------------------------------------------------------------------------------------------- -// TODO: init tree in db once, then extend a copy of that db TEST(curve_trees, grow_tree) { // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree @@ -962,25 +953,24 @@ TEST(curve_trees, grow_tree) INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); // First initialize the tree with init_leaves - for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) - { - LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); - CurveTreesGlobalTree global_tree(*curve_trees); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); + BEGIN_INIT_TREE_ITER(curve_trees) - // Then extend the tree with ext_leaves - for (std::size_t ext_leaves = 
1; (init_leaves + ext_leaves) <= min_leaves_needed_for_tree_depth; ++ext_leaves) - { - // Tree in memory - // Copy the already existing global tree - CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves, ext_leaves)); - - // Tree in db - INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); - ASSERT_TRUE(grow_and_extend_tree_db(init_leaves, ext_leaves, curve_trees, test_db)); - } + // Then extend the tree with ext_leaves + for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= min_leaves_needed_for_tree_depth; ++ext_leaves) + { + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves, ext_leaves)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, copy_db)); } + + END_INIT_TREE_ITER() } //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, trim_tree) @@ -997,28 +987,27 @@ TEST(curve_trees, trim_tree) INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); // First initialize the tree with init_leaves - for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) - { - LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); - CurveTreesGlobalTree global_tree(*curve_trees); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); - - // Then trim by trim_leaves - for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) - { - if (trim_leaves > init_leaves) - continue; + BEGIN_INIT_TREE_ITER(curve_trees) - // Tree in memory - // Copy the already existing global tree - 
CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); - - // Tree in db - INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); - ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); - } + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, copy_db)); } + + END_INIT_TREE_ITER() } //---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, trim_tree_then_grow) @@ -1038,32 +1027,29 @@ TEST(curve_trees, trim_tree_then_grow) INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); // First initialize the tree with init_leaves - for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) - { - LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves in memory"); - CurveTreesGlobalTree global_tree(*curve_trees); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); + BEGIN_INIT_TREE_ITER(curve_trees) - // Then trim by trim_leaves - for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) - { - if (trim_leaves > init_leaves) - continue; - - // Tree in memory - // Copy the already existing global tree - CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, 
tree_copy, init_leaves - trim_leaves, grow_after_trim)); - - // Tree in db - INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees); - ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, curve_trees, test_db)); - cryptonote::db_wtxn_guard guard(test_db.m_db); - const std::size_t expected_n_leaves = init_leaves - trim_leaves + grow_after_trim; - ASSERT_TRUE(grow_tree_db(grow_after_trim, expected_n_leaves, curve_trees, test_db)); - } + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); + ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves - trim_leaves, grow_after_trim)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, copy_db)); + ASSERT_TRUE(grow_tree_db(init_leaves - trim_leaves, grow_after_trim, curve_trees, copy_db)); } + + END_INIT_TREE_ITER() } //---------------------------------------------------------------------------------------------------------------------- // Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 11bdfcf7448..2ffa578facb 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -75,13 +75,21 @@ namespace unit_test class BlockchainLMDBTest { public: - BlockchainLMDBTest() : m_temp_db_dir(boost::filesystem::temp_directory_path().string() + "/monero-lmdb-tests/") + BlockchainLMDBTest(bool is_copy = false) : + m_temp_db_dir(boost::filesystem::temp_directory_path().string() + 
"/monero-lmdb-tests/"), + m_is_copy{is_copy} {} ~BlockchainLMDBTest() { delete m_db; - remove_files(); + if (m_temp_db_dir.find("/monero-lmdb-tests/") == std::string::npos) + { + LOG_ERROR("unexpected temp db dir"); + return; + } + if (!m_is_copy) + boost::filesystem::remove_all(m_temp_db_dir); } void init_new_db(std::shared_ptr curve_trees) @@ -94,6 +102,7 @@ namespace unit_test MDEBUG("Creating test db at path " << dir_path); ASSERT_NO_THROW(this->m_db->open(dir_path)); + m_cur_dir_path = dir_path; } void init_hardfork(cryptonote::HardFork *hardfork) @@ -102,18 +111,45 @@ namespace unit_test this->m_db->set_hard_fork(hardfork); } - void remove_files() + BlockchainLMDBTest *copy_db(std::shared_ptr curve_trees) { - boost::filesystem::remove_all(m_temp_db_dir); + CHECK_AND_ASSERT_THROW_MES(this->m_db != nullptr, "expected non-null m_db"); + CHECK_AND_ASSERT_THROW_MES(this->m_cur_dir_path != "", "expected cur dir path set"); + + const boost::filesystem::path lmdb_data_path = boost::filesystem::path(m_cur_dir_path + "/data.mdb"); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::exists(lmdb_data_path), "did not find lmdb data file"); + + // Close db, copy db file, open copy, then reopen the db + this->m_db->close(); + const auto temp_db_path = boost::filesystem::unique_path(); + const std::string dest_path = m_temp_db_dir + temp_db_path.string(); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::create_directories(dest_path), + "failed to create new db dirs"); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::copy_file(lmdb_data_path, dest_path + "/data.mdb"), + "failed to copy db data"); + + // Open db copy + BlockchainLMDBTest *copy_db = new BlockchainLMDBTest(true/*is_copy*/); + copy_db->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); + copy_db->m_db->open(dest_path); + copy_db->m_cur_dir_path = dest_path; + + // Reopen original db so it's ready for use + this->m_db->open(m_cur_dir_path); + + return copy_db; } cryptonote::BlockchainDB* 
m_db{nullptr}; const std::string m_temp_db_dir; + std::string m_cur_dir_path{""}; + const bool m_is_copy{false}; }; } -#define INIT_BLOCKCHAIN_LMDB_TEST_DB(curve_trees) \ - test_db.init_new_db(curve_trees); \ +#define INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees) \ + if (curve_trees != nullptr) \ + test_db.init_new_db(curve_trees); \ auto hardfork = cryptonote::HardFork(*test_db.m_db, 1, 0); \ test_db.init_hardfork(&hardfork); \ auto scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ \ From e636c384a9559173a6ede28c6f336fd7d260e715 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 9 Sep 2024 16:29:36 -0700 Subject: [PATCH 110/127] fix ubuntu 20 compile --- tests/unit_tests/unit_tests_utils.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 2ffa578facb..ab3b77889f6 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -125,8 +125,7 @@ namespace unit_test const std::string dest_path = m_temp_db_dir + temp_db_path.string(); CHECK_AND_ASSERT_THROW_MES(boost::filesystem::create_directories(dest_path), "failed to create new db dirs"); - CHECK_AND_ASSERT_THROW_MES(boost::filesystem::copy_file(lmdb_data_path, dest_path + "/data.mdb"), - "failed to copy db data"); + boost::filesystem::copy_file(lmdb_data_path, dest_path + "/data.mdb"); // Open db copy BlockchainLMDBTest *copy_db = new BlockchainLMDBTest(true/*is_copy*/); From 90164e3d6b25ac74e80c2753341056a244eaf48a Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 10 Sep 2024 16:51:17 -0700 Subject: [PATCH 111/127] fcmp++: multithreaded hashing children chunks into tree --- src/fcmp_pp/CMakeLists.txt | 1 + src/fcmp_pp/curve_trees.cpp | 167 ++++++++++++++++++++++++++---------- 2 files changed, 121 insertions(+), 47 deletions(-) diff --git a/src/fcmp_pp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt index 554d1a8dcfd..54ebad4a851 100644 --- 
a/src/fcmp_pp/CMakeLists.txt +++ b/src/fcmp_pp/CMakeLists.txt @@ -51,6 +51,7 @@ endif() target_link_libraries(fcmp_pp PUBLIC cncrypto + common epee PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 98d18cf6115..c6b26713881 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -28,6 +28,7 @@ #include "curve_trees.h" +#include "common/threadpool.h" #include "fcmp_pp_crypto.h" #include "ringct/rctOps.h" @@ -106,6 +107,70 @@ static std::vector next_child_scalars_from_children(c return child_scalars_out; }; //---------------------------------------------------------------------------------------------------------------------- +template +static void hash_first_chunk(const std::unique_ptr &curve, + const typename C::Scalar *old_last_child, + const typename C::Point *old_last_parent, + const std::size_t start_offset, + const std::vector &new_child_scalars, + const std::size_t chunk_size, + typename C::Point &hash_out) +{ + // Prepare to hash + const auto &existing_hash = old_last_parent != nullptr + ? *old_last_parent + : curve->hash_init_point(); + + const auto &prior_child_after_offset = old_last_child != nullptr + ? 
*old_last_child + : curve->zero_scalar(); + + const auto chunk_start = new_child_scalars.data(); + const typename C::Chunk chunk{chunk_start, chunk_size}; + + MDEBUG("existing_hash: " << curve->to_string(existing_hash) << " , start_offset: " << start_offset + << " , prior_child_after_offset: " << curve->to_string(prior_child_after_offset)); + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing child in first chunk " << curve->to_string(chunk_start[i])); + + // Do the hash + auto chunk_hash = curve->hash_grow( + existing_hash, + start_offset, + prior_child_after_offset, + chunk + ); + + MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve->to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + hash_out = std::move(chunk_hash); +} +//---------------------------------------------------------------------------------------------------------------------- +template +static void hash_next_chunk(const std::unique_ptr &curve, + const std::size_t chunk_start_idx, + const std::vector &new_child_scalars, + const std::size_t chunk_size, + typename C::Point &hash_out) +{ + const auto chunk_start = new_child_scalars.data() + chunk_start_idx; + const typename C::Chunk chunk{chunk_start, chunk_size}; + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " hashing child " << curve->to_string(chunk_start[i])); + + auto chunk_hash = get_new_parent(curve, chunk); + + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve->to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + hash_out = std::move(chunk_hash); +} +//---------------------------------------------------------------------------------------------------------------------- // Hash chunks of a layer of new children, outputting the next layer's parents template static LayerExtension hash_children_chunks(const std::unique_ptr &curve, @@ -119,7 +184,6 @@ static 
LayerExtension hash_children_chunks(const std::unique_ptr &curve, LayerExtension parents_out; parents_out.start_idx = next_parent_start_index; parents_out.update_existing_last_hash = old_last_parent != nullptr; - parents_out.hashes.reserve(1 + (new_child_scalars.size() / chunk_width)); CHECK_AND_ASSERT_THROW_MES(!new_child_scalars.empty(), "empty child scalars"); CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); @@ -127,73 +191,82 @@ static LayerExtension hash_children_chunks(const std::unique_ptr &curve, // See how many children we need to fill up the existing last chunk std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); - MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() - << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); - - // Hash the first chunk - // TODO: separate function - { - // Prepare to hash - const auto &existing_hash = old_last_parent != nullptr - ? *old_last_parent - : curve->hash_init_point(); + CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= chunk_size, "unexpected size of new child scalars"); - const auto &prior_child_after_offset = old_last_child != nullptr - ? *old_last_child - : curve->zero_scalar(); + const std::size_t n_chunks = 1 // first chunk + + (new_child_scalars.size() - chunk_size) / chunk_width // middle chunks + + (((new_child_scalars.size() - chunk_size) % chunk_width > 0) ? 
1 : 0); // final chunk - const auto chunk_start = new_child_scalars.data(); - const typename C::Chunk chunk{chunk_start, chunk_size}; + parents_out.hashes.resize(n_chunks); - MDEBUG("existing_hash: " << curve->to_string(existing_hash) << " , start_offset: " << start_offset - << " , prior_child_after_offset: " << curve->to_string(prior_child_after_offset)); + MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() + << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); - for (std::size_t i = 0; i < chunk_size; ++i) - MDEBUG("Hashing child " << curve->to_string(chunk_start[i])); + // Hash all chunks in parallel + tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); + tools::threadpool::waiter waiter(tpool); - // Do the hash - auto chunk_hash = curve->hash_grow( - existing_hash, + // Hash the first chunk + tpool.submit(&waiter, + [ + &curve, + &old_last_child, + &old_last_parent, + &new_child_scalars, + &parents_out, start_offset, - prior_child_after_offset, - chunk - ); - - MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve->to_string(chunk_hash) - << " , chunk_size: " << chunk_size); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); - } + chunk_size + ]() + { + auto &hash_out = parents_out.hashes[0]; + hash_first_chunk(curve, + old_last_child, + old_last_parent, + start_offset, + new_child_scalars, + chunk_size, + hash_out); + }, + true + ); // Hash chunks of child scalars to create the parent hashes std::size_t chunk_start_idx = chunk_size; + std::size_t chunk_idx = 1; while (chunk_start_idx < new_child_scalars.size()) { - // TODO: this loop can be parallelized // Fill a complete chunk, or add the remaining new children to the last chunk chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); - const auto chunk_start = new_child_scalars.data() + chunk_start_idx; - const typename C::Chunk 
chunk{chunk_start, chunk_size}; - - for (std::size_t i = 0; i < chunk_size; ++i) - MDEBUG("Hashing child " << curve->to_string(chunk_start[i])); - - auto chunk_hash = get_new_parent(curve, chunk); - - MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve->to_string(chunk_hash) - << " , chunk_size: " << chunk_size); - - // We've got our hash - parents_out.hashes.emplace_back(std::move(chunk_hash)); + CHECK_AND_ASSERT_THROW_MES(chunk_idx < parents_out.hashes.size(), "unexpected chunk_idx"); + + tpool.submit(&waiter, + [ + &curve, + &new_child_scalars, + &parents_out, + chunk_start_idx, + chunk_size, + chunk_idx + ]() + { + auto &hash_out = parents_out.hashes[chunk_idx]; + hash_next_chunk(curve, chunk_start_idx, new_child_scalars, chunk_size, hash_out); + }, + true + ); // Advance to the next chunk chunk_start_idx += chunk_size; CHECK_AND_ASSERT_THROW_MES(chunk_start_idx <= new_child_scalars.size(), "unexpected chunk start idx"); + + ++chunk_idx; } + CHECK_AND_ASSERT_THROW_MES(chunk_idx == n_chunks, "unexpected n chunks"); + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to hash chunks"); + return parents_out; }; //---------------------------------------------------------------------------------------------------------------------- From 8fc87d72a2411f659cd45abba5f846369d1f6b72 Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 10 Sep 2024 16:51:40 -0700 Subject: [PATCH 112/127] fcmp++: multithreaded convert valid outputs into leaf tuples --- src/fcmp_pp/curve_trees.cpp | 91 ++++++++++++++++++++++++++++--------- src/fcmp_pp/curve_trees.h | 6 +++ 2 files changed, 76 insertions(+), 21 deletions(-) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index c6b26713881..634f3f8c22e 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -794,27 +794,7 @@ typename CurveTrees::TreeExtension CurveTrees::get_tree_extensio // and place the outputs in a tree extension struct for insertion into the db. 
We ignore invalid outputs, since // they cannot be inserted to the tree. std::vector flattened_leaves; - flattened_leaves.reserve(new_outputs.size() * LEAF_TUPLE_SIZE); - tree_extension.leaves.tuples.reserve(new_outputs.size()); - for (auto &o : new_outputs) - { - // TODO: this loop can be parallelized - LeafTuple leaf; - try { leaf = leaf_tuple(o.output_pair); } - catch(...) - { - // Invalid outputs can't be added to the tree - continue; - } - - // We use O.x, I.x, C.x to grow the tree - flattened_leaves.emplace_back(std::move(leaf.O_x)); - flattened_leaves.emplace_back(std::move(leaf.I_x)); - flattened_leaves.emplace_back(std::move(leaf.C_x)); - - // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output pair in the db to save 32 bytes - tree_extension.leaves.tuples.emplace_back(std::move(o)); - } + this->set_valid_leaves(flattened_leaves, tree_extension.leaves.tuples, std::move(new_outputs)); if (flattened_leaves.empty()) return tree_extension; @@ -1011,6 +991,75 @@ template CurveTrees::TreeReduction CurveTrees::g // CurveTrees private member functions //---------------------------------------------------------------------------------------------------------------------- template +void CurveTrees::set_valid_leaves( + std::vector &flattened_leaves_out, + std::vector &tuples_out, + std::vector &&new_outputs) const +{ + flattened_leaves_out.reserve(new_outputs.size() * LEAF_TUPLE_SIZE); + tuples_out.reserve(new_outputs.size()); + + std::vector leaves; + leaves.resize(new_outputs.size()); + + // Keep track of valid outputs to make sure we only use leaves from valid outputs. Can't use std::vector + // because std::vector concurrent access is not thread safe. 
+ enum Boolean : uint8_t { + False = 0, + True = 1, + }; + std::vector valid_outputs(new_outputs.size(), False); + + tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); + tools::threadpool::waiter waiter(tpool); + + // Multithreaded conversion of valid outputs into leaf tuples + for (std::size_t i = 0; i < new_outputs.size(); ++i) + { + tpool.submit(&waiter, + [ + this, + &new_outputs, + &leaves, + &valid_outputs, + i + ]() + { + CHECK_AND_ASSERT_THROW_MES(leaves.size() > i, "unexpected leaves size"); + CHECK_AND_ASSERT_THROW_MES(valid_outputs.size() > i, "unexpected valid outputs size"); + CHECK_AND_ASSERT_THROW_MES(!valid_outputs[i], "unexpected valid output"); + + try { leaves[i] = this->leaf_tuple(new_outputs[i].output_pair); } + catch(...) { /* Invalid outputs can't be added to the tree */ return; } + + valid_outputs[i] = True; + }, + true + ); + } + + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to tuples"); + + // Collect valid outputs into expected objects + for (std::size_t i = 0; i < valid_outputs.size(); ++i) + { + if (!valid_outputs[i]) + continue; + + CHECK_AND_ASSERT_THROW_MES(leaves.size() > i, "unexpected size of leaves"); + CHECK_AND_ASSERT_THROW_MES(new_outputs.size() > i, "unexpected size of valid outputs"); + + // We use O.x, I.x, C.x to grow the tree + flattened_leaves_out.emplace_back(std::move(leaves[i].O_x)); + flattened_leaves_out.emplace_back(std::move(leaves[i].I_x)); + flattened_leaves_out.emplace_back(std::move(leaves[i].C_x)); + + // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output context in the db to save 32 bytes + tuples_out.emplace_back(std::move(new_outputs[i])); + } +} +//---------------------------------------------------------------------------------------------------------------------- +template GrowLayerInstructions CurveTrees::set_next_layer_extension( const GrowLayerInstructions &prev_layer_instructions, const bool parent_is_c1, diff --git 
a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index c5dfe7d1829..7d9c9688b28 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -268,6 +268,12 @@ class CurveTrees const LastHashes &last_hashes) const; private: + // Multithreaded helper function to convert outputs to leaf tuples and set leaves on tree extension + void set_valid_leaves( + std::vector &flattened_leaves_out, + std::vector &tuples_out, + std::vector &&new_outputs) const; + // Helper function used to set the next layer extension used to grow the next layer in the tree // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent // layer of the leaf layer From b055eb3f61e8e51858125b7f17e914df23211893 Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 10 Sep 2024 18:11:06 -0700 Subject: [PATCH 113/127] fcmp++: output may not be present in locked outputs table on remove - If the output is invalid/unspendable, upon unlock it will be deleted from the locked outputs table and then won't be used to grow the tree. Upon reorg/pop blocks, the invalid output won't be re-added to the locked outputs table upon trimming the tree. Thus, it's possible for an invalid/unspendable output to not be present in the locked outputs table upon remove. --- src/blockchain_db/lmdb/db_lmdb.cpp | 26 +++++++++++++++++--------- src/fcmp_pp/curve_trees.cpp | 2 +- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index ac16cb3413a..608d6d99854 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1259,10 +1259,16 @@ void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_in throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db transaction", result).c_str())); } - // Remove output from locked outputs table. 
We expect the output to be in the - // locked outputs table because remove_output is called when removing the - // top block from the chain, and all outputs from the top block are expected - // to be locked until they are at least 10 blocks old (10 is the lower bound). + // Remove output from locked outputs table if present. We expect all valid + // outputs to be in the locked outputs table because remove_output is called + // when removing the top block from the chain, and all outputs from the top + // block are expected to be locked until they are at least 10 blocks old (10 + // is the lower bound). An output might not be in the locked outputs table if + // it is invalid, then gets removed from the locked outputs table upon growing + // the tree. + // TODO: test case where we add an invalid output to the chain, grow the tree + // in the block in which that output unlocks, pop blocks to remove that output + // from the chain, then progress the chain again. CURSOR(locked_outputs); const uint64_t unlock_block = cryptonote::get_unlock_block_index(ok->data.unlock_time, ok->data.height); @@ -1273,16 +1279,18 @@ void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_in result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, MDB_GET_BOTH); if (result == MDB_NOTFOUND) { - throw0(DB_ERROR("Unexpected: output not found in m_cur_locked_outputs")); + // We expect this output is invalid } else if (result) { throw1(DB_ERROR(lmdb_error("Error adding removal of locked output to db transaction", result).c_str())); } - - result = mdb_cursor_del(m_cur_locked_outputs, 0); - if (result) - throw0(DB_ERROR(lmdb_error(std::string("Error deleting locked output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + else + { + result = mdb_cursor_del(m_cur_locked_outputs, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting locked output index ").append(boost::lexical_cast(out_index).append(": 
")).c_str(), result).c_str())); + } result = mdb_cursor_del(m_cur_output_txs, 0); if (result) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 634f3f8c22e..a1640b5e71c 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -191,7 +191,7 @@ static LayerExtension hash_children_chunks(const std::unique_ptr &curve, // See how many children we need to fill up the existing last chunk std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); - CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= chunk_size, "unexpected size of new child scalars"); + CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= chunk_size, "unexpected first chunk size"); const std::size_t n_chunks = 1 // first chunk + (new_child_scalars.size() - chunk_size) / chunk_width // middle chunks From 043597426bc83463b7ecdd1bffe5be7b7fd71fd0 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 11 Sep 2024 18:34:42 -0700 Subject: [PATCH 114/127] fcmp++ lmdb: migration touchups - If locked output migration step completes, then program exits while migration step to grow the tree is in progress, make sure the migration picks back up where it left off growing the tree. - Make sure db cursor gets set in all cases when renaming block infn table. 
--- src/blockchain_db/lmdb/db_lmdb.cpp | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 608d6d99854..373d368bcef 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -6900,6 +6900,12 @@ void BlockchainLMDB::migrate_5_6() if (result != MDB_NOTFOUND) { cached_last_o = *(const tmp_output_cache*)v_last_output.mv_data; + + if (n_outputs < cached_last_o.n_outputs_read) + throw0(DB_ERROR("Unexpected n_outputs_read on cached last output")); + if (n_outputs == cached_last_o.n_outputs_read) + break; + MDEBUG("Found cached output " << cached_last_o.ok.output_id << ", migrated " << cached_last_o.n_outputs_read << " outputs already"); found_cached_output = true; @@ -6910,8 +6916,6 @@ void BlockchainLMDB::migrate_5_6() const std::size_t outkey_size = (cached_last_o.amount == 0) ? sizeof(outkey) : sizeof(pre_rct_outkey); v = {outkey_size, (void *)&cached_last_o.ok}; - if (n_outputs < cached_last_o.n_outputs_read) - throw0(DB_ERROR("Unexpected n_outputs_read on cached last output")); i = cached_last_o.n_outputs_read; op = MDB_NEXT; } @@ -6931,6 +6935,13 @@ void BlockchainLMDB::migrate_5_6() op = MDB_NEXT; if (result == MDB_NOTFOUND) { + // Indicate we've read all outputs so we know the migration step is complete + last_output.n_outputs_read = n_outputs; + MDB_val_set(v_last_output, last_output); + result = mdb_cursor_put(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); + batch_stop(); break; } @@ -7126,7 +7137,10 @@ void BlockchainLMDB::migrate_5_6() if (result) throw0(DB_ERROR(lmdb_error("Failed to delete old block_info table: ", result).c_str())); - MDB_cursor *c_cur = c_new_block_info; + MDB_cursor *c_cur; + result = mdb_cursor_open(txn, m_block_info, &c_cur); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to 
open a cursor for block_infn: ", result).c_str())); RENAME_DB("block_infn"); mdb_dbi_close(m_env, m_block_info); @@ -7137,6 +7151,7 @@ void BlockchainLMDB::migrate_5_6() } } while(0); + // Update db version uint32_t version = 6; v.mv_data = (void *)&version; v.mv_size = sizeof(version); From 7fa01d20c058afe5d60ae5a6847608b141464ef0 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 12 Sep 2024 15:03:35 -0700 Subject: [PATCH 115/127] fcmp++: key image migration - Removing the sign bit from key images enables an optimization for fcmp's. - If an fcmp includes a key image with sign bit cleared,while the same key image with sign bit set exists in the chain already via a ring signature, then the fcmp would be a double spend attempt and the daemon must be able to detect and reject it. - In order for the daemon to detect such double spends, upon booting the daemon, we clear the sign bit from all key images already in the db. We also make sure that all key images held in memory by the pool have sign bit cleared as well. - Key images with sign bit cleared are a new type: `crypto::key_image_y`. The sign bit can be cleared via `crypto::key_image_to_y`. - The `_y` denotes that the encoded point is now the point's y coordinate. - In order to maintain backwards compatibility with current RPC consumers, the daemon keeps track of which key images have sign bit cleared and not, so that upon serving `spent_key_image_info::id_hash`, the daemon can re-construct the original key image and serve it to clients. 
--- src/blockchain_db/blockchain_db.h | 2 +- src/blockchain_db/lmdb/db_lmdb.cpp | 153 ++++++++++++++++-- src/blockchain_db/lmdb/db_lmdb.h | 2 +- src/blockchain_db/testdb.h | 2 +- src/crypto/crypto-ops.c | 1 - src/crypto/crypto.cpp | 17 ++ src/crypto/crypto.h | 31 +++- src/cryptonote_core/blockchain.cpp | 20 ++- src/cryptonote_core/blockchain.h | 6 +- src/cryptonote_core/cryptonote_core.cpp | 6 +- src/cryptonote_core/tx_pool.cpp | 102 ++++++++---- src/cryptonote_core/tx_pool.h | 30 +++- src/rpc/core_rpc_server.cpp | 14 +- src/rpc/message_data_structs.h | 2 +- tests/crypto/main.cpp | 18 +++ tests/crypto/tests.txt | 200 ++++++++++++++++++++++++ tests/unit_tests/crypto.cpp | 24 +++ 17 files changed, 564 insertions(+), 66 deletions(-) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 595587d192e..68a1cfdec29 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -1703,7 +1703,7 @@ class BlockchainDB * * @return false if the function returns false for any key image, otherwise true */ - virtual bool for_all_key_images(std::function) const = 0; + virtual bool for_all_key_images(std::function) const = 0; /** * @brief runs a function over a range of blocks diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 373d368bcef..765e7312b4e 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -1364,7 +1364,10 @@ void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image) CURSOR(spent_keys) - MDB_val k = {sizeof(k_image), (void *)&k_image}; + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k = {sizeof(k_image_y), (void *)&k_image_y}; if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) { if (result == MDB_KEYEXIST) throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); @@ -1381,7 +1384,10 @@ void 
BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) CURSOR(spent_keys) - MDB_val k = {sizeof(k_image), (void *)&k_image}; + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k = {sizeof(k_image_y), (void *)&k_image_y}; auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH); if (result != 0 && result != MDB_NOTFOUND) throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str())); @@ -4673,14 +4679,17 @@ bool BlockchainLMDB::has_key_image(const crypto::key_image& img) const TXN_PREFIX_RDONLY(); RCURSOR(spent_keys); - MDB_val k = {sizeof(img), (void *)&img}; + crypto::key_image_y img_y; + crypto::key_image_to_y(img, img_y); + + MDB_val k = {sizeof(img_y), (void *)&img_y}; ret = (mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH) == 0); TXN_POSTFIX_RDONLY(); return ret; } -bool BlockchainLMDB::for_all_key_images(std::function f) const +bool BlockchainLMDB::for_all_key_images(std::function f) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -4701,8 +4710,8 @@ bool BlockchainLMDB::for_all_key_images(std::functionm_txn; + + /* the spent_keys table name is the same but the old version and new version + * have different data. Create a new table. We want the name to be similar + * to the old name so that it will occupy the same location in the DB. 
+ */ + lmdb_db_open(txn, "spent_keyr", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keyr"); + mdb_set_dupsort(txn, m_spent_keys, compare_hash32); + + MDB_cursor *c_new_spent_keys, *c_old_spent_keys; + MDB_val k, v_img; + MDB_cursor_op op = MDB_FIRST; + + uint64_t n_old_key_images; + { + MDB_stat db_stats; + if ((result = mdb_stat(txn, o_spent_keys, &db_stats))) + throw0(DB_ERROR(lmdb_error("Failed to query m_spent_keys: ", result).c_str())); + n_old_key_images = db_stats.ms_entries; + } + + uint64_t n_new_key_images; + { + MDB_stat db_stats; + if ((result = mdb_stat(txn, m_spent_keys, &db_stats))) + throw0(DB_ERROR(lmdb_error("Failed to query m_spent_keys: ", result).c_str())); + n_new_key_images = db_stats.ms_entries; + } + + const uint64_t n_key_images = n_old_key_images + n_new_key_images; + + i = n_new_key_images; + while (i < n_key_images) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_key_images, (uint64_t)99); + std::cout << i << " / " << n_key_images << " key images (" << percent << "% of step 1/3) \r" << std::flush; + } + + txn.commit(); + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + m_write_txn->m_txn = txn.m_txn; + m_write_batch_txn->m_txn = txn.m_txn; + memset(&m_wcursors, 0, sizeof(m_wcursors)); + } + + // Open all cursors + result = mdb_cursor_open(txn, m_spent_keys, &c_new_spent_keys); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str())); + result = mdb_cursor_open(txn, o_spent_keys, &c_old_spent_keys); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keys: ", result).c_str())); + op = MDB_FIRST; + } + + // Get old key image and use it to set the new key image y + result = mdb_cursor_get(c_old_spent_keys, &k, &v_img, op); 
+ op = MDB_NEXT; + if (result) + throw0(DB_ERROR(lmdb_error("Failed to get a record from spent_keys: ", result).c_str())); + const crypto::key_image k_image = *(const crypto::key_image*)v_img.mv_data; + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k_y = {sizeof(k_image_y), (void *)&k_image_y}; + if (auto result = mdb_cursor_put(c_new_spent_keys, (MDB_val *)&zerokval, &k_y, MDB_NODUPDATA)) { + if (result == MDB_KEYEXIST) + throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); + else + throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + } + + /* we delete the old records immediately, so the overall DB and mapsize should not be + * larger than it needs to be. + * This is a little slower than just letting mdb_drop() delete it all at the end, but + * it saves a significant amount of disk space. + */ + result = mdb_cursor_del(c_old_spent_keys, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_info: ", result).c_str())); + + ++i; + } + batch_stop(); + } + + // 2. Prepare all valid outputs to be inserted into the merkle tree and // place them in a locked outputs table. The key to this new table is the // block id in which the outputs unlock. 
{ - MINFO("Setting up a locked outputs table (step 1/2 of full-chain membership proof migration)"); + MINFO("Setting up a locked outputs table (step 2/3 of full-chain membership proof migration)"); result = mdb_txn_begin(m_env, NULL, 0, txn); if (result) @@ -6858,7 +6971,7 @@ void BlockchainLMDB::migrate_5_6() LOGIF(el::Level::Info) { const uint64_t percent = std::min((i * 100) / n_outputs, (uint64_t)99); - std::cout << i << " / " << n_outputs << " outputs (" << percent << "% of step 1/2) \r" << std::flush; + std::cout << i << " / " << n_outputs << " outputs (" << percent << "% of step 2/3) \r" << std::flush; } // Update last output read @@ -7006,10 +7119,10 @@ void BlockchainLMDB::migrate_5_6() } } - // 2. Set up the curve trees merkle tree by growing the tree block by block, + // 3. Set up the curve trees merkle tree by growing the tree block by block, // with leaves that unlock in each respective block { - MINFO("Setting up a merkle tree using existing cryptonote outputs (step 2/2 of full-chain membership proof migration)"); + MINFO("Setting up a merkle tree using existing cryptonote outputs (step 3/3 of full-chain membership proof migration)"); if (!m_batch_transactions) set_batch_transactions(true); @@ -7039,7 +7152,7 @@ void BlockchainLMDB::migrate_5_6() LOGIF(el::Level::Info) { const uint64_t percent = std::min((i * 100) / n_blocks, (uint64_t)99); - std::cout << i << " / " << n_blocks << " blocks (" << percent << "% of step 2/2) \r" << std::flush; + std::cout << i << " / " << n_blocks << " blocks (" << percent << "% of step 3/3) \r" << std::flush; } txn.commit(); @@ -7163,6 +7276,22 @@ void BlockchainLMDB::migrate_5_6() if (result) throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str())); + // Drop the old spent keys table. We keep it until here so we know if the key image migration is complete. 
+ result = mdb_drop(txn, o_spent_keys, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete old spent_keys table: ", result).c_str())); + + // Rename the spent keyr table to the new spent keys table + MDB_cursor *c_cur; + result = mdb_cursor_open(txn, m_spent_keys, &c_cur); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str())); + RENAME_DB("spent_keyr"); + mdb_dbi_close(m_env, m_spent_keys); + + lmdb_db_open(txn, "spent_keys", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + mdb_set_dupsort(txn, m_spent_keys, compare_hash32); + // We only needed the temp last output table for this migration, drop it result = mdb_drop(txn, m_tmp_last_output, 1); if (result) diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 508c6f14568..b8d087412ad 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -314,7 +314,7 @@ class BlockchainLMDB : public BlockchainDB virtual bool for_all_txpool_txes(std::function f, bool include_blob = false, relay_category category = relay_category::broadcasted) const; - virtual bool for_all_key_images(std::function) const; + virtual bool for_all_key_images(std::function) const; virtual bool for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function) const; virtual bool for_all_transactions(std::function, bool pruned) const; virtual bool for_all_outputs(std::function f) const; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 5129e226007..84d1d3a8016 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -122,7 +122,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual std::array get_tree_root() const override { return {}; }; virtual uint64_t get_num_leaf_tuples() const override { return 0; }; - virtual bool for_all_key_images(std::function) const override { return true; } + virtual bool 
for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } virtual bool for_all_transactions(std::function, bool pruned) const override { return true; } virtual bool for_all_outputs(std::function f) const override { return true; } diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index b8dbbc799ce..bbf5632bb1b 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -1331,7 +1331,6 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *r3, const unsigned char *a, con /* From fe_frombytes.c */ int fe_frombytes_vartime(fe y, const unsigned char *s) { - int64_t h0 = load_4(s); int64_t h1 = load_3(s + 4) << 6; int64_t h2 = load_3(s + 7) << 5; diff --git a/src/crypto/crypto.cpp b/src/crypto/crypto.cpp index 00aedd288cc..2145b06a603 100644 --- a/src/crypto/crypto.cpp +++ b/src/crypto/crypto.cpp @@ -779,4 +779,21 @@ POP_WARNINGS static_assert(sizeof(crypto::view_tag) <= sizeof(view_tag_full), "view tag should not be larger than hash result"); memcpy(&view_tag, &view_tag_full, sizeof(crypto::view_tag)); } + + bool crypto_ops::key_image_to_y(const key_image &ki, key_image_y &ki_y) { + static_assert(sizeof(key_image) == 32 && sizeof(key_image_y) == 32, "unexpected size of key image"); + memcpy(&ki_y, &ki, 32); + // clear the sign bit, leaving us with the y coord + ki_y.data[31] &= 0x7F; + // return true if sign bit is set on the original key image + return (ki.data[31] & 0x80) > 0; + } + + void crypto_ops::key_image_from_y(const key_image_y &ki_y, const bool sign, key_image &ki) { + static_assert(sizeof(key_image) == 32 && sizeof(key_image_y) == 32, "unexpected size of key image"); + memcpy(&ki, &ki_y, 32); + if (sign) { + ki.data[31] ^= 0x80; + } + } } diff --git a/src/crypto/crypto.h b/src/crypto/crypto.h index 401af44c35d..86e7a97ffdd 100644 --- a/src/crypto/crypto.h +++ b/src/crypto/crypto.h @@ -95,6 +95,10 @@ namespace crypto { 
friend class crypto_ops; }; + POD_CLASS key_image_y: ec_point { + friend class crypto_ops; + }; + POD_CLASS signature { ec_scalar c, r; friend class crypto_ops; @@ -110,7 +114,7 @@ namespace crypto { static_assert(sizeof(ec_point) == 32 && sizeof(ec_scalar) == 32 && sizeof(public_key) == 32 && sizeof(public_key_memsafe) == 32 && sizeof(secret_key) == 32 && - sizeof(key_derivation) == 32 && sizeof(key_image) == 32 && + sizeof(key_derivation) == 32 && sizeof(key_image) == 32 && sizeof(key_image_y) == 32 && sizeof(signature) == 64 && sizeof(view_tag) == 1, "Invalid structure size"); class crypto_ops { @@ -159,6 +163,10 @@ namespace crypto { const public_key *const *, std::size_t, const signature *); static void derive_view_tag(const key_derivation &, std::size_t, view_tag &); friend void derive_view_tag(const key_derivation &, std::size_t, view_tag &); + static bool key_image_to_y(const key_image &, key_image_y &); + friend bool key_image_to_y(const key_image &, key_image_y &); + static void key_image_from_y(const key_image_y &, const bool, key_image &); + friend void key_image_from_y(const key_image_y &, const bool, key_image &); }; void generate_random_bytes_thread_safe(size_t N, uint8_t *bytes); @@ -317,6 +325,21 @@ namespace crypto { crypto_ops::derive_view_tag(derivation, output_index, vt); } + /** Clear the sign bit on the key image (i.e. get just the y coordinate). + * Return true if the sign bit is set, false if not. + * Since fcmp's allow construction of key images with sign bit cleared, while + * the same key image with sign bit set may already exist in the chain, we + * prevent double spends by converting all existing key images in the chain to + * their y coordinate and preventing duplicate key image y's. 
+ */ + inline bool key_image_to_y(const key_image &ki, key_image_y &ki_y) { + return crypto_ops::key_image_to_y(ki, ki_y); + } + + inline void key_image_from_y(const key_image_y &ki_y, const bool sign, key_image &ki) { + return crypto_ops::key_image_from_y(ki_y, sign, ki); + } + inline std::ostream &operator <<(std::ostream &o, const crypto::public_key &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } @@ -329,6 +352,9 @@ namespace crypto { inline std::ostream &operator <<(std::ostream &o, const crypto::key_image &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } + inline std::ostream &operator <<(std::ostream &o, const crypto::key_image_y &v) { + epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; + } inline std::ostream &operator <<(std::ostream &o, const crypto::signature &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } @@ -343,6 +369,8 @@ namespace crypto { inline bool operator>(const public_key &p1, const public_key &p2) { return p2 < p1; } inline bool operator<(const key_image &p1, const key_image &p2) { return memcmp(&p1, &p2, sizeof(key_image)) < 0; } inline bool operator>(const key_image &p1, const key_image &p2) { return p2 < p1; } + inline bool operator<(const key_image_y &p1, const key_image_y &p2) { return memcmp(&p1, &p2, sizeof(key_image_y)) < 0; } + inline bool operator>(const key_image_y &p1, const key_image_y &p2) { return p2 < p1; } } // type conversions for easier calls to sc_add(), sc_sub(), hash functions @@ -355,5 +383,6 @@ CRYPTO_MAKE_HASHABLE(public_key) CRYPTO_MAKE_HASHABLE_CONSTANT_TIME(secret_key) CRYPTO_MAKE_HASHABLE_CONSTANT_TIME(public_key_memsafe) CRYPTO_MAKE_HASHABLE(key_image) +CRYPTO_MAKE_HASHABLE(key_image_y) CRYPTO_MAKE_COMPARABLE(signature) CRYPTO_MAKE_COMPARABLE(view_tag) diff --git a/src/cryptonote_core/blockchain.cpp b/src/cryptonote_core/blockchain.cpp index 8d34f0e858e..45a4b541417 100644 --- a/src/cryptonote_core/blockchain.cpp +++ 
b/src/cryptonote_core/blockchain.cpp @@ -156,7 +156,9 @@ bool Blockchain::scan_outputkeys_for_indexes(size_t tx_version, const txin_to_ke auto it = m_scan_table.find(tx_prefix_hash); if (it != m_scan_table.end()) { - auto its = it->second.find(tx_in_to_key.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(tx_in_to_key.k_image, ki_y); + auto its = it->second.find(ki_y); if (its != it->second.end()) { outputs = its->second; @@ -2909,7 +2911,9 @@ bool Blockchain::check_for_double_spend(const transaction& tx, key_images_contai // if the insert into the block-wide spent keys container succeeds, // check the blockchain-wide spent keys container and make sure the // key wasn't used in another block already. - auto r = m_spent_keys.insert(ki); + crypto::key_image_y ki_y; + crypto::key_image_to_y(ki, ki_y); + auto r = m_spent_keys.insert(ki_y); if(!r.second || m_db->has_key_image(ki)) { //double spend detected @@ -5150,7 +5154,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector>()); + m_scan_table.emplace(tx_prefix_hash, std::unordered_map>()); its = m_scan_table.find(tx_prefix_hash); assert(its != m_scan_table.end()); @@ -5160,7 +5164,9 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector (txin); // check for duplicate - auto it = its->second.find(in_to_key.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(in_to_key.k_image, ki_y); + auto it = its->second.find(ki_y); if (it != its->second.end()) SCAN_TABLE_QUIT("Duplicate key_image found from incoming blocks."); @@ -5277,7 +5283,9 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vectorsecond.emplace(in_to_key.k_image, outputs); + crypto::key_image_y ki_y; + crypto::key_image_to_y(in_to_key.k_image, ki_y); + its->second.emplace(ki_y, outputs); } } } @@ -5576,7 +5584,7 @@ void Blockchain::unlock() m_blockchain_lock.unlock(); } -bool Blockchain::for_all_key_images(std::function f) const +bool Blockchain::for_all_key_images(std::function f) const { 
return m_db->for_all_key_images(f); } diff --git a/src/cryptonote_core/blockchain.h b/src/cryptonote_core/blockchain.h index 503ea92ddf8..1a4e08ef489 100644 --- a/src/cryptonote_core/blockchain.h +++ b/src/cryptonote_core/blockchain.h @@ -955,7 +955,7 @@ namespace cryptonote * * @return false if any key image fails the check, otherwise true */ - bool for_all_key_images(std::function) const; + bool for_all_key_images(std::function) const; /** * @brief perform a check on all blocks in the blockchain in the given range @@ -1125,7 +1125,7 @@ namespace cryptonote #endif // TODO: evaluate whether or not each of these typedefs are left over from blockchain_storage - typedef std::unordered_set key_images_container; + typedef std::unordered_set key_images_container; typedef std::vector blocks_container; @@ -1143,7 +1143,7 @@ namespace cryptonote size_t m_current_block_cumul_weight_median; // metadata containers - std::unordered_map>> m_scan_table; + std::unordered_map>> m_scan_table; std::unordered_map m_blocks_longhash_table; // Keccak hashes for each block and for fast pow checking diff --git a/src/cryptonote_core/cryptonote_core.cpp b/src/cryptonote_core/cryptonote_core.cpp index 08c72573b64..03c6cefdb7e 100644 --- a/src/cryptonote_core/cryptonote_core.cpp +++ b/src/cryptonote_core/cryptonote_core.cpp @@ -1292,11 +1292,13 @@ namespace cryptonote //----------------------------------------------------------------------------------------------- bool core::check_tx_inputs_keyimages_diff(const transaction& tx) const { - std::unordered_set ki; + std::unordered_set ki; for(const auto& in: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, tokey_in, false); - if(!ki.insert(tokey_in.k_image).second) + crypto::key_image_y ki_y; + crypto::key_image_to_y(tokey_in.k_image, ki_y); + if(!ki.insert(ki_y).second) return false; } return true; diff --git a/src/cryptonote_core/tx_pool.cpp b/src/cryptonote_core/tx_pool.cpp index 2d01b2bb287..fdbefcfadde 100644 --- 
a/src/cryptonote_core/tx_pool.cpp +++ b/src/cryptonote_core/tx_pool.cpp @@ -523,7 +523,9 @@ namespace cryptonote for(const auto& in: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, txin, false); - std::unordered_set& kei_image_set = m_spent_key_images[txin.k_image]; + crypto::key_image_y ki_y; + const bool ki_sign = crypto::key_image_to_y(txin.k_image, ki_y); + auto& kei_image_set = m_spent_key_images[ki_y]; // Only allow multiple txes per key-image if kept-by-block. Only allow // the same txid if going from local/stem->fluff. @@ -531,14 +533,14 @@ namespace cryptonote if (tx_relay != relay_method::block) { const bool one_txid = - (kei_image_set.empty() || (kei_image_set.size() == 1 && *(kei_image_set.cbegin()) == id)); + (kei_image_set.empty() || (kei_image_set.size() == 1 && (*(kei_image_set.cbegin())).tx_hash == id)); CHECK_AND_ASSERT_MES(one_txid, false, "internal error: tx_relay=" << unsigned(tx_relay) << ", kei_image_set.size()=" << kei_image_set.size() << ENDL << "txin.k_image=" << txin.k_image << ENDL << "tx_id=" << id); } const bool new_or_previously_private = - kei_image_set.insert(id).second || + kei_image_set.insert({id, ki_sign}).second || !m_blockchain.txpool_tx_matches_category(id, relay_category::legacy); CHECK_AND_ASSERT_MES(new_or_previously_private, false, "internal error: try to insert duplicate iterator in key_image set"); } @@ -557,14 +559,16 @@ namespace cryptonote for(const txin_v& vi: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(vi, const txin_to_key, txin, false); - auto it = m_spent_key_images.find(txin.k_image); + crypto::key_image_y ki_y; + const bool ki_sign = crypto::key_image_to_y(txin.k_image, ki_y); + auto it = m_spent_key_images.find(ki_y); CHECK_AND_ASSERT_MES(it != m_spent_key_images.end(), false, "failed to find transaction input in key images. 
img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); - std::unordered_set& key_image_set = it->second; + auto& key_image_set = it->second; CHECK_AND_ASSERT_MES(key_image_set.size(), false, "empty key_image set, img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); - auto it_in_set = key_image_set.find(actual_hash); + auto it_in_set = key_image_set.find({actual_hash, ki_sign}); CHECK_AND_ASSERT_MES(it_in_set != key_image_set.end(), false, "transaction id not found in key_image set, img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); key_image_set.erase(it_in_set); @@ -1099,7 +1103,7 @@ namespace cryptonote backlog.clear(); uint64_t w = 0; - std::unordered_set k_images; + std::unordered_set k_images; for (const tx_block_template_backlog_entry& e : tmp) { @@ -1262,19 +1266,40 @@ namespace cryptonote }, true, category); for (const key_images_container::value_type& kee : m_spent_key_images) { - const crypto::key_image& k_image = kee.first; - const std::unordered_set& kei_image_set = kee.second; - spent_key_image_info ki; - ki.id_hash = epee::string_tools::pod_to_hex(k_image); - for (const crypto::hash& tx_id_hash : kei_image_set) - { - if (m_blockchain.txpool_tx_matches_category(tx_id_hash, category)) - ki.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_id_hash)); + // id_hash corresponds to key image as the daemon received it, so we need + // to derive key image from key_image_y and sign bit to prevent a breaking + // change to clients. After the fcmp fork, all key images should have sign + // bit cleared so this can be cleaned up further. 
+ const crypto::key_image_y& k_image_y = kee.first; + const auto& kei_image_set = kee.second; + spent_key_image_info ki_info_sign; + spent_key_image_info ki_info_no_sign; + for (const auto& ki_context : kei_image_set) + { + const crypto::hash &tx_hash = ki_context.tx_hash; + const bool sign = ki_context.sign; + if (m_blockchain.txpool_tx_matches_category(tx_hash, category)) + { + crypto::key_image ki; + crypto::key_image_from_y(k_image_y, sign, ki); + if (sign) + { + ki_info_sign.id_hash = epee::string_tools::pod_to_hex(ki); + ki_info_sign.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_hash)); + } + else + { + ki_info_no_sign.id_hash = epee::string_tools::pod_to_hex(ki); + ki_info_no_sign.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_hash)); + } + } } // Only return key images for which we have at least one tx that we can show for them - if (!ki.txs_hashes.empty()) - key_image_infos.push_back(std::move(ki)); + if (!ki_info_sign.txs_hashes.empty()) + key_image_infos.push_back(std::move(ki_info_sign)); + if (!ki_info_no_sign.txs_hashes.empty()) + key_image_infos.push_back(std::move(ki_info_no_sign)); } return true; } @@ -1314,11 +1339,11 @@ namespace cryptonote for (const key_images_container::value_type& kee : m_spent_key_images) { std::vector tx_hashes; - const std::unordered_set& kei_image_set = kee.second; - for (const crypto::hash& tx_id_hash : kei_image_set) + const auto& kei_image_set = kee.second; + for (const auto& ki_context : kei_image_set) { - if (m_blockchain.txpool_tx_matches_category(tx_id_hash, relay_category::broadcasted)) - tx_hashes.push_back(tx_id_hash); + if (m_blockchain.txpool_tx_matches_category(ki_context.tx_hash, relay_category::broadcasted)) + tx_hashes.push_back(ki_context.tx_hash); } if (!tx_hashes.empty()) @@ -1337,11 +1362,13 @@ namespace cryptonote for (const auto& image : key_images) { bool is_spent = false; - const auto found = m_spent_key_images.find(image); + crypto::key_image_y ki_y; + 
crypto::key_image_to_y(image, ki_y); + const auto found = m_spent_key_images.find(ki_y); if (found != m_spent_key_images.end()) { - for (const crypto::hash& tx_hash : found->second) - is_spent |= m_blockchain.txpool_tx_matches_category(tx_hash, relay_category::broadcasted); + for (const auto& ki_context : found->second) + is_spent |= m_blockchain.txpool_tx_matches_category(ki_context.tx_hash, relay_category::broadcasted); } spent.push_back(is_spent); } @@ -1402,12 +1429,14 @@ namespace cryptonote bool tx_memory_pool::have_tx_keyimg_as_spent(const crypto::key_image& key_im, const crypto::hash& txid) const { CRITICAL_REGION_LOCAL(m_transactions_lock); - const auto found = m_spent_key_images.find(key_im); + crypto::key_image_y ki_y; + crypto::key_image_to_y(key_im, ki_y); + const auto found = m_spent_key_images.find(ki_y); if (found != m_spent_key_images.end() && !found->second.empty()) { // If another tx is using the key image, always return as spent. // See `insert_key_images`. - if (1 < found->second.size() || *(found->second.cbegin()) != txid) + if (1 < found->second.size() || (*(found->second.cbegin())).tx_hash != txid) return true; return m_blockchain.txpool_tx_matches_category(txid, relay_category::legacy); } @@ -1515,23 +1544,27 @@ namespace cryptonote return is_transaction_ready_to_go(txd, txid, cryptonote::blobdata_ref{txblob.data(), txblob.size()}, tx); } //--------------------------------------------------------------------------------- - bool tx_memory_pool::have_key_images(const std::unordered_set& k_images, const transaction_prefix& tx) + bool tx_memory_pool::have_key_images(const std::unordered_set& k_images, const transaction_prefix& tx) { for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, false); - if(k_images.count(itk.k_image)) + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + if(k_images.count(ki_y)) return true; } return false; } 
//--------------------------------------------------------------------------------- - bool tx_memory_pool::append_key_images(std::unordered_set& k_images, const transaction_prefix& tx) + bool tx_memory_pool::append_key_images(std::unordered_set& k_images, const transaction_prefix& tx) { for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, false); - auto i_res = k_images.insert(itk.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + auto i_res = k_images.insert(ki_y); CHECK_AND_ASSERT_MES(i_res.second, false, "internal error: key images pool cache - inserted duplicate image in set: " << itk.k_image); } return true; @@ -1546,11 +1579,14 @@ namespace cryptonote for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, void()); - const key_images_container::const_iterator it = m_spent_key_images.find(itk.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + const key_images_container::const_iterator it = m_spent_key_images.find(ki_y); if (it != m_spent_key_images.end()) { - for (const crypto::hash &txid: it->second) + for (const auto &ki_context: it->second) { + const auto &txid = ki_context.tx_hash; txpool_tx_meta_t meta; if (!m_blockchain.get_txpool_tx_meta(txid, meta)) { @@ -1634,7 +1670,7 @@ namespace cryptonote size_t max_total_weight_pre_v5 = (130 * median_weight) / 100 - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE; size_t max_total_weight_v5 = 2 * median_weight - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE; size_t max_total_weight = version >= 5 ? 
max_total_weight_v5 : max_total_weight_pre_v5; - std::unordered_set k_images; + std::unordered_set k_images; LOG_PRINT_L2("Filling block template, median weight " << median_weight << ", " << m_txs_by_fee_and_receive_time.size() << " txes in the pool"); diff --git a/src/cryptonote_core/tx_pool.h b/src/cryptonote_core/tx_pool.h index 69a123fc9e3..73313ba29e7 100644 --- a/src/cryptonote_core/tx_pool.h +++ b/src/cryptonote_core/tx_pool.h @@ -81,6 +81,14 @@ namespace cryptonote //! container for sorting transactions by fee per unit size typedef std::set sorted_tx_container; + //! key image's contextual data + struct ki_context_t + { + crypto::hash tx_hash; + bool sign; // original key image had sign bit set + bool operator==(const ki_context_t rhs) const { return rhs.tx_hash == tx_hash && rhs.sign == sign; }; + }; + /** * @brief Transaction pool, handles transactions which are not part of a block * @@ -553,7 +561,7 @@ namespace cryptonote * * @return true if any key images present in the set, otherwise false */ - static bool have_key_images(const std::unordered_set& kic, const transaction_prefix& tx); + static bool have_key_images(const std::unordered_set& kic, const transaction_prefix& tx); /** * @brief append the key images from a transaction to the given set @@ -563,7 +571,7 @@ namespace cryptonote * * @return false if any append fails, otherwise true */ - static bool append_key_images(std::unordered_set& kic, const transaction_prefix& tx); + static bool append_key_images(std::unordered_set& kic, const transaction_prefix& tx); /** * @brief check if a transaction is a valid candidate for inclusion in a block @@ -602,8 +610,12 @@ namespace cryptonote * in the event of a reorg where someone creates a new/different * transaction on the assumption that the original will not be in a * block again. + *! 
we use key_image_y as the key since we need to prevent double spends of + * key image y coordinates (fcmp's enables constructing key images with + * sign bit cleared for key images which may already exist in the chain + * with sign bit set) */ - typedef std::unordered_map> key_images_container; + typedef std::unordered_map> key_images_container; #if defined(DEBUG_CREATE_BLOCK_TEMPLATE) public: @@ -713,5 +725,17 @@ namespace boost BOOST_CLASS_VERSION(cryptonote::tx_memory_pool, CURRENT_MEMPOOL_ARCHIVE_VER) BOOST_CLASS_VERSION(cryptonote::tx_memory_pool::tx_details, CURRENT_MEMPOOL_TX_DETAILS_ARCHIVE_VER) +namespace std +{ + template<> struct hash + { + std::size_t operator()(const cryptonote::ki_context_t &_ki_context) const + { + std::size_t res = reinterpret_cast(_ki_context.tx_hash); + res += _ki_context.sign ? 1 : 0; + return res; + } + }; +} // std diff --git a/src/rpc/core_rpc_server.cpp b/src/rpc/core_rpc_server.cpp index 7fcd1e6d7ce..46df1bcede5 100644 --- a/src/rpc/core_rpc_server.cpp +++ b/src/rpc/core_rpc_server.cpp @@ -1288,6 +1288,16 @@ namespace cryptonote res.status = "Failed"; return true; } + + std::vector key_images_y; + key_images_y.reserve(key_images.size()); + for (const auto &ki : key_images) + { + crypto::key_image_y ki_y; + crypto::key_image_to_y(ki, ki_y); + key_images_y.emplace_back(std::move(ki_y)); + } + for (std::vector::const_iterator i = ki.begin(); i != ki.end(); ++i) { crypto::hash hash; @@ -1295,11 +1305,13 @@ namespace cryptonote if (parse_hash256(i->id_hash, hash)) { memcpy(&spent_key_image, &hash, sizeof(hash)); // a bit dodgy, should be other parse functions somewhere + crypto::key_image_y spent_key_image_y; + crypto::key_image_to_y(spent_key_image, spent_key_image_y); for (size_t n = 0; n < res.spent_status.size(); ++n) { if (res.spent_status[n] == COMMAND_RPC_IS_KEY_IMAGE_SPENT::UNSPENT) { - if (key_images[n] == spent_key_image) + if (key_images_y[n] == spent_key_image_y) { res.spent_status[n] = 
COMMAND_RPC_IS_KEY_IMAGE_SPENT::SPENT_IN_POOL; break; diff --git a/src/rpc/message_data_structs.h b/src/rpc/message_data_structs.h index 3fff923b77c..f808f0b19c6 100644 --- a/src/rpc/message_data_structs.h +++ b/src/rpc/message_data_structs.h @@ -104,7 +104,7 @@ namespace rpc bool double_spend_seen; }; - typedef std::unordered_map > key_images_with_tx_hashes; + typedef std::unordered_map > key_images_with_tx_hashes; struct output_amount_count { diff --git a/tests/crypto/main.cpp b/tests/crypto/main.cpp index 668c04ea107..a2e3a96b58f 100644 --- a/tests/crypto/main.cpp +++ b/tests/crypto/main.cpp @@ -277,6 +277,24 @@ int main(int argc, char *argv[]) { if (expected != actual) { goto error; } + } else if (cmd == "key_image_to_y") { + key_image ki; + key_image_y expected_ki_y, actual_ki_y; + bool expected_sign, actual_sign; + get(input, ki, expected_ki_y, expected_sign); + actual_sign = key_image_to_y(ki, actual_ki_y); + if (expected_ki_y != actual_ki_y || expected_sign != actual_sign) { + goto error; + } + } else if (cmd == "key_image_from_y") { + key_image_y ki_y; + bool sign; + key_image expected_ki, actual_ki; + get(input, ki_y, sign, expected_ki); + key_image_from_y(ki_y, sign, actual_ki); + if (expected_ki != actual_ki) { + goto error; + } } else { throw ios_base::failure("Unknown function: " + cmd); } diff --git a/tests/crypto/tests.txt b/tests/crypto/tests.txt index 32e3b2d090b..9e48c8b28da 100644 --- a/tests/crypto/tests.txt +++ b/tests/crypto/tests.txt @@ -5543,3 +5543,203 @@ derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 15 00 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 127 a6 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 128 0d +key_image_to_y fefdcf401bcf85b3b744e7c9f6af8ea0e181c799b1ec0f1c887cf77df085051d fefdcf401bcf85b3b744e7c9f6af8ea0e181c799b1ec0f1c887cf77df085051d 
false +key_image_to_y af6ce7761f2062d7f6f1f7158e4448989b459dfa1d6df35db12360e2322aab1b af6ce7761f2062d7f6f1f7158e4448989b459dfa1d6df35db12360e2322aab1b false +key_image_to_y f2e73a432004eeac96746e43885021ec1fc2a59d11a5cb17e0757aedc8bc2a2e f2e73a432004eeac96746e43885021ec1fc2a59d11a5cb17e0757aedc8bc2a2e false +key_image_to_y b6692eb3436a670837ce2ed2a580ed18a62eaa1a7c7c515882e8a6a6e3416867 b6692eb3436a670837ce2ed2a580ed18a62eaa1a7c7c515882e8a6a6e3416867 false +key_image_to_y b405de8162d9d3b2f89588e374aa0efde8bfd9b9f848cf2b9831258d776a3512 b405de8162d9d3b2f89588e374aa0efde8bfd9b9f848cf2b9831258d776a3512 false +key_image_to_y 0b5a7872f28930d1384dbf75c41c06ff9254d807507cbfacbba4e8ae71191f4b 0b5a7872f28930d1384dbf75c41c06ff9254d807507cbfacbba4e8ae71191f4b false +key_image_to_y d947f1c89ec5de075b3c987d69ccd54b5a91eb78d7708e876b6968537bd5f877 d947f1c89ec5de075b3c987d69ccd54b5a91eb78d7708e876b6968537bd5f877 false +key_image_to_y f995fb5b59cec97eef860b27c1cf8c30d5da21324ab351e3bb62a97712a22830 f995fb5b59cec97eef860b27c1cf8c30d5da21324ab351e3bb62a97712a22830 false +key_image_to_y 07c2fa33abba6add8c34b43bbef8d4cc5fb515f876c7d06440a676e9d68fee36 07c2fa33abba6add8c34b43bbef8d4cc5fb515f876c7d06440a676e9d68fee36 false +key_image_to_y 96b01ba882e094e90fb5ae63134da1c78be4e62c57f1eb121bef2789f5fe3923 96b01ba882e094e90fb5ae63134da1c78be4e62c57f1eb121bef2789f5fe3923 false +key_image_to_y b4796a8bc9801ed57e28d59b95c18371e8cf933e297551ec5bd1f47b270c9c7b b4796a8bc9801ed57e28d59b95c18371e8cf933e297551ec5bd1f47b270c9c7b false +key_image_to_y b964be6438b709aed6d79677c7c0cc6446f502b31a1af108409c3d3a80c84203 b964be6438b709aed6d79677c7c0cc6446f502b31a1af108409c3d3a80c84203 false +key_image_to_y ae926a5a753f3d7a7aaf4dd1ca171c45c0bb0aa75280e9b9e088a764b2d0ef55 ae926a5a753f3d7a7aaf4dd1ca171c45c0bb0aa75280e9b9e088a764b2d0ef55 false +key_image_to_y 69463dbc77e0bd1363f46c05a0791bd84a9f34b4c6274654fa84ac74a7273331 69463dbc77e0bd1363f46c05a0791bd84a9f34b4c6274654fa84ac74a7273331 false +key_image_to_y 
8ea9c60b287243a611316ff4a70ca667610c06a51570d65c836626fa2a81aa54 8ea9c60b287243a611316ff4a70ca667610c06a51570d65c836626fa2a81aa54 false +key_image_to_y ca55d6a70218adfe3e6e893c39888a01f7b297fdddaf8a48f333ed6bc5731d32 ca55d6a70218adfe3e6e893c39888a01f7b297fdddaf8a48f333ed6bc5731d32 false +key_image_to_y 5880cbd36de5697e73a8310972d13dd4c5a10ec091501abd63bc3dc21a736305 5880cbd36de5697e73a8310972d13dd4c5a10ec091501abd63bc3dc21a736305 false +key_image_to_y 70b620fd6ffec9720309dcc4d90fe37244ce62276c0fd910f782d72976909306 70b620fd6ffec9720309dcc4d90fe37244ce62276c0fd910f782d72976909306 false +key_image_to_y ddb3a9e3c57fbc7bf030b0155afe41563b0b89fdc50aed9a203319b65a3f960b ddb3a9e3c57fbc7bf030b0155afe41563b0b89fdc50aed9a203319b65a3f960b false +key_image_to_y 4eb11c28a0eb7ce30dcca67ae05c79181a8603ec1e55d83000cb72c3842da100 4eb11c28a0eb7ce30dcca67ae05c79181a8603ec1e55d83000cb72c3842da100 false +key_image_to_y 32df4f304bb1bed43f76d2315d139fe66aa79ede363ab5961e828f477dfbf772 32df4f304bb1bed43f76d2315d139fe66aa79ede363ab5961e828f477dfbf772 false +key_image_to_y b46bba83dbe888af05289c99911d251ab71621222311ea18cf5ca6cdcd74ed2b b46bba83dbe888af05289c99911d251ab71621222311ea18cf5ca6cdcd74ed2b false +key_image_to_y 582535660d8d8a8b11158b2cc72baab9824ca63b7b9ed99d19247d140cd4fb23 582535660d8d8a8b11158b2cc72baab9824ca63b7b9ed99d19247d140cd4fb23 false +key_image_to_y 4cfb08d96e162470e92651550ec06d6c693d428b8d85d43c53b67ff6dbc53030 4cfb08d96e162470e92651550ec06d6c693d428b8d85d43c53b67ff6dbc53030 false +key_image_to_y 1cd08eb20c60ba42f3eaeccce39ea185d588f61e3e51a38ff5cc48aa4458037e 1cd08eb20c60ba42f3eaeccce39ea185d588f61e3e51a38ff5cc48aa4458037e false +key_image_to_y 3c7e2a6795db3c1b70f8786e2d2d20116dd6478acfc374362b6ea106a1af2d42 3c7e2a6795db3c1b70f8786e2d2d20116dd6478acfc374362b6ea106a1af2d42 false +key_image_to_y e01f40decbc883a5daa126755e972e6427b052be9f3edec00d49041119a44f63 e01f40decbc883a5daa126755e972e6427b052be9f3edec00d49041119a44f63 false +key_image_to_y 
048fbfa1d56a6d7c239321eb85aebf6839fc4ac830329aebce827f5140d7cb0c 048fbfa1d56a6d7c239321eb85aebf6839fc4ac830329aebce827f5140d7cb0c false +key_image_to_y 31630841a494d5a34d89a4709d8b36eee4ab3b6cf4914ff61a6b0eace1cdbd43 31630841a494d5a34d89a4709d8b36eee4ab3b6cf4914ff61a6b0eace1cdbd43 false +key_image_to_y 1350e9f4231fa7a7878172a08a63618dc710ca4bfa9a93a32dd0976ecbf67059 1350e9f4231fa7a7878172a08a63618dc710ca4bfa9a93a32dd0976ecbf67059 false +key_image_to_y 325eef1bb5a4d96a5ec074cc29fd4078a1aadc3f7435985d42c96cbc9526a002 325eef1bb5a4d96a5ec074cc29fd4078a1aadc3f7435985d42c96cbc9526a002 false +key_image_to_y 86e74b20ec60d8162c026206a61dfb8da300a0b563cb69c3f456c8a21f135d4f 86e74b20ec60d8162c026206a61dfb8da300a0b563cb69c3f456c8a21f135d4f false +key_image_to_y f3f3100cd90ce128b4c8d6c339d77249106c0a656fe651fe7a285a607e47a966 f3f3100cd90ce128b4c8d6c339d77249106c0a656fe651fe7a285a607e47a966 false +key_image_to_y 625fc0d4f4728f7a659c127026bbbc7c0c26a68b351a1656c8875e2d5ff1473f 625fc0d4f4728f7a659c127026bbbc7c0c26a68b351a1656c8875e2d5ff1473f false +key_image_to_y e10e93b9478bb561f4b08fc1d9d0b63f2f4b082ba49e5d0736bc5dac7551896a e10e93b9478bb561f4b08fc1d9d0b63f2f4b082ba49e5d0736bc5dac7551896a false +key_image_to_y 0a45ed914810e7a1c4e94d5d3466702790fe2882458092fcec8fd2ece7544e12 0a45ed914810e7a1c4e94d5d3466702790fe2882458092fcec8fd2ece7544e12 false +key_image_to_y a83bb44ffaa27eb6c1f1bce66018e3fa96587d30f37a5338905616502b78da47 a83bb44ffaa27eb6c1f1bce66018e3fa96587d30f37a5338905616502b78da47 false +key_image_to_y 9d96f7f4617236da071a986deb7e3afdd3b96989c747384bc6d1b863ff72620c 9d96f7f4617236da071a986deb7e3afdd3b96989c747384bc6d1b863ff72620c false +key_image_to_y e4cc5a1a31184e706c2a8aaf510f16cd2f5d623037aae52a27b010319522bf10 e4cc5a1a31184e706c2a8aaf510f16cd2f5d623037aae52a27b010319522bf10 false +key_image_to_y 7baf7f85021d837208a600256a78684c5c2542f0cf085df7d75d4e5c148a4358 7baf7f85021d837208a600256a78684c5c2542f0cf085df7d75d4e5c148a4358 false +key_image_to_y 
b55f9018f08daea272b8726fa9a3fd5c0a97683654a694cf361bc534a0d74a54 b55f9018f08daea272b8726fa9a3fd5c0a97683654a694cf361bc534a0d74a54 false +key_image_to_y 218e58d7355d1534b5b633abc5caa16386ca3109519dd04086db30c6c7d1af06 218e58d7355d1534b5b633abc5caa16386ca3109519dd04086db30c6c7d1af06 false +key_image_to_y 0f274b89128658a5d1736638fef3ca7ce20a4d89ff23d7c79d9add00fa263b3b 0f274b89128658a5d1736638fef3ca7ce20a4d89ff23d7c79d9add00fa263b3b false +key_image_to_y dcfddcb87039b6a615df29491fd39b8decf62bc8b06cb85f170eeec1505c6001 dcfddcb87039b6a615df29491fd39b8decf62bc8b06cb85f170eeec1505c6001 false +key_image_to_y b75a2d1c3f7f262830619e5ed38cbb9656737d273180c4e7f4fac875d434fd18 b75a2d1c3f7f262830619e5ed38cbb9656737d273180c4e7f4fac875d434fd18 false +key_image_to_y fe5b354a174c97dc1742a71191f602395867efb961c817764171f39f50347264 fe5b354a174c97dc1742a71191f602395867efb961c817764171f39f50347264 false +key_image_to_y 16c2908507300ffbb4b346e19c15cd3b9f04459ee939a144866e1f02d19ef97f 16c2908507300ffbb4b346e19c15cd3b9f04459ee939a144866e1f02d19ef97f false +key_image_to_y 14c04cb0415df29bb918fd9e4b6a878ead2669668ba72d7d78c7c74068d50377 14c04cb0415df29bb918fd9e4b6a878ead2669668ba72d7d78c7c74068d50377 false +key_image_to_y 35e1f520f6cae77dc98e0bbd09129d86e82fb5ad23f44f676f5b56731e575a13 35e1f520f6cae77dc98e0bbd09129d86e82fb5ad23f44f676f5b56731e575a13 false +key_image_to_y 35747e2cba77c3103c205919180eeb55d614af69107d586e0b4946651b815a5b 35747e2cba77c3103c205919180eeb55d614af69107d586e0b4946651b815a5b false +key_image_to_y 977b0f71a082f9a73ce6343ba0f12e257477633b0ddda6ec79fa4efa2a1d2e29 977b0f71a082f9a73ce6343ba0f12e257477633b0ddda6ec79fa4efa2a1d2e29 false +key_image_to_y 2b9c90881584045c6b114d6c86be8901ce38162a2168ba1a485203d89a5c6c2f 2b9c90881584045c6b114d6c86be8901ce38162a2168ba1a485203d89a5c6c2f false +key_image_to_y 61debcd08a03cffec9745c95371f749d749b1f24dafd8f1b3016105f77408b0c 61debcd08a03cffec9745c95371f749d749b1f24dafd8f1b3016105f77408b0c false +key_image_to_y 
8bc860d86aad2dd0be6af91f7e5185d56fa66d9e7ffb1339b0c5991663bcaa54 8bc860d86aad2dd0be6af91f7e5185d56fa66d9e7ffb1339b0c5991663bcaa54 false +key_image_to_y 25bf7f10ebd260a5dbae567dfce30525dfbc9af1b2521e5baeb7fd8cbc2ca93d 25bf7f10ebd260a5dbae567dfce30525dfbc9af1b2521e5baeb7fd8cbc2ca93d false +key_image_to_y b949beebe0ffe7b87bc1c9e4bce431d6d75d706b008797043607bf407a301e5e b949beebe0ffe7b87bc1c9e4bce431d6d75d706b008797043607bf407a301e5e false +key_image_to_y a896a41cd7622e38d0b43ee402f48886b3daa8f747dd96f8661243ee513d98de a896a41cd7622e38d0b43ee402f48886b3daa8f747dd96f8661243ee513d985e true +key_image_to_y 70491afad4c4739263dac2a94cadaffe95115553ed2252f784704867a05488fd 70491afad4c4739263dac2a94cadaffe95115553ed2252f784704867a054887d true +key_image_to_y b131e2745a54155a1d490e73e95294466740fbb5276f727a63e5f5eb182e13a1 b131e2745a54155a1d490e73e95294466740fbb5276f727a63e5f5eb182e1321 true +key_image_to_y 5cb915be5aec2fb6986143f6f3df4563e9051ea96591f20f20f49f97a01055ea 5cb915be5aec2fb6986143f6f3df4563e9051ea96591f20f20f49f97a010556a true +key_image_to_y 3810186d871bdfe0e7269d1d26682a1761c91d6d934b370ea17b14dc7044c6c8 3810186d871bdfe0e7269d1d26682a1761c91d6d934b370ea17b14dc7044c648 true +key_image_to_y 00db468b9479beed003e34d38d439267d6e6acffde1e606f465dbce0fc0666d0 00db468b9479beed003e34d38d439267d6e6acffde1e606f465dbce0fc066650 true +key_image_to_y a6c29a734b7aa4d93d29657be4dacb4a0f3595e530d8eff1edb08f1eace15181 a6c29a734b7aa4d93d29657be4dacb4a0f3595e530d8eff1edb08f1eace15101 true +key_image_to_y 48d9a438ef0265d8936f58a14c5b786a47481098b6db206b61e8305cf0b780cc 48d9a438ef0265d8936f58a14c5b786a47481098b6db206b61e8305cf0b7804c true +key_image_to_y 93204f8620430334b844704de904ad1bc6c8622769360c679b77df1673264e90 93204f8620430334b844704de904ad1bc6c8622769360c679b77df1673264e10 true +key_image_to_y dfb14b5961117227711861b81778b283aeded0cdd6f9717a95701042f7b2ddcf dfb14b5961117227711861b81778b283aeded0cdd6f9717a95701042f7b2dd4f true +key_image_to_y 
7c06d5309ceecd9f4e2dd11a28dfdc035096780f36d9a4c61d63ff54075e1ad3 7c06d5309ceecd9f4e2dd11a28dfdc035096780f36d9a4c61d63ff54075e1a53 true +key_image_to_y 70d3176a9b2ca21e60675ce4b2a097d8e9d4794a0e838cc598b21a6a7aef06cd 70d3176a9b2ca21e60675ce4b2a097d8e9d4794a0e838cc598b21a6a7aef064d true +key_image_to_y 8266564fa110f488aefb36048d46959708bdd839cb8b7ba58190c9c8ffd27cab 8266564fa110f488aefb36048d46959708bdd839cb8b7ba58190c9c8ffd27c2b true +key_image_to_y 06d94e0765b6b11ab9c1baf3e5ff1cddbaf7f2c0ac17cf0da55cde4b06bab0b2 06d94e0765b6b11ab9c1baf3e5ff1cddbaf7f2c0ac17cf0da55cde4b06bab032 true +key_image_to_y f7d0ac71751e9bea9e8bd8da43ddab22bcb3edacf019a99443b9068cd4474185 f7d0ac71751e9bea9e8bd8da43ddab22bcb3edacf019a99443b9068cd4474105 true +key_image_to_y ee029aa2269ce142e2e9d6e0502cf7ee23dedc847436c0eb935a1ded8701f382 ee029aa2269ce142e2e9d6e0502cf7ee23dedc847436c0eb935a1ded8701f302 true +key_image_to_y 370640fae7b49a03da1c99538d6afe52bf09afd160e35c9e971b41c2aba8e3cc 370640fae7b49a03da1c99538d6afe52bf09afd160e35c9e971b41c2aba8e34c true +key_image_to_y 02d781e9c56a25a924fd49f5f80eee90eb55f0d7269d7157f89403dfc58ad386 02d781e9c56a25a924fd49f5f80eee90eb55f0d7269d7157f89403dfc58ad306 true +key_image_to_y 7cb9d19798bc4bf5402326b2e9aa371b2b7a504f09e4cfc123d23ee0f05098fc 7cb9d19798bc4bf5402326b2e9aa371b2b7a504f09e4cfc123d23ee0f050987c true +key_image_to_y f01e93a0b1fb01890b162f7002a4425c35421e8a46cec1d9c84d0fa9263990e5 f01e93a0b1fb01890b162f7002a4425c35421e8a46cec1d9c84d0fa926399065 true +key_image_to_y 8018dc18a0bf007fde0bb5293ef247b9446f4f0b9c20d18194a216fc500bf4db 8018dc18a0bf007fde0bb5293ef247b9446f4f0b9c20d18194a216fc500bf45b true +key_image_to_y f8e3600327a95a90b8dea0659ac00110b45c410b97dad6660348892891ffb690 f8e3600327a95a90b8dea0659ac00110b45c410b97dad6660348892891ffb610 true +key_image_to_y 11ed169c028c854bd41f6d7ea583ec50c1568bfc8c784e4d04d30533e58496f3 11ed169c028c854bd41f6d7ea583ec50c1568bfc8c784e4d04d30533e5849673 true +key_image_to_y 
ddcac461f3c9d265cce797039bbfff3f3156d07c4e0231b096292434df5bcabb ddcac461f3c9d265cce797039bbfff3f3156d07c4e0231b096292434df5bca3b true +key_image_to_y b861f2dba6252d878029f417ac02555f9502c66d889de49683262d1b020f5adb b861f2dba6252d878029f417ac02555f9502c66d889de49683262d1b020f5a5b true +key_image_to_y cda6bd18b5dbe8705d7a7be1d5f4b96767bf03d901931b643ee138ba66c64dd3 cda6bd18b5dbe8705d7a7be1d5f4b96767bf03d901931b643ee138ba66c64d53 true +key_image_to_y b4aa5fefc0e81a37f3ac19482a32fc49141c79c013e8d9058a9d1c6ca347a79b b4aa5fefc0e81a37f3ac19482a32fc49141c79c013e8d9058a9d1c6ca347a71b true +key_image_to_y cc3b15a7feec558a73e12028b11cede86ff9f6956b014722872037b9ee652ebf cc3b15a7feec558a73e12028b11cede86ff9f6956b014722872037b9ee652e3f true +key_image_to_y 1e5b547e0e6da07390a74da76995118abc565c4e7f4acb24d90e5f85721d33d5 1e5b547e0e6da07390a74da76995118abc565c4e7f4acb24d90e5f85721d3355 true +key_image_to_y f3003c72bf5f87b97f34dc255dda2cb39d3e8e4045168631de8d2fecf5e76296 f3003c72bf5f87b97f34dc255dda2cb39d3e8e4045168631de8d2fecf5e76216 true +key_image_to_y 241452c33318416debb476707bcb7e52c9f3480768ac2c9bf394ce36df7923de 241452c33318416debb476707bcb7e52c9f3480768ac2c9bf394ce36df79235e true +key_image_to_y 8a4f7a0e19ad5af9315b0691f35506fc78e9e8fe7f5572e36d19d582526abdff 8a4f7a0e19ad5af9315b0691f35506fc78e9e8fe7f5572e36d19d582526abd7f true +key_image_to_y fdae81e5a3719b1ac05f27cd7bf83e01bd5026d91e99b6f8bc1672bc2711fb91 fdae81e5a3719b1ac05f27cd7bf83e01bd5026d91e99b6f8bc1672bc2711fb11 true +key_image_to_y 2649d2bc7f3e0d6b87e5d519d5aad9f8e22ff5e8f02466efc33be443e67d76f0 2649d2bc7f3e0d6b87e5d519d5aad9f8e22ff5e8f02466efc33be443e67d7670 true +key_image_to_y fc161a566fc014ed9e15e4cec7b2eb1c19a3220e518106982843861c9aac69e3 fc161a566fc014ed9e15e4cec7b2eb1c19a3220e518106982843861c9aac6963 true +key_image_to_y d246c119405dfd6de0ed83a04ca542caf73785b55671572a60ea5f665ec91296 d246c119405dfd6de0ed83a04ca542caf73785b55671572a60ea5f665ec91216 true +key_image_to_y 
f28722915db5acda96b5281f2a36625e9994d5b8eca68f3e250dd4c4e815b5c1 f28722915db5acda96b5281f2a36625e9994d5b8eca68f3e250dd4c4e815b541 true +key_image_to_y eb73cb1356f4114d01983d552301bb8f4927b41256d9c90d52024476d3d2e2cc eb73cb1356f4114d01983d552301bb8f4927b41256d9c90d52024476d3d2e24c true +key_image_to_y 5d3ea3b7c892e585008a220c51cbe42ae7e0c7e9e525a42ec492d3a7602a1cd5 5d3ea3b7c892e585008a220c51cbe42ae7e0c7e9e525a42ec492d3a7602a1c55 true +key_image_to_y 36a322f166933f550102d14e9c9daaeaa34bd06e9e20dc605a101a2d0ae69fbb 36a322f166933f550102d14e9c9daaeaa34bd06e9e20dc605a101a2d0ae69f3b true +key_image_to_y 2b31db2834f0e35ca15ebe00e73a583581476253f94b7f3b270546e58193b4a0 2b31db2834f0e35ca15ebe00e73a583581476253f94b7f3b270546e58193b420 true +key_image_to_y 3725e83d6e945fb0f8feb442cd12487f9e351d286ee89fa4dd68fb86b847bcb1 3725e83d6e945fb0f8feb442cd12487f9e351d286ee89fa4dd68fb86b847bc31 true +key_image_to_y 4cdcc458412ed752e804a0d4bc31bc5b4f47ff49a8771b0dc47d0388c10805f7 4cdcc458412ed752e804a0d4bc31bc5b4f47ff49a8771b0dc47d0388c1080577 true +key_image_to_y bb50dc83ae41cd9f1508073186087950c95a482bd780eccd70cd63388c7649f1 bb50dc83ae41cd9f1508073186087950c95a482bd780eccd70cd63388c764971 true +key_image_from_y b14939b9254f8df6d3e5c7b33a7dc0c6aa1ab8fe1293cb4795c9d92cf81d634f false b14939b9254f8df6d3e5c7b33a7dc0c6aa1ab8fe1293cb4795c9d92cf81d634f +key_image_from_y 6669a8eab861a2f4d4fdfd9fb8a9cb5fdd3a15e0facb8ff77c24727635af634e false 6669a8eab861a2f4d4fdfd9fb8a9cb5fdd3a15e0facb8ff77c24727635af634e +key_image_from_y c3134aa2143389e2d3b1a00fe661e2b82490956bbdf65ac2396d606f1a58b134 false c3134aa2143389e2d3b1a00fe661e2b82490956bbdf65ac2396d606f1a58b134 +key_image_from_y 8c27bc121f3fe85abb4c8084c5744960231d7b1b5861c30aa82749bf54018b53 false 8c27bc121f3fe85abb4c8084c5744960231d7b1b5861c30aa82749bf54018b53 +key_image_from_y 9e7be117a77921058748cba4fcfa043c026a884d969fd7b3a49ef99fdda3a772 false 9e7be117a77921058748cba4fcfa043c026a884d969fd7b3a49ef99fdda3a772 +key_image_from_y 
19c1820e3677f5b6c72db2c4ae804e6b93cbe802bf5de884d7d695253079da02 false 19c1820e3677f5b6c72db2c4ae804e6b93cbe802bf5de884d7d695253079da02 +key_image_from_y f37184d49ef88da56a1f37b3a4424c8c40a39b888c0c65817ce0cbfaeba17943 false f37184d49ef88da56a1f37b3a4424c8c40a39b888c0c65817ce0cbfaeba17943 +key_image_from_y a558636042e148d97f699bb55dd2c2fb6c6d64f54aa5e1c06f6d2e6e054c5261 false a558636042e148d97f699bb55dd2c2fb6c6d64f54aa5e1c06f6d2e6e054c5261 +key_image_from_y f2c6b50c496c5b4e0fa715d24e8a22727633d05b91b9d08232181741bcb36a3a false f2c6b50c496c5b4e0fa715d24e8a22727633d05b91b9d08232181741bcb36a3a +key_image_from_y 0543ccb07c6b9d2a2602107d0aa5ed6aa1398ec6543d9b9d7822bbf339ddbb09 false 0543ccb07c6b9d2a2602107d0aa5ed6aa1398ec6543d9b9d7822bbf339ddbb09 +key_image_from_y bf2acac9328c8538beec88fffee1ca49d9b28c70f9acc23f59dfbc8d21754654 false bf2acac9328c8538beec88fffee1ca49d9b28c70f9acc23f59dfbc8d21754654 +key_image_from_y 81e6611d33146dd5c3e402b4cb660b628175e074c1ccff093258a6f355655045 false 81e6611d33146dd5c3e402b4cb660b628175e074c1ccff093258a6f355655045 +key_image_from_y 4382e51caba64548432e6f0ddf3df5bb29eba0d55f46f806f8281b6b324ccf66 false 4382e51caba64548432e6f0ddf3df5bb29eba0d55f46f806f8281b6b324ccf66 +key_image_from_y 7d7185e987cbb9ee1608c7eef268764080906c9a7d5e91dfd1f6ea6538405f6e false 7d7185e987cbb9ee1608c7eef268764080906c9a7d5e91dfd1f6ea6538405f6e +key_image_from_y 8558c5ad0304b8b4fbf0ab12ed4f89295e7729a3ec4b05fffacdb9fbcc53f859 false 8558c5ad0304b8b4fbf0ab12ed4f89295e7729a3ec4b05fffacdb9fbcc53f859 +key_image_from_y 4c9ec93dbaf801eae69ea60ea6c5b970b06c9bd542ad3aba60d6d982abfcd653 false 4c9ec93dbaf801eae69ea60ea6c5b970b06c9bd542ad3aba60d6d982abfcd653 +key_image_from_y 361268ad395bc3162699092b95d138f023c41dd0e832d85c3f190440a2d0a87c false 361268ad395bc3162699092b95d138f023c41dd0e832d85c3f190440a2d0a87c +key_image_from_y f1ab05c1794fe907bbe657af5e046e2682312408ab267e24f6586f7fd52c306d false f1ab05c1794fe907bbe657af5e046e2682312408ab267e24f6586f7fd52c306d 
+key_image_from_y 9870dec355f5afcd193f7bbd803ad3038540cca12aa65ee0fc4108fe72657f1a false 9870dec355f5afcd193f7bbd803ad3038540cca12aa65ee0fc4108fe72657f1a +key_image_from_y 71c688eaef0dee7d48d803fa38fd7d20690e666594a4ce5ea505832e2e8c4666 false 71c688eaef0dee7d48d803fa38fd7d20690e666594a4ce5ea505832e2e8c4666 +key_image_from_y af57f563d8446a522666222c830f33f89ce0124280be5159388900a657ea9d12 false af57f563d8446a522666222c830f33f89ce0124280be5159388900a657ea9d12 +key_image_from_y 76d14b96961619765fc5b6f4e2e30166fa4c3e275c227bd275b5f4e6c0a91255 false 76d14b96961619765fc5b6f4e2e30166fa4c3e275c227bd275b5f4e6c0a91255 +key_image_from_y 59d7e8425798b6c6b2f7fa7ff6fe344eb5cf84511899dd39bd56e71beea5f960 false 59d7e8425798b6c6b2f7fa7ff6fe344eb5cf84511899dd39bd56e71beea5f960 +key_image_from_y d0db255ff4a1b619dc5e0fc9773659a19c75bd7a868e3fd45e83c92aa18c6e04 false d0db255ff4a1b619dc5e0fc9773659a19c75bd7a868e3fd45e83c92aa18c6e04 +key_image_from_y c03bf07443db65ce3b7bcd58c17b6266d81b8a6624deb081c65c14650b51d827 false c03bf07443db65ce3b7bcd58c17b6266d81b8a6624deb081c65c14650b51d827 +key_image_from_y 87102828ddeb3a31a266de1937b966658710264ad3c520bcc93abb07bc459849 false 87102828ddeb3a31a266de1937b966658710264ad3c520bcc93abb07bc459849 +key_image_from_y fc749c9fcc3300819ad312af6d235813975e6ce70bb904bad49930ce34b47201 false fc749c9fcc3300819ad312af6d235813975e6ce70bb904bad49930ce34b47201 +key_image_from_y e53657e4a0bbf098112777134885f65ea7abfc0639d28515bd00bd52a418b93e false e53657e4a0bbf098112777134885f65ea7abfc0639d28515bd00bd52a418b93e +key_image_from_y 55c7fe59e8c41d0d0f77f2d993f10e638cf6d4678984a4b9422202105ad51349 false 55c7fe59e8c41d0d0f77f2d993f10e638cf6d4678984a4b9422202105ad51349 +key_image_from_y e4246f6bd27e9323e08107ac9fa911f3f6c27f64d0f03b2a265789f2f8718401 false e4246f6bd27e9323e08107ac9fa911f3f6c27f64d0f03b2a265789f2f8718401 +key_image_from_y f7fce49a5ff25d00f655942508e1a31e210a66fe03f22bd6c799575ea6b88b5b false 
f7fce49a5ff25d00f655942508e1a31e210a66fe03f22bd6c799575ea6b88b5b +key_image_from_y 20325307f450143797fc7b7969b3ad093fd6318d97c6dfbe09a04a50abc9ba42 false 20325307f450143797fc7b7969b3ad093fd6318d97c6dfbe09a04a50abc9ba42 +key_image_from_y 4a2e87eaade16f12c728bd0fee887488db0d9e03f940de2e1acd4d77123ede59 false 4a2e87eaade16f12c728bd0fee887488db0d9e03f940de2e1acd4d77123ede59 +key_image_from_y 9a4227ccd723624c7dd4d536a8476463bd767ebc55e1e4f27bbe84139245151b false 9a4227ccd723624c7dd4d536a8476463bd767ebc55e1e4f27bbe84139245151b +key_image_from_y c64c6c2505ccfbe929fe6e93a8376c9377a05cb9df5547a203d3e9247e5dfa75 false c64c6c2505ccfbe929fe6e93a8376c9377a05cb9df5547a203d3e9247e5dfa75 +key_image_from_y f32193f4a45a9ee531f4e54b6a8cbae179048cd3e93d24cc21229ba67d3c886f false f32193f4a45a9ee531f4e54b6a8cbae179048cd3e93d24cc21229ba67d3c886f +key_image_from_y e480ed1ecdbf1e10dd7e347862e153b35f457bb2dac5bce766cb831265a0122a false e480ed1ecdbf1e10dd7e347862e153b35f457bb2dac5bce766cb831265a0122a +key_image_from_y e3283fa4f9eae1a612ac40a3a9f7ceaf472d4ad0fc7dba0c2bc6387f4f170753 false e3283fa4f9eae1a612ac40a3a9f7ceaf472d4ad0fc7dba0c2bc6387f4f170753 +key_image_from_y 208220ab9fb01a76f92df80d367c9b8187bd647e2df67143d315107c24c19870 false 208220ab9fb01a76f92df80d367c9b8187bd647e2df67143d315107c24c19870 +key_image_from_y 4ec772fe0cd753a573838454fa5d3764c84466bf1d8c7b051b0499c56c8ccf58 false 4ec772fe0cd753a573838454fa5d3764c84466bf1d8c7b051b0499c56c8ccf58 +key_image_from_y 350f46cace1e8cf8e82352a72070d3131d9fd7f7b71bec1781a93ddfb82a7601 false 350f46cace1e8cf8e82352a72070d3131d9fd7f7b71bec1781a93ddfb82a7601 +key_image_from_y 91624ed82640d2f131b996db59c64564be1342725a7de6ced3776d19f15b4367 false 91624ed82640d2f131b996db59c64564be1342725a7de6ced3776d19f15b4367 +key_image_from_y 700abb5038344ed6561a2e25f5296f785cdf6f359b360cb3be69eaf535df6671 false 700abb5038344ed6561a2e25f5296f785cdf6f359b360cb3be69eaf535df6671 +key_image_from_y 4d93904090a5c37cadb4c8b911955bd6374ab302f142d918c722eb8252bace0c 
true 4d93904090a5c37cadb4c8b911955bd6374ab302f142d918c722eb8252bace8c +key_image_from_y d375ac0223b138a9d0a0d3adf3a7a62c0a7207bc87a30bed0e582912aa4fb656 true d375ac0223b138a9d0a0d3adf3a7a62c0a7207bc87a30bed0e582912aa4fb6d6 +key_image_from_y ece17c47a92da9b0ef4218c19fa799ec04673c8843f65f20a14d492ced296542 true ece17c47a92da9b0ef4218c19fa799ec04673c8843f65f20a14d492ced2965c2 +key_image_from_y c721614309a89ac2ef41570662ce244da418476cfbd87331cd8e44ce6dd24973 true c721614309a89ac2ef41570662ce244da418476cfbd87331cd8e44ce6dd249f3 +key_image_from_y 00b5b85871c39384b359b2d2e89773c619ea546512d9e78ef43b6d8ad4f55408 true 00b5b85871c39384b359b2d2e89773c619ea546512d9e78ef43b6d8ad4f55488 +key_image_from_y fa32508215245c07dc980bbddf4483e597ed8ceb2747f559bcb4950e7706a43b true fa32508215245c07dc980bbddf4483e597ed8ceb2747f559bcb4950e7706a4bb +key_image_from_y 5c02d3bc62f0fcd55c264e8919f4a7cd84f1646a5f467df8e0cc70a0a2a0c717 true 5c02d3bc62f0fcd55c264e8919f4a7cd84f1646a5f467df8e0cc70a0a2a0c797 +key_image_from_y b96033b13a7007d716200bc739001fcf9a062dbdc4c2583270cd1cf8fda38f5b true b96033b13a7007d716200bc739001fcf9a062dbdc4c2583270cd1cf8fda38fdb +key_image_from_y 52c650e2e938e87f72f40bfa534f454c5b6339a3fbfd3059afb939c2d9ab683a true 52c650e2e938e87f72f40bfa534f454c5b6339a3fbfd3059afb939c2d9ab68ba +key_image_from_y 5afee29bbf0ffbf1feec56d43f624f429565fdea27d9544d6c7dcb9d2d43d11f true 5afee29bbf0ffbf1feec56d43f624f429565fdea27d9544d6c7dcb9d2d43d19f +key_image_from_y a4c5b1932e4dba9666641782a4f95f8bb5a617633a17fb4bc10cfccde634276b true a4c5b1932e4dba9666641782a4f95f8bb5a617633a17fb4bc10cfccde63427eb +key_image_from_y 7a63fafdcf359db81604b14bbe51e15adf8d28ba9394d306aa665a258aef2609 true 7a63fafdcf359db81604b14bbe51e15adf8d28ba9394d306aa665a258aef2689 +key_image_from_y 696183751af706b468e221b207ba4aa5a3f97902afa4ab825bf235e85e13dc16 true 696183751af706b468e221b207ba4aa5a3f97902afa4ab825bf235e85e13dc96 +key_image_from_y 28b328e40365cd780fb0637d3870dcf755976ec5c088e97d8a1e8a04db54bd45 true 
28b328e40365cd780fb0637d3870dcf755976ec5c088e97d8a1e8a04db54bdc5 +key_image_from_y c4f938652ade2f8996addca457c82876205b207ea470c4231e3a7f5ca3472d4d true c4f938652ade2f8996addca457c82876205b207ea470c4231e3a7f5ca3472dcd +key_image_from_y b993e32601093bf0e63c708501c7f91afe9fa4298d287f3f55bb493569f6b26b true b993e32601093bf0e63c708501c7f91afe9fa4298d287f3f55bb493569f6b2eb +key_image_from_y 6246cfaa394da87a45edf395472ad3594d8b3b6f39550078cfbf39066aeea91b true 6246cfaa394da87a45edf395472ad3594d8b3b6f39550078cfbf39066aeea99b +key_image_from_y 5f0590a3b37df89f27caee54afc6101a3cf0b896a0f1997098bace1bf3d9b954 true 5f0590a3b37df89f27caee54afc6101a3cf0b896a0f1997098bace1bf3d9b9d4 +key_image_from_y e3955bd20dc37d5ae620ee5bffa1b1cfdc05a062826df39197e6d191f23c031b true e3955bd20dc37d5ae620ee5bffa1b1cfdc05a062826df39197e6d191f23c039b +key_image_from_y 53aa7307b2ef3c5d5f51e73b90891b1a597d5ddfcbb41bcd79a0f199f7b20d54 true 53aa7307b2ef3c5d5f51e73b90891b1a597d5ddfcbb41bcd79a0f199f7b20dd4 +key_image_from_y 3474abfa456935af7ca56f5bdf3751ff8437e30de6b5f830329ec2cdd8aa1846 true 3474abfa456935af7ca56f5bdf3751ff8437e30de6b5f830329ec2cdd8aa18c6 +key_image_from_y 2533d58ebfa13c3175be9f05235c1730c93033a35fa002577e44c6675b817e15 true 2533d58ebfa13c3175be9f05235c1730c93033a35fa002577e44c6675b817e95 +key_image_from_y ae848420273035bd516728bd9c2f9b421736a46c3806a77fa64acd680357d733 true ae848420273035bd516728bd9c2f9b421736a46c3806a77fa64acd680357d7b3 +key_image_from_y af96b48d7a704a507a9b0eee52b19edf1ddaa00ed84ff1f04202113dbb79634d true af96b48d7a704a507a9b0eee52b19edf1ddaa00ed84ff1f04202113dbb7963cd +key_image_from_y 1305c030bf02efd242f7d826d53fefdba57546228f911d2a6b2e32bd02952577 true 1305c030bf02efd242f7d826d53fefdba57546228f911d2a6b2e32bd029525f7 +key_image_from_y 6dd4392fb42c478bfbb1555276a79bdb8558cfa0f207787c6b700f40f464042f true 6dd4392fb42c478bfbb1555276a79bdb8558cfa0f207787c6b700f40f46404af +key_image_from_y a7e51a48f006bcff53fbf9f2a5dbc5c8b2cb5251147fa4bd10e5f9bb00db2f7d true 
a7e51a48f006bcff53fbf9f2a5dbc5c8b2cb5251147fa4bd10e5f9bb00db2ffd +key_image_from_y dc4713b4709e1bf6df4f72a728328816d25ba9b013e4183f1802cc1bbf6c9149 true dc4713b4709e1bf6df4f72a728328816d25ba9b013e4183f1802cc1bbf6c91c9 +key_image_from_y 393b89cb3a994e60cdec7c004be50262c36b7171c22cc8b9ed93d217b3dd1b20 true 393b89cb3a994e60cdec7c004be50262c36b7171c22cc8b9ed93d217b3dd1ba0 +key_image_from_y dde48284f2512d01fe5e6eb0dc7bed77d9f0cd4a520d7e4f48fd98d8be871a47 true dde48284f2512d01fe5e6eb0dc7bed77d9f0cd4a520d7e4f48fd98d8be871ac7 +key_image_from_y 168c123f48f0e06f8dd2fb656a4418e8c8118f94c1b4fb4dd875ce66f79f0108 true 168c123f48f0e06f8dd2fb656a4418e8c8118f94c1b4fb4dd875ce66f79f0188 +key_image_from_y 638317b0f09425de7b63a3e349e706b0a51ee5872a1bfe5b5c6bbc7bf7dee201 true 638317b0f09425de7b63a3e349e706b0a51ee5872a1bfe5b5c6bbc7bf7dee281 +key_image_from_y 261c14e9b16d50f500e60e9d1f66d33a8466fe8bd8d025418d80602d5caff465 true 261c14e9b16d50f500e60e9d1f66d33a8466fe8bd8d025418d80602d5caff4e5 +key_image_from_y c6aa35885209ab7f49ce3635f1b2c16b70c2bd8c8b0ea9dd22210fc5a8d5c852 true c6aa35885209ab7f49ce3635f1b2c16b70c2bd8c8b0ea9dd22210fc5a8d5c8d2 +key_image_from_y f8222184ed7629b3e994b43fe9d787aa34f33a784a9985deaa1f9dcfb709be73 true f8222184ed7629b3e994b43fe9d787aa34f33a784a9985deaa1f9dcfb709bef3 +key_image_from_y 33a015c73192d8dbc67bd25d28ba2c4cbb4fb8bc92fa5c680d4179d54b7dfe6c true 33a015c73192d8dbc67bd25d28ba2c4cbb4fb8bc92fa5c680d4179d54b7dfeec +key_image_from_y 6b9a54af748eca68552c36464b32344583444a76456cfeab8badf753d2919663 true 6b9a54af748eca68552c36464b32344583444a76456cfeab8badf753d29196e3 +key_image_from_y b8a1892a9174bd24bc5c4560f2116d64ef9985eb39f7c56ae6dcf9112e0d3b40 true b8a1892a9174bd24bc5c4560f2116d64ef9985eb39f7c56ae6dcf9112e0d3bc0 +key_image_from_y 55075478f2d9a2f93c2c8c40e32a2e79b157b16ae619c7f4492e9e5aee450b37 true 55075478f2d9a2f93c2c8c40e32a2e79b157b16ae619c7f4492e9e5aee450bb7 +key_image_from_y bbb54e6c3500b90a73fd9df8273a8146dd182af9350867671f6b3335c340625c true 
bbb54e6c3500b90a73fd9df8273a8146dd182af9350867671f6b3335c34062dc +key_image_from_y 2f1602dbf3381f51d8d33d56becffec3f5cdef6230032e27a719525b4b38c941 true 2f1602dbf3381f51d8d33d56becffec3f5cdef6230032e27a719525b4b38c9c1 +key_image_from_y af046aaf25bf374dd22baa4fae78c982f800f1b7c2731f97f97e882688856034 true af046aaf25bf374dd22baa4fae78c982f800f1b7c2731f97f97e8826888560b4 +key_image_from_y 6a9640d8f15984358ce8acf46cb39dec56933cf13e335d6a3bd0e01dd64e7b57 true 6a9640d8f15984358ce8acf46cb39dec56933cf13e335d6a3bd0e01dd64e7bd7 +key_image_from_y 49ac34e8e8089177108c9c764feeba8f7bc67bb1715a9f937b400d5726dd2810 true 49ac34e8e8089177108c9c764feeba8f7bc67bb1715a9f937b400d5726dd2890 +key_image_from_y 874e851d37c89dbe2cddd1848d0f4fa991e59455deb73f754cf2615c2f21595a true 874e851d37c89dbe2cddd1848d0f4fa991e59455deb73f754cf2615c2f2159da +key_image_from_y 7b0ee4271536ba40e0e3bfacbd6a9f04f24ba6cb2e79b96070bec36480973113 true 7b0ee4271536ba40e0e3bfacbd6a9f04f24ba6cb2e79b96070bec36480973193 +key_image_from_y bbebfd909dffa248f85390d3860bf5f2123369be07ea3dee5f13e99e25e49359 true bbebfd909dffa248f85390d3860bf5f2123369be07ea3dee5f13e99e25e493d9 +key_image_from_y df5ca9835c856ee38141ff5d10a4985f958e7f986391ecf639263f7319bc6b36 true df5ca9835c856ee38141ff5d10a4985f958e7f986391ecf639263f7319bc6bb6 +key_image_from_y 7a4fbe8236800caeaea9d726a15b21ba515e7414ed74790717d464b2d8c70e39 true 7a4fbe8236800caeaea9d726a15b21ba515e7414ed74790717d464b2d8c70eb9 +key_image_from_y a338ae983e77870095e9f7cc6f9c13d8603796741553483071a4971c8de4bc7d true a338ae983e77870095e9f7cc6f9c13d8603796741553483071a4971c8de4bcfd +key_image_from_y 7bda514ff46aeae2c62b19fe1be1cb11ccd7405cbf089088863d12d97e718324 true 7bda514ff46aeae2c62b19fe1be1cb11ccd7405cbf089088863d12d97e7183a4 +key_image_from_y c3ce63428cfda4dc85a1dae4c3c6b051087a82f49776a546cff6b47484ff3961 true c3ce63428cfda4dc85a1dae4c3c6b051087a82f49776a546cff6b47484ff39e1 +key_image_from_y 3dfc1d9d714860c14540568d7da56e31b1a290db1023ad5bd10862ade6d4ae74 true 
3dfc1d9d714860c14540568d7da56e31b1a290db1023ad5bd10862ade6d4aef4 +key_image_from_y 3cecd238630915a9f142a9c561461a7f321824ae726e03290fe70e2cbb17e955 true 3cecd238630915a9f142a9c561461a7f321824ae726e03290fe70e2cbb17e9d5 +key_image_from_y 683c108ea462e200e6e774f6b6ec75bd6a1041f4c0c3ac392f79c2ae66bc1f4d true 683c108ea462e200e6e774f6b6ec75bd6a1041f4c0c3ac392f79c2ae66bc1fcd +key_image_from_y 6428539f6949cb005e1dfa470718c6f2eddee6ad4579e876c909b92a6561c178 true 6428539f6949cb005e1dfa470718c6f2eddee6ad4579e876c909b92a6561c1f8 +key_image_from_y 6658cab76b1481b2023873a57c06d69097d9bfa96c05a995e84731cfe65a384b true 6658cab76b1481b2023873a57c06d69097d9bfa96c05a995e84731cfe65a38cb diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp index f2512356383..b7bcf8df8de 100644 --- a/tests/unit_tests/crypto.cpp +++ b/tests/unit_tests/crypto.cpp @@ -345,3 +345,27 @@ TEST(Crypto, generator_consistency) // ringct/rctTypes.h ASSERT_TRUE(memcmp(H.data, rct::H.bytes, 32) == 0); } + +TEST(Crypto, key_image_y) +{ + const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); + crypto::key_image ki; + crypto::generate_key_image(kp.pub, kp.sec, ki); + + crypto::key_image_y ki_y; + bool sign = crypto::key_image_to_y(ki, ki_y); + + static_assert(sizeof(crypto::key_image) == sizeof(crypto::key_image_y), "unequal key image <> key image y size"); + if (memcmp(ki.data, ki_y.data, sizeof(crypto::key_image)) == 0) + ASSERT_FALSE(sign); + else + ASSERT_TRUE(sign); + + // decoded y coordinate should be the same + fe y_from_ki; + fe y_from_ki_y; + ASSERT_EQ(fe_frombytes_vartime(y_from_ki, (unsigned char*)ki.data), 0); + ASSERT_EQ(fe_frombytes_vartime(y_from_ki_y, (unsigned char*)ki_y.data), 0); + + ASSERT_EQ(memcmp(y_from_ki, y_from_ki_y, sizeof(fe)), 0); +} From 5f4b20b8b688cb096f32efb959a9362ea890a673 Mon Sep 17 00:00:00 2001 From: j-berman Date: Thu, 12 Sep 2024 16:11:47 -0700 Subject: [PATCH 116/127] fcmp++: fix build errs for ki_context_t hash 
specialization - plus slightly cleaner hash --- src/cryptonote_core/tx_pool.h | 43 +++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/src/cryptonote_core/tx_pool.h b/src/cryptonote_core/tx_pool.h index 73313ba29e7..86ae947f4c5 100644 --- a/src/cryptonote_core/tx_pool.h +++ b/src/cryptonote_core/tx_pool.h @@ -52,6 +52,29 @@ #include "rpc/core_rpc_server_commands_defs.h" #include "rpc/message_data_structs.h" +namespace cryptonote +{ + //! key image's contextual data + struct ki_context_t + { + crypto::hash tx_hash; + bool sign; // original key image had sign bit set + bool operator==(const ki_context_t rhs) const { return rhs.tx_hash == tx_hash && rhs.sign == sign; }; + }; +}//cryptonote + +namespace std +{ + template<> struct hash + { + std::size_t operator()(const cryptonote::ki_context_t &_ki_context) const + { + const std::size_t h = reinterpret_cast(_ki_context.tx_hash); + return h + (_ki_context.sign ? 1 : 0); + } + }; +}//std + namespace cryptonote { class Blockchain; @@ -81,14 +104,6 @@ namespace cryptonote //! container for sorting transactions by fee per unit size typedef std::set sorted_tx_container; - //! key image's contextual data - struct ki_context_t - { - crypto::hash tx_hash; - bool sign; // original key image had sign bit set - bool operator==(const ki_context_t rhs) const { return rhs.tx_hash == tx_hash && rhs.sign == sign; }; - }; - /** * @brief Transaction pool, handles transactions which are not part of a block * @@ -725,17 +740,5 @@ namespace boost BOOST_CLASS_VERSION(cryptonote::tx_memory_pool, CURRENT_MEMPOOL_ARCHIVE_VER) BOOST_CLASS_VERSION(cryptonote::tx_memory_pool::tx_details, CURRENT_MEMPOOL_TX_DETAILS_ARCHIVE_VER) -namespace std -{ - template<> struct hash - { - std::size_t operator()(const cryptonote::ki_context_t &_ki_context) const - { - std::size_t res = reinterpret_cast(_ki_context.tx_hash); - res += _ki_context.sign ? 
1 : 0; - return res; - } - }; -} // std From 358a66f6b11bd0d3f0e1ce8b96fd2b8d77a77d2f Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 16 Sep 2024 16:49:07 -0700 Subject: [PATCH 117/127] crypto: fe_batch_invert using Montgomery's trick Speeds up inverting many elems at once 95%+ --- src/crypto/crypto-ops.c | 41 +++++++++++- src/crypto/crypto-ops.h | 1 + tests/performance_tests/CMakeLists.txt | 1 + tests/performance_tests/fe_batch_invert.h | 79 +++++++++++++++++++++++ tests/performance_tests/main.cpp | 3 + tests/unit_tests/crypto.cpp | 24 +++++++ 6 files changed, 146 insertions(+), 3 deletions(-) create mode 100644 tests/performance_tests/fe_batch_invert.h diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index bbf5632bb1b..ed2f4738953 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -30,6 +30,8 @@ #include #include +#include +#include #include "warnings.h" #include "crypto-ops.h" @@ -313,6 +315,39 @@ void fe_invert(fe out, const fe z) { return; } +// Montgomery's trick +// https://iacr.org/archive/pkc2004/29470042/29470042.pdf 2.2 +int fe_batch_invert(fe *out, const fe *in, const int n) { + if (n == 0) { + return 0; + } + + // Step 1: collect initial muls + fe *init_muls = (fe *) malloc(n * sizeof(fe)); + if (!init_muls) { + return 1; + } + memcpy(&init_muls[0], &in[0], sizeof(fe)); + for (int i = 1; i < n; ++i) { + fe_mul(init_muls[i], init_muls[i-1], in[i]); + } + + // Step 2: get the inverse of all elems multiplied together + fe a; + fe_invert(a, init_muls[n-1]); + + // Step 3: get each inverse + for (int i = n; i > 1; --i) { + fe_mul(out[i-1], a, init_muls[i-2]); + fe_mul(a, a, in[i-1]); + } + memcpy(&out[0], &a, sizeof(fe)); + + free(init_muls); + + return 0; +} + /* From fe_isnegative.c */ /* @@ -1612,7 +1647,7 @@ static void ge_precomp_cmov(ge_precomp *t, const ge_precomp *u, unsigned char b) fe_cmov(t->xy2d, u->xy2d, b); } -static void select(ge_precomp *t, int pos, signed char b) { +static void _select(ge_precomp *t, int 
pos, signed char b) { ge_precomp minust; unsigned char bnegative = negative(b); unsigned char babs = b - (((-bnegative) & b) << 1); @@ -1668,7 +1703,7 @@ void ge_scalarmult_base(ge_p3 *h, const unsigned char *a) { ge_p3_0(h); for (i = 1; i < 64; i += 2) { - select(&t, i / 2, e[i]); + _select(&t, i / 2, e[i]); ge_madd(&r, h, &t); ge_p1p1_to_p3(h, &r); } @@ -1678,7 +1713,7 @@ void ge_scalarmult_base(ge_p3 *h, const unsigned char *a) { ge_p2_dbl(&r, &s); ge_p1p1_to_p3(h, &r); for (i = 0; i < 64; i += 2) { - select(&t, i / 2, e[i]); + _select(&t, i / 2, e[i]); ge_madd(&r, h, &t); ge_p1p1_to_p3(h, &r); } } diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index 7ab73887278..465eb3bdd70 100644 --- a/src/crypto/crypto-ops.h +++ b/src/crypto/crypto-ops.h @@ -165,6 +165,7 @@ void ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); void fe_add(fe h, const fe f, const fe g); void fe_tobytes(unsigned char *, const fe); void fe_invert(fe out, const fe z); +int fe_batch_invert(fe *out, const fe *in, const int n); void fe_mul(fe out, const fe, const fe); void fe_0(fe h); diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index a1158fcecd6..4f259643a2c 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -36,6 +36,7 @@ set(performance_tests_headers construct_tx.h derive_public_key.h derive_secret_key.h + fe_batch_invert.h ge_frombytes_vartime.h generate_key_derivation.h generate_key_image.h diff --git a/tests/performance_tests/fe_batch_invert.h b/tests/performance_tests/fe_batch_invert.h new file mode 100644 index 00000000000..2aed96f9a0e --- /dev/null +++ b/tests/performance_tests/fe_batch_invert.h @@ -0,0 +1,79 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers + +#pragma once + +#include "crypto/crypto.h" + +template +class test_fe_batch_invert +{ +public: + static const size_t loop_count = 50; + static const size_t n_elems = 1000; + + bool init() + { + m_fes = (fe *) malloc(n_elems * sizeof(fe)); + + for (std::size_t i = 0; i < n_elems; ++i) + { + crypto::secret_key r; + crypto::random32_unbiased((unsigned char*)r.data); + + ge_p3 point; + ge_scalarmult_base(&point, (unsigned char*)r.data); + + memcpy(m_fes[i], &point.Y, sizeof(fe)); + } + + return true; + } + + bool test() + { + fe *inv_fes = (fe *) malloc(n_elems * sizeof(fe)); + + if (batched) + fe_batch_invert(inv_fes, m_fes, n_elems); + else + { + for (std::size_t i = 0; i < n_elems; ++i) + fe_invert(inv_fes[i], m_fes[i]); + } + + free(inv_fes); + + return true; + } + +private: + fe *m_fes; +}; diff --git a/tests/performance_tests/main.cpp b/tests/performance_tests/main.cpp index 929eec590d6..02770d65a8e 100644 --- a/tests/performance_tests/main.cpp +++ b/tests/performance_tests/main.cpp @@ -43,6 +43,7 @@ #include "derive_public_key.h" #include "derive_secret_key.h" #include "derive_view_tag.h" +#include "fe_batch_invert.h" #include "ge_frombytes_vartime.h" #include "ge_tobytes.h" #include "generate_key_derivation.h" @@ -206,6 +207,8 @@ int main(int argc, char** argv) TEST_PERFORMANCE0(filter, p, test_generate_key_image); TEST_PERFORMANCE0(filter, p, test_derive_public_key); TEST_PERFORMANCE0(filter, p, test_derive_secret_key); + TEST_PERFORMANCE1(filter, p, test_fe_batch_invert, true); // batched + TEST_PERFORMANCE1(filter, p, test_fe_batch_invert, false); // individual inversions TEST_PERFORMANCE0(filter, p, test_ge_frombytes_vartime); TEST_PERFORMANCE0(filter, p, test_ge_tobytes); TEST_PERFORMANCE0(filter, p, test_generate_keypair); diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp index b7bcf8df8de..6ce4e7299d4 100644 --- 
a/tests/unit_tests/crypto.cpp +++ b/tests/unit_tests/crypto.cpp @@ -369,3 +369,27 @@ TEST(Crypto, key_image_y) ASSERT_EQ(memcmp(y_from_ki, y_from_ki_y, sizeof(fe)), 0); } + +TEST(Crypto, batch_inversion) +{ + const std::size_t n_elems = 1000; + std::vector field_elems(n_elems), batch_inverted(n_elems), norm_inverted(n_elems); + + // Populate random field elems + for (std::size_t i = 0; i < n_elems; ++i) + { + const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); + ASSERT_EQ(fe_frombytes_vartime(field_elems[i], (unsigned char*)kp.pub.data), 0); + } + + // Do batch inversion + fe_batch_invert(batch_inverted.data(), field_elems.data(), n_elems); + + // Invert every elem individually + for (std::size_t i = 0; i < n_elems; ++i) + { + fe_invert(norm_inverted[i], field_elems[i]); + } + + ASSERT_EQ(memcmp(batch_inverted.data(), norm_inverted.data(), n_elems * sizeof(fe)), 0); +} From b2ea86235d7fab5d1114d47c3aae4c7003cdaacd Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 16 Sep 2024 16:51:41 -0700 Subject: [PATCH 118/127] fcmp++: use batch inversion when converting outputs to leaf tuples --- src/crypto/crypto-ops.c | 17 +---- src/crypto/crypto-ops.h | 4 +- src/fcmp_pp/curve_trees.cpp | 136 ++++++++++++++++++++++++++------- src/fcmp_pp/curve_trees.h | 9 +++ src/fcmp_pp/fcmp_pp_crypto.cpp | 40 ++++++++-- src/fcmp_pp/fcmp_pp_crypto.h | 15 ++++ 6 files changed, 170 insertions(+), 51 deletions(-) diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index ed2f4738953..8d2680c62ac 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -92,7 +92,7 @@ void fe_0(fe h) { h = 1 */ -static void fe_1(fe h) { +void fe_1(fe h) { h[0] = 1; h[1] = 0; h[2] = 0; @@ -993,7 +993,7 @@ Can overlap h with f or g. |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
*/ -static void fe_sub(fe h, const fe f, const fe g) { +void fe_sub(fe h, const fe f, const fe g) { int32_t f0 = f[0]; int32_t f1 = f[1]; int32_t f2 = f[2]; @@ -3920,20 +3920,9 @@ int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p) { } // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 -void fe_ed_y_to_wei_x(unsigned char *wei_x, const fe ed_y) +void fe_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y) { - fe one; - fe_1(one); - - // (1+y),(1-y) - fe one_plus_y; - fe_add(one_plus_y, one, ed_y); - fe one_minus_y; - fe_sub(one_minus_y, one, ed_y); - // (1/(1-y))*(1+y) - fe inv_one_minus_y; - fe_invert(inv_one_minus_y, one_minus_y); fe inv_one_minus_y_mul_one_plus_y; fe_mul(inv_one_minus_y_mul_one_plus_y, inv_one_minus_y, one_plus_y); diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index 465eb3bdd70..cd3821663cd 100644 --- a/src/crypto/crypto-ops.h +++ b/src/crypto/crypto-ops.h @@ -167,8 +167,10 @@ void fe_tobytes(unsigned char *, const fe); void fe_invert(fe out, const fe z); int fe_batch_invert(fe *out, const fe *in, const int n); void fe_mul(fe out, const fe, const fe); +void fe_sub(fe h, const fe f, const fe g); void fe_0(fe h); +void fe_1(fe h); int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p); -void fe_ed_y_to_wei_x(unsigned char *wei_x, const fe ed_y); +void fe_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y); diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index a1640b5e71c..0ab427a1266 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -29,7 +29,6 @@ #include "curve_trees.h" #include "common/threadpool.h" -#include "fcmp_pp_crypto.h" #include "ringct/rctOps.h" @@ -713,11 +712,7 @@ static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_re return layer_reduction_out; } //---------------------------------------------------------------------------------------------------------------------- 
-//---------------------------------------------------------------------------------------------------------------------- -// CurveTrees public member functions -//---------------------------------------------------------------------------------------------------------------------- -template<> -CurveTrees::LeafTuple CurveTrees::leaf_tuple(const OutputPair &output_pair) const +static PreLeafTuple output_to_pre_leaf_tuple(const OutputPair &output_pair) { const crypto::public_key &output_pubkey = output_pair.output_pubkey; const rct::key &commitment = output_pair.commitment; @@ -739,13 +734,29 @@ CurveTrees::LeafTuple CurveTrees::leaf_tuple(con crypto::ec_point I; crypto::derive_key_image_generator(output_pubkey, I); + PreLeafTuple plt; + if (!fcmp_pp::point_to_pre_wei_x(O, plt.O_pre_x)) + throw std::runtime_error("failed to get pre wei x scalar from O"); + if (!fcmp_pp::point_to_pre_wei_x(rct::pt2rct(I), plt.I_pre_x)) + throw std::runtime_error("failed to get pre wei x scalar from I"); + if (!fcmp_pp::point_to_pre_wei_x(C, plt.C_pre_x)) + throw std::runtime_error("failed to get pre wei x scalar from C"); + + return plt; +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTrees public member functions +//---------------------------------------------------------------------------------------------------------------------- +template<> +CurveTrees::LeafTuple CurveTrees::leaf_tuple(const OutputPair &output_pair) const +{ + const auto plt = output_to_pre_leaf_tuple(output_pair); + rct::key O_x, I_x, C_x; - if (!fcmp_pp::point_to_wei_x(O, O_x)) - throw std::runtime_error("failed to get wei x scalar from O"); - if (!fcmp_pp::point_to_wei_x(rct::pt2rct(I), I_x)) - throw std::runtime_error("failed to get wei x scalar from I"); - if (!fcmp_pp::point_to_wei_x(C, C_x)) - throw 
std::runtime_error("failed to get wei x scalar from C"); + fcmp_pp::pre_wei_x_to_wei_x(plt.O_pre_x, O_x); + fcmp_pp::pre_wei_x_to_wei_x(plt.I_pre_x, I_x); + fcmp_pp::pre_wei_x_to_wei_x(plt.C_pre_x, C_x); return LeafTuple{ .O_x = tower_cycle::selene_scalar_from_bytes(O_x), @@ -996,11 +1007,8 @@ void CurveTrees::set_valid_leaves( std::vector &tuples_out, std::vector &&new_outputs) const { - flattened_leaves_out.reserve(new_outputs.size() * LEAF_TUPLE_SIZE); - tuples_out.reserve(new_outputs.size()); - - std::vector leaves; - leaves.resize(new_outputs.size()); + flattened_leaves_out.clear(); + tuples_out.clear(); // Keep track of valid outputs to make sure we only use leaves from valid outputs. Can't use std::vector // because std::vector concurrent access is not thread safe. @@ -1013,23 +1021,27 @@ void CurveTrees::set_valid_leaves( tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); tools::threadpool::waiter waiter(tpool); - // Multithreaded conversion of valid outputs into leaf tuples + // Step 1. Multithreaded convert valid outputs into pre-Wei x coords + std::vector pre_leaves; + pre_leaves.resize(new_outputs.size()); for (std::size_t i = 0; i < new_outputs.size(); ++i) { tpool.submit(&waiter, [ this, &new_outputs, - &leaves, &valid_outputs, + &pre_leaves, i ]() { - CHECK_AND_ASSERT_THROW_MES(leaves.size() > i, "unexpected leaves size"); CHECK_AND_ASSERT_THROW_MES(valid_outputs.size() > i, "unexpected valid outputs size"); CHECK_AND_ASSERT_THROW_MES(!valid_outputs[i], "unexpected valid output"); + CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected pre_leaves size"); + + const auto &output_pair = new_outputs[i].output_pair; - try { leaves[i] = this->leaf_tuple(new_outputs[i].output_pair); } + try { pre_leaves[i] = output_to_pre_leaf_tuple(output_pair); } catch(...) 
{ /* Invalid outputs can't be added to the tree */ return; } valid_outputs[i] = True; @@ -1038,21 +1050,87 @@ void CurveTrees::set_valid_leaves( ); } - CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to tuples"); + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to pre wei x coords"); + + // Step 2. Collect valid pre-Wei x coords + const std::size_t n_valid_outputs = std::count(valid_outputs.begin(), valid_outputs.end(), True); + const std::size_t n_valid_leaf_elems = n_valid_outputs * LEAF_TUPLE_SIZE; - // Collect valid outputs into expected objects + std::vector one_plus_y_vec(n_valid_leaf_elems), one_minus_y_vec(n_valid_leaf_elems); + + std::size_t valid_i = 0; for (std::size_t i = 0; i < valid_outputs.size(); ++i) { if (!valid_outputs[i]) continue; - CHECK_AND_ASSERT_THROW_MES(leaves.size() > i, "unexpected size of leaves"); - CHECK_AND_ASSERT_THROW_MES(new_outputs.size() > i, "unexpected size of valid outputs"); + CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected size of pre_leaves"); + CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems > valid_i, "unexpected valid_i"); + + static_assert(LEAF_TUPLE_SIZE == 3, "unexpected leaf tuple size"); + CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec.size() > (valid_i+2), "unexpected size of one_plus_y_vec"); + CHECK_AND_ASSERT_THROW_MES(one_minus_y_vec.size() > (valid_i+2), "unexpected size of one_minus_y_vec"); + + auto &pl = pre_leaves[i]; + + auto &O_pre_x = pl.O_pre_x; + auto &I_pre_x = pl.I_pre_x; + auto &C_pre_x = pl.C_pre_x; + + // TODO: avoid copying underlying (tried using pointer to pointers, but wasn't clean) + memcpy(&one_plus_y_vec[valid_i], &O_pre_x.one_plus_y, sizeof(fe)); + memcpy(&one_plus_y_vec[valid_i+1], &I_pre_x.one_plus_y, sizeof(fe)); + memcpy(&one_plus_y_vec[valid_i+2], &C_pre_x.one_plus_y, sizeof(fe)); - // We use O.x, I.x, C.x to grow the tree - flattened_leaves_out.emplace_back(std::move(leaves[i].O_x)); - 
flattened_leaves_out.emplace_back(std::move(leaves[i].I_x)); - flattened_leaves_out.emplace_back(std::move(leaves[i].C_x)); + memcpy(&one_minus_y_vec[valid_i], &O_pre_x.one_minus_y, sizeof(fe)); + memcpy(&one_minus_y_vec[valid_i+1], &I_pre_x.one_minus_y, sizeof(fe)); + memcpy(&one_minus_y_vec[valid_i+2], &C_pre_x.one_minus_y, sizeof(fe)); + + valid_i += LEAF_TUPLE_SIZE; + } + + CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems == valid_i, "unexpected end valid_i"); + + // Step 3. Get batch inverse of valid pre-Wei x (1-y)'s + // - Batch inversion is significantly faster than inverting 1 at a time + std::vector inv_one_minus_y_vec(n_valid_leaf_elems); + CHECK_AND_ASSERT_THROW_MES(batch_invert(one_minus_y_vec, inv_one_minus_y_vec), "failed to batch invert"); + + // Step 4. Multithreaded get Wei x coords and convert to Selene scalars + CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec.size() == n_valid_leaf_elems, + "unexpected size of inv_one_minus_y_vec"); + CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec.size() == n_valid_leaf_elems, + "unexpected size of one_plus_y_vec"); + + flattened_leaves_out.resize(n_valid_leaf_elems); + for (std::size_t i = 0; i < n_valid_leaf_elems; ++i) + { + tpool.submit(&waiter, + [ + &inv_one_minus_y_vec, + &one_plus_y_vec, + &flattened_leaves_out, + i + ]() + { + rct::key wei_x; + fcmp_pp::to_wei_x(inv_one_minus_y_vec[i], one_plus_y_vec[i], wei_x); + flattened_leaves_out[i] = tower_cycle::selene_scalar_from_bytes(wei_x); + }, + true + ); + } + + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to wei x coords"); + + // Step 5. 
Set valid tuples + tuples_out.reserve(n_valid_outputs); + for (std::size_t i = 0; i < valid_outputs.size(); ++i) + { + if (!valid_outputs[i]) + continue; + + CHECK_AND_ASSERT_THROW_MES(new_outputs.size() > i, "unexpected size of valid outputs"); // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output context in the db to save 32 bytes tuples_out.emplace_back(std::move(new_outputs[i])); diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 7d9c9688b28..7aa3f827550 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -30,6 +30,7 @@ #include "crypto/crypto.h" #include "cryptonote_basic/cryptonote_basic.h" +#include "fcmp_pp_crypto.h" #include "misc_log_ex.h" #include "tower_cycle.h" @@ -155,6 +156,14 @@ static_assert(sizeof(OutputContext) == (8+32+32), "db expects 72 bytes for outpu using OutputsByUnlockBlock = std::unordered_map>; +// Struct composed of ec elems needed to get a full-fledged leaf tuple +struct PreLeafTuple final +{ + fcmp_pp::PreWeiX O_pre_x; + fcmp_pp::PreWeiX I_pre_x; + fcmp_pp::PreWeiX C_pre_x; +}; + //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // This class is useful to help update the curve trees merkle tree without needing to keep the entire tree in memory diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp index 0bb70590b6b..f31d5fe0449 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.cpp +++ b/src/fcmp_pp/fcmp_pp_crypto.cpp @@ -28,10 +28,6 @@ #include "fcmp_pp_crypto.h" -extern "C" -{ -#include "crypto/crypto-ops.h" -} #include "ringct/rctOps.h" namespace fcmp_pp @@ -51,14 +47,44 @@ bool clear_torsion(const rct::key &k, rct::key &k_out) { ge_p3_tobytes(k_out.bytes, &torsion_cleared_point); return true; } - -bool point_to_wei_x(const rct::key &pub, rct::key &wei_x) { 
+//---------------------------------------------------------------------------------------------------------------------- +bool batch_invert(const std::vector &elems, std::vector &inv_elems) { + if (elems.size() != inv_elems.size()) + return false; + fe_batch_invert(inv_elems.data(), elems.data(), elems.size()); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +bool point_to_pre_wei_x(const rct::key &pub, PreWeiX &pre_wei_x) { if (pub == rct::I) return false; fe y; if (fe_frombytes_vartime(y, pub.bytes) != 0) return false; - fe_ed_y_to_wei_x(wei_x.bytes, y); + fe one; + fe_1(one); + + // (1+y),(1-y) + fe_add(pre_wei_x.one_plus_y, one, y); + fe_sub(pre_wei_x.one_minus_y, one, y); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +void to_wei_x(const fe inv_one_minus_y, const fe one_plus_y, rct::key &wei_x) { + fe_to_wei_x(wei_x.bytes, inv_one_minus_y, one_plus_y); +} +//---------------------------------------------------------------------------------------------------------------------- +void pre_wei_x_to_wei_x(const PreWeiX pre_wei_x, rct::key &wei_x) { + fe inv_one_minus_y; + fe_invert(inv_one_minus_y, pre_wei_x.one_minus_y); + to_wei_x(inv_one_minus_y, pre_wei_x.one_plus_y, wei_x); +} +//---------------------------------------------------------------------------------------------------------------------- +bool point_to_wei_x(const rct::key &pub, rct::key &wei_x) { + PreWeiX pre_wei_x; + if (!point_to_pre_wei_x(pub, pre_wei_x)) + return false; + pre_wei_x_to_wei_x(pre_wei_x, wei_x); return true; } //---------------------------------------------------------------------------------------------------------------------- diff --git a/src/fcmp_pp/fcmp_pp_crypto.h b/src/fcmp_pp/fcmp_pp_crypto.h index 75356934d1f..0a4e47a43e3 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.h +++ 
b/src/fcmp_pp/fcmp_pp_crypto.h @@ -28,13 +28,28 @@ #pragma once +extern "C" +{ +#include "crypto/crypto-ops.h" +} #include "ringct/rctTypes.h" namespace fcmp_pp { //---------------------------------------------------------------------------------------------------------------------- +// Field elems needed to get wei x coord +struct PreWeiX final +{ + fe one_plus_y; + fe one_minus_y; +}; +//---------------------------------------------------------------------------------------------------------------------- // TODO: tests for these functions bool clear_torsion(const rct::key &k, rct::key &k_out); +bool batch_invert(const std::vector &elems, std::vector &inv_elems); +bool point_to_pre_wei_x(const rct::key &pub, PreWeiX &pre_wei_x); +void to_wei_x(const fe inv_one_minus_y, const fe one_plus_y, rct::key &wei_x); +void pre_wei_x_to_wei_x(const PreWeiX pre_wei_x, rct::key &wei_x); bool point_to_wei_x(const rct::key &pub, rct::key &wei_x); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- From 330b82ff9ae2f00d11507404b9d86ae74d9d62a8 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 16 Sep 2024 17:21:24 -0700 Subject: [PATCH 119/127] Fix build errs + warnings --- src/fcmp_pp/curve_trees.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 0ab427a1266..e851be1aa8b 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -1007,9 +1007,6 @@ void CurveTrees::set_valid_leaves( std::vector &tuples_out, std::vector &&new_outputs) const { - flattened_leaves_out.clear(); - tuples_out.clear(); - // Keep track of valid outputs to make sure we only use leaves from valid outputs. Can't use std::vector // because std::vector concurrent access is not thread safe. 
enum Boolean : uint8_t { @@ -1028,7 +1025,6 @@ void CurveTrees::set_valid_leaves( { tpool.submit(&waiter, [ - this, &new_outputs, &valid_outputs, &pre_leaves, @@ -1124,6 +1120,7 @@ void CurveTrees::set_valid_leaves( CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to wei x coords"); // Step 5. Set valid tuples + tuples_out.clear(); tuples_out.reserve(n_valid_outputs); for (std::size_t i = 0; i < valid_outputs.size(); ++i) { From 21664f99b2e6af68b9bfc6ae28f6aa791adb461e Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 16 Sep 2024 18:41:04 -0700 Subject: [PATCH 120/127] fcmp++: use fe * instead of vector + clean up --- src/fcmp_pp/curve_trees.cpp | 31 +++++++++++++---------- src/fcmp_pp/fcmp_pp_crypto.cpp | 13 +--------- src/fcmp_pp/fcmp_pp_crypto.h | 2 -- tests/unit_tests/crypto.cpp | 46 +++++++++++++++++++++++++--------- 4 files changed, 53 insertions(+), 39 deletions(-) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index e851be1aa8b..3436b7d951d 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -31,6 +31,7 @@ #include "common/threadpool.h" #include "ringct/rctOps.h" +#include namespace fcmp_pp { @@ -1052,7 +1053,11 @@ void CurveTrees::set_valid_leaves( const std::size_t n_valid_outputs = std::count(valid_outputs.begin(), valid_outputs.end(), True); const std::size_t n_valid_leaf_elems = n_valid_outputs * LEAF_TUPLE_SIZE; - std::vector one_plus_y_vec(n_valid_leaf_elems), one_minus_y_vec(n_valid_leaf_elems); + fe *one_plus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); + CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec, "failed malloc one_plus_y_vec"); + + fe *one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); + CHECK_AND_ASSERT_THROW_MES(one_minus_y_vec, "failed malloc one_minus_y_vec"); std::size_t valid_i = 0; for (std::size_t i = 0; i < valid_outputs.size(); ++i) @@ -1063,16 +1068,14 @@ void CurveTrees::set_valid_leaves( CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, 
"unexpected size of pre_leaves"); CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems > valid_i, "unexpected valid_i"); - static_assert(LEAF_TUPLE_SIZE == 3, "unexpected leaf tuple size"); - CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec.size() > (valid_i+2), "unexpected size of one_plus_y_vec"); - CHECK_AND_ASSERT_THROW_MES(one_minus_y_vec.size() > (valid_i+2), "unexpected size of one_minus_y_vec"); - auto &pl = pre_leaves[i]; auto &O_pre_x = pl.O_pre_x; auto &I_pre_x = pl.I_pre_x; auto &C_pre_x = pl.C_pre_x; + static_assert(LEAF_TUPLE_SIZE == 3, "unexpected leaf tuple size"); + // TODO: avoid copying underlying (tried using pointer to pointers, but wasn't clean) memcpy(&one_plus_y_vec[valid_i], &O_pre_x.one_plus_y, sizeof(fe)); memcpy(&one_plus_y_vec[valid_i+1], &I_pre_x.one_plus_y, sizeof(fe)); @@ -1089,15 +1092,12 @@ void CurveTrees::set_valid_leaves( // Step 3. Get batch inverse of valid pre-Wei x (1-y)'s // - Batch inversion is significantly faster than inverting 1 at a time - std::vector inv_one_minus_y_vec(n_valid_leaf_elems); - CHECK_AND_ASSERT_THROW_MES(batch_invert(one_minus_y_vec, inv_one_minus_y_vec), "failed to batch invert"); + fe *inv_one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); + CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec, "failed malloc inv_one_minus_y_vec"); + CHECK_AND_ASSERT_THROW_MES(fe_batch_invert(inv_one_minus_y_vec, one_minus_y_vec, n_valid_leaf_elems) == 0, + "failed to batch invert"); // Step 4. 
Multithreaded get Wei x coords and convert to Selene scalars - CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec.size() == n_valid_leaf_elems, - "unexpected size of inv_one_minus_y_vec"); - CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec.size() == n_valid_leaf_elems, - "unexpected size of one_plus_y_vec"); - flattened_leaves_out.resize(n_valid_leaf_elems); for (std::size_t i = 0; i < n_valid_leaf_elems; ++i) { @@ -1110,7 +1110,7 @@ void CurveTrees::set_valid_leaves( ]() { rct::key wei_x; - fcmp_pp::to_wei_x(inv_one_minus_y_vec[i], one_plus_y_vec[i], wei_x); + fe_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[i], one_plus_y_vec[i]); flattened_leaves_out[i] = tower_cycle::selene_scalar_from_bytes(wei_x); }, true @@ -1132,6 +1132,11 @@ void CurveTrees::set_valid_leaves( // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output context in the db to save 32 bytes tuples_out.emplace_back(std::move(new_outputs[i])); } + + // Step 6. Clean up + free(one_plus_y_vec); + free(one_minus_y_vec); + free(inv_one_minus_y_vec); } //---------------------------------------------------------------------------------------------------------------------- template diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp index f31d5fe0449..7748355fd2f 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.cpp +++ b/src/fcmp_pp/fcmp_pp_crypto.cpp @@ -48,13 +48,6 @@ bool clear_torsion(const rct::key &k, rct::key &k_out) { return true; } //---------------------------------------------------------------------------------------------------------------------- -bool batch_invert(const std::vector &elems, std::vector &inv_elems) { - if (elems.size() != inv_elems.size()) - return false; - fe_batch_invert(inv_elems.data(), elems.data(), elems.size()); - return true; -} -//---------------------------------------------------------------------------------------------------------------------- bool point_to_pre_wei_x(const rct::key &pub, PreWeiX &pre_wei_x) { if (pub == rct::I) return false; 
@@ -70,14 +63,10 @@ bool point_to_pre_wei_x(const rct::key &pub, PreWeiX &pre_wei_x) { return true; } //---------------------------------------------------------------------------------------------------------------------- -void to_wei_x(const fe inv_one_minus_y, const fe one_plus_y, rct::key &wei_x) { - fe_to_wei_x(wei_x.bytes, inv_one_minus_y, one_plus_y); -} -//---------------------------------------------------------------------------------------------------------------------- void pre_wei_x_to_wei_x(const PreWeiX pre_wei_x, rct::key &wei_x) { fe inv_one_minus_y; fe_invert(inv_one_minus_y, pre_wei_x.one_minus_y); - to_wei_x(inv_one_minus_y, pre_wei_x.one_plus_y, wei_x); + fe_to_wei_x(wei_x.bytes, inv_one_minus_y, pre_wei_x.one_plus_y); } //---------------------------------------------------------------------------------------------------------------------- bool point_to_wei_x(const rct::key &pub, rct::key &wei_x) { diff --git a/src/fcmp_pp/fcmp_pp_crypto.h b/src/fcmp_pp/fcmp_pp_crypto.h index 0a4e47a43e3..97bfc1f1ce6 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.h +++ b/src/fcmp_pp/fcmp_pp_crypto.h @@ -46,9 +46,7 @@ struct PreWeiX final //---------------------------------------------------------------------------------------------------------------------- // TODO: tests for these functions bool clear_torsion(const rct::key &k, rct::key &k_out); -bool batch_invert(const std::vector &elems, std::vector &inv_elems); bool point_to_pre_wei_x(const rct::key &pub, PreWeiX &pre_wei_x); -void to_wei_x(const fe inv_one_minus_y, const fe one_plus_y, rct::key &wei_x); void pre_wei_x_to_wei_x(const PreWeiX pre_wei_x, rct::key &wei_x); bool point_to_wei_x(const rct::key &pub, rct::key &wei_x); //---------------------------------------------------------------------------------------------------------------------- diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp index 6ce4e7299d4..904ebf032e0 100644 --- a/tests/unit_tests/crypto.cpp +++ 
b/tests/unit_tests/crypto.cpp @@ -372,24 +372,46 @@ TEST(Crypto, key_image_y) TEST(Crypto, batch_inversion) { - const std::size_t n_elems = 1000; - std::vector field_elems(n_elems), batch_inverted(n_elems), norm_inverted(n_elems); + const std::vector test_n_elems{1, 100, 1000}; - // Populate random field elems - for (std::size_t i = 0; i < n_elems; ++i) + // Memory allocator + auto alloc = [](const std::size_t n) -> fe* + { + fe * ptr = (fe *) malloc(n * sizeof(fe)); + if (!ptr) + throw std::runtime_error("failed to malloc fe *"); + return ptr; + }; + + // Init test elems + fe *init_elems = alloc(test_n_elems.back()); + for (std::size_t i = 0; i < test_n_elems.back(); ++i) { const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); - ASSERT_EQ(fe_frombytes_vartime(field_elems[i], (unsigned char*)kp.pub.data), 0); + ASSERT_EQ(fe_frombytes_vartime(init_elems[i], (unsigned char*)kp.pub.data), 0); } - // Do batch inversion - fe_batch_invert(batch_inverted.data(), field_elems.data(), n_elems); - - // Invert every elem individually - for (std::size_t i = 0; i < n_elems; ++i) + for (std::size_t n_elems : {1, 100, 1000}) { - fe_invert(norm_inverted[i], field_elems[i]); + // Memory allocations + fe *batch_inverted = alloc(n_elems); + fe *norm_inverted = alloc(n_elems); + + // Do batch inversion + ASSERT_EQ(fe_batch_invert(batch_inverted, init_elems, n_elems), 0); + + // Invert every elem individually + for (std::size_t i = 0; i < n_elems; ++i) + { + fe_invert(norm_inverted[i], init_elems[i]); + } + + ASSERT_EQ(memcmp(batch_inverted, norm_inverted, n_elems * sizeof(fe)), 0); + + // Clean up + free(batch_inverted); + free(norm_inverted); } - ASSERT_EQ(memcmp(batch_inverted.data(), norm_inverted.data(), n_elems * sizeof(fe)), 0); + free(init_elems); } From 513dae88dc62dd8c4a9d9baf6d5a54756d88584e Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 16 Sep 2024 18:55:39 -0700 Subject: [PATCH 121/127] small touchups --- src/fcmp_pp/curve_trees.cpp | 
6 +++--- src/fcmp_pp/fcmp_pp_crypto.cpp | 1 - tests/unit_tests/crypto.cpp | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 3436b7d951d..12640c578c8 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -737,11 +737,11 @@ static PreLeafTuple output_to_pre_leaf_tuple(const OutputPair &output_pair) PreLeafTuple plt; if (!fcmp_pp::point_to_pre_wei_x(O, plt.O_pre_x)) - throw std::runtime_error("failed to get pre wei x scalar from O"); + throw std::runtime_error("failed to get pre wei x from O"); if (!fcmp_pp::point_to_pre_wei_x(rct::pt2rct(I), plt.I_pre_x)) - throw std::runtime_error("failed to get pre wei x scalar from I"); + throw std::runtime_error("failed to get pre wei x from I"); if (!fcmp_pp::point_to_pre_wei_x(C, plt.C_pre_x)) - throw std::runtime_error("failed to get pre wei x scalar from C"); + throw std::runtime_error("failed to get pre wei x from C"); return plt; } diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp index 7748355fd2f..4bd48f6f753 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.cpp +++ b/src/fcmp_pp/fcmp_pp_crypto.cpp @@ -56,7 +56,6 @@ bool point_to_pre_wei_x(const rct::key &pub, PreWeiX &pre_wei_x) { return false; fe one; fe_1(one); - // (1+y),(1-y) fe_add(pre_wei_x.one_plus_y, one, y); fe_sub(pre_wei_x.one_minus_y, one, y); diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp index 904ebf032e0..043d324ce04 100644 --- a/tests/unit_tests/crypto.cpp +++ b/tests/unit_tests/crypto.cpp @@ -377,7 +377,7 @@ TEST(Crypto, batch_inversion) // Memory allocator auto alloc = [](const std::size_t n) -> fe* { - fe * ptr = (fe *) malloc(n * sizeof(fe)); + fe *ptr = (fe *) malloc(n * sizeof(fe)); if (!ptr) throw std::runtime_error("failed to malloc fe *"); return ptr; @@ -391,7 +391,7 @@ TEST(Crypto, batch_inversion) ASSERT_EQ(fe_frombytes_vartime(init_elems[i], (unsigned char*)kp.pub.data), 0); } - for (std::size_t 
n_elems : {1, 100, 1000}) + for (const std::size_t n_elems : test_n_elems) { // Memory allocations fe *batch_inverted = alloc(n_elems); From da710fcaaf1111c464176d3822ede1d14371d5f3 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 16 Sep 2024 19:04:48 -0700 Subject: [PATCH 122/127] crypto: test more batch inversions --- tests/unit_tests/crypto.cpp | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp index 043d324ce04..07db5d27463 100644 --- a/tests/unit_tests/crypto.cpp +++ b/tests/unit_tests/crypto.cpp @@ -372,7 +372,7 @@ TEST(Crypto, key_image_y) TEST(Crypto, batch_inversion) { - const std::vector test_n_elems{1, 100, 1000}; + std::size_t MAX_TEST_ELEMS = 1000; // Memory allocator auto alloc = [](const std::size_t n) -> fe* @@ -383,35 +383,25 @@ TEST(Crypto, batch_inversion) return ptr; }; - // Init test elems - fe *init_elems = alloc(test_n_elems.back()); - for (std::size_t i = 0; i < test_n_elems.back(); ++i) + // Init test elems and individual inversions + fe *init_elems = alloc(MAX_TEST_ELEMS); + fe *norm_inverted = alloc(MAX_TEST_ELEMS); + for (std::size_t i = 0; i < MAX_TEST_ELEMS; ++i) { const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); ASSERT_EQ(fe_frombytes_vartime(init_elems[i], (unsigned char*)kp.pub.data), 0); + fe_invert(norm_inverted[i], init_elems[i]); } - for (const std::size_t n_elems : test_n_elems) + // Do batch inversions and compare to individual inversions + for (std::size_t n_elems = 1; n_elems <= MAX_TEST_ELEMS; ++n_elems) { - // Memory allocations fe *batch_inverted = alloc(n_elems); - fe *norm_inverted = alloc(n_elems); - - // Do batch inversion ASSERT_EQ(fe_batch_invert(batch_inverted, init_elems, n_elems), 0); - - // Invert every elem individually - for (std::size_t i = 0; i < n_elems; ++i) - { - fe_invert(norm_inverted[i], init_elems[i]); - } - ASSERT_EQ(memcmp(batch_inverted, norm_inverted, 
n_elems * sizeof(fe)), 0); - - // Clean up free(batch_inverted); - free(norm_inverted); } free(init_elems); + free(norm_inverted); } From a74f7aeada6628add26d85488c929c9983a578d4 Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 17 Sep 2024 10:36:00 -0700 Subject: [PATCH 123/127] fcmp++: rename "PreWeiX" -> "EdYDerivatives" + small touchups Naming suggestion from @kayabaNerve --- src/crypto/crypto-ops.c | 2 +- src/crypto/crypto-ops.h | 2 +- src/fcmp_pp/curve_trees.cpp | 36 +++++++++++++++++++--------------- src/fcmp_pp/curve_trees.h | 6 +++--- src/fcmp_pp/fcmp_pp_crypto.cpp | 16 +++++++-------- src/fcmp_pp/fcmp_pp_crypto.h | 6 +++--- tests/unit_tests/crypto.cpp | 6 +++--- 7 files changed, 39 insertions(+), 35 deletions(-) diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index 8d2680c62ac..9dd9ff7ddf6 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -3920,7 +3920,7 @@ int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p) { } // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 -void fe_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y) +void fe_ed_y_derivatives_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y) { // (1/(1-y))*(1+y) fe inv_one_minus_y_mul_one_plus_y; diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index cd3821663cd..b5976c7621b 100644 --- a/src/crypto/crypto-ops.h +++ b/src/crypto/crypto-ops.h @@ -173,4 +173,4 @@ void fe_1(fe h); int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p); -void fe_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y); +void fe_ed_y_derivatives_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y); diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index 12640c578c8..d8b08085d0a 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -736,12 +736,12 @@ static PreLeafTuple 
output_to_pre_leaf_tuple(const OutputPair &output_pair) crypto::derive_key_image_generator(output_pubkey, I); PreLeafTuple plt; - if (!fcmp_pp::point_to_pre_wei_x(O, plt.O_pre_x)) - throw std::runtime_error("failed to get pre wei x from O"); - if (!fcmp_pp::point_to_pre_wei_x(rct::pt2rct(I), plt.I_pre_x)) - throw std::runtime_error("failed to get pre wei x from I"); - if (!fcmp_pp::point_to_pre_wei_x(C, plt.C_pre_x)) - throw std::runtime_error("failed to get pre wei x from C"); + if (!fcmp_pp::point_to_ed_y_derivatives(O, plt.O_pre_x)) + throw std::runtime_error("failed to get ed y derivatives from O"); + if (!fcmp_pp::point_to_ed_y_derivatives(rct::pt2rct(I), plt.I_pre_x)) + throw std::runtime_error("failed to get ed y derivatives from I"); + if (!fcmp_pp::point_to_ed_y_derivatives(C, plt.C_pre_x)) + throw std::runtime_error("failed to get ed y derivatives from C"); return plt; } @@ -755,9 +755,9 @@ CurveTrees::LeafTuple CurveTrees::leaf_tuple(con const auto plt = output_to_pre_leaf_tuple(output_pair); rct::key O_x, I_x, C_x; - fcmp_pp::pre_wei_x_to_wei_x(plt.O_pre_x, O_x); - fcmp_pp::pre_wei_x_to_wei_x(plt.I_pre_x, I_x); - fcmp_pp::pre_wei_x_to_wei_x(plt.C_pre_x, C_x); + fcmp_pp::ed_y_derivatives_to_wei_x(plt.O_pre_x, O_x); + fcmp_pp::ed_y_derivatives_to_wei_x(plt.I_pre_x, I_x); + fcmp_pp::ed_y_derivatives_to_wei_x(plt.C_pre_x, C_x); return LeafTuple{ .O_x = tower_cycle::selene_scalar_from_bytes(O_x), @@ -1019,7 +1019,8 @@ void CurveTrees::set_valid_leaves( tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); tools::threadpool::waiter waiter(tpool); - // Step 1. Multithreaded convert valid outputs into pre-Wei x coords + // Step 1. 
Multithreaded convert valid outputs into Edwards y derivatives needed to get Wei x coordinates + // TODO: investigate batched threading (as opposed to small tasks) std::vector pre_leaves; pre_leaves.resize(new_outputs.size()); for (std::size_t i = 0; i < new_outputs.size(); ++i) @@ -1047,15 +1048,17 @@ void CurveTrees::set_valid_leaves( ); } - CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to pre wei x coords"); + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to ed y derivatives"); - // Step 2. Collect valid pre-Wei x coords + // Step 2. Collect valid Edwards y derivatives const std::size_t n_valid_outputs = std::count(valid_outputs.begin(), valid_outputs.end(), True); const std::size_t n_valid_leaf_elems = n_valid_outputs * LEAF_TUPLE_SIZE; + // Collecting (1+y)'s fe *one_plus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec, "failed malloc one_plus_y_vec"); + // Collecting (1-y)'s fe *one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); CHECK_AND_ASSERT_THROW_MES(one_minus_y_vec, "failed malloc one_minus_y_vec"); @@ -1090,14 +1093,15 @@ void CurveTrees::set_valid_leaves( CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems == valid_i, "unexpected end valid_i"); - // Step 3. Get batch inverse of valid pre-Wei x (1-y)'s + // Step 3. Get batch inverse of all valid (1-y)'s // - Batch inversion is significantly faster than inverting 1 at a time fe *inv_one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe)); CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec, "failed malloc inv_one_minus_y_vec"); CHECK_AND_ASSERT_THROW_MES(fe_batch_invert(inv_one_minus_y_vec, one_minus_y_vec, n_valid_leaf_elems) == 0, "failed to batch invert"); - // Step 4. Multithreaded get Wei x coords and convert to Selene scalars + // Step 4. 
Multithreaded get Wei x's and convert to Selene scalars + // TODO: investigate batched threading (as opposed to small tasks) flattened_leaves_out.resize(n_valid_leaf_elems); for (std::size_t i = 0; i < n_valid_leaf_elems; ++i) { @@ -1110,7 +1114,7 @@ void CurveTrees::set_valid_leaves( ]() { rct::key wei_x; - fe_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[i], one_plus_y_vec[i]); + fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[i], one_plus_y_vec[i]); flattened_leaves_out[i] = tower_cycle::selene_scalar_from_bytes(wei_x); }, true @@ -1119,7 +1123,7 @@ void CurveTrees::set_valid_leaves( CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to wei x coords"); - // Step 5. Set valid tuples + // Step 5. Set valid tuples to be stored in the db tuples_out.clear(); tuples_out.reserve(n_valid_outputs); for (std::size_t i = 0; i < valid_outputs.size(); ++i) diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 7aa3f827550..1c9758c3a0f 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -159,9 +159,9 @@ using OutputsByUnlockBlock = std::unordered_map fe* @@ -384,8 +384,8 @@ TEST(Crypto, batch_inversion) }; // Init test elems and individual inversions - fe *init_elems = alloc(MAX_TEST_ELEMS); - fe *norm_inverted = alloc(MAX_TEST_ELEMS); + fe *init_elems = alloc(MAX_TEST_ELEMS); + fe *norm_inverted = alloc(MAX_TEST_ELEMS); for (std::size_t i = 0; i < MAX_TEST_ELEMS; ++i) { const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); From 901cc873aa15140e52add90ca4f59f78720e30bc Mon Sep 17 00:00:00 2001 From: j-berman Date: Tue, 17 Sep 2024 10:39:15 -0700 Subject: [PATCH 124/127] missing ref --- src/fcmp_pp/fcmp_pp_crypto.cpp | 2 +- src/fcmp_pp/fcmp_pp_crypto.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp index 990fec57e16..6d3f2507a7b 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.cpp +++ 
b/src/fcmp_pp/fcmp_pp_crypto.cpp @@ -62,7 +62,7 @@ bool point_to_ed_y_derivatives(const rct::key &pub, EdYDerivatives &ed_y_derivat return true; } //---------------------------------------------------------------------------------------------------------------------- -void ed_y_derivatives_to_wei_x(const EdYDerivatives pre_wei_x, rct::key &wei_x) { +void ed_y_derivatives_to_wei_x(const EdYDerivatives &pre_wei_x, rct::key &wei_x) { fe inv_one_minus_y; fe_invert(inv_one_minus_y, pre_wei_x.one_minus_y); fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y, pre_wei_x.one_plus_y); diff --git a/src/fcmp_pp/fcmp_pp_crypto.h b/src/fcmp_pp/fcmp_pp_crypto.h index 46bb01c5f84..2c63ff22455 100644 --- a/src/fcmp_pp/fcmp_pp_crypto.h +++ b/src/fcmp_pp/fcmp_pp_crypto.h @@ -47,7 +47,7 @@ struct EdYDerivatives final // TODO: tests for these functions bool clear_torsion(const rct::key &k, rct::key &k_out); bool point_to_ed_y_derivatives(const rct::key &pub, EdYDerivatives &ed_y_derivatives); -void ed_y_derivatives_to_wei_x(const EdYDerivatives ed_y_derivatives, rct::key &wei_x); +void ed_y_derivatives_to_wei_x(const EdYDerivatives &ed_y_derivatives, rct::key &wei_x); bool point_to_wei_x(const rct::key &pub, rct::key &wei_x); //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- From 036fc2a2b8826d9ba850cd9317adcd7c8672ffe1 Mon Sep 17 00:00:00 2001 From: j-berman Date: Wed, 18 Sep 2024 13:17:46 -0700 Subject: [PATCH 125/127] fcmp++ tests: set up test for prove - Moved functions around in unit_tests/curve_trees.{h,cpp} to ease using the in-memory Global tree across tests - Introduced PathV1 struct, which is a path in the tree containing whole chunks at each layer - Implemented functions to get_path_at_leaf_idx and get_tree_root on in-memory Global tree --- src/fcmp_pp/curve_trees.h | 24 + 
tests/unit_tests/CMakeLists.txt | 1 + tests/unit_tests/curve_trees.cpp | 832 +++++++++++++++++-------------- tests/unit_tests/curve_trees.h | 57 ++- tests/unit_tests/fcmp_pp.cpp | 64 +++ 5 files changed, 590 insertions(+), 388 deletions(-) create mode 100644 tests/unit_tests/fcmp_pp.cpp diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h index 1c9758c3a0f..aca0c753fb5 100644 --- a/src/fcmp_pp/curve_trees.h +++ b/src/fcmp_pp/curve_trees.h @@ -156,6 +156,14 @@ static_assert(sizeof(OutputContext) == (8+32+32), "db expects 72 bytes for outpu using OutputsByUnlockBlock = std::unordered_map>; +// Ed25519 points (can go from OutputTuple -> LeafTuple) +struct OutputTuple final +{ + rct::key O; + rct::key I; + rct::key C; +}; + // Struct composed of ec elems needed to get a full-fledged leaf tuple struct PreLeafTuple final { @@ -320,6 +328,22 @@ const std::size_t SELENE_CHUNK_WIDTH = 18; std::shared_ptr curve_trees_v1( const std::size_t helios_chunk_width = HELIOS_CHUNK_WIDTH, const std::size_t selene_chunk_width = SELENE_CHUNK_WIDTH); + +// A path in the tree containing whole chunks at each layer +// - leaves contain a complete chunk of leaves, encoded as compressed ed25519 points +// - c2_layers[0] refers to the chunk of elems in the tree in the layer after leaves. The hash of the chunk of +// leaves is 1 member of the c2_layers[0] chunk. The rest of c2_layers[0] is the chunk of elems that hash is in. +// - layers alternate between C1 and C2 +// - c1_layers[0] refers to the chunk of elems in the tree in the layer after c2_layers[0]. The hash of the chunk +// of c2_layers[0] is 1 member of the c1_layers[0] chunk. The rest of c1_layers[0] is the chunk of elems that hash +// is in. +// - c2_layers[1] refers to the chunk of elems in the tree in the layer after c1_layers[0] etc. 
+struct PathV1 final +{ + std::vector leaves; + std::vector> c1_layers; + std::vector> c2_layers; +}; //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- } //namespace curve_trees diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index 41406d9664c..eb48e281cb1 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -52,6 +52,7 @@ set(unit_tests_sources epee_serialization.cpp epee_utils.cpp expect.cpp + fcmp_pp.cpp json_serialization.cpp get_xtype_from_string.cpp hashchain.cpp diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp index 649277e50cc..491448b3a22 100644 --- a/tests/unit_tests/curve_trees.cpp +++ b/tests/unit_tests/curve_trees.cpp @@ -33,10 +33,117 @@ #include "fcmp_pp/fcmp_pp_crypto.h" #include "misc_log_ex.h" #include "ringct/rctOps.h" -#include "unit_tests_utils.h" #include + +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test helpers +//---------------------------------------------------------------------------------------------------------------------- +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, + const std::size_t old_n_leaf_tuples, + const std::size_t new_n_leaf_tuples) +{ + std::vector outs; + outs.reserve(new_n_leaf_tuples); + + for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) + { + const std::uint64_t output_id = old_n_leaf_tuples + i; + + // Generate random output tuple + crypto::secret_key o,c; + crypto::public_key O,C; + crypto::generate_keys(O, o, o, false); + crypto::generate_keys(C, c, c, false); + + rct::key C_key = 
rct::pk2rct(C); + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(O), + .commitment = std::move(C_key) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_id, + .output_pair = std::move(output_pair) + }; + + outs.emplace_back(std::move(output_context)); + } + + return outs; +} +//---------------------------------------------------------------------------------------------------------------------- +static const Selene::Scalar generate_random_selene_scalar() +{ + crypto::secret_key s; + crypto::public_key S; + + crypto::generate_keys(S, s, s, false); + + rct::key S_x; + CHECK_AND_ASSERT_THROW_MES(fcmp_pp::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); + return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x); +} +//---------------------------------------------------------------------------------------------------------------------- +static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples, + const std::size_t n_leaves, + std::shared_ptr curve_trees, + unit_test::BlockchainLMDBTest &test_db) +{ + cryptonote::db_wtxn_guard guard(test_db.m_db); + + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), + false, "unexpected starting n leaf tuples in db"); + + auto leaves = generate_random_leaves(*curve_trees, 0, n_leaves); + + test_db.m_db->grow_tree(std::move(leaves)); + + return test_db.m_db->audit_tree(expected_old_n_leaf_tuples + n_leaves); +} +//---------------------------------------------------------------------------------------------------------------------- +static bool trim_tree_db(const std::size_t expected_old_n_leaf_tuples, + const std::size_t trim_leaves, + unit_test::BlockchainLMDBTest &test_db) +{ + cryptonote::db_wtxn_guard guard(test_db.m_db); + + CHECK_AND_ASSERT_THROW_MES(expected_old_n_leaf_tuples >= trim_leaves, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_leaves > 0, 
"must be trimming some leaves"); + + LOG_PRINT_L1("Trimming " << trim_leaves << " leaf tuples from tree with " + << expected_old_n_leaf_tuples << " leaves in db"); + + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), + false, "trimming unexpected starting n leaf tuples in db"); + + // Can use 0 for trim_block_id since it's unused in tests + test_db.m_db->trim_tree(trim_leaves, 0); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(expected_old_n_leaf_tuples - trim_leaves), false, + "failed to trim tree in db"); + + MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +#define BEGIN_INIT_TREE_ITER(curve_trees) \ + for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) \ + { \ + LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves"); \ + \ + /* Init tree in memory */ \ + CurveTreesGlobalTree global_tree(*curve_trees); \ + ASSERT_TRUE(global_tree.grow_tree(0, init_leaves)); \ + \ + /* Init tree in db */ \ + INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees); \ + ASSERT_TRUE(grow_tree_db(0, init_leaves, curve_trees, test_db)); \ +//---------------------------------------------------------------------------------------------------------------------- +#define END_INIT_TREE_ITER(curve_trees) \ + }; \ //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- // CurveTreesGlobalTree helpers @@ -109,59 +216,305 @@ static std::vector get_last_chunk_children_to_trim(co } //---------------------------------------------------------------------------------------------------------------------- 
//---------------------------------------------------------------------------------------------------------------------- -// CurveTreesGlobalTree implementations +// CurveTreesGlobalTree public implementations //---------------------------------------------------------------------------------------------------------------------- std::size_t CurveTreesGlobalTree::get_num_leaf_tuples() const { return m_tree.leaves.size(); } //---------------------------------------------------------------------------------------------------------------------- -CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes() const +bool CurveTreesGlobalTree::grow_tree(const std::size_t expected_old_n_leaf_tuples,const std::size_t new_n_leaf_tuples) { - CurveTreesV1::LastHashes last_hashes_out; - auto &c1_last_hashes_out = last_hashes_out.c1_last_hashes; - auto &c2_last_hashes_out = last_hashes_out.c2_last_hashes; + // Do initial tree reads + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); + const CurveTreesV1::LastHashes last_hashes = this->get_last_hashes(); + + this->log_last_hashes(last_hashes); + + auto new_outputs = generate_random_leaves(m_curve_trees, old_n_leaf_tuples, new_n_leaf_tuples); + + // Get a tree extension object to the existing tree using randomly generated leaves + // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves + const auto tree_extension = m_curve_trees.get_tree_extension(old_n_leaf_tuples, + last_hashes, + std::move(new_outputs)); + + this->log_tree_extension(tree_extension); + + // Use the tree extension to extend the existing tree + this->extend_tree(tree_extension); + + this->log_tree(); + + // Validate tree structure and all hashes + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples + new_n_leaf_tuples; + return this->audit_tree(expected_n_leaf_tuples); +} 
+//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesGlobalTree::trim_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_n_leaf_tuples) +{ + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); + + // Trim the global tree by `trim_n_leaf_tuples` + LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree with " + << old_n_leaf_tuples << " leaves in memory"); + + // Get trim instructions + const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers"); + + // Do initial tree reads + const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions); + const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = m_curve_trees.get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + this->reduce_tree(tree_reduction); + + const std::size_t new_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES((new_n_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected num leaves after trim"); + + MDEBUG("Finished trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); + this->log_tree(); + + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples - trim_n_leaf_tuples; + bool res = this->audit_tree(expected_n_leaf_tuples); + 
CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); + + MDEBUG("Successfully trimmed " << trim_n_leaf_tuples << " leaves in memory"); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) const +{ + MDEBUG("Auditing global tree"); + + auto leaves = m_tree.leaves; const auto &c1_layers = m_tree.c1_layers; const auto &c2_layers = m_tree.c2_layers; - // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), - "unexpected number of curve layers"); + CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves"); - c1_last_hashes_out.reserve(c1_layers.size()); - c2_last_hashes_out.reserve(c2_layers.size()); + if (leaves.empty()) + { + CHECK_AND_ASSERT_MES(c2_layers.empty() && c1_layers.empty(), false, "expected empty tree"); + return true; + } - if (c2_layers.empty()) - return last_hashes_out; + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); + CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + false, "unexpected mismatch of c2 and c1 layers"); - // Next parents will be c2 - bool use_c2 = true; + // Verify root has 1 member in it + const bool c2_is_root = c2_layers.size() > c1_layers.size(); + CHECK_AND_ASSERT_MES(c2_is_root ? 
c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, + "root must have 1 member in it"); - // Then get last chunks up until the root - std::size_t c1_idx = 0; - std::size_t c2_idx = 0; - while (c1_last_hashes_out.size() < c1_layers.size() || c2_last_hashes_out.size() < c2_layers.size()) + // Iterate from root down to layer above leaves, and check hashes match up correctly + bool parent_is_c2 = c2_is_root; + std::size_t c2_idx = c2_layers.size() - 1; + std::size_t c1_idx = c1_layers.empty() ? 0 : (c1_layers.size() - 1); + for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) + { + // TODO: implement templated function for below if statement + if (parent_is_c2) + { + MDEBUG("Validating parent c2 layer " << c2_idx << " , child c1 layer " << c1_idx); + + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + + const Layer &parents = c2_layers[c2_idx]; + const Layer &children = c1_layers[c1_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); + + std::vector child_scalars; + fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c1, + children, + child_scalars); + + const bool valid = validate_layer(m_curve_trees.m_c2, + parents, + child_scalars, + m_curve_trees.m_c2_width); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + + --c2_idx; + } + else + { + MDEBUG("Validating parent c1 layer " << c1_idx << " , child c2 layer " << c2_idx); + + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + + const Layer &parents = c1_layers[c1_idx]; + const Layer &children = c2_layers[c2_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), 
false, "no parents at c1_idx " + std::to_string(c1_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); + + std::vector child_scalars; + fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c2, + children, + child_scalars); + + const bool valid = validate_layer( + m_curve_trees.m_c1, + parents, + child_scalars, + m_curve_trees.m_c1_width); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + + --c1_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + MDEBUG("Validating leaves"); + + // Convert output pairs to leaf tuples + std::vector leaf_tuples; + leaf_tuples.reserve(leaves.size()); + for (const auto &leaf : leaves) + { + auto leaf_tuple = m_curve_trees.leaf_tuple(leaf); + leaf_tuples.emplace_back(std::move(leaf_tuple)); + } + + // Now validate leaves + return validate_layer(m_curve_trees.m_c2, + c2_layers[0], + m_curve_trees.flatten_leaves(std::move(leaf_tuples)), + m_curve_trees.m_leaf_layer_chunk_width); +} +//---------------------------------------------------------------------------------------------------------------------- +fcmp_pp::curve_trees::PathV1 CurveTreesGlobalTree::get_path_at_leaf_idx(const std::size_t leaf_idx) const +{ + fcmp_pp::curve_trees::PathV1 path_out; + + const std::size_t n_leaf_tuples = get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples > leaf_idx, "too high leaf idx"); + + // Get leaves + const std::size_t start_leaf_idx = (leaf_idx / m_curve_trees.m_c2_width) * m_curve_trees.m_c2_width; + const std::size_t end_leaf_idx = std::min(n_leaf_tuples, start_leaf_idx + m_curve_trees.m_c2_width); + for (std::size_t i = start_leaf_idx; i < end_leaf_idx; ++i) + { + const auto &output_pair = m_tree.leaves[i]; + + const crypto::public_key &output_pubkey = output_pair.output_pubkey; + const rct::key &commitment = output_pair.commitment; + + crypto::ec_point I; + crypto::derive_key_image_generator(output_pubkey, I); + + 
rct::key O = rct::pk2rct(output_pubkey); + rct::key C = commitment; + + auto output_tuple = fcmp_pp::curve_trees::OutputTuple{ + .O = std::move(O), + .I = std::move(rct::pt2rct(I)), + .C = std::move(C) + }; + + path_out.leaves.emplace_back(std::move(output_tuple)); + } + + // Get parents + const std::size_t n_layers = m_tree.c1_layers.size() + m_tree.c2_layers.size(); + std::size_t start_parent_idx = start_leaf_idx / m_curve_trees.m_c2_width; + std::size_t c1_idx = 0, c2_idx = 0; + bool use_c2 = true; + for (std::size_t i = 0; i < n_layers; ++i) { if (use_c2) { - CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); - c2_last_hashes_out.push_back(c2_layers[c2_idx].back()); + path_out.c2_layers.emplace_back(); + auto &layer_out = path_out.c2_layers.back(); + + CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "too high c2_idx"); + const std::size_t n_layer_elems = m_tree.c2_layers[c2_idx].size(); + + CHECK_AND_ASSERT_THROW_MES(n_layer_elems > start_parent_idx, "too high parent idx"); + const std::size_t end_parent_idx = std::min(n_layer_elems, start_parent_idx + m_curve_trees.m_c2_width); + + for (std::size_t j = start_parent_idx; j < end_parent_idx; ++j) + { + layer_out.emplace_back(m_tree.c2_layers[c2_idx][j]); + } + + start_parent_idx /= m_curve_trees.m_c1_width; ++c2_idx; } else { - CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); - c1_last_hashes_out.push_back(c1_layers[c1_idx].back()); + path_out.c1_layers.emplace_back(); + auto &layer_out = path_out.c1_layers.back(); + + CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "too high c1_idx"); + const std::size_t n_layer_elems = m_tree.c1_layers[c1_idx].size(); + + CHECK_AND_ASSERT_THROW_MES(n_layer_elems > start_parent_idx, "too high parent idx"); + const std::size_t end_parent_idx = std::min(n_layer_elems, start_parent_idx + m_curve_trees.m_c1_width); + + for (std::size_t j = start_parent_idx; j < end_parent_idx; ++j) + { + 
layer_out.emplace_back(m_tree.c1_layers[c1_idx][j]); + } + + start_parent_idx /= m_curve_trees.m_c2_width; ++c1_idx; } use_c2 = !use_c2; } - return last_hashes_out; + return path_out; } //---------------------------------------------------------------------------------------------------------------------- +std::array CurveTreesGlobalTree::get_tree_root() const +{ + const std::size_t n_layers = m_tree.c1_layers.size() + m_tree.c2_layers.size(); + + if (n_layers == 0) + return std::array(); + + if ((n_layers % 2) == 0) + { + CHECK_AND_ASSERT_THROW_MES(!m_tree.c1_layers.empty(), "missing c1 layers"); + const auto &last_layer = m_tree.c1_layers.back(); + CHECK_AND_ASSERT_THROW_MES(!last_layer.empty(), "missing elems from last c1 layer"); + return m_curve_trees.m_c1->to_bytes(last_layer.back()); + } + else + { + CHECK_AND_ASSERT_THROW_MES(!m_tree.c2_layers.empty(), "missing c2 layers"); + const auto &last_layer = m_tree.c2_layers.back(); + CHECK_AND_ASSERT_THROW_MES(!last_layer.empty(), "missing elems from last c2 layer"); + return m_curve_trees.m_c2->to_bytes(last_layer.back()); + } +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTreesGlobalTree private implementations +//---------------------------------------------------------------------------------------------------------------------- void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_extension) { // Add the leaves @@ -171,13 +524,7 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); for (const auto &o : tree_extension.leaves.tuples) { - auto leaf = m_curve_trees.leaf_tuple(o.output_pair); - - m_tree.leaves.emplace_back(CurveTreesV1::LeafTuple{ - .O_x = 
std::move(leaf.O_x), - .I_x = std::move(leaf.I_x), - .C_x = std::move(leaf.C_x) - }); + m_tree.leaves.emplace_back(o.output_pair); } // Add the layers @@ -339,6 +686,52 @@ void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_r m_tree.c2_layers.shrink_to_fit(); } //---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes() const +{ + CurveTreesV1::LastHashes last_hashes_out; + auto &c1_last_hashes_out = last_hashes_out.c1_last_hashes; + auto &c2_last_hashes_out = last_hashes_out.c2_last_hashes; + + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; + + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + "unexpected number of curve layers"); + + c1_last_hashes_out.reserve(c1_layers.size()); + c2_last_hashes_out.reserve(c2_layers.size()); + + if (c2_layers.empty()) + return last_hashes_out; + + // Next parents will be c2 + bool use_c2 = true; + + // Then get last chunks up until the root + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + while (c1_last_hashes_out.size() < c1_layers.size() || c2_last_hashes_out.size() < c2_layers.size()) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer"); + c2_last_hashes_out.push_back(c2_layers[c2_idx].back()); + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer"); + c1_last_hashes_out.push_back(c1_layers[c1_idx].back()); + ++c1_idx; + } + + use_c2 = !use_c2; + } + + return last_hashes_out; +} +//---------------------------------------------------------------------------------------------------------------------- // TODO: template CurveTreesV1::LastChunkChildrenToTrim 
CurveTreesGlobalTree::get_all_last_chunk_children_to_trim( const std::vector &trim_instructions) @@ -364,7 +757,7 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE; CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high"); - const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx]; + const auto leaf_tuple = m_curve_trees.leaf_tuple(m_tree.leaves[leaf_tuple_idx]); leaves_to_trim.push_back(leaf_tuple.O_x); leaves_to_trim.push_back(leaf_tuple.I_x); @@ -419,183 +812,56 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c end_trim_idx); all_children_to_trim.c1_children.emplace_back(std::move(children_to_trim)); - ++c2_idx; - } - - parent_is_c2 = !parent_is_c2; - } - - return all_children_to_trim; -} -//---------------------------------------------------------------------------------------------------------------------- -CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim( - const std::vector &trim_instructions) const -{ - CurveTreesV1::LastHashes last_hashes; - - if (trim_instructions.empty()) - return last_hashes; - - bool parent_is_c2 = true; - std::size_t c1_idx = 0; - std::size_t c2_idx = 0; - for (const auto &trim_layer_instructions : trim_instructions) - { - const std::size_t new_total_parents = trim_layer_instructions.new_total_parents; - CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "no new parents"); - - if (parent_is_c2) - { - CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high"); - const auto &c2_layer = m_tree.c2_layers[c2_idx]; - - CHECK_AND_ASSERT_THROW_MES(c2_layer.size() >= new_total_parents, "not enough c2 parents"); - - last_hashes.c2_last_hashes.push_back(c2_layer[new_total_parents - 1]); - ++c2_idx; - } - else - { - CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high"); - const auto &c1_layer = 
m_tree.c1_layers[c1_idx]; - - CHECK_AND_ASSERT_THROW_MES(c1_layer.size() >= new_total_parents, "not enough c1 parents"); - - last_hashes.c1_last_hashes.push_back(c1_layer[new_total_parents - 1]); - ++c1_idx; - } - - parent_is_c2 = !parent_is_c2; - } - - return last_hashes; -} -//---------------------------------------------------------------------------------------------------------------------- -void CurveTreesGlobalTree::trim_tree(const std::size_t trim_n_leaf_tuples) -{ - const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); - MDEBUG(old_n_leaf_tuples << " leaves in the tree, trimming " << trim_n_leaf_tuples); - - // Get trim instructions - const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); - MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers"); - - // Do initial tree reads - const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions); - const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); - - // Get the new hashes, wrapped in a simple struct we can use to trim the tree - const auto tree_reduction = m_curve_trees.get_tree_reduction( - trim_instructions, - last_chunk_children_to_trim, - last_hashes_to_trim); + ++c2_idx; + } - // Use tree reduction to trim tree - this->reduce_tree(tree_reduction); + parent_is_c2 = !parent_is_c2; + } - const std::size_t new_n_leaf_tuples = this->get_num_leaf_tuples(); - CHECK_AND_ASSERT_THROW_MES((new_n_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, - "unexpected num leaves after trim"); + return all_children_to_trim; } //---------------------------------------------------------------------------------------------------------------------- -bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim( + const std::vector &trim_instructions) const { - MDEBUG("Auditing 
global tree"); - - auto leaves = m_tree.leaves; - const auto &c1_layers = m_tree.c1_layers; - const auto &c2_layers = m_tree.c2_layers; + CurveTreesV1::LastHashes last_hashes; - CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves"); + if (trim_instructions.empty()) + return last_hashes; - if (leaves.empty()) + bool parent_is_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (const auto &trim_layer_instructions : trim_instructions) { - CHECK_AND_ASSERT_MES(c2_layers.empty() && c1_layers.empty(), false, "expected empty tree"); - return true; - } - - CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); - CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), - false, "unexpected mismatch of c2 and c1 layers"); - - // Verify root has 1 member in it - const bool c2_is_root = c2_layers.size() > c1_layers.size(); - CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, - "root must have 1 member in it"); + const std::size_t new_total_parents = trim_layer_instructions.new_total_parents; + CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "no new parents"); - // Iterate from root down to layer above leaves, and check hashes match up correctly - bool parent_is_c2 = c2_is_root; - std::size_t c2_idx = c2_layers.size() - 1; - std::size_t c1_idx = c1_layers.empty() ? 
0 : (c1_layers.size() - 1); - for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) - { - // TODO: implement templated function for below if statement if (parent_is_c2) { - MDEBUG("Validating parent c2 layer " << c2_idx << " , child c1 layer " << c1_idx); - - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); - - const Layer &parents = c2_layers[c2_idx]; - const Layer &children = c1_layers[c1_idx]; - - CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); - CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); - - std::vector child_scalars; - fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c1, - children, - child_scalars); - - const bool valid = validate_layer(m_curve_trees.m_c2, - parents, - child_scalars, - m_curve_trees.m_c2_width); + CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high"); + const auto &c2_layer = m_tree.c2_layers[c2_idx]; - CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + CHECK_AND_ASSERT_THROW_MES(c2_layer.size() >= new_total_parents, "not enough c2 parents"); - --c2_idx; + last_hashes.c2_last_hashes.push_back(c2_layer[new_total_parents - 1]); + ++c2_idx; } else { - MDEBUG("Validating parent c1 layer " << c1_idx << " , child c2 layer " << c2_idx); - - CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); - CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); - - const Layer &parents = c1_layers[c1_idx]; - const Layer &children = c2_layers[c2_idx]; - - CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); - CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); - - std::vector child_scalars; - 
fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c2, - children, - child_scalars); - - const bool valid = validate_layer( - m_curve_trees.m_c1, - parents, - child_scalars, - m_curve_trees.m_c1_width); + CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high"); + const auto &c1_layer = m_tree.c1_layers[c1_idx]; - CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + CHECK_AND_ASSERT_THROW_MES(c1_layer.size() >= new_total_parents, "not enough c1 parents"); - --c1_idx; + last_hashes.c1_last_hashes.push_back(c1_layer[new_total_parents - 1]); + ++c1_idx; } parent_is_c2 = !parent_is_c2; } - MDEBUG("Validating leaves"); - - // Now validate leaves - return validate_layer(m_curve_trees.m_c2, - c2_layers[0], - m_curve_trees.flatten_leaves(std::move(leaves)), - m_curve_trees.m_leaf_layer_chunk_width); + return last_hashes; } //---------------------------------------------------------------------------------------------------------------------- // Logging helpers @@ -709,7 +975,7 @@ void CurveTreesGlobalTree::log_tree() for (std::size_t i = 0; i < m_tree.leaves.size(); ++i) { - const auto &leaf = m_tree.leaves[i]; + const auto leaf = m_curve_trees.leaf_tuple(m_tree.leaves[i]); const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); @@ -753,190 +1019,6 @@ void CurveTreesGlobalTree::log_tree() } //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- -// Test helpers -//---------------------------------------------------------------------------------------------------------------------- -static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, - const std::size_t old_n_leaf_tuples, - const std::size_t new_n_leaf_tuples) -{ - 
std::vector outs; - outs.reserve(new_n_leaf_tuples); - - for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) - { - const std::uint64_t output_id = old_n_leaf_tuples + i; - - // Generate random output tuple - crypto::secret_key o,c; - crypto::public_key O,C; - crypto::generate_keys(O, o, o, false); - crypto::generate_keys(C, c, c, false); - - rct::key C_key = rct::pk2rct(C); - auto output_pair = fcmp_pp::curve_trees::OutputPair{ - .output_pubkey = std::move(O), - .commitment = std::move(C_key) - }; - - auto output_context = fcmp_pp::curve_trees::OutputContext{ - .output_id = output_id, - .output_pair = std::move(output_pair) - }; - - outs.emplace_back(std::move(output_context)); - } - - return outs; -} -//---------------------------------------------------------------------------------------------------------------------- -static const Selene::Scalar generate_random_selene_scalar() -{ - crypto::secret_key s; - crypto::public_key S; - - crypto::generate_keys(S, s, s, false); - - rct::key S_x; - CHECK_AND_ASSERT_THROW_MES(fcmp_pp::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); - return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x); -} -//---------------------------------------------------------------------------------------------------------------------- -static bool grow_tree_in_memory(CurveTreesV1 &curve_trees, - CurveTreesGlobalTree &global_tree, - const std::size_t expected_old_n_leaf_tuples, - const std::size_t new_n_leaf_tuples) -{ - // Do initial tree reads - const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); - CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); - const CurveTreesV1::LastHashes last_hashes = global_tree.get_last_hashes(); - - global_tree.log_last_hashes(last_hashes); - - auto new_outputs = generate_random_leaves(curve_trees, old_n_leaf_tuples, new_n_leaf_tuples); - - // Get a tree extension object to the existing tree using randomly 
generated leaves - // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves - const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples, - last_hashes, - std::move(new_outputs)); - - global_tree.log_tree_extension(tree_extension); - - // Use the tree extension to extend the existing tree - global_tree.extend_tree(tree_extension); - - global_tree.log_tree(); - - // Validate tree structure and all hashes - const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples + new_n_leaf_tuples; - return global_tree.audit_tree(expected_n_leaf_tuples); -} -//---------------------------------------------------------------------------------------------------------------------- -static bool trim_tree_in_memory(const std::size_t expected_old_n_leaf_tuples, - const std::size_t trim_n_leaf_tuples, - CurveTreesGlobalTree &global_tree) -{ - const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples(); - CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); - CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); - CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); - - // Trim the global tree by `trim_n_leaf_tuples` - LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree with " - << old_n_leaf_tuples << " leaves in memory"); - - global_tree.trim_tree(trim_n_leaf_tuples); - - MDEBUG("Finished trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); - - global_tree.log_tree(); - - const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples - trim_n_leaf_tuples; - bool res = global_tree.audit_tree(expected_n_leaf_tuples); - CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); - - MDEBUG("Successfully trimmed " << trim_n_leaf_tuples << " leaves in memory"); - return true; -} 
-//---------------------------------------------------------------------------------------------------------------------- -static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples, - const std::size_t n_leaves, - std::shared_ptr curve_trees, - unit_test::BlockchainLMDBTest &test_db) -{ - cryptonote::db_wtxn_guard guard(test_db.m_db); - - CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), - false, "unexpected starting n leaf tuples in db"); - - auto leaves = generate_random_leaves(*curve_trees, 0, n_leaves); - - test_db.m_db->grow_tree(std::move(leaves)); - - return test_db.m_db->audit_tree(expected_old_n_leaf_tuples + n_leaves); -} -//---------------------------------------------------------------------------------------------------------------------- -static bool trim_tree_db(const std::size_t expected_old_n_leaf_tuples, - const std::size_t trim_leaves, - unit_test::BlockchainLMDBTest &test_db) -{ - cryptonote::db_wtxn_guard guard(test_db.m_db); - - CHECK_AND_ASSERT_THROW_MES(expected_old_n_leaf_tuples >= trim_leaves, "cannot trim more leaves than exist"); - CHECK_AND_ASSERT_THROW_MES(trim_leaves > 0, "must be trimming some leaves"); - - LOG_PRINT_L1("Trimming " << trim_leaves << " leaf tuples from tree with " - << expected_old_n_leaf_tuples << " leaves in db"); - - CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), - false, "trimming unexpected starting n leaf tuples in db"); - - // Can use 0 for trim_block_id since it's unused in tests - test_db.m_db->trim_tree(trim_leaves, 0); - CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(expected_old_n_leaf_tuples - trim_leaves), false, - "failed to trim tree in db"); - - MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); - - return true; -} -//---------------------------------------------------------------------------------------------------------------------- -#define 
INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth) \ - static_assert(helios_chunk_width > 1, "helios width must be > 1"); \ - static_assert(selene_chunk_width > 1, "selene width must be > 1"); \ - const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); \ - \ - /* Number of leaves required for tree to reach given depth */ \ - std::size_t min_leaves_needed_for_tree_depth = selene_chunk_width; \ - for (std::size_t i = 1; i < tree_depth; ++i) \ - { \ - const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; \ - min_leaves_needed_for_tree_depth *= width; \ - } \ - \ - /* Increment to test for off-by-1 */ \ - ++min_leaves_needed_for_tree_depth; \ - \ - unit_test::BlockchainLMDBTest test_db; \ -//---------------------------------------------------------------------------------------------------------------------- -#define BEGIN_INIT_TREE_ITER(curve_trees) \ - for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) \ - { \ - LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves"); \ - \ - /* Init tree in memory */ \ - CurveTreesGlobalTree global_tree(*curve_trees); \ - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, global_tree, 0, init_leaves)); \ - \ - /* Init tree in db */ \ - INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees); \ - ASSERT_TRUE(grow_tree_db(0, init_leaves, curve_trees, test_db)); \ -//---------------------------------------------------------------------------------------------------------------------- -#define END_INIT_TREE_ITER(curve_trees) \ - }; \ -//---------------------------------------------------------------------------------------------------------------------- -//---------------------------------------------------------------------------------------------------------------------- // Test 
//---------------------------------------------------------------------------------------------------------------------- TEST(curve_trees, grow_tree) @@ -961,7 +1043,7 @@ TEST(curve_trees, grow_tree) // Tree in memory // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves, ext_leaves)); + ASSERT_TRUE(tree_copy.grow_tree(init_leaves, ext_leaves)); // Tree in db // Copy the already existing db @@ -998,7 +1080,7 @@ TEST(curve_trees, trim_tree) // Tree in memory // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); + ASSERT_TRUE(tree_copy.trim_tree(init_leaves, trim_leaves)); // Tree in db // Copy the already existing db @@ -1038,8 +1120,8 @@ TEST(curve_trees, trim_tree_then_grow) // Tree in memory // Copy the already existing global tree CurveTreesGlobalTree tree_copy(global_tree); - ASSERT_TRUE(trim_tree_in_memory(init_leaves, trim_leaves, tree_copy)); - ASSERT_TRUE(grow_tree_in_memory(*curve_trees, tree_copy, init_leaves - trim_leaves, grow_after_trim)); + ASSERT_TRUE(tree_copy.trim_tree(init_leaves, trim_leaves)); + ASSERT_TRUE(tree_copy.grow_tree(init_leaves - trim_leaves, grow_after_trim)); // Tree in db // Copy the already existing db diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h index 2cdf07ae3eb..4706a50581d 100644 --- a/tests/unit_tests/curve_trees.h +++ b/tests/unit_tests/curve_trees.h @@ -30,11 +30,32 @@ #include "fcmp_pp/curve_trees.h" #include "fcmp_pp/tower_cycle.h" +#include "unit_tests_utils.h" using Helios = fcmp_pp::curve_trees::Helios; using Selene = fcmp_pp::curve_trees::Selene; using CurveTreesV1 = fcmp_pp::curve_trees::CurveTreesV1; +//---------------------------------------------------------------------------------------------------------------------- +#define INIT_CURVE_TREES_TEST(helios_chunk_width, 
selene_chunk_width, tree_depth) \ + static_assert(helios_chunk_width > 1, "helios width must be > 1"); \ + static_assert(selene_chunk_width > 1, "selene width must be > 1"); \ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); \ + \ + /* Number of leaves required for tree to reach given depth */ \ + std::size_t min_leaves_needed_for_tree_depth = selene_chunk_width; \ + for (std::size_t i = 1; i < tree_depth; ++i) \ + { \ + const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; \ + min_leaves_needed_for_tree_depth *= width; \ + } \ + \ + /* Increment to test for off-by-1 */ \ + ++min_leaves_needed_for_tree_depth; \ + \ + unit_test::BlockchainLMDBTest test_db; \ +//---------------------------------------------------------------------------------------------------------------------- + // Helper class to read/write a global tree in memory. It's only used in testing because normally the tree isn't kept // in memory (it's stored in the db) class CurveTreesGlobalTree @@ -50,7 +71,7 @@ class CurveTreesGlobalTree // A complete tree, useful for testing (don't want to keep the whole tree in memory during normal operation) struct Tree final { - std::vector leaves; + std::vector leaves; std::vector> c1_layers; std::vector> c2_layers; }; @@ -60,25 +81,30 @@ class CurveTreesGlobalTree // Read the in-memory tree and get the number of leaf tuples std::size_t get_num_leaf_tuples() const; - // Read the in-memory tree and get the last hashes from each layer in the tree - CurveTreesV1::LastHashes get_last_hashes() const; + // Grow tree by provided new_n_leaf_tuples + bool grow_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t new_n_leaf_tuples); + + // Trim the provided number of leaf tuples from the tree + bool trim_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_n_leaf_tuples); + + // Validate the in-memory tree by re-hashing every layer, starting from root and 
working down to leaf layer + bool audit_tree(const std::size_t expected_n_leaf_tuples) const; + // Get the path in the tree of the provided leaf idx + fcmp_pp::curve_trees::PathV1 get_path_at_leaf_idx(const std::size_t leaf_idx) const; + + // Hint: use num leaf tuples in the tree to determine the type + std::array get_tree_root() const; + +private: // Use the tree extension to extend the in-memory tree void extend_tree(const CurveTreesV1::TreeExtension &tree_extension); // Use the tree reduction to reduce the in-memory tree void reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction); - // Trim the provided number of leaf tuples from the tree - void trim_tree(const std::size_t trim_n_leaf_tuples); - - // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer - bool audit_tree(const std::size_t expected_n_leaf_tuples); - - // logging helpers - void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes); - void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); - void log_tree(); + // Read the in-memory tree and get the last hashes from each layer in the tree + CurveTreesV1::LastHashes get_last_hashes() const; // Read the in-memory tree and get data from what will be the last chunks after trimming the tree to the provided // number of leaves @@ -90,6 +116,11 @@ class CurveTreesGlobalTree CurveTreesV1::LastChunkChildrenToTrim get_all_last_chunk_children_to_trim( const std::vector &trim_instructions); + // logging helpers + void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes); + void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); + void log_tree(); + private: CurveTreesV1 &m_curve_trees; Tree m_tree = Tree{}; diff --git a/tests/unit_tests/fcmp_pp.cpp b/tests/unit_tests/fcmp_pp.cpp new file mode 100644 index 00000000000..fc879a8248a --- /dev/null +++ b/tests/unit_tests/fcmp_pp.cpp @@ -0,0 +1,64 @@ +// Copyright (c) 2014, The Monero Project +// +// All 
rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "gtest/gtest.h" + +#include "cryptonote_basic/cryptonote_format_utils.h" +#include "curve_trees.h" +#include "misc_log_ex.h" +#include "ringct/rctOps.h" + + +//---------------------------------------------------------------------------------------------------------------------- +TEST(fcmp_pp, prove) +{ + static const std::size_t helios_chunk_width = fcmp_pp::curve_trees::HELIOS_CHUNK_WIDTH; + static const std::size_t selene_chunk_width = fcmp_pp::curve_trees::SELENE_CHUNK_WIDTH; + + static const std::size_t tree_depth = 3; + + LOG_PRINT_L1("Test prove with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + LOG_PRINT_L1("Initializing tree with " << min_leaves_needed_for_tree_depth << " leaves"); + + // Init tree in memory + CurveTreesGlobalTree global_tree(*curve_trees); + ASSERT_TRUE(global_tree.grow_tree(0, min_leaves_needed_for_tree_depth)); + + LOG_PRINT_L1("Finished initializing tree with " << min_leaves_needed_for_tree_depth << " leaves"); + + // Create proof for every leaf in the tree + for (std::size_t leaf_idx = 0; leaf_idx < global_tree.get_num_leaf_tuples(); ++leaf_idx) + { + const auto path = global_tree.get_path_at_leaf_idx(leaf_idx); + } +} +//---------------------------------------------------------------------------------------------------------------------- From 1241172c5fe332f5e8c7ffbbc06b743bda2c42e6 Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 7 Oct 2024 15:22:52 -0700 Subject: [PATCH 126/127] fcmp++: fix trim_tree get_trim_layer_instructions logic err - Cleanly separate logic to set the hash_offset that we use when calling hash_trim and hash_grow from the logic used to determine which old child values we need from the tree - The core logic error was not properly setting the range of children needed from the tree when need_last_chunk_remaining_children is true. 
The fix makes sure to use the correct range, and to set hash_offset appropriately for eveery case. - In the case that get_next_layer_reduction doesn't actually need to do any hashing, only tell the caller to trim to boundary, the function now short-circuits and doesn't continue with hashing --- src/fcmp_pp/curve_trees.cpp | 55 +++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp index d8b08085d0a..f4701d6f97a 100644 --- a/src/fcmp_pp/curve_trees.cpp +++ b/src/fcmp_pp/curve_trees.cpp @@ -556,13 +556,32 @@ static TrimLayerInstructions get_trim_layer_instructions( // hashed for the first time, and so we don't need the existing last hash in that case, even if the hash is updating const bool need_existing_last_hash = update_existing_last_hash && !need_last_chunk_remaining_children; - // We need to decrement the offset we use to hash the chunk if the last child is changing - std::size_t hash_offset = new_offset; - if (last_child_will_change) + // Set the hash_offset to use when calling hash_grow or hash_trim + std::size_t hash_offset = 0; + if (need_last_chunk_children_to_trim) + { + CHECK_AND_ASSERT_THROW_MES(new_offset > 0, "new_offset must be > 0 when trimming last chunk children"); + hash_offset = new_offset; + + if (last_child_will_change) + { + // We decrement the offset we use to hash the chunk if the last child is changing, since we're going to + // use the old value of the last child when trimming + --hash_offset; + } + } + else if (need_last_chunk_remaining_children) + { + // If we're trimming using remaining children, then we're just going to call hash_grow with offset 0 + hash_offset = 0; + } + else if (last_child_will_change) { - hash_offset = hash_offset == 0 + // We're not trimming at all in this case, we're only updating the existing last hash with hash_trim. 
We need + // hash_offset to be equal to 1 - this existing last hash's position + hash_offset = new_offset == 0 ? (parent_chunk_width - 1) // chunk is full, so decrement full width by 1 - : (hash_offset - 1); + : (new_offset - 1); } // Set the child index range so the caller knows which children to read from the tree @@ -580,15 +599,17 @@ static TrimLayerInstructions get_trim_layer_instructions( else if (need_last_chunk_remaining_children) { // We'll call hash_grow with the remaining children between [0, offset] - CHECK_AND_ASSERT_THROW_MES(new_total_children >= hash_offset, "hash_offset is unexpectedly high"); - start_trim_idx = new_total_children - hash_offset; + CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "new_offset is unexpectedly high"); + start_trim_idx = new_total_children - new_offset; end_trim_idx = new_total_children; - } - // If we're trimming using remaining children, then we're just going to call hash_grow with offset 0 - if (need_last_chunk_remaining_children) - { - hash_offset = 0; + if (last_child_will_change) + { + // We don't need the last old child if it's changing, we'll just use its new value. Decrement the + // end_trim_idx by 1 so we know not to read and use the last old child from the tree in this case. 
+ CHECK_AND_ASSERT_THROW_MES(end_trim_idx > 0, "end_trim_idx cannot be 0"); + --end_trim_idx; + } } MDEBUG("parent_chunk_width: " << parent_chunk_width @@ -639,6 +660,16 @@ static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_re layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents; layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash; + if (!trim_layer_instructions.need_last_chunk_children_to_trim && + !trim_layer_instructions.need_last_chunk_remaining_children && + !trim_layer_instructions.need_new_last_child) + { + // In this case we're just trimming to the boundary, and don't need to get a new hash + CHECK_AND_ASSERT_THROW_MES(!layer_reduction_out.update_existing_last_hash, "unexpected update last hash"); + MDEBUG("Trimming to chunk boundary"); + return layer_reduction_out; + } + if (trim_layer_instructions.need_existing_last_hash) CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash"); From 3ac3e53c54ee2e53adbf28e9cc6e8ab0d37a0a3f Mon Sep 17 00:00:00 2001 From: j-berman Date: Mon, 21 Oct 2024 10:10:53 -0700 Subject: [PATCH 127/127] fcmp++: fix migration batching to resize db as needed - batch_start is the simplest function to use to resize db, since resizing requires no active txns. - batch_stop makes sure no active txns. - need to decrement txns before calling migrate() so that do_resize does not deadlock in wait_no_active_txns --- src/blockchain_db/lmdb/db_lmdb.cpp | 48 +++++++++++++++--------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 765e7312b4e..0def1e70511 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -2642,7 +2642,10 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) // We don't handle the old format previous to that commit. 
txn.commit(); m_open = true; + // Decrement num active txs so db can resize if needed + mdb_txn_safe::increment_txns(-1); migrate(db_version); + mdb_txn_safe::increment_txns(1); return; } #endif @@ -6838,7 +6841,9 @@ void BlockchainLMDB::migrate_5_6() if (!m_batch_transactions) set_batch_transactions(true); const std::size_t BATCH_SIZE = 10000; - batch_start(BATCH_SIZE); + // Since step 3/3 in migration deletes block info records from the db, can't use num blocks batch size, otherwise + // get_estimated_batch_size can fail to read block weight by height + batch_start(); txn.m_txn = m_write_txn->m_txn; /* the spent_keys table name is the same but the old version and new version @@ -6883,13 +6888,10 @@ void BlockchainLMDB::migrate_5_6() std::cout << i << " / " << n_key_images << " key images (" << percent << "% of step 1/3) \r" << std::flush; } - txn.commit(); - result = mdb_txn_begin(m_env, NULL, 0, txn); - if (result) - throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); - m_write_txn->m_txn = txn.m_txn; - m_write_batch_txn->m_txn = txn.m_txn; - memset(&m_wcursors, 0, sizeof(m_wcursors)); + // Start a new batch so resizing can occur as needed + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; } // Open all cursors @@ -6949,7 +6951,7 @@ void BlockchainLMDB::migrate_5_6() if (!m_batch_transactions) set_batch_transactions(true); const std::size_t BATCH_SIZE = 10000; - batch_start(BATCH_SIZE); + batch_start(); txn.m_txn = m_write_txn->m_txn; // Use this cache to know how to restart the migration if the process is killed @@ -6981,13 +6983,15 @@ void BlockchainLMDB::migrate_5_6() throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); // Commit and start a new txn - txn.commit(); - result = mdb_txn_begin(m_env, NULL, 0, txn); - if (result) - throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); - m_write_txn->m_txn = txn.m_txn; - 
m_write_batch_txn->m_txn = txn.m_txn; - memset(&m_wcursors, 0, sizeof(m_wcursors)); + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + // Reset k and v so we continue migration from the last output + k = {sizeof(last_output.amount), (void *)&last_output.amount}; + + const std::size_t outkey_size = (last_output.amount == 0) ? sizeof(outkey) : sizeof(pre_rct_outkey); + v = {outkey_size, (void *)&last_output.ok}; } // Open all cursors @@ -7127,7 +7131,7 @@ void BlockchainLMDB::migrate_5_6() if (!m_batch_transactions) set_batch_transactions(true); const std::size_t BATCH_SIZE = 50; - batch_start(BATCH_SIZE); + batch_start(); txn.m_txn = m_write_txn->m_txn; /* the block_info table name is the same but the old version and new version @@ -7155,13 +7159,9 @@ void BlockchainLMDB::migrate_5_6() std::cout << i << " / " << n_blocks << " blocks (" << percent << "% of step 3/3) \r" << std::flush; } - txn.commit(); - result = mdb_txn_begin(m_env, NULL, 0, txn); - if (result) - throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); - m_write_txn->m_txn = txn.m_txn; - m_write_batch_txn->m_txn = txn.m_txn; - memset(&m_wcursors, 0, sizeof(m_wcursors)); + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; } // Open all cursors