diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9cc619688a8..6ef35c4304e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -215,7 +215,7 @@ jobs: - run: "echo Full E2E tests enabled." network-test-gate: - needs: [build, configure] + needs: [build, yarn-project-test, configure] if: github.ref_name == 'master' || contains(github.event.pull_request.labels.*.name, 'network-all') runs-on: ubuntu-20.04 steps: @@ -684,7 +684,7 @@ jobs: run: sudo shutdown -P 60 ; earthly-ci --no-output ./yarn-project/+network-test --test=./test-transfer.sh network-full-test: - needs: [network-test-gate, configure] + needs: [network-test-gate, network-transfer-test, configure] runs-on: ${{ needs.configure.outputs.username }}-x86 strategy: max-parallel: 1 diff --git a/.noir-sync-commit b/.noir-sync-commit index c4b6d336e04..b63a49f65c0 100644 --- a/.noir-sync-commit +++ b/.noir-sync-commit @@ -1 +1 @@ -b88db67a4fa92f861329105fb732a7b1309620fe +eb975ab28fb056cf92859377c02f2bb1a608eda3 diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp index a24d4131a93..de98846ab47 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp @@ -59,7 +59,7 @@ template struct MegaTraceBlockData { }; } - auto get_gate_blocks() + auto get_gate_blocks() const { return RefArray{ busread, arithmetic, delta_range, elliptic, aux, poseidon2_external, poseidon2_internal, lookup diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/ultra_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/ultra_execution_trace.hpp index 92a2c5f8d6b..ccf81b1645c 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/ultra_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/ultra_execution_trace.hpp @@ -31,7 +31,7 @@ template struct UltraTraceBlockData { lookup, poseidon2_external, poseidon2_internal, overflow }; } - auto get_gate_blocks() + auto get_gate_blocks() const { return RefArray{ arithmetic, delta_range, elliptic, aux, lookup, poseidon2_external, poseidon2_internal }; } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp index e82b2ccacf9..246c6263200 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp @@ -30,6 +30,142 @@ template size_t DeciderProvingKey_::compute_dyadi return circuit.get_circuit_subgroup_size(total_num_gates); } +template void DeciderProvingKey_::allocate_wires() +{ + PROFILE_THIS_NAME("allocate_wires"); + + for (auto& wire : proving_key.polynomials.get_wires()) { + wire = Polynomial::shiftable(proving_key.circuit_size); + } +} + +template void DeciderProvingKey_::allocate_permutation_argument_polynomials() +{ + PROFILE_THIS_NAME("allocate_permutation_argument_polynomials"); + + for (auto& sigma : proving_key.polynomials.get_sigmas()) { + sigma = Polynomial(proving_key.circuit_size); + } + for (auto& id : proving_key.polynomials.get_ids()) { + id = Polynomial(proving_key.circuit_size); + } + proving_key.polynomials.z_perm = 
Polynomial::shiftable(proving_key.circuit_size);
+}
+
+template <IsUltraFlavor Flavor> void DeciderProvingKey_<Flavor>::allocate_lagrange_polynomials()
+{
+    PROFILE_THIS_NAME("allocate_lagrange_polynomials");
+
+    // First and last lagrange polynomials (in the full circuit size)
+    proving_key.polynomials.lagrange_first = Polynomial(
+        /* size=*/1, /*virtual size=*/dyadic_circuit_size, /*start_index=*/0);
+
+    // Even though lagrange_last has a single non-zero element, we cannot set its size to 0 as different
+    // keys being folded might have lagrange_last set at different indexes and folding does not work
+    // correctly unless the polynomial is allocated in the correct range to accommodate this
+    proving_key.polynomials.lagrange_last = Polynomial(
+        /* size=*/dyadic_circuit_size, /*virtual size=*/dyadic_circuit_size, /*start_index=*/0);
+}
+
+template <IsUltraFlavor Flavor> void DeciderProvingKey_<Flavor>::allocate_selectors(const Circuit& circuit)
+{
+    PROFILE_THIS_NAME("allocate_selectors");
+
+    // Define gate selectors over the block they are isolated to
+    for (auto [selector, block] :
+         zip_view(proving_key.polynomials.get_gate_selectors(), circuit.blocks.get_gate_blocks())) {
+
+        // TODO(https://github.com/AztecProtocol/barretenberg/issues/914): q_arith is currently used
+        // in aux block.
+        if (&block == &circuit.blocks.arithmetic) {
+            size_t arith_size = circuit.blocks.aux.trace_offset - circuit.blocks.arithmetic.trace_offset +
+                                circuit.blocks.aux.get_fixed_size(is_structured);
+            selector = Polynomial(arith_size, proving_key.circuit_size, circuit.blocks.arithmetic.trace_offset);
+        } else {
+            selector = Polynomial(block.get_fixed_size(is_structured), proving_key.circuit_size, block.trace_offset);
+        }
+    }
+
+    // Set the other non-gate selector polynomials (e.g. q_l, q_r, q_m etc.) to full size
+    for (auto& selector : proving_key.polynomials.get_non_gate_selectors()) {
+        selector = Polynomial(proving_key.circuit_size);
+    }
+}
+
+template <IsUltraFlavor Flavor>
+void DeciderProvingKey_<Flavor>::allocate_table_lookup_polynomials(const Circuit& circuit)
+{
+    PROFILE_THIS_NAME("allocate_table_lookup_polynomials");
+
+    const size_t max_tables_size = std::min(static_cast<size_t>(MAX_LOOKUP_TABLES_SIZE), dyadic_circuit_size - 1);
+    size_t table_offset = dyadic_circuit_size - max_tables_size;
+    ASSERT(dyadic_circuit_size > max_tables_size);
+
+    // Allocate the polynomials containing the actual table data
+    if constexpr (IsUltraFlavor<Flavor>) {
+        for (auto& poly : proving_key.polynomials.get_tables()) {
+            poly = Polynomial(max_tables_size, dyadic_circuit_size, table_offset);
+        }
+    }
+
+    // Allocate the read counts and tags polynomials
+    proving_key.polynomials.lookup_read_counts = Polynomial(max_tables_size, dyadic_circuit_size, table_offset);
+    proving_key.polynomials.lookup_read_tags = Polynomial(max_tables_size, dyadic_circuit_size, table_offset);
+    ZoneScopedN("allocating lookup and databus inverses");
+
+    // Allocate the log derivative lookup argument inverse polynomial
+    const size_t lookup_offset = static_cast<size_t>(circuit.blocks.lookup.trace_offset);
+    const size_t lookup_inverses_start = std::min(lookup_offset, table_offset);
+    const size_t lookup_inverses_end =
+        std::min(dyadic_circuit_size,
+                 std::max(lookup_offset + circuit.blocks.lookup.get_fixed_size(is_structured),
+                          table_offset + MAX_LOOKUP_TABLES_SIZE));
+    proving_key.polynomials.lookup_inverses =
+        Polynomial(lookup_inverses_end - lookup_inverses_start, dyadic_circuit_size, lookup_inverses_start);
+}
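The allocators above lean on the three-argument Polynomial constructor: only `size` coefficients are actually backed by memory, while the object presents a `virtual_size`-long view whose backed window begins at `start_index`. A minimal sketch of that idea with toy types (not the real Polynomial class, just the memory-saving pattern it implements):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy stand-in for the (size, virtual_size, start_index) Polynomial constructor used
// above: only `size` coefficients are heap-allocated, and reads outside the backed
// window return zero, matching a polynomial that vanishes outside its trace block.
struct ToySpanPoly {
    std::vector<long> coeffs; // backing store of length `size`
    std::size_t virtual_size;
    std::size_t start_index;
    ToySpanPoly(std::size_t size, std::size_t virtual_sz, std::size_t start)
        : coeffs(size, 0)
        , virtual_size(virtual_sz)
        , start_index(start)
    {}
    long get(std::size_t i) const
    {
        assert(i < virtual_size);
        if (i < start_index || i >= start_index + coeffs.size()) {
            return 0; // not backed by memory; implicitly zero
        }
        return coeffs[i - start_index];
    }
};

int main()
{
    // e.g. an 8-entry table region packed at the top of a 2^10-row trace
    ToySpanPoly tables(/*size=*/8, /*virtual_size=*/1 << 10, /*start_index=*/(1 << 10) - 8);
    assert(tables.get(0) == 0);             // outside the backed window
    assert(tables.get((1 << 10) - 1) == 0); // inside the window, zero-initialized
}

This is also why the lagrange_last comment above matters: because folded keys may place its single non-zero entry at different indices, lagrange_last must stay backed over the whole range even though only one coefficient is ever non-zero.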
PROFILE_THIS_NAME("allocate_ecc_op_polynomials"); + + // Allocate the ecc op wires and selector + const size_t ecc_op_block_size = circuit.blocks.ecc_op.get_fixed_size(is_structured); + const size_t op_wire_offset = circuit.blocks.ecc_op.trace_offset; + for (auto& wire : proving_key.polynomials.get_ecc_op_wires()) { + wire = Polynomial(ecc_op_block_size, proving_key.circuit_size, op_wire_offset); + } + proving_key.polynomials.lagrange_ecc_op = Polynomial(ecc_op_block_size, proving_key.circuit_size, op_wire_offset); +} + +template +void DeciderProvingKey_::allocate_databus_polynomials(const Circuit& circuit) + requires HasDataBus +{ + proving_key.polynomials.calldata = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.calldata_read_counts = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.calldata_read_tags = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.secondary_calldata = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.secondary_calldata_read_counts = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.secondary_calldata_read_tags = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.return_data = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.return_data_read_counts = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + proving_key.polynomials.return_data_read_tags = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + + proving_key.polynomials.databus_id = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); + + // Allocate log derivative lookup argument inverse polynomials + const size_t q_busread_end = + circuit.blocks.busread.trace_offset + circuit.blocks.busread.get_fixed_size(is_structured); + proving_key.polynomials.calldata_inverses = + Polynomial(std::max(circuit.get_calldata().size(), q_busread_end), dyadic_circuit_size); + proving_key.polynomials.secondary_calldata_inverses = + Polynomial(std::max(circuit.get_secondary_calldata().size(), q_busread_end), dyadic_circuit_size); + proving_key.polynomials.return_data_inverses = + Polynomial(std::max(circuit.get_return_data().size(), q_busread_end), dyadic_circuit_size); +} + /** * @brief * @details @@ -39,7 +175,7 @@ template size_t DeciderProvingKey_::compute_dyadi */ template void DeciderProvingKey_::construct_databus_polynomials(Circuit& circuit) - requires IsMegaFlavor + requires HasDataBus { auto& calldata_poly = proving_key.polynomials.calldata; auto& calldata_read_counts = proving_key.polynomials.calldata_read_counts; @@ -51,9 +187,9 @@ void DeciderProvingKey_::construct_databus_polynomials(Circuit& circuit) auto& return_data_read_counts = proving_key.polynomials.return_data_read_counts; auto& return_data_read_tags = proving_key.polynomials.return_data_read_tags; - auto calldata = circuit.get_calldata(); - auto secondary_calldata = circuit.get_secondary_calldata(); - auto return_data = circuit.get_return_data(); + const auto& calldata = circuit.get_calldata(); + const auto& secondary_calldata = circuit.get_secondary_calldata(); + const auto& return_data = circuit.get_return_data(); // Note: We do not utilize a zero row for databus columns for (size_t idx = 0; idx < calldata.size(); ++idx) { diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp index b96540d13a8..0fdfbfe7e16 100644 --- 
a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp @@ -98,8 +98,8 @@ template class DeciderProvingKey_ { if constexpr (IsMegaFlavor) { circuit.op_queue->append_nonzero_ops(); } + vinfo("allocating polynomials object in proving key..."); { - PROFILE_THIS_NAME("allocating proving key"); proving_key = ProvingKey(dyadic_circuit_size, circuit.public_inputs.size(), commitment_key); @@ -109,161 +109,24 @@ template class DeciderProvingKey_ { if ((IsMegaFlavor && !is_structured) || (is_structured && circuit.blocks.has_overflow)) { // Allocate full size polynomials proving_key.polynomials = typename Flavor::ProverPolynomials(dyadic_circuit_size); - vinfo("allocated polynomials object in proving key"); } else { // Allocate only a correct amount of memory for each polynomial - // Allocate the wires and selectors polynomials - { - PROFILE_THIS_NAME("allocating wires"); - - for (auto& wire : proving_key.polynomials.get_wires()) { - wire = Polynomial::shiftable(proving_key.circuit_size); - } - } - { - PROFILE_THIS_NAME("allocating gate selectors"); - - // Define gate selectors over the block they are isolated to - for (auto [selector, block] : - zip_view(proving_key.polynomials.get_gate_selectors(), circuit.blocks.get_gate_blocks())) { - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/914): q_arith is currently used - // in aux block. - if (&block == &circuit.blocks.arithmetic) { - size_t arith_size = circuit.blocks.aux.trace_offset - - circuit.blocks.arithmetic.trace_offset + - circuit.blocks.aux.get_fixed_size(is_structured); - selector = Polynomial( - arith_size, proving_key.circuit_size, circuit.blocks.arithmetic.trace_offset); - } else { - selector = Polynomial( - block.get_fixed_size(is_structured), proving_key.circuit_size, block.trace_offset); - } - } - } - { - PROFILE_THIS_NAME("allocating non-gate selectors"); + allocate_wires(); - // Set the other non-gate selector polynomials to full size - for (auto& selector : proving_key.polynomials.get_non_gate_selectors()) { - selector = Polynomial(proving_key.circuit_size); - } - } - if constexpr (IsMegaFlavor) { - PROFILE_THIS_NAME("allocating ecc op wires and selector"); - - // Allocate the ecc op wires and selector - const size_t ecc_op_block_size = circuit.blocks.ecc_op.get_fixed_size(is_structured); - const size_t op_wire_offset = Flavor::has_zero_row ? 
1 : 0; - for (auto& wire : proving_key.polynomials.get_ecc_op_wires()) { - wire = Polynomial(ecc_op_block_size, proving_key.circuit_size, op_wire_offset); - } - proving_key.polynomials.lagrange_ecc_op = - Polynomial(ecc_op_block_size, proving_key.circuit_size, op_wire_offset); - } + allocate_permutation_argument_polynomials(); - if constexpr (HasDataBus) { - proving_key.polynomials.calldata = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.calldata_read_counts = - Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.calldata_read_tags = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.secondary_calldata = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.secondary_calldata_read_counts = - Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.secondary_calldata_read_tags = - Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.return_data = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.return_data_read_counts = - Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - proving_key.polynomials.return_data_read_tags = - Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - - proving_key.polynomials.databus_id = Polynomial(MAX_DATABUS_SIZE, proving_key.circuit_size); - } - const size_t max_tables_size = - std::min(static_cast(MAX_LOOKUP_TABLES_SIZE), dyadic_circuit_size - 1); - size_t table_offset = dyadic_circuit_size - max_tables_size; - { - PROFILE_THIS_NAME("allocating table polynomials"); - - ASSERT(dyadic_circuit_size > max_tables_size); - - // Allocate the table polynomials - if constexpr (IsUltraFlavor) { - for (auto& poly : proving_key.polynomials.get_tables()) { - poly = Polynomial(max_tables_size, dyadic_circuit_size, table_offset); - } - } - } - { - PROFILE_THIS_NAME("allocating sigmas and ids"); - - for (auto& sigma : proving_key.polynomials.get_sigmas()) { - sigma = Polynomial(proving_key.circuit_size); - } - for (auto& id : proving_key.polynomials.get_ids()) { - id = Polynomial(proving_key.circuit_size); - } - } - { - ZoneScopedN("allocating lookup read counts and tags"); - // Allocate the read counts and tags polynomials - proving_key.polynomials.lookup_read_counts = - Polynomial(max_tables_size, dyadic_circuit_size, table_offset); - proving_key.polynomials.lookup_read_tags = - Polynomial(max_tables_size, dyadic_circuit_size, table_offset); - } - { - ZoneScopedN("allocating lookup and databus inverses"); - // Allocate the lookup_inverses polynomial - const size_t lookup_offset = static_cast(circuit.blocks.lookup.trace_offset); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1033): construct tables and counts - // at top of trace - const size_t table_offset = - dyadic_circuit_size - - std::min(dyadic_circuit_size - 1, static_cast(MAX_LOOKUP_TABLES_SIZE)); - const size_t lookup_inverses_start = std::min(lookup_offset, table_offset); - const size_t lookup_inverses_end = - std::min(dyadic_circuit_size, - std::max(lookup_offset + circuit.blocks.lookup.get_fixed_size(is_structured), - table_offset + MAX_LOOKUP_TABLES_SIZE)); - proving_key.polynomials.lookup_inverses = Polynomial( - lookup_inverses_end - lookup_inverses_start, dyadic_circuit_size, lookup_inverses_start); - if constexpr (HasDataBus) { - const size_t q_busread_end = - circuit.blocks.busread.trace_offset + circuit.blocks.busread.get_fixed_size(is_structured); - // Allocate the databus 
inverse polynomials - proving_key.polynomials.calldata_inverses = - Polynomial(std::max(circuit.get_calldata().size(), q_busread_end), dyadic_circuit_size); - proving_key.polynomials.secondary_calldata_inverses = Polynomial( - std::max(circuit.get_secondary_calldata().size(), q_busread_end), dyadic_circuit_size); - proving_key.polynomials.return_data_inverses = - Polynomial(std::max(circuit.get_return_data().size(), q_busread_end), dyadic_circuit_size); - } - } - { - PROFILE_THIS_NAME("constructing z_perm"); + allocate_selectors(circuit); - // Allocate the z_perm polynomial - vinfo("constructing z_perm..."); - proving_key.polynomials.z_perm = Polynomial::shiftable(proving_key.circuit_size); - vinfo("done constructing z_perm."); - } + allocate_table_lookup_polynomials(circuit); - { - PROFILE_THIS_NAME("allocating lagrange polynomials"); + allocate_lagrange_polynomials(); - // First and last lagrange polynomials (in the full circuit size) - proving_key.polynomials.lagrange_first = Polynomial( - /* size=*/1, /*virtual size=*/dyadic_circuit_size, /*start_idx=*/0); - - // Even though lagrange_last has a single non-zero element, we cannot set its size to 0 as different - // keys being folded might have lagrange_last set at different indexes and folding does not work - // correctly unless the polynomial is allocated in the correct range to accomodate this - proving_key.polynomials.lagrange_last = Polynomial( - /* size=*/dyadic_circuit_size, /*virtual size=*/dyadic_circuit_size, /*start_idx=*/0); + if constexpr (IsMegaFlavor) { + allocate_ecc_op_polynomials(circuit); + } + if constexpr (HasDataBus) { + allocate_databus_polynomials(circuit); } } - vinfo("allocated polynomials object in proving key"); // We can finally set the shifted polynomials now that all of the to_be_shifted polynomials are // defined. 
proving_key.polynomials.set_shifted(); // Ensure shifted wires are set correctly @@ -272,7 +135,6 @@ template class DeciderProvingKey_ { // Construct and add to proving key the wire, selector and copy constraint polynomials vinfo("populating trace..."); Trace::populate(circuit, proving_key, is_structured); - vinfo("done populating trace."); { PROFILE_THIS_NAME("constructing prover instance after trace populate"); @@ -339,6 +201,22 @@ template class DeciderProvingKey_ { size_t compute_dyadic_size(Circuit&); + void allocate_wires(); + + void allocate_permutation_argument_polynomials(); + + void allocate_lagrange_polynomials(); + + void allocate_selectors(const Circuit&); + + void allocate_table_lookup_polynomials(const Circuit&); + + void allocate_ecc_op_polynomials(const Circuit&) + requires IsMegaFlavor; + + void allocate_databus_polynomials(const Circuit&) + requires HasDataBus; + /** * @brief Compute dyadic size based on a structured trace with fixed block size * @@ -346,7 +224,7 @@ template class DeciderProvingKey_ { size_t compute_structured_dyadic_size(Circuit& circuit) { return circuit.blocks.get_structured_dyadic_size(); } void construct_databus_polynomials(Circuit&) - requires IsMegaFlavor; + requires HasDataBus; static void move_structured_trace_overflow_to_overflow_block(Circuit& circuit); }; diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp index 80a49dafcec..d4359ec0b1c 100644 --- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp @@ -9,6 +9,7 @@ #include "barretenberg/vm/avm/generated/verifier.hpp" #include "barretenberg/vm/avm/trace/common.hpp" #include "barretenberg/vm/avm/trace/deserialization.hpp" +#include "barretenberg/vm/avm/trace/gadgets/merkle_tree.hpp" #include "barretenberg/vm/avm/trace/helper.hpp" #include "barretenberg/vm/avm/trace/instructions.hpp" #include "barretenberg/vm/avm/trace/kernel_trace.hpp" @@ -336,23 +337,33 @@ std::vector Execution::gen_trace(AvmPublicInputs const& public_inputs, if (phase == TxExecutionPhase::SETUP) { vinfo("Inserting non-revertible side effects from private before SETUP phase. 
Checkpointing trees."); // Temporary spot for private non-revertible insertion - std::vector siloed_nullifiers; - siloed_nullifiers.insert( - siloed_nullifiers.end(), - public_inputs.previous_non_revertible_accumulated_data.nullifiers.begin(), - public_inputs.previous_non_revertible_accumulated_data.nullifiers.begin() + - public_inputs.previous_non_revertible_accumulated_data_array_lengths.nullifiers); - trace_builder.insert_private_state(siloed_nullifiers, {}); + auto siloed_nullifiers = + std::vector(public_inputs.previous_non_revertible_accumulated_data.nullifiers.begin(), + public_inputs.previous_non_revertible_accumulated_data.nullifiers.begin() + + public_inputs.previous_non_revertible_accumulated_data_array_lengths.nullifiers); + + auto unique_note_hashes = + std::vector(public_inputs.previous_non_revertible_accumulated_data.note_hashes.begin(), + public_inputs.previous_non_revertible_accumulated_data.note_hashes.begin() + + public_inputs.previous_non_revertible_accumulated_data_array_lengths.note_hashes); + + trace_builder.insert_private_state(siloed_nullifiers, unique_note_hashes); + trace_builder.checkpoint_non_revertible_state(); } else if (phase == TxExecutionPhase::APP_LOGIC) { vinfo("Inserting revertible side effects from private before APP_LOGIC phase"); // Temporary spot for private revertible insertion - std::vector siloed_nullifiers; - siloed_nullifiers.insert(siloed_nullifiers.end(), - public_inputs.previous_revertible_accumulated_data.nullifiers.begin(), - public_inputs.previous_revertible_accumulated_data.nullifiers.begin() + - public_inputs.previous_revertible_accumulated_data_array_lengths.nullifiers); - trace_builder.insert_private_state(siloed_nullifiers, {}); + auto siloed_nullifiers = + std::vector(public_inputs.previous_revertible_accumulated_data.nullifiers.begin(), + public_inputs.previous_revertible_accumulated_data.nullifiers.begin() + + public_inputs.previous_revertible_accumulated_data_array_lengths.nullifiers); + + auto siloed_note_hashes = + std::vector(public_inputs.previous_revertible_accumulated_data.note_hashes.begin(), + public_inputs.previous_revertible_accumulated_data.note_hashes.begin() + + public_inputs.previous_revertible_accumulated_data_array_lengths.note_hashes); + + trace_builder.insert_private_revertible_state(siloed_nullifiers, siloed_note_hashes); } vinfo("Beginning execution of phase ", to_name(phase), " (", public_call_requests.size(), " enqueued calls)."); diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.cpp index 87e46bf7128..f3332c79a69 100644 --- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.cpp @@ -34,6 +34,16 @@ FF AvmMerkleTreeTraceBuilder::unconstrained_silo_note_hash(FF contract_address, return Poseidon2::hash({ GENERATOR_INDEX__SILOED_NOTE_HASH, contract_address, note_hash }); } +FF AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(FF tx_hash, FF note_index_in_tx) +{ + return Poseidon2::hash({ GENERATOR_INDEX__NOTE_HASH_NONCE, tx_hash, note_index_in_tx }); +} + +FF AvmMerkleTreeTraceBuilder::unconstrained_compute_unique_note_hash(FF nonce, FF siloed_note_hash) +{ + return Poseidon2::hash({ GENERATOR_INDEX__UNIQUE_NOTE_HASH, nonce, siloed_note_hash }); +} + FF AvmMerkleTreeTraceBuilder::unconstrained_silo_nullifier(FF contract_address, FF nullifier) { return Poseidon2::hash({ GENERATOR_INDEX__OUTER_NULLIFIER, 
contract_address, nullifier });
 }
 
@@ -78,6 +88,31 @@ FF AvmMerkleTreeTraceBuilder::unconstrained_update_leaf_index(const FF& leaf_val
     return unconstrained_compute_root_from_path(leaf_value, leaf_index, path);
 }
 
+bool AvmMerkleTreeTraceBuilder::assert_public_data_non_membership_check(
+    const PublicDataTreeLeafPreimage& low_leaf_preimage, const FF& leaf_slot)
+{
+    auto low_leaf_slot = uint256_t(low_leaf_preimage.slot);
+    auto low_leaf_next_index = uint256_t(low_leaf_preimage.next_index);
+    auto low_leaf_next_slot = uint256_t(low_leaf_preimage.next_slot);
+
+    auto leaf_slot_value = uint256_t(leaf_slot);
+
+    return low_leaf_slot < leaf_slot_value && (low_leaf_next_index == 0 || low_leaf_next_slot > leaf_slot_value);
+}
+
+bool AvmMerkleTreeTraceBuilder::assert_nullifier_non_membership_check(const NullifierLeafPreimage& low_leaf_preimage,
+                                                                      const FF& siloed_nullifier)
+{
+    auto low_leaf_nullifier = uint256_t(low_leaf_preimage.nullifier);
+    auto low_leaf_next_index = uint256_t(low_leaf_preimage.next_index);
+    auto low_leaf_next_nullifier = uint256_t(low_leaf_preimage.next_nullifier);
+
+    auto siloed_leaf_nullifier = uint256_t(siloed_nullifier);
+
+    return low_leaf_nullifier < siloed_leaf_nullifier &&
+           (low_leaf_next_index == 0 || low_leaf_next_nullifier > siloed_leaf_nullifier);
+}
+
 /**************************************************************************************************
  *                                STORAGE TREE OPERATIONS
  **************************************************************************************************/
@@ -113,6 +148,10 @@ FF AvmMerkleTreeTraceBuilder::perform_storage_write([[maybe_unused]] uint32_t cl
         unconstrained_update_leaf_index(low_preimage_hash, static_cast<uint64_t>(low_index), low_path);
         return tree_snapshots.public_data_tree.root;
     }
+    // Check the low leaf conditions (i.e. the new slot is sandwiched between the low leaf's slot and its next slot,
+    // or the low leaf is the max entry)
+    bool low_leaf_conditions = assert_public_data_non_membership_check(low_preimage, slot);
+    ASSERT(low_leaf_conditions);
     // The new leaf for an insertion is
     PublicDataTreeLeafPreimage new_preimage{
         .slot = slot, .value = value, .next_index = low_preimage.next_index, .next_slot = low_preimage.next_slot
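Both assert_* helpers above encode the standard indexed-Merkle-tree non-membership ("sandwich") predicate: the queried key must lie strictly between the low leaf's key and its successor, or the low leaf must hold the largest key in the tree (next_index == 0). A self-contained sketch with toy 64-bit keys in place of field elements (hypothetical values, same predicate):

#include <cassert>
#include <cstdint>

// Toy low-leaf preimage: one node of the sorted linked list inside an indexed Merkle
// tree. Field elements are replaced by 64-bit integers for illustration.
struct ToyLowLeaf {
    uint64_t key;        // e.g. a storage slot or a siloed nullifier
    uint64_t next_index; // 0 means "no successor": this leaf holds the largest key
    uint64_t next_key;
};

// Same shape of predicate as assert_public_data_non_membership_check /
// assert_nullifier_non_membership_check above.
bool non_membership(const ToyLowLeaf& low, uint64_t target)
{
    return low.key < target && (low.next_index == 0 || low.next_key > target);
}

int main()
{
    ToyLowLeaf low{ 10, 7, 30 };
    assert(non_membership(low, 20));  // 20 is sandwiched between 10 and 30
    assert(!non_membership(low, 10)); // equal to the low key: it IS a member
    assert(!non_membership(low, 40)); // 40 > next_key: the wrong low leaf was hinted
    ToyLowLeaf max_leaf{ 50, 0, 0 };
    assert(non_membership(max_leaf, 60)); // the low leaf is the maximum entry
}

The same predicate is reused below for both the public data tree (keys are slots) and the nullifier tree (keys are siloed nullifiers).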
@@ -165,6 +204,10 @@ FF AvmMerkleTreeTraceBuilder::perform_nullifier_append([[maybe_unused]] uint32_t
         ASSERT(is_member);
         return tree_snapshots.nullifier_tree.root;
     }
+    // Check the low leaf conditions (i.e. the new nullifier is sandwiched between the low leaf's nullifier and its
+    // next nullifier, or the low leaf is the max entry)
+    bool low_leaf_conditions = assert_nullifier_non_membership_check(low_preimage, nullifier);
+    ASSERT(low_leaf_conditions);
     // Check membership of the low leaf
     bool low_leaf_member = unconstrained_check_membership(
         low_preimage_hash, static_cast<uint64_t>(low_index), low_path, tree_snapshots.nullifier_tree.root);
diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.hpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.hpp
index 7b67db89313..581ee0660a5 100644
--- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.hpp
+++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/gadgets/merkle_tree.hpp
@@ -99,8 +99,15 @@ class AvmMerkleTreeTraceBuilder {
     static FF unconstrained_hash_public_data_preimage(const PublicDataTreeLeafPreimage& preimage);
 
     static FF unconstrained_silo_note_hash(FF contract_address, FF note_hash);
+    static FF unconstrained_compute_note_hash_nonce(FF tx_hash, FF note_index_in_tx);
+    static FF unconstrained_compute_unique_note_hash(FF nonce, FF siloed_note_hash);
     static FF unconstrained_silo_nullifier(FF contract_address, FF nullifier);
     static FF unconstrained_compute_public_tree_leaf_slot(FF contract_address, FF leaf_index);
+    // Could probably template these
+    static bool assert_public_data_non_membership_check(const PublicDataTreeLeafPreimage& low_leaf_preimage,
+                                                        const FF& leaf_slot);
+    static bool assert_nullifier_non_membership_check(const NullifierLeafPreimage& low_leaf_preimage,
+                                                      const FF& nullifier);
 
     void finalize(std::vector<AvmFullRow<FF>>& main_trace);
diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp
index 94c223f38d5..747d79d722f 100644
--- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp
+++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp
@@ -213,8 +213,14 @@ std::vector<uint8_t> AvmTraceBuilder::get_bytecode(const FF contract_address, bo
     throw std::runtime_error("Bytecode not found");
 }
 
+uint32_t AvmTraceBuilder::get_inserted_note_hashes_count()
+{
+    return merkle_tree_trace_builder.get_tree_snapshots().note_hash_tree.size -
+           public_inputs.start_tree_snapshots.note_hash_tree.size;
+}
+
 void AvmTraceBuilder::insert_private_state(const std::vector<FF>& siloed_nullifiers,
-                                           [[maybe_unused]] const std::vector<FF>& siloed_note_hashes)
+                                           const std::vector<FF>& unique_note_hashes)
 {
     for (const auto& siloed_nullifier : siloed_nullifiers) {
         auto hint = execution_hints.nullifier_write_hints[nullifier_write_counter++];
@@ -225,6 +231,28 @@ void AvmTraceBuilder::insert_private_state(const std::vector<FF>& siloed_nulli
                                                      siloed_nullifier,
                                                      hint.insertion_path);
     }
+
+    for (const auto& unique_note_hash : unique_note_hashes) {
+        auto hint = execution_hints.note_hash_write_hints[note_hash_write_counter++];
+        merkle_tree_trace_builder.perform_note_hash_append(0, unique_note_hash, hint.sibling_path);
+    }
+}
+
+void AvmTraceBuilder::insert_private_revertible_state(const std::vector<FF>& siloed_nullifiers,
+                                                      const std::vector<FF>& siloed_note_hashes)
+{
+    // Revertible note hashes come from private only siloed, so we need to make them unique here
+    std::vector<FF> unique_note_hashes;
+    unique_note_hashes.reserve(siloed_note_hashes.size());
+
+    for (size_t i = 0; i < siloed_note_hashes.size(); i++) {
+        size_t note_index_in_tx = i + get_inserted_note_hashes_count();
+        FF nonce = AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_tx_hash(), note_index_in_tx);
+        unique_note_hashes.push_back(
+            AvmMerkleTreeTraceBuilder::unconstrained_compute_unique_note_hash(nonce, siloed_note_hashes.at(i)));
+    }
+
+    insert_private_state(siloed_nullifiers, unique_note_hashes);
 }
 
 void AvmTraceBuilder::pay_fee()
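insert_private_revertible_state applies the note-hash uniqueness derivation: a per-note nonce is hashed from the transaction hash and the note's index in the transaction, and the unique note hash is then hashed from that nonce and the siloed note hash. A sketch of the shape of that computation (the hash below is a deterministic placeholder, not Poseidon2, and the generator constants are stand-ins for GENERATOR_INDEX__NOTE_HASH_NONCE / GENERATOR_INDEX__UNIQUE_NOTE_HASH):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <vector>

// Placeholder mixing function standing in for Poseidon2::hash over field elements;
// NOT the real primitive, just something deterministic for the sketch.
uint64_t toy_hash(std::initializer_list<uint64_t> inputs)
{
    uint64_t acc = 0x9e3779b97f4a7c15ULL;
    for (uint64_t x : inputs) {
        acc = (acc ^ x) * 0x100000001b3ULL;
    }
    return acc;
}

// Stand-ins for GENERATOR_INDEX__NOTE_HASH_NONCE / GENERATOR_INDEX__UNIQUE_NOTE_HASH
// (the real values live in the protocol constants).
constexpr uint64_t GEN_NOTE_HASH_NONCE = 1;
constexpr uint64_t GEN_UNIQUE_NOTE_HASH = 2;

// Mirrors insert_private_revertible_state: note indices continue after the notes
// already inserted in earlier phases (the role of get_inserted_note_hashes_count()).
std::vector<uint64_t> make_unique_note_hashes(uint64_t tx_hash,
                                              uint64_t already_inserted,
                                              const std::vector<uint64_t>& siloed)
{
    std::vector<uint64_t> unique;
    unique.reserve(siloed.size());
    for (std::size_t i = 0; i < siloed.size(); i++) {
        uint64_t nonce = toy_hash({ GEN_NOTE_HASH_NONCE, tx_hash, already_inserted + i });
        unique.push_back(toy_hash({ GEN_UNIQUE_NOTE_HASH, nonce, siloed[i] }));
    }
    return unique;
}

int main()
{
    auto unique = make_unique_note_hashes(/*tx_hash=*/42, /*already_inserted=*/1, { 7, 7 });
    assert(unique[0] != unique[1]); // identical siloed notes still get distinct leaves
}

The already_inserted offset is why non-revertible notes appended during SETUP shift the indices of the revertible ones, keeping nonces distinct across both phases.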
@@ -2535,19 +2563,25 @@ AvmError AvmTraceBuilder::op_sload(uint8_t indirect, uint32_t slot_offset, uint3
 
     // Retrieve the public data read hint for this sload
     PublicDataReadTreeHint read_hint = execution_hints.storage_read_hints.at(storage_read_counter++);
+    // Check that the hinted leaf is a member of the public data tree
+    bool is_member = merkle_tree_trace_builder.perform_storage_read(
+        clk, read_hint.leaf_preimage, read_hint.leaf_index, read_hint.sibling_path);
+    ASSERT(is_member);
 
     // Compute the tree slot
     FF computed_tree_slot =
         merkle_tree_trace_builder.compute_public_tree_leaf_slot(clk, current_ext_call_ctx.contract_address, read_slot);
-    // Sanity check that the computed slot using the value read from slot_offset should match the read hint
-    ASSERT(computed_tree_slot == read_hint.leaf_preimage.slot);
-
-    // Check that the leaf is a member of the public data tree
-    bool is_member = merkle_tree_trace_builder.perform_storage_read(
-        clk, read_hint.leaf_preimage, read_hint.leaf_index, read_hint.sibling_path);
-    ASSERT(is_member);
+    // Check if the computed_tree_slot matches the read hint
+    bool exists = computed_tree_slot == read_hint.leaf_preimage.slot;
+
+    // If it doesn't exist, we should check the low leaf conditions
+    if (!exists) {
+        bool non_member = AvmMerkleTreeTraceBuilder::assert_public_data_non_membership_check(read_hint.leaf_preimage,
+                                                                                             computed_tree_slot);
+        ASSERT(non_member);
+    }
 
-    FF value = read_hint.leaf_preimage.value;
+    FF value = exists ? read_hint.leaf_preimage.value : FF::zero();
 
     auto write_a = constrained_write_to_memory(
         call_ptr, clk, resolved_dest, value, AvmMemoryTag::FF, AvmMemoryTag::FF, IntermRegister::IA);
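The reordered op_sload above turns a hard equality assertion into a membership-or-default read: the hinted leaf is always proven to be in the tree, and then it either is the requested slot (read its value) or serves as a low leaf proving the slot absent (read zero, the default for uninitialized storage). A toy sketch of that case split (stand-in integer types; the real membership check against the root happens in perform_storage_read):

#include <cassert>
#include <cstdint>

// Toy public-data leaf preimage (field elements replaced by integers).
struct ToyLeafPreimage {
    uint64_t slot;
    uint64_t value;
    uint64_t next_index;
    uint64_t next_slot;
};

// Sketch of the hinted read: the caller has already proven the hinted leaf is in the
// tree; here we only decide what value the read returns.
uint64_t sload_value(const ToyLeafPreimage& hinted_leaf, uint64_t computed_slot)
{
    bool exists = computed_slot == hinted_leaf.slot;
    if (!exists) {
        // The hint must then be a valid low leaf proving the slot absent.
        bool non_member = hinted_leaf.slot < computed_slot &&
                          (hinted_leaf.next_index == 0 || hinted_leaf.next_slot > computed_slot);
        assert(non_member);
        return 0; // uninitialized storage reads as zero
    }
    return hinted_leaf.value;
}

int main()
{
    ToyLeafPreimage leaf{ 5, 42, 0, 0 };
    assert(sload_value(leaf, 5) == 42); // direct membership: read the stored value
    assert(sload_value(leaf, 9) == 0);  // proven absent: default zero
}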
@@ -2770,8 +2804,8 @@ AvmError AvmTraceBuilder::op_note_hash_exists(uint8_t indirect,
 AvmError AvmTraceBuilder::op_emit_note_hash(uint8_t indirect, uint32_t note_hash_offset)
 {
     auto const clk = static_cast<uint32_t>(main_trace.size()) + 1;
-
-    if (note_hash_write_counter >= MAX_NOTE_HASHES_PER_TX) {
+    uint32_t inserted_note_hashes_count = get_inserted_note_hashes_count();
+    if (inserted_note_hashes_count >= MAX_NOTE_HASHES_PER_TX) {
         AvmError error = AvmError::SIDE_EFFECT_LIMIT_REACHED;
         auto row = Row{
             .main_clk = clk,
@@ -2791,16 +2825,20 @@ AvmError AvmTraceBuilder::op_emit_note_hash(uint8_t indirect, uint32_t note_hash
     row.main_op_err = FF(static_cast<uint32_t>(!is_ok(error)));
 
     AppendTreeHint note_hash_write_hint = execution_hints.note_hash_write_hints.at(note_hash_write_counter++);
-    auto siloed_note_hash = AvmMerkleTreeTraceBuilder::unconstrained_silo_note_hash(
+    FF siloed_note_hash = AvmMerkleTreeTraceBuilder::unconstrained_silo_note_hash(
         current_public_call_request.contract_address, row.main_ia);
-    ASSERT(row.main_ia == note_hash_write_hint.leaf_value);
+    FF nonce =
+        AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_tx_hash(), inserted_note_hashes_count);
+    FF unique_note_hash = AvmMerkleTreeTraceBuilder::unconstrained_compute_unique_note_hash(nonce, siloed_note_hash);
+
+    ASSERT(unique_note_hash == note_hash_write_hint.leaf_value);
     // We first check that the index is currently empty
     bool insert_index_is_empty = merkle_tree_trace_builder.perform_note_hash_read(
         clk, FF::zero(), note_hash_write_hint.leaf_index, note_hash_write_hint.sibling_path);
     ASSERT(insert_index_is_empty);
 
     // Update the root with the new leaf that is appended
-    merkle_tree_trace_builder.perform_note_hash_append(clk, siloed_note_hash, note_hash_write_hint.sibling_path);
+    merkle_tree_trace_builder.perform_note_hash_append(clk, unique_note_hash, note_hash_write_hint.sibling_path);
 
     // Constrain gas cost
     gas_trace_builder.constrain_gas(clk, OpCode::EMITNOTEHASH);
@@ -2836,7 +2874,6 @@ AvmError AvmTraceBuilder::op_nullifier_exists(uint8_t indirect,
     Row row;
 
     // Exists is written to b
-    bool exists = false;
     if (is_ok(error)) {
         NullifierReadTreeHint nullifier_read_hint = execution_hints.nullifier_read_hints.at(nullifier_read_counter++);
         FF nullifier_value = unconstrained_read_from_memory(resolved_nullifier_offset);
@@ -2847,17 +2884,11 @@
                                                            nullifier_read_hint.low_leaf_index,
                                                            nullifier_read_hint.low_leaf_sibling_path);
         ASSERT(is_member);
-
-        if (siloed_nullifier == nullifier_read_hint.low_leaf_preimage.nullifier) {
-            // This is a direct membership check
-            exists = true;
-        } else {
-            exists = false;
-            // This is a non-membership proof
-            // Show that the target nullifier meets the non membership conditions (sandwich or max)
-            ASSERT(siloed_nullifier < nullifier_read_hint.low_leaf_preimage.nullifier &&
-                   (nullifier_read_hint.low_leaf_preimage.next_nullifier == FF::zero() ||
-                    siloed_nullifier > nullifier_read_hint.low_leaf_preimage.next_nullifier));
+        bool exists = siloed_nullifier == nullifier_read_hint.low_leaf_preimage.nullifier;
+        if (!exists) {
+            bool is_non_member = AvmMerkleTreeTraceBuilder::assert_nullifier_non_membership_check(
+                nullifier_read_hint.low_leaf_preimage, siloed_nullifier);
+            ASSERT(is_non_member);
         }
 
         auto read_a = constrained_read_from_memory(
@@ -4764,8 +4795,15 @@ std::vector<Row> AvmTraceBuilder::finalize(bool apply_end_gas_assertions)
 
     if (apply_end_gas_assertions) {
         // Sanity check that the amount of gas consumed matches what we expect from the public inputs
+        auto const l2_gas_left_after_private =
+            public_inputs.gas_settings.gas_limits.l2_gas - public_inputs.start_gas_used.l2_gas;
+        auto const allocated_l2_gas =
+            std::min(l2_gas_left_after_private, static_cast<uint32_t>(MAX_L2_GAS_PER_TX_PUBLIC_PORTION));
+        // Since end_gas_used also contains start_gas_used, we need to subtract it out to see what is consumed by the
+        // public computation only
+        auto expected_public_gas_consumed = public_inputs.end_gas_used.l2_gas - public_inputs.start_gas_used.l2_gas;
+        auto expected_end_gas_l2 = allocated_l2_gas - expected_public_gas_consumed;
         auto last_l2_gas_remaining = main_trace.back().main_l2_gas_remaining;
-        auto expected_end_gas_l2 = public_inputs.gas_settings.gas_limits.l2_gas - public_inputs.end_gas_used.l2_gas;
         vinfo("Last L2 gas remaining: ", last_l2_gas_remaining);
         vinfo("Expected end gas L2: ", expected_end_gas_l2);
         ASSERT(last_l2_gas_remaining == expected_end_gas_l2);
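The new end-gas assertion accounts for the cap on the public portion of L2 gas: public execution is allocated min(limit - start_used, MAX_L2_GAS_PER_TX_PUBLIC_PORTION), and the expected remainder is that allocation minus what public execution alone consumed. A worked example with made-up numbers (the constant's real value lives in the protocol constants):

#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
    // All numbers below are made up for illustration.
    const uint32_t MAX_L2_GAS_PER_TX_PUBLIC_PORTION = 1'000'000; // placeholder value
    const uint32_t l2_gas_limit = 3'000'000;
    const uint32_t start_gas_used = 500'000; // consumed by private execution
    const uint32_t end_gas_used = 700'000;   // cumulative, so public used 200'000

    const uint32_t left_after_private = l2_gas_limit - start_gas_used; // 2'500'000
    const uint32_t allocated_l2_gas =
        std::min(left_after_private, MAX_L2_GAS_PER_TX_PUBLIC_PORTION); // capped at 1'000'000
    const uint32_t public_gas_consumed = end_gas_used - start_gas_used; // 200'000
    assert(allocated_l2_gas - public_gas_consumed == 800'000); // expected_end_gas_l2

    // The old formula, gas_limit - end_gas_used, would expect 2'300'000 remaining and
    // fail whenever the public-portion cap is the binding constraint.
}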
diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp
index 5e6af27c2a9..fa0d1f89833 100644
--- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp
+++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp
@@ -234,7 +234,9 @@ class AvmTraceBuilder {
     void rollback_to_non_revertible_checkpoint();
     std::vector<uint8_t> get_bytecode(const FF contract_address, bool check_membership = false);
     std::unordered_set<FF> bytecode_membership_cache;
-    void insert_private_state(const std::vector<FF>& siloed_nullifiers, const std::vector<FF>& siloed_note_hashes);
+    void insert_private_state(const std::vector<FF>& siloed_nullifiers, const std::vector<FF>& unique_note_hashes);
+    void insert_private_revertible_state(const std::vector<FF>& siloed_nullifiers,
+                                         const std::vector<FF>& siloed_note_hashes);
     void pay_fee();
 
     // These are used for testing only.
@@ -359,6 +361,8 @@ class AvmTraceBuilder {
                                           AvmMemoryTag write_tag,
                                           IntermRegister reg,
                                           AvmMemTraceBuilder::MemOpOwner mem_op_owner = AvmMemTraceBuilder::MAIN);
+    uint32_t get_inserted_note_hashes_count();
+    FF get_tx_hash() const { return public_inputs.previous_non_revertible_accumulated_data.nullifiers[0]; }
 
     // TODO: remove these once everything is constrained.
     AvmMemoryTag unconstrained_get_memory_tag(AddressWithMode addr);
diff --git a/noir/noir-repo/compiler/fm/src/file_map.rs b/noir/noir-repo/compiler/fm/src/file_map.rs
index ba552fe5156..857c7460fb9 100644
--- a/noir/noir-repo/compiler/fm/src/file_map.rs
+++ b/noir/noir-repo/compiler/fm/src/file_map.rs
@@ -80,6 +80,19 @@ impl FileMap {
     pub fn all_file_ids(&self) -> impl Iterator<Item = &FileId> {
         self.name_to_id.values()
     }
+
+    pub fn get_name(&self, file_id: FileId) -> Result<PathString, Error> {
+        let name = self.files.get(file_id.as_usize())?.name().clone();
+
+        // See if we can make the file name a bit shorter/easier to read if it starts with the current directory
+        if let Some(current_dir) = &self.current_dir {
+            if let Ok(name_without_prefix) = name.0.strip_prefix(current_dir) {
+                return Ok(PathString::from_path(name_without_prefix.to_path_buf()));
+            }
+        }
+
+        Ok(name)
+    }
 }
 
 impl Default for FileMap {
     fn default() -> Self {
@@ -97,16 +110,7 @@ impl<'a> Files<'a> for FileMap {
     type Source = &'a str;
 
     fn name(&self, file_id: Self::FileId) -> Result<Self::Name, Error> {
-        let name = self.files.get(file_id.as_usize())?.name().clone();
-
-        // See if we can make the file name a bit shorter/easier to read if it starts with the current directory
-        if let Some(current_dir) = &self.current_dir {
-            if let Ok(name_without_prefix) = name.0.strip_prefix(current_dir) {
-                return Ok(PathString::from_path(name_without_prefix.to_path_buf()));
-            }
-        }
-
-        Ok(name)
+        self.get_name(file_id)
     }
 
     fn source(&'a self, file_id: Self::FileId) -> Result<Self::Source, Error> {
diff --git a/noir/noir-repo/compiler/noirc_errors/src/reporter.rs b/noir/noir-repo/compiler/noirc_errors/src/reporter.rs
index f029b4e6de8..e57775d9a7f 100644
--- a/noir/noir-repo/compiler/noirc_errors/src/reporter.rs
+++ b/noir/noir-repo/compiler/noirc_errors/src/reporter.rs
@@ -272,7 +272,7 @@ fn convert_diagnostic(
     diagnostic.with_message(&cd.message).with_labels(secondary_labels).with_notes(notes)
 }
 
-fn stack_trace<'files>(
+pub fn stack_trace<'files>(
     files: &'files impl Files<'files, FileId = fm::FileId>,
     call_stack: &[Location],
 ) -> String {
diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs
index f83a2095f13..769d0d80cc4 100644
--- a/noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs
+++ b/noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs
@@ -1880,7 +1880,7 @@ impl<'a> Context<'a> {
                 // This conversion is for debugging support only, to allow the
                 // debugging instrumentation code to work. Taking the reference
                 // of a function in ACIR is useless.
- let id = self.acir_context.add_constant(function_id.to_usize()); + let id = self.acir_context.add_constant(function_id.to_u32()); AcirValue::Var(id, AcirType::field()) } Value::ForeignFunction(_) => unimplemented!( diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 56511127da8..d2bf7e5bdca 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -1614,7 +1614,7 @@ impl<'block> BrilligBlock<'block> { self.brillig_context.const_instruction( new_variable.extract_single_addr(), - value_id.to_usize().into(), + value_id.to_u32().into(), ); new_variable } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/map.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/map.rs index 23f5380f030..0fb02f19b14 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/map.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/map.rs @@ -4,7 +4,7 @@ use std::{ collections::BTreeMap, hash::Hash, str::FromStr, - sync::atomic::{AtomicUsize, Ordering}, + sync::atomic::{AtomicU32, Ordering}, }; use thiserror::Error; @@ -18,7 +18,7 @@ use thiserror::Error; /// another map where it will likely be invalid. #[derive(Serialize, Deserialize)] pub(crate) struct Id { - index: usize, + index: u32, // If we do not skip this field it will simply serialize as `"_marker":null` which is useless extra data #[serde(skip)] _marker: std::marker::PhantomData, @@ -26,14 +26,15 @@ pub(crate) struct Id { impl Id { /// Constructs a new Id for the given index. - /// This constructor is deliberately private to prevent - /// constructing invalid IDs. - pub(crate) fn new(index: usize) -> Self { + /// + /// This is private so that we can guarantee ids created from this function + /// point to valid T values in their external maps. + fn new(index: u32) -> Self { Self { index, _marker: std::marker::PhantomData } } /// Returns the underlying index of this Id. - pub(crate) fn to_usize(self) -> usize { + pub(crate) fn to_u32(self) -> u32 { self.index } @@ -43,7 +44,7 @@ impl Id { /// as unlike DenseMap::push and SparseMap::push, the Ids created /// here are likely invalid for any particularly map. #[cfg(test)] - pub(crate) fn test_new(index: usize) -> Self { + pub(crate) fn test_new(index: u32) -> Self { Self::new(index) } } @@ -187,7 +188,7 @@ impl DenseMap { /// Adds an element to the map. /// Returns the identifier/reference to that element. pub(crate) fn insert(&mut self, element: T) -> Id { - let id = Id::new(self.storage.len()); + let id = Id::new(self.storage.len().try_into().unwrap()); self.storage.push(element); id } @@ -195,7 +196,7 @@ impl DenseMap { /// Given the Id of the element being created, adds the element /// returned by the given function to the map pub(crate) fn insert_with_id(&mut self, f: impl FnOnce(Id) -> T) -> Id { - let id = Id::new(self.storage.len()); + let id = Id::new(self.storage.len().try_into().unwrap()); self.storage.push(f(id)); id } @@ -204,7 +205,7 @@ impl DenseMap { /// /// The id-element pairs are ordered by the numeric values of the ids. 
pub(crate) fn iter(&self) -> impl ExactSizeIterator, &T)> { - let ids_iter = (0..self.storage.len()).map(|idx| Id::new(idx)); + let ids_iter = (0..self.storage.len() as u32).map(|idx| Id::new(idx)); ids_iter.zip(self.storage.iter()) } } @@ -219,13 +220,13 @@ impl std::ops::Index> for DenseMap { type Output = T; fn index(&self, id: Id) -> &Self::Output { - &self.storage[id.index] + &self.storage[id.index as usize] } } impl std::ops::IndexMut> for DenseMap { fn index_mut(&mut self, id: Id) -> &mut Self::Output { - &mut self.storage[id.index] + &mut self.storage[id.index as usize] } } @@ -253,7 +254,7 @@ impl SparseMap { /// Adds an element to the map. /// Returns the identifier/reference to that element. pub(crate) fn insert(&mut self, element: T) -> Id { - let id = Id::new(self.storage.len()); + let id = Id::new(self.storage.len().try_into().unwrap()); self.storage.insert(id, element); id } @@ -261,7 +262,7 @@ impl SparseMap { /// Given the Id of the element being created, adds the element /// returned by the given function to the map pub(crate) fn insert_with_id(&mut self, f: impl FnOnce(Id) -> T) -> Id { - let id = Id::new(self.storage.len()); + let id = Id::new(self.storage.len().try_into().unwrap()); self.storage.insert(id, f(id)); id } @@ -365,7 +366,7 @@ impl std::ops::Index<&K> for TwoWayMap { /// This type wraps an AtomicUsize so it can safely be used across threads. #[derive(Debug, Serialize, Deserialize)] pub(crate) struct AtomicCounter { - next: AtomicUsize, + next: AtomicU32, _marker: std::marker::PhantomData, } @@ -373,7 +374,7 @@ impl AtomicCounter { /// Create a new counter starting after the given Id. /// Use AtomicCounter::default() to start at zero. pub(crate) fn starting_after(id: Id) -> Self { - Self { next: AtomicUsize::new(id.index + 1), _marker: Default::default() } + Self { next: AtomicU32::new(id.index + 1), _marker: Default::default() } } /// Return the next fresh id diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs index ded1f52d529..7d7798fd30a 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs @@ -268,7 +268,7 @@ fn create_apply_functions( } fn function_id_to_field(function_id: FunctionId) -> FieldElement { - (function_id.to_usize() as u128).into() + (function_id.to_u32() as u128).into() } /// Creates an apply function for the given signature and variants diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 142447c83a5..7ef793a350b 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -1146,7 +1146,7 @@ mod tests { let refs = loop0.find_pre_header_reference_values(function, &loops.cfg).unwrap(); assert_eq!(refs.len(), 1); - assert!(refs.contains(&ValueId::new(2))); + assert!(refs.contains(&ValueId::test_new(2))); let (loads, stores) = loop0.count_loads_and_stores(function, &refs); assert_eq!(loads, 1); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs index b0003aa5f0f..7c7e977c6ce 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs @@ -57,7 +57,7 @@ impl 
Translator {
         // A FunctionBuilder must be created with a main Function, so here we remove it
         // from the parsed SSA to avoid adding it twice later on.
         let main_function = parsed_ssa.functions.remove(0);
-        let main_id = FunctionId::new(0);
+        let main_id = FunctionId::test_new(0);
         let mut builder = FunctionBuilder::new(main_function.external_name.clone(), main_id);
         builder.set_runtime(main_function.runtime_type);
 
@@ -65,7 +65,7 @@ impl Translator {
         let mut function_id_counter = 1;
         let mut functions = HashMap::new();
         for function in &parsed_ssa.functions {
-            let function_id = FunctionId::new(function_id_counter);
+            let function_id = FunctionId::test_new(function_id_counter);
             function_id_counter += 1;
 
             functions.insert(function.internal_name.clone(), function_id);
diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd.rs
index 75e71818594..1fd4ed2d873 100644
--- a/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd.rs
+++ b/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd.rs
@@ -12,7 +12,7 @@ use acvm::{BlackBoxFunctionSolver, FieldElement};
 use bn254_blackbox_solver::Bn254BlackBoxSolver;
 use clap::Args;
 use fm::FileManager;
-use formatters::{Formatter, PrettyFormatter, TerseFormatter};
+use formatters::{Formatter, JsonFormatter, PrettyFormatter, TerseFormatter};
 use nargo::{
     insert_all_files_for_workspace_into_file_manager, ops::TestStatus, package::Package, parse_all,
     prepare_package, workspace::Workspace, PrintOutput,
@@ -71,6 +71,8 @@ enum Format {
     Pretty,
     /// Display one character per test
     Terse,
+    /// Output a JSON Lines document
+    Json,
 }
 
 impl Format {
@@ -78,6 +80,7 @@
         match self {
             Format::Pretty => Box::new(PrettyFormatter),
             Format::Terse => Box::new(TerseFormatter),
+            Format::Json => Box::new(JsonFormatter),
         }
     }
 }
@@ -87,6 +90,7 @@ impl Display for Format {
         match self {
             Format::Pretty => write!(f, "pretty"),
             Format::Terse => write!(f, "terse"),
+            Format::Json => write!(f, "json"),
         }
     }
 }
@@ -211,6 +215,12 @@ impl<'a> TestRunner<'a> {
     ) -> bool {
         let mut all_passed = true;
 
+        for (package_name, total_test_count) in test_count_per_package {
+            self.formatter
+                .package_start_async(package_name, *total_test_count)
+                .expect("Could not display package start");
+        }
+
        let (sender, receiver) = mpsc::channel();
        let iter = &Mutex::new(tests.into_iter());
        thread::scope(|scope| {
@@ -228,6 +238,10 @@
                            break;
                        };

+                        self.formatter
+                            .test_start_async(&test.name, &test.package_name)
+                            .expect("Could not display test start");
+
                        let time_before_test = std::time::Instant::now();
                        let (status, output) = match catch_unwind(test.runner) {
                            Ok((status, output)) => (status, output),
@@ -255,6 +269,16 @@
                            time_to_run,
                        };

+                        self.formatter
+                            .test_end_async(
+                                &test_result,
+                                self.file_manager,
+                                self.args.show_output,
+                                self.args.compile_options.deny_warnings,
+                                self.args.compile_options.silence_warnings,
+                            )
+                            .expect("Could not display test end");
+
                        if thread_sender.send(test_result).is_err() {
                            break;
                        }
@@ -275,7 +299,7 @@
            let total_test_count = *total_test_count;

            self.formatter
-                .package_start(package_name, total_test_count)
+                .package_start_sync(package_name, total_test_count)
                .expect("Could not display package start");

            // Check if we have buffered test results for this package
@@ -485,7 +509,7 @@
        current_test_count: usize,
        total_test_count: usize,
    ) -> std::io::Result<()> {
-        self.formatter.test_end(
+        self.formatter.test_end_sync(
            test_result,
            current_test_count,
            total_test_count,
            self.file_manager,
            self.args.show_output,
            self.args.compile_options.deny_warnings,
            self.args.compile_options.silence_warnings,
        )
    }
}
-
-#[cfg(test)]
-mod tests {
-    use std::io::Write;
-    use std::{thread, time::Duration};
-    use termcolor::{ColorChoice, StandardStream};
-
-    #[test]
-    fn test_stderr_lock() {
-        for i in 0..4 {
-            thread::spawn(move || {
-                let mut writer = StandardStream::stderr(ColorChoice::Always);
-                //let mut writer = writer.lock();
-
-                let mut show = |msg| {
-                    thread::sleep(Duration::from_millis(10));
-                    //println!("{i} {msg}");
-                    writeln!(writer, "{i} {msg}").unwrap();
-                };
-
-                show("a");
-                show("b");
-                show("c");
-            });
-        }
-        thread::sleep(Duration::from_millis(100));
-    }
-}
diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd/formatters.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd/formatters.rs
index 2a791930f60..1b9b2d50378 100644
--- a/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd/formatters.rs
+++ b/noir/noir-repo/tooling/nargo_cli/src/cli/test_cmd/formatters.rs
@@ -2,15 +2,47 @@ use std::{io::Write, panic::RefUnwindSafe, time::Duration};
 
 use fm::FileManager;
 use nargo::ops::TestStatus;
+use noirc_errors::{reporter::stack_trace, FileDiagnostic};
+use serde_json::{json, Map};
 use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, StandardStreamLock, WriteColor};
 
 use super::TestResult;
 
+/// A formatter for showing test results.
+///
+/// The order of events is:
+/// 1. Compilation of all packages happens (in parallel). There's no formatter method for this.
+/// 2. If compilation is successful, one `package_start_async` for each package.
+/// 3. For each test, one `test_start_async` event
+///    (there's no `test_start_sync` event because it would happen right before `test_end_sync`)
+/// 4. For each package, sequentially:
+///    a. A `package_start_sync` event
+///    b. One `test_end_sync` event for each test
+///    c. A `package_end` event
+///
+/// The reason we have some `sync` and `async` events is that formatters that show output
+/// to humans rely on the `sync` events to show a more predictable output (package by package),
+/// and formatters that output to a machine-readable format (like JSON) rely on the `async`
+/// events to show things as soon as they happen, regardless of package ordering.
pub(super) trait Formatter: Send + Sync + RefUnwindSafe { - fn package_start(&self, package_name: &str, test_count: usize) -> std::io::Result<()>; + fn package_start_async(&self, package_name: &str, test_count: usize) -> std::io::Result<()>; + + fn package_start_sync(&self, package_name: &str, test_count: usize) -> std::io::Result<()>; + + fn test_start_async(&self, name: &str, package_name: &str) -> std::io::Result<()>; + + #[allow(clippy::too_many_arguments)] + fn test_end_async( + &self, + test_result: &TestResult, + file_manager: &FileManager, + show_output: bool, + deny_warnings: bool, + silence_warnings: bool, + ) -> std::io::Result<()>; #[allow(clippy::too_many_arguments)] - fn test_end( + fn test_end_sync( &self, test_result: &TestResult, current_test_count: usize, @@ -35,11 +67,30 @@ pub(super) trait Formatter: Send + Sync + RefUnwindSafe { pub(super) struct PrettyFormatter; impl Formatter for PrettyFormatter { - fn package_start(&self, package_name: &str, test_count: usize) -> std::io::Result<()> { + fn package_start_async(&self, _package_name: &str, _test_count: usize) -> std::io::Result<()> { + Ok(()) + } + + fn package_start_sync(&self, package_name: &str, test_count: usize) -> std::io::Result<()> { package_start(package_name, test_count) } - fn test_end( + fn test_start_async(&self, _name: &str, _package_name: &str) -> std::io::Result<()> { + Ok(()) + } + + fn test_end_async( + &self, + _test_result: &TestResult, + _file_manager: &FileManager, + _show_output: bool, + _deny_warnings: bool, + _silence_warnings: bool, + ) -> std::io::Result<()> { + Ok(()) + } + + fn test_end_sync( &self, test_result: &TestResult, _current_test_count: usize, @@ -173,11 +224,30 @@ impl Formatter for PrettyFormatter { pub(super) struct TerseFormatter; impl Formatter for TerseFormatter { - fn package_start(&self, package_name: &str, test_count: usize) -> std::io::Result<()> { + fn package_start_async(&self, _package_name: &str, _test_count: usize) -> std::io::Result<()> { + Ok(()) + } + + fn package_start_sync(&self, package_name: &str, test_count: usize) -> std::io::Result<()> { package_start(package_name, test_count) } - fn test_end( + fn test_start_async(&self, _name: &str, _package_name: &str) -> std::io::Result<()> { + Ok(()) + } + + fn test_end_async( + &self, + _test_result: &TestResult, + _file_manager: &FileManager, + _show_output: bool, + _deny_warnings: bool, + _silence_warnings: bool, + ) -> std::io::Result<()> { + Ok(()) + } + + fn test_end_sync( &self, test_result: &TestResult, current_test_count: usize, @@ -317,8 +387,153 @@ impl Formatter for TerseFormatter { } } +pub(super) struct JsonFormatter; + +impl Formatter for JsonFormatter { + fn package_start_async(&self, package_name: &str, test_count: usize) -> std::io::Result<()> { + let json = json!({"type": "suite", "event": "started", "name": package_name, "test_count": test_count}); + println!("{json}"); + Ok(()) + } + + fn package_start_sync(&self, _package_name: &str, _test_count: usize) -> std::io::Result<()> { + Ok(()) + } + + fn test_start_async(&self, name: &str, package_name: &str) -> std::io::Result<()> { + let json = json!({"type": "test", "event": "started", "name": name, "suite": package_name}); + println!("{json}"); + Ok(()) + } + + fn test_end_async( + &self, + test_result: &TestResult, + file_manager: &FileManager, + show_output: bool, + _deny_warnings: bool, + silence_warnings: bool, + ) -> std::io::Result<()> { + let mut json = Map::new(); + json.insert("type".to_string(), json!("test")); + 
json.insert("name".to_string(), json!(&test_result.name)); + json.insert("exec_time".to_string(), json!(test_result.time_to_run.as_secs_f64())); + + let mut stdout = String::new(); + if show_output && !test_result.output.is_empty() { + stdout.push_str(test_result.output.trim()); + } + + match &test_result.status { + TestStatus::Pass => { + json.insert("event".to_string(), json!("ok")); + } + TestStatus::Fail { message, error_diagnostic } => { + json.insert("event".to_string(), json!("failed")); + + if !stdout.is_empty() { + stdout.push('\n'); + } + stdout.push_str(message.trim()); + + if let Some(diagnostic) = error_diagnostic { + if !(diagnostic.diagnostic.is_warning() && silence_warnings) { + stdout.push('\n'); + stdout.push_str(&diagnostic_to_string(diagnostic, file_manager)); + } + } + } + TestStatus::Skipped => { + json.insert("event".to_string(), json!("ignored")); + } + TestStatus::CompileError(diagnostic) => { + json.insert("event".to_string(), json!("failed")); + + if !(diagnostic.diagnostic.is_warning() && silence_warnings) { + if !stdout.is_empty() { + stdout.push('\n'); + } + stdout.push_str(&diagnostic_to_string(diagnostic, file_manager)); + } + } + } + + if !stdout.is_empty() { + json.insert("stdout".to_string(), json!(stdout)); + } + + let json = json!(json); + println!("{json}"); + + Ok(()) + } + + fn test_end_sync( + &self, + _test_result: &TestResult, + _current_test_count: usize, + _total_test_count: usize, + _file_manager: &FileManager, + _show_output: bool, + _deny_warnings: bool, + _silence_warnings: bool, + ) -> std::io::Result<()> { + Ok(()) + } + + fn package_end( + &self, + _package_name: &str, + test_results: &[TestResult], + _file_manager: &FileManager, + _show_output: bool, + _deny_warnings: bool, + _silence_warnings: bool, + ) -> std::io::Result<()> { + let mut passed = 0; + let mut failed = 0; + let mut ignored = 0; + for test_result in test_results { + match &test_result.status { + TestStatus::Pass => passed += 1, + TestStatus::Fail { .. } | TestStatus::CompileError(..) => failed += 1, + TestStatus::Skipped => ignored += 1, + } + } + let event = if failed == 0 { "ok" } else { "failed" }; + let json = json!({"type": "suite", "event": event, "passed": passed, "failed": failed, "ignored": ignored}); + println!("{json}"); + Ok(()) + } +} + fn package_start(package_name: &str, test_count: usize) -> std::io::Result<()> { let plural = if test_count == 1 { "" } else { "s" }; println!("[{package_name}] Running {test_count} test function{plural}"); Ok(()) } + +fn diagnostic_to_string(file_diagnostic: &FileDiagnostic, file_manager: &FileManager) -> String { + let file_map = file_manager.as_file_map(); + + let custom_diagnostic = &file_diagnostic.diagnostic; + let mut message = String::new(); + message.push_str(custom_diagnostic.message.trim()); + + for note in &custom_diagnostic.notes { + message.push('\n'); + message.push_str(note.trim()); + } + + if let Ok(name) = file_map.get_name(file_diagnostic.file_id) { + message.push('\n'); + message.push_str(&format!("at {name}")); + } + + if !custom_diagnostic.call_stack.is_empty() { + message.push('\n'); + message.push_str(&stack_trace(file_map, &custom_diagnostic.call_stack)); + } + + message +} diff --git a/yarn-project/Earthfile b/yarn-project/Earthfile index 0fed665d9da..adf74380994 100644 --- a/yarn-project/Earthfile +++ b/yarn-project/Earthfile @@ -53,7 +53,7 @@ build: WORKDIR /usr/src/yarn-project COPY . . 
ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=true - RUN ./bootstrap.sh full + RUN ./bootstrap.sh build-dev: diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 3a2cd472738..dd29b991f0c 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -15,6 +15,7 @@ import { MerkleTreeId, NullifierMembershipWitness, type NullifierWithBlockSource, + P2PClientType, type ProcessedTx, type ProverConfig, PublicDataWitness, @@ -58,7 +59,7 @@ import { type ContractArtifact } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { padArrayEnd } from '@aztec/foundation/collection'; import { type Logger, createLogger } from '@aztec/foundation/log'; -import { Timer } from '@aztec/foundation/timer'; +import { DateProvider, Timer } from '@aztec/foundation/timer'; import { type AztecKVStore } from '@aztec/kv-store'; import { openTmpStore } from '@aztec/kv-store/lmdb'; import { SHA256Trunc, StandardTree, UnbalancedTree } from '@aztec/merkle-tree'; @@ -138,10 +139,12 @@ export class AztecNodeService implements AztecNode, Traceable { telemetry?: TelemetryClient; logger?: Logger; publisher?: L1Publisher; + dateProvider?: DateProvider; } = {}, ): Promise { const telemetry = deps.telemetry ?? new NoopTelemetryClient(); const log = deps.logger ?? createLogger('node'); + const dateProvider = deps.dateProvider ?? new DateProvider(); const ethereumChain = createEthereumChain(config.l1RpcUrl, config.l1ChainId); //validate that the actual chain id matches that specified in configuration if (config.l1ChainId !== ethereumChain.chainInfo.id) { @@ -154,7 +157,8 @@ export class AztecNodeService implements AztecNode, Traceable { // we identify the P2P transaction protocol by using the rollup contract address. 
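// e.g. the protocol id becomes /aztec/tx/0x<rollup address>, so txs are never gossiped across distinct rollup deployments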
// this may well change in future - config.transactionProtocol = `/aztec/tx/${config.l1Contracts.rollupAddress.toString()}`; + const rollupAddress = config.l1Contracts.rollupAddress; + config.transactionProtocol = `/aztec/tx/${rollupAddress.toString()}`; // now create the merkle trees and the world state synchronizer const worldStateSynchronizer = await createWorldStateSynchronizer(config, archiver, telemetry); @@ -164,12 +168,19 @@ export class AztecNodeService implements AztecNode, Traceable { } // create the tx pool and the p2p client, which will need the l2 block source - const p2pClient = await createP2PClient(config, archiver, proofVerifier, worldStateSynchronizer, telemetry); + const p2pClient = await createP2PClient( + P2PClientType.Full, + config, + archiver, + proofVerifier, + worldStateSynchronizer, + telemetry, + ); // start both and wait for them to sync from the block source await Promise.all([p2pClient.start(), worldStateSynchronizer.start()]); - const validatorClient = await createValidatorClient(config, config.l1Contracts.rollupAddress, p2pClient, telemetry); + const validatorClient = await createValidatorClient(config, rollupAddress, { p2pClient, telemetry, dateProvider }); // now create the sequencer const sequencer = config.disableValidator diff --git a/yarn-project/circuit-types/src/interfaces/p2p.ts b/yarn-project/circuit-types/src/interfaces/p2p.ts index 00fa526899d..d2032f9d129 100644 --- a/yarn-project/circuit-types/src/interfaces/p2p.ts +++ b/yarn-project/circuit-types/src/interfaces/p2p.ts @@ -3,6 +3,7 @@ import { type ApiSchemaFor, optional, schemas } from '@aztec/foundation/schemas' import { z } from 'zod'; import { BlockAttestation } from '../p2p/block_attestation.js'; +import { type P2PClientType } from '../p2p/client_type.js'; import { EpochProofQuote } from '../prover_coordination/epoch_proof_quote.js'; import { Tx } from '../tx/tx.js'; @@ -24,16 +25,7 @@ const PeerInfoSchema = z.discriminatedUnion('status', [ ]); /** Exposed API to the P2P module. */ -export interface P2PApi { - /** - * Queries the Attestation pool for attestations for the given slot - * - * @param slot - the slot to query - * @param proposalId - the proposal id to query, or undefined to query all proposals for the slot - * @returns BlockAttestations - */ - getAttestationsForSlot(slot: bigint, proposalId?: string): Promise; - +export interface P2PApiWithoutAttestations { /** * Queries the EpochProofQuote pool for quotes for the given epoch * @@ -59,6 +51,21 @@ export interface P2PApi { getPeers(includePending?: boolean): Promise; } +export interface P2PClient extends P2PApiWithoutAttestations { + /** + * Queries the Attestation pool for attestations for the given slot + * + * @param slot - the slot to query + * @param proposalId - the proposal id to query, or undefined to query all proposals for the slot + * @returns BlockAttestations + */ + getAttestationsForSlot(slot: bigint, proposalId?: string): Promise; +} + +export type P2PApi = T extends P2PClientType.Full + ? 
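+ // full clients additionally expose the attestation-pool API on top of the base API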
P2PClient & P2PApiWithoutAttestations : P2PApiWithoutAttestations; + export const P2PApiSchema: ApiSchemaFor = { getAttestationsForSlot: z .function() diff --git a/yarn-project/circuit-types/src/p2p/client_type.ts b/yarn-project/circuit-types/src/p2p/client_type.ts new file mode 100644 index 00000000000..75d1fea547c --- /dev/null +++ b/yarn-project/circuit-types/src/p2p/client_type.ts @@ -0,0 +1,6 @@ +export enum P2PClientType { + // Full p2p clients will subscribe to all gossip topics + Full, + // Prover p2p clients will only subscribe to transaction and proving topics + Prover, +} diff --git a/yarn-project/circuit-types/src/p2p/index.ts b/yarn-project/circuit-types/src/p2p/index.ts index 0974775870a..972988c8b7f 100644 --- a/yarn-project/circuit-types/src/p2p/index.ts +++ b/yarn-project/circuit-types/src/p2p/index.ts @@ -5,3 +5,4 @@ export * from './gossipable.js'; export * from './interface.js'; export * from './signature_utils.js'; export * from './topic_type.js'; +export * from './client_type.js'; diff --git a/yarn-project/circuit-types/src/p2p/topic_type.ts b/yarn-project/circuit-types/src/p2p/topic_type.ts index 8094905276c..db8d215a5ca 100644 --- a/yarn-project/circuit-types/src/p2p/topic_type.ts +++ b/yarn-project/circuit-types/src/p2p/topic_type.ts @@ -1,3 +1,5 @@ +import { P2PClientType } from './client_type.js'; + /** Create Topic String * * The topic channel identifier * @@ -18,6 +20,13 @@ export enum TopicType { epoch_proof_quote = 'epoch_proof_quote', } +export function getTopicTypeForClientType(clientType: P2PClientType) { + if (clientType === P2PClientType.Full) { + return Object.values(TopicType); + } + return [TopicType.tx, TopicType.epoch_proof_quote]; +} + /** * Convert the topic string into a set of labels * diff --git a/yarn-project/circuits.js/src/hash/hash.ts b/yarn-project/circuits.js/src/hash/hash.ts index a6ca04d5227..79fd9d4d98e 100644 --- a/yarn-project/circuits.js/src/hash/hash.ts +++ b/yarn-project/circuits.js/src/hash/hash.ts @@ -39,11 +39,11 @@ export function siloNoteHash(contract: AztecAddress, uniqueNoteHash: Fr): Fr { * Computes a unique note hash. * @dev Includes a nonce which contains data that guarantees the resulting note hash will be unique. * @param nonce - A nonce (typically derived from tx hash and note hash index in the tx). - * @param noteHash - A note hash. + * @param siloedNoteHash - A siloed note hash. * @returns A unique note hash.
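+ * @example computeUniqueNoteHash(computeNoteHashNonce(txHash, noteIndex), siloNoteHash(contract, noteHash))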
*/ -export function computeUniqueNoteHash(nonce: Fr, noteHash: Fr): Fr { - return poseidon2HashWithSeparator([nonce, noteHash], GeneratorIndex.UNIQUE_NOTE_HASH); +export function computeUniqueNoteHash(nonce: Fr, siloedNoteHash: Fr): Fr { + return poseidon2HashWithSeparator([nonce, siloedNoteHash], GeneratorIndex.UNIQUE_NOTE_HASH); } /** diff --git a/yarn-project/end-to-end/package.json b/yarn-project/end-to-end/package.json index c51b89150bb..4a31d3e2ee1 100644 --- a/yarn-project/end-to-end/package.json +++ b/yarn-project/end-to-end/package.json @@ -98,7 +98,6 @@ "devDependencies": { "0x": "^5.7.0", "@jest/globals": "^29.5.0", - "@sinonjs/fake-timers": "^13.0.5", "@types/jest": "^29.5.0", "@types/js-yaml": "^4.0.9", "@types/lodash.chunk": "^4.2.9", diff --git a/yarn-project/end-to-end/src/e2e_block_building.test.ts b/yarn-project/end-to-end/src/e2e_block_building.test.ts index 942e558d83e..29635955b8b 100644 --- a/yarn-project/end-to-end/src/e2e_block_building.test.ts +++ b/yarn-project/end-to-end/src/e2e_block_building.test.ts @@ -394,8 +394,10 @@ describe('e2e_block_building', () => { .send() .deployed(); - logger.info('Updating min txs per block to 4'); - await aztecNode.setConfig({ minTxsPerBlock: 4 }); + // We set the maximum number of txs per block to 12 to ensure that the sequencer will start building a block before it receives all the txs, + // and also to avoid it packing every tx into one oversized block + logger.info('Updating min txs per block to 4, and max txs per block to 12'); + await aztecNode.setConfig({ minTxsPerBlock: 4, maxTxsPerBlock: 12 }); logger.info('Spamming the network with public txs'); const txs = []; diff --git a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts index afa5cdbb1cd..7a74cb96163 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts @@ -76,6 +76,7 @@ describe('e2e_p2p_network', () => { t.logger.info('Creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts b/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts index 09d282c4f43..a218e4e7fe3 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts @@ -93,7 +93,7 @@ export class P2PNetworkTest { */ public async syncMockSystemTime() { this.logger.info('Syncing mock system time'); - const { timer, deployL1ContractsValues } = this.ctx!; + const { dateProvider, deployL1ContractsValues } = this.ctx!; // Send a tx and only update the time after the tx is mined, as eth time is not continuous const tx = await deployL1ContractsValues.walletClient.sendTransaction({ to: this.baseAccount.address, value: 1n, account: this.baseAccount, }); const receipt = await deployL1ContractsValues.publicClient.waitForTransactionReceipt({ hash: tx, }); const timestamp = await deployL1ContractsValues.publicClient.getBlock({ blockNumber: receipt.blockNumber }); - timer.setSystemTime(Number(timestamp.timestamp) * 1000); - this.logger.info(`Synced mock system time to ${timestamp.timestamp * 1000n}`); + dateProvider.setTime(Number(timestamp.timestamp) * 1000); } static async create({ @@ -133,7 +132,7 @@ export class P2PNetworkTest { async applyBaseSnapshots() { await this.snapshotManager.snapshot( 'add-validators', - async ({ deployL1ContractsValues, aztecNodeConfig, timer }) => { + async ({ deployL1ContractsValues, aztecNodeConfig, dateProvider }) => { const rollup = getContract({
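// viem contract handle for the deployed Rollup, used below to manage the validator set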
address: deployL1ContractsValues.l1ContractAddresses.rollupAddress.toString(), abi: RollupAbi, @@ -203,7 +202,7 @@ export class P2PNetworkTest { // Set the system time in the node, only after we have warped the time and waited for a block // Time is only set in the NEXT block - timer.setSystemTime(Number(timestamp) * 1000); + dateProvider.setTime(Number(timestamp) * 1000); }, ); } @@ -244,7 +243,7 @@ export class P2PNetworkTest { async removeInitialNode() { await this.snapshotManager.snapshot( 'remove-inital-validator', - async ({ deployL1ContractsValues, aztecNode, timer }) => { + async ({ deployL1ContractsValues, aztecNode, dateProvider }) => { // Send and await a tx to make sure we mine a block for the warp to correctly progress. const receipt = await deployL1ContractsValues.publicClient.waitForTransactionReceipt({ hash: await deployL1ContractsValues.walletClient.sendTransaction({ @@ -256,7 +255,7 @@ export class P2PNetworkTest { const block = await deployL1ContractsValues.publicClient.getBlock({ blockNumber: receipt.blockNumber, }); - timer.setSystemTime(Number(block.timestamp) * 1000); + dateProvider.setTime(Number(block.timestamp) * 1000); await aztecNode.stop(); }, diff --git a/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts b/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts index 81d069b65c7..f7a8ffbfb4a 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts @@ -46,6 +46,7 @@ describe('e2e_p2p_rediscovery', () => { const contexts: NodeContext[] = []; nodes = await createNodes( t.ctx.aztecNodeConfig, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, @@ -72,6 +73,7 @@ describe('e2e_p2p_rediscovery', () => { const newNode = await createNode( t.ctx.aztecNodeConfig, + t.ctx.dateProvider, i + 1 + BOOT_NODE_UDP_PORT, undefined, i, diff --git a/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts b/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts index 10b12d4da59..be5475755c0 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts @@ -65,6 +65,7 @@ describe('e2e_p2p_reex', () => { nodes = await createNodes( t.ctx.aztecNodeConfig, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/reqresp.test.ts b/yarn-project/end-to-end/src/e2e_p2p/reqresp.test.ts index d9be627e861..fed938743d4 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/reqresp.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/reqresp.test.ts @@ -65,6 +65,7 @@ describe('e2e_p2p_reqresp_tx', () => { t.logger.info('Creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts b/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts index f7ef6b05195..9499bce2eef 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts @@ -130,6 +130,7 @@ describe('e2e_p2p_governance_proposer', () => { t.logger.info('Creating nodes'); nodes = await createNodes( { ...t.ctx.aztecNodeConfig, governanceProposerPayload: newPayloadAddress }, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts 
index b69ae1c69e0..1d31d41ab6c 100644 --- a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts +++ b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts @@ -4,6 +4,7 @@ import { type AztecNodeConfig, AztecNodeService } from '@aztec/aztec-node'; import { type SentTx, createLogger } from '@aztec/aztec.js'; import { type AztecAddress } from '@aztec/circuits.js'; +import { type DateProvider } from '@aztec/foundation/timer'; import { type PXEService } from '@aztec/pxe'; import getPort from 'get-port'; @@ -34,6 +35,7 @@ export function generatePrivateKeys(startIndex: number, numberOfKeys: number): ` export function createNodes( config: AztecNodeConfig, + dateProvider: DateProvider, bootstrapNodeEnr: string, numNodes: number, bootNodePort: number, @@ -46,7 +48,7 @@ export function createNodes( const port = bootNodePort + i + 1; const dataDir = dataDirectory ? `${dataDirectory}-${i}` : undefined; - const nodePromise = createNode(config, port, bootstrapNodeEnr, i, dataDir, metricsPort); + const nodePromise = createNode(config, dateProvider, port, bootstrapNodeEnr, i, dataDir, metricsPort); nodePromises.push(nodePromise); } return Promise.all(nodePromises); @@ -55,6 +57,7 @@ export function createNodes( // creates a P2P enabled instance of Aztec Node Service export async function createNode( config: AztecNodeConfig, + dateProvider: DateProvider, tcpPort: number, bootstrapNode: string | undefined, accountIndex: number, @@ -68,6 +71,7 @@ export async function createNode( return await AztecNodeService.createAndSync(validatorConfig, { telemetry: telemetryClient, logger: createLogger(`node:${tcpPort}`), + dateProvider, }); } diff --git a/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts b/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts index 6743a7ef2c4..5581397e60c 100644 --- a/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts +++ b/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts @@ -20,11 +20,11 @@ import { startAnvil } from '@aztec/ethereum/test'; import { asyncMap } from '@aztec/foundation/async-map'; import { createLogger } from '@aztec/foundation/log'; import { resolver, reviver } from '@aztec/foundation/serialize'; +import { TestDateProvider } from '@aztec/foundation/timer'; import { type ProverNode } from '@aztec/prover-node'; import { type PXEService, createPXEService, getPXEServiceConfig } from '@aztec/pxe'; import { createAndStartTelemetryClient, getConfigEnvVars as getTelemetryConfig } from '@aztec/telemetry-client/start'; -import { type InstalledClock, install } from '@sinonjs/fake-timers'; import { type Anvil } from '@viem/anvil'; import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs'; import { copySync, removeSync } from 'fs-extra/esm'; @@ -50,7 +50,7 @@ export type SubsystemsContext = { proverNode?: ProverNode; watcher: AnvilTestWatcher; cheatCodes: CheatCodes; - timer: InstalledClock; + dateProvider: TestDateProvider; }; type SnapshotEntry = { @@ -250,7 +250,6 @@ async function teardown(context: SubsystemsContext | undefined) { await context.acvmConfig?.cleanup(); await context.anvil.stop(); await context.watcher.stop(); - context.timer?.uninstall(); } catch (err) { getLogger().error('Error during teardown', err); } @@ -272,9 +271,6 @@ async function setupFromFresh( ): Promise { logger.verbose(`Initializing state...`); - // Use sinonjs fake timers - const timer = install({ shouldAdvanceTime: true, advanceTimeDelta: 20, toFake: ['Date'] }); - // Fetch the AztecNode config. 
// TODO: For some reason this is currently the union of a bunch of subsystems. That needs fixing. const aztecNodeConfig: AztecNodeConfig & SetupOptions = { ...getConfigEnvVars(), ...opts }; @@ -358,7 +354,8 @@ async function setupFromFresh( const telemetry = await getEndToEndTestTelemetryClient(opts.metricsPort); logger.verbose('Creating and synching an aztec node...'); - const aztecNode = await AztecNodeService.createAndSync(aztecNodeConfig, { telemetry }); + const dateProvider = new TestDateProvider(); + const aztecNode = await AztecNodeService.createAndSync(aztecNodeConfig, { telemetry, dateProvider }); let proverNode: ProverNode | undefined = undefined; if (opts.startProverNode) { @@ -392,7 +389,7 @@ async function setupFromFresh( proverNode, watcher, cheatCodes, - timer, + dateProvider, }; } @@ -402,9 +399,6 @@ async function setupFromFresh( async function setupFromState(statePath: string, logger: Logger): Promise { logger.verbose(`Initializing with saved state at ${statePath}...`); - // TODO: make one function - const timer = install({ shouldAdvanceTime: true, advanceTimeDelta: 20, toFake: ['Date'] }); - // TODO: For some reason this is currently the union of a bunch of subsystems. That needs fixing. const aztecNodeConfig: AztecNodeConfig & SetupOptions = JSON.parse( readFileSync(`${statePath}/aztec_node_config.json`, 'utf-8'), @@ -445,7 +439,8 @@ async function setupFromState(statePath: string, logger: Logger): Promise Promise; }; @@ -415,7 +419,8 @@ export async function setup( const telemetry = await telemetryPromise; const publisher = new TestL1Publisher(config, telemetry); - const aztecNode = await AztecNodeService.createAndSync(config, { telemetry, publisher }); + const dateProvider = new TestDateProvider(); + const aztecNode = await AztecNodeService.createAndSync(config, { telemetry, publisher, dateProvider }); const sequencer = aztecNode.getSequencer(); let proverNode: ProverNode | undefined = undefined; @@ -466,6 +471,7 @@ export async function setup( cheatCodes, sequencer, watcher, + dateProvider, teardown, }; } diff --git a/yarn-project/epoch-cache/src/epoch_cache.ts b/yarn-project/epoch-cache/src/epoch_cache.ts index b0c019c6b63..1cbe315c2bb 100644 --- a/yarn-project/epoch-cache/src/epoch_cache.ts +++ b/yarn-project/epoch-cache/src/epoch_cache.ts @@ -7,6 +7,7 @@ import { import { RollupContract, createEthereumChain } from '@aztec/ethereum'; import { EthAddress } from '@aztec/foundation/eth-address'; import { type Logger, createLogger } from '@aztec/foundation/log'; +import { DateProvider } from '@aztec/foundation/timer'; import { EventEmitter } from 'node:events'; import { createPublicClient, encodeAbiParameters, http, keccak256 } from 'viem'; @@ -40,6 +41,7 @@ export class EpochCache extends EventEmitter<{ committeeChanged: [EthAddress[], initialValidators: EthAddress[] = [], initialSampleSeed: bigint = 0n, private readonly l1constants: L1RollupConstants = EmptyL1RollupConstants, + private readonly dateProvider: DateProvider = new DateProvider(), ) { super(); this.committee = initialValidators; @@ -47,10 +49,14 @@ export class EpochCache extends EventEmitter<{ committeeChanged: [EthAddress[], this.log.debug(`Initialized EpochCache with constants and validators`, { l1constants, initialValidators }); - this.cachedEpoch = getEpochNumberAtTimestamp(BigInt(Math.floor(Date.now() / 1000)), this.l1constants); + this.cachedEpoch = getEpochNumberAtTimestamp(this.nowInSeconds(), this.l1constants); } - static async create(rollupAddress: EthAddress, config?: EpochCacheConfig) { + 
static async create( + rollupAddress: EthAddress, + config?: EpochCacheConfig, + deps: { dateProvider?: DateProvider } = {}, + ) { config = config ?? getEpochCacheConfigEnvVars(); const chain = createEthereumChain(config.l1RpcUrl, config.l1ChainId); @@ -81,16 +87,20 @@ export class EpochCache extends EventEmitter<{ committeeChanged: [EthAddress[], initialValidators.map(v => EthAddress.fromString(v)), sampleSeed, l1RollupConstants, + deps.dateProvider, ); } + private nowInSeconds(): bigint { + return BigInt(Math.floor(this.dateProvider.now() / 1000)); + } + getEpochAndSlotNow(): EpochAndSlot { - const now = BigInt(Math.floor(Date.now() / 1000)); - return this.getEpochAndSlotAtTimestamp(now); + return this.getEpochAndSlotAtTimestamp(this.nowInSeconds()); } getEpochAndSlotInNextSlot(): EpochAndSlot { - const nextSlotTs = BigInt(Math.floor(Date.now() / 1000) + this.l1constants.slotDuration); + const nextSlotTs = this.nowInSeconds() + BigInt(this.l1constants.slotDuration); return this.getEpochAndSlotAtTimestamp(nextSlotTs); } diff --git a/yarn-project/foundation/src/retry/index.ts b/yarn-project/foundation/src/retry/index.ts index bec54c65d7d..ce05747f7da 100644 --- a/yarn-project/foundation/src/retry/index.ts +++ b/yarn-project/foundation/src/retry/index.ts @@ -64,7 +64,7 @@ export async function retry( throw err; } log.verbose(`${name} failed. Will retry in ${s}s...`); - !failSilently && log.error(err); + !failSilently && log.error(`Error while retrying ${name}`, err); await sleep(s * 1000); continue; } diff --git a/yarn-project/foundation/src/timer/date.test.ts b/yarn-project/foundation/src/timer/date.test.ts new file mode 100644 index 00000000000..55aeac96d55 --- /dev/null +++ b/yarn-project/foundation/src/timer/date.test.ts @@ -0,0 +1,33 @@ +import { sleep } from '../sleep/index.js'; +import { TestDateProvider } from './date.js'; + +describe('TestDateProvider', () => { + let dateProvider: TestDateProvider; + beforeEach(() => { + dateProvider = new TestDateProvider(); + }); + + it('should return the current datetime', () => { + const currentTime = Date.now(); + const result = dateProvider.now(); + expect(result).toBeGreaterThanOrEqual(currentTime); + expect(result).toBeLessThan(currentTime + 100); + }); + + it('should return the overridden datetime', () => { + const overriddenTime = Date.now() + 1000; + dateProvider.setTime(overriddenTime); + const result = dateProvider.now(); + expect(result).toBeGreaterThanOrEqual(overriddenTime); + expect(result).toBeLessThan(overriddenTime + 100); + }); + + it('should keep ticking after overriding', async () => { + const overriddenTime = Date.now() + 1000; + dateProvider.setTime(overriddenTime); + await sleep(510); + const result = dateProvider.now(); + expect(result).toBeGreaterThanOrEqual(overriddenTime + 500); + expect(result).toBeLessThan(overriddenTime + 600); + }); +}); diff --git a/yarn-project/foundation/src/timer/date.ts b/yarn-project/foundation/src/timer/date.ts new file mode 100644 index 00000000000..f1ed1ee4361 --- /dev/null +++ b/yarn-project/foundation/src/timer/date.ts @@ -0,0 +1,24 @@ +import { createLogger } from '../log/pino-logger.js'; + +/** Returns current datetime. */ +export class DateProvider { + public now(): number { + return Date.now(); + } +} + +/** Returns the current datetime and allows overriding it.
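+ * Unlike the sinon fake timers removed in this change, it never patches the global Date; tests opt in by injecting a DateProvider.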
*/ +export class TestDateProvider implements DateProvider { + private offset = 0; + + constructor(private readonly logger = createLogger('foundation:test-date-provider')) {} + + public now(): number { + return Date.now() + this.offset; + } + + public setTime(timeMs: number) { + this.offset = timeMs - Date.now(); + this.logger.warn(`Time set to ${timeMs}`); + } +} diff --git a/yarn-project/foundation/src/timer/index.ts b/yarn-project/foundation/src/timer/index.ts index 972248c8f41..afb200bb32f 100644 --- a/yarn-project/foundation/src/timer/index.ts +++ b/yarn-project/foundation/src/timer/index.ts @@ -1,3 +1,4 @@ export { TimeoutTask, executeTimeoutWithCustomError } from './timeout.js'; export { Timer } from './timer.js'; export { elapsed, elapsedSync } from './elapsed.js'; +export * from './date.js'; diff --git a/yarn-project/l1-artifacts/scripts/generate-artifacts.sh b/yarn-project/l1-artifacts/scripts/generate-artifacts.sh index 3b78a796f50..896467dfd05 100755 --- a/yarn-project/l1-artifacts/scripts/generate-artifacts.sh +++ b/yarn-project/l1-artifacts/scripts/generate-artifacts.sh @@ -32,6 +32,13 @@ CONTRACTS=( "l1-contracts:ExtRollupLib" ) +# Read the error ABIs once and store them in the COMBINED_ERRORS variable +COMBINED_ERRORS=$(jq -s ' + .[0].abi + .[1].abi | + unique_by({type: .type, name: .name}) +' \ + ../../l1-contracts/out/Errors.sol/Errors.json \ + ../../l1-contracts/out/libraries/Errors.sol/Errors.json) # create target dir if it doesn't exist mkdir -p "$target_dir"; @@ -44,7 +51,14 @@ for E in "${CONTRACTS[@]}"; do CONTRACT_NAME="${ARR[1]}"; echo -ne "/**\n * $CONTRACT_NAME ABI.\n */\nexport const ${CONTRACT_NAME}Abi = " > "$target_dir/${CONTRACT_NAME}Abi.ts"; + + # Merge contract abi and errors abi while removing duplicates based on both type and name + # Merging the errors into every contract ABI is not the cleanest approach, but it does the job. + jq -j --argjson errors "$COMBINED_ERRORS" ' + .abi + $errors | + unique_by({type: .type, name: .name}) + ' ../../$ROOT/out/$CONTRACT_NAME.sol/$CONTRACT_NAME.json >> "$target_dir/${CONTRACT_NAME}Abi.ts"; + echo " as const;" >> "$target_dir/${CONTRACT_NAME}Abi.ts"; echo -ne "/**\n * $CONTRACT_NAME bytecode.\n */\nexport const ${CONTRACT_NAME}Bytecode = \"" > "$target_dir/${CONTRACT_NAME}Bytecode.ts"; diff --git a/yarn-project/p2p/src/client/index.ts b/yarn-project/p2p/src/client/index.ts index 509e5d51614..3e3d5de5ca3 100644 --- a/yarn-project/p2p/src/client/index.ts +++ b/yarn-project/p2p/src/client/index.ts @@ -1,4 +1,9 @@ -import type { ClientProtocolCircuitVerifier, L2BlockSource, WorldStateSynchronizer } from '@aztec/circuit-types'; +import { + type ClientProtocolCircuitVerifier, + type L2BlockSource, + P2PClientType, + type WorldStateSynchronizer, +} from '@aztec/circuit-types'; import { createLogger } from '@aztec/foundation/log'; import { type AztecKVStore } from '@aztec/kv-store'; import { type DataStoreConfig } from '@aztec/kv-store/config'; @@ -21,27 +26,35 @@ import { configureP2PClientAddresses, createLibP2PPeerIdFromPrivateKey, getPeerI export * from './p2p_client.js'; -export const createP2PClient = async ( +type P2PClientDeps<T extends P2PClientType> = { + txPool?: TxPool; + store?: AztecKVStore; + attestationPool?: T extends P2PClientType.Full ? 
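+ // attestations are only pooled on full clients; for a prover client this resolves to undefined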
AttestationPool : undefined; + epochProofQuotePool?: EpochProofQuotePool; +}; + +export const createP2PClient = async ( + clientType: T, _config: P2PConfig & DataStoreConfig, l2BlockSource: L2BlockSource, proofVerifier: ClientProtocolCircuitVerifier, worldStateSynchronizer: WorldStateSynchronizer, telemetry: TelemetryClient = new NoopTelemetryClient(), - deps: { - txPool?: TxPool; - store?: AztecKVStore; - attestationPool?: AttestationPool; - epochProofQuotePool?: EpochProofQuotePool; - } = {}, + deps: P2PClientDeps = {}, ) => { let config = { ..._config }; const logger = createLogger('p2p'); const store = deps.store ?? (await createStore('p2p', config, createLogger('p2p:lmdb'))); - const mempools: MemPools = { + const mempools: MemPools = { txPool: deps.txPool ?? new AztecKVTxPool(store, telemetry), - attestationPool: deps.attestationPool ?? new InMemoryAttestationPool(telemetry), epochProofQuotePool: deps.epochProofQuotePool ?? new MemoryEpochProofQuotePool(telemetry), + attestationPool: + clientType === P2PClientType.Full + ? ((deps.attestationPool ?? new InMemoryAttestationPool(telemetry)) as T extends P2PClientType.Full + ? AttestationPool + : undefined) + : undefined, }; let p2pService; @@ -55,7 +68,8 @@ export const createP2PClient = async ( const peerId = await createLibP2PPeerIdFromPrivateKey(peerIdPrivateKey); const discoveryService = new DiscV5Service(peerId, config, telemetry); - p2pService = await LibP2PService.new( + p2pService = await LibP2PService.new( + clientType, config, discoveryService, peerId, @@ -70,5 +84,13 @@ export const createP2PClient = async ( logger.verbose('P2P is disabled. Using dummy P2P service'); p2pService = new DummyP2PService(); } - return new P2PClient(store, l2BlockSource, mempools, p2pService, config.keepProvenTxsInPoolFor, telemetry); + return new P2PClient( + clientType, + store, + l2BlockSource, + mempools, + p2pService, + config.keepProvenTxsInPoolFor, + telemetry, + ); }; diff --git a/yarn-project/p2p/src/client/p2p_client.test.ts b/yarn-project/p2p/src/client/p2p_client.test.ts index 5d926dec481..577a9228bcd 100644 --- a/yarn-project/p2p/src/client/p2p_client.test.ts +++ b/yarn-project/p2p/src/client/p2p_client.test.ts @@ -1,5 +1,5 @@ import { MockL2BlockSource } from '@aztec/archiver/test'; -import { L2Block, mockEpochProofQuote, mockTx } from '@aztec/circuit-types'; +import { L2Block, P2PClientType, mockEpochProofQuote, mockTx } from '@aztec/circuit-types'; import { Fr } from '@aztec/circuits.js'; import { retryUntil } from '@aztec/foundation/retry'; import { sleep } from '@aztec/foundation/sleep'; @@ -49,7 +49,7 @@ describe('In-Memory P2P Client', () => { }; kvStore = openTmpStore(); - client = new P2PClient(kvStore, blockSource, mempools, p2pService, 0); + client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 0); }); const advanceToProvenBlock = async (getProvenBlockNumber: number, provenEpochNumber = getProvenBlockNumber) => { @@ -119,7 +119,7 @@ describe('In-Memory P2P Client', () => { await client.start(); await client.stop(); - const client2 = new P2PClient(kvStore, blockSource, mempools, p2pService, 0); + const client2 = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 0); expect(client2.getSyncedLatestBlockNum()).toEqual(client.getSyncedLatestBlockNum()); }); @@ -134,7 +134,7 @@ describe('In-Memory P2P Client', () => { }); it('deletes txs after waiting the set number of blocks', async () => { - client = new P2PClient(kvStore, blockSource, mempools, p2pService, 10); + 
client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 10); blockSource.setProvenBlockNumber(0); await client.start(); expect(txPool.deleteTxs).not.toHaveBeenCalled(); @@ -151,7 +151,7 @@ }); it('stores and returns epoch proof quotes', async () => { - client = new P2PClient(kvStore, blockSource, mempools, p2pService, 0); + client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 0); blockSource.setProvenEpochNumber(2); await client.start(); @@ -182,7 +182,7 @@ }); it('deletes expired proof quotes', async () => { - client = new P2PClient(kvStore, blockSource, mempools, p2pService, 0); + client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 0); blockSource.setProvenEpochNumber(1); blockSource.setProvenBlockNumber(1); @@ -245,7 +245,7 @@ }); it('deletes txs created from a pruned block', async () => { - client = new P2PClient(kvStore, blockSource, mempools, p2pService, 10); + client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 10); blockSource.setProvenBlockNumber(0); await client.start(); @@ -267,7 +267,7 @@ }); it('moves mined and valid txs back to the pending set', async () => { - client = new P2PClient(kvStore, blockSource, mempools, p2pService, 10); + client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService, 10); blockSource.setProvenBlockNumber(0); await client.start(); diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index eda78826d7a..d138e1e6ceb 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -8,6 +8,7 @@ import { type L2BlockStreamEvent, type L2Tips, type P2PApi, + type P2PClientType, type PeerInfo, type Tx, type TxHash, @@ -61,7 +62,7 @@ export interface P2PSyncState { /** * Interface of a P2P client. **/ -export interface P2P extends P2PApi { +export type P2P<T extends P2PClientType = P2PClientType.Full> = P2PApi<T> & { /** * Broadcasts a block proposal to other peers. * @@ -171,12 +172,15 @@ export interface P2P extends P2PApi { /** Identifies a p2p client. */ isP2PClient(): true; -} +}; /** * The P2P client implementation. */ -export class P2PClient extends WithTracer implements P2P { +export class P2PClient<T extends P2PClientType = P2PClientType.Full> + extends WithTracer + implements P2P, P2P<T> +{ /** Property that indicates whether the client is running. */ private stopping = false; @@ -194,7 +198,7 @@ export class P2PClient extends WithTracer implements P2P { private synchedProvenBlockNumber: AztecSingleton<number>; private txPool: TxPool; - private attestationPool: AttestationPool; + private attestationPool: T extends P2PClientType.Full ? AttestationPool : undefined; private epochProofQuotePool: EpochProofQuotePool; /** How many slots to keep attestations for. */ @@ -212,9 +216,10 @@ export class P2PClient extends WithTracer implements P2P { * @param log - A logger.
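+ * @param clientType - Full clients keep an attestation pool and subscribe to every gossip topic; prover clients skip attestations.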
*/ constructor( + clientType: T, store: AztecKVStore, private l2BlockSource: L2BlockSource, - mempools: MemPools, + mempools: MemPools, private p2pService: P2PService, private keepProvenTxsFor: number, telemetry: TelemetryClient = new NoopTelemetryClient(), @@ -238,8 +243,8 @@ export class P2PClient extends WithTracer implements P2P { this.synchedProvenBlockNumber = store.openSingleton('p2p_pool_last_proven_l2_block'); this.txPool = mempools.txPool; - this.attestationPool = mempools.attestationPool; this.epochProofQuotePool = mempools.epochProofQuotePool; + this.attestationPool = mempools.attestationPool!; } public isP2PClient(): true { @@ -406,7 +411,7 @@ export class P2PClient extends WithTracer implements P2P { } public getAttestationsForSlot(slot: bigint, proposalId: string): Promise { - return Promise.resolve(this.attestationPool.getAttestationsForSlot(slot, proposalId)); + return Promise.resolve(this.attestationPool?.getAttestationsForSlot(slot, proposalId) ?? []); } // REVIEW: https://github.com/AztecProtocol/aztec-packages/issues/7963 @@ -651,7 +656,7 @@ export class P2PClient extends WithTracer implements P2P { // We delete attestations older than the last block slot minus the number of slots we want to keep in the pool. const lastBlockSlotMinusKeepAttestationsInPoolFor = lastBlockSlot - BigInt(this.keepAttestationsInPoolFor); if (lastBlockSlotMinusKeepAttestationsInPoolFor >= BigInt(INITIAL_L2_BLOCK_NUM)) { - await this.attestationPool.deleteAttestationsOlderThan(lastBlockSlotMinusKeepAttestationsInPoolFor); + await this.attestationPool?.deleteAttestationsOlderThan(lastBlockSlotMinusKeepAttestationsInPoolFor); } await this.synchedProvenBlockNumber.set(lastBlockNum); diff --git a/yarn-project/p2p/src/errors/reqresp.error.ts b/yarn-project/p2p/src/errors/reqresp.error.ts index a31973d3e67..21749b7473d 100644 --- a/yarn-project/p2p/src/errors/reqresp.error.ts +++ b/yarn-project/p2p/src/errors/reqresp.error.ts @@ -3,7 +3,7 @@ * This error will be thrown when a request to a specific peer times out. * @category Errors */ -export class IndiviualReqRespTimeoutError extends Error { +export class IndividualReqRespTimeoutError extends Error { constructor() { super(`Request to peer timed out`); } @@ -19,3 +19,17 @@ export class CollectiveReqRespTimeoutError extends Error { super(`Request to all peers timed out`); } } + +/** Invalid response error + * + * This error will be thrown when a response is received that is not valid. + * + * This error does not need to be punished as message validators will handle punishing invalid + * requests + * @category Errors + */ +export class InvalidResponseError extends Error { + constructor() { + super(`Invalid response received`); + } +} diff --git a/yarn-project/p2p/src/mem_pools/interface.ts b/yarn-project/p2p/src/mem_pools/interface.ts index faf38e0638c..4db4c0fa899 100644 --- a/yarn-project/p2p/src/mem_pools/interface.ts +++ b/yarn-project/p2p/src/mem_pools/interface.ts @@ -1,3 +1,5 @@ +import { type P2PClientType } from '@aztec/circuit-types'; + import { type AttestationPool } from './attestation_pool/attestation_pool.js'; import { type EpochProofQuotePool } from './epoch_proof_quote_pool/epoch_proof_quote_pool.js'; import { type TxPool } from './tx_pool/tx_pool.js'; @@ -5,8 +7,8 @@ import { type TxPool } from './tx_pool/tx_pool.js'; /** * A interface the combines all mempools */ -export interface MemPools { +export type MemPools = { txPool: TxPool; - attestationPool: AttestationPool; + attestationPool?: T extends P2PClientType.Full ? 
AttestationPool : undefined; epochProofQuotePool: EpochProofQuotePool; -} +}; diff --git a/yarn-project/p2p/src/mocks/index.ts b/yarn-project/p2p/src/mocks/index.ts index d0524378806..bee6dc519fa 100644 --- a/yarn-project/p2p/src/mocks/index.ts +++ b/yarn-project/p2p/src/mocks/index.ts @@ -1,6 +1,7 @@ import { type ClientProtocolCircuitVerifier, type L2BlockSource, + type P2PClientType, type Tx, type WorldStateSynchronizer, } from '@aztec/circuit-types'; @@ -95,11 +96,12 @@ export async function createLibp2pNode( * * */ -export async function createTestLibP2PService( +export async function createTestLibP2PService( + clientType: T, boostrapAddrs: string[] = [], l2BlockSource: L2BlockSource, worldStateSynchronizer: WorldStateSynchronizer, - mempools: MemPools, + mempools: MemPools, telemetry: TelemetryClient, port: number = 0, peerId?: PeerId, @@ -123,7 +125,8 @@ export async function createTestLibP2PService( // No bootstrap nodes provided as the libp2p service will register them in the constructor const p2pNode = await createLibp2pNode([], peerId, port, /*enable gossip */ true, /**start */ false); - return new LibP2PService( + return new LibP2PService( + clientType, config, p2pNode as PubSubLibp2p, discoveryService, diff --git a/yarn-project/p2p/src/service/encoding.ts b/yarn-project/p2p/src/service/encoding.ts index 0713b7e8a26..3d9e41f7db3 100644 --- a/yarn-project/p2p/src/service/encoding.ts +++ b/yarn-project/p2p/src/service/encoding.ts @@ -49,13 +49,31 @@ export function getMsgIdFn(message: Message) { return sha256(Buffer.concat(vec)).subarray(0, 20); } +/** + * Snappy transform for libp2p gossipsub + */ export class SnappyTransform implements DataTransform { + // Topic string included to satisfy DataTransform interface inboundTransform(_topicStr: string, data: Uint8Array): Uint8Array { - const uncompressed = Buffer.from(uncompressSync(Buffer.from(data), { asBuffer: true })); - return new Uint8Array(uncompressed); + return this.inboundTransformNoTopic(Buffer.from(data)); + } + + public inboundTransformNoTopic(data: Buffer): Buffer { + if (data.length === 0) { + return data; + } + return Buffer.from(uncompressSync(data, { asBuffer: true })); } + // Topic string included to satisfy DataTransform interface outboundTransform(_topicStr: string, data: Uint8Array): Uint8Array { - return new Uint8Array(compressSync(Buffer.from(data))); + return this.outboundTransformNoTopic(Buffer.from(data)); + } + + public outboundTransformNoTopic(data: Buffer): Buffer { + if (data.length === 0) { + return data; + } + return Buffer.from(compressSync(data)); } } diff --git a/yarn-project/p2p/src/service/libp2p_service.ts b/yarn-project/p2p/src/service/libp2p_service.ts index 393bae440ec..a95d1368064 100644 --- a/yarn-project/p2p/src/service/libp2p_service.ts +++ b/yarn-project/p2p/src/service/libp2p_service.ts @@ -8,13 +8,14 @@ import { MerkleTreeId, type PeerInfo, type RawGossipMessage, - TopicType, TopicTypeMap, Tx, TxHash, type WorldStateSynchronizer, + getTopicTypeForClientType, metricsTopicStrToLabels, } from '@aztec/circuit-types'; +import { P2PClientType } from '@aztec/circuit-types'; import { Fr } from '@aztec/circuits.js'; import { createLogger } from '@aztec/foundation/log'; import { SerialQueue } from '@aztec/foundation/queue'; @@ -64,7 +65,7 @@ import type { P2PService, PeerDiscoveryService } from './service.js'; /** * Lib P2P implementation of the P2PService interface. 
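+ * The service is generic over the client type: start() subscribes only to the topics returned by getTopicTypeForClientType.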
*/ -export class LibP2PService extends WithTracer implements P2PService { +export class LibP2PService extends WithTracer implements P2PService { private jobQueue: SerialQueue = new SerialQueue(); private peerManager: PeerManager; private discoveryRunningPromise?: RunningPromise; @@ -80,10 +81,11 @@ export class LibP2PService extends WithTracer implements P2PService { private blockReceivedCallback: (block: BlockProposal) => Promise; constructor( + private clientType: T, private config: P2PConfig, private node: PubSubLibp2p, private peerDiscoveryService: PeerDiscoveryService, - private mempools: MemPools, + private mempools: MemPools, private l2BlockSource: L2BlockSource, private proofVerifier: ClientProtocolCircuitVerifier, private worldStateSynchronizer: WorldStateSynchronizer, @@ -131,7 +133,7 @@ export class LibP2PService extends WithTracer implements P2PService { await this.node.start(); // Subscribe to standard GossipSub topics by default - for (const topic in TopicType) { + for (const topic of getTopicTypeForClientType(this.clientType)) { this.subscribeToTopic(TopicTypeMap[topic].p2pTopic); } @@ -188,11 +190,12 @@ export class LibP2PService extends WithTracer implements P2PService { * @param txPool - The transaction pool to be accessed by the service. * @returns The new service. */ - public static async new( + public static async new( + clientType: T, config: P2PConfig, peerDiscoveryService: PeerDiscoveryService, peerId: PeerId, - mempools: MemPools, + mempools: MemPools, l2BlockSource: L2BlockSource, proofVerifier: ClientProtocolCircuitVerifier, worldStateSynchronizer: WorldStateSynchronizer, @@ -301,6 +304,7 @@ export class LibP2PService extends WithTracer implements P2PService { }; return new LibP2PService( + clientType, config, node, peerDiscoveryService, @@ -383,7 +387,7 @@ export class LibP2PService extends WithTracer implements P2PService { const tx = Tx.fromBuffer(Buffer.from(message.data)); await this.processTxFromPeer(tx, peerId); } - if (message.topic === BlockAttestation.p2pTopic) { + if (message.topic === BlockAttestation.p2pTopic && this.clientType === P2PClientType.Full) { const attestation = BlockAttestation.fromBuffer(Buffer.from(message.data)); await this.processAttestationFromPeer(attestation); } @@ -412,7 +416,7 @@ export class LibP2PService extends WithTracer implements P2PService { })) private async processAttestationFromPeer(attestation: BlockAttestation): Promise { this.logger.debug(`Received attestation ${attestation.p2pMessageIdentifier()} from external peer.`); - await this.mempools.attestationPool.addAttestations([attestation]); + await this.mempools.attestationPool!.addAttestations([attestation]); } /**Process block from peer diff --git a/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts b/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts index 739da0d5a09..f6483ba9563 100644 --- a/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts +++ b/yarn-project/p2p/src/service/reqresp/reqresp.integration.test.ts @@ -1,6 +1,11 @@ // An integration test for the p2p client to test req resp protocols import { MockL2BlockSource } from '@aztec/archiver/test'; -import { type ClientProtocolCircuitVerifier, type WorldStateSynchronizer, mockTx } from '@aztec/circuit-types'; +import { + type ClientProtocolCircuitVerifier, + P2PClientType, + type WorldStateSynchronizer, + mockTx, +} from '@aztec/circuit-types'; import { createLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; import { type 
AztecKVStore } from '@aztec/kv-store'; @@ -148,7 +153,15 @@ describe('Req Resp p2p client integration', () => { epochProofQuotePool: epochProofQuotePool as unknown as EpochProofQuotePool, store: kvStore, }; - const client = await createP2PClient(config, l2BlockSource, proofVerifier, worldState, undefined, deps); + const client = await createP2PClient( + P2PClientType.Full, + config, + l2BlockSource, + proofVerifier, + worldState, + undefined, + deps, + ); await client.start(); clients.push(client); diff --git a/yarn-project/p2p/src/service/reqresp/reqresp.test.ts b/yarn-project/p2p/src/service/reqresp/reqresp.test.ts index f7091c2d5eb..0caeda85991 100644 --- a/yarn-project/p2p/src/service/reqresp/reqresp.test.ts +++ b/yarn-project/p2p/src/service/reqresp/reqresp.test.ts @@ -4,7 +4,7 @@ import { sleep } from '@aztec/foundation/sleep'; import { describe, expect, it, jest } from '@jest/globals'; import { type MockProxy, mock } from 'jest-mock-extended'; -import { CollectiveReqRespTimeoutError } from '../../errors/reqresp.error.js'; +import { CollectiveReqRespTimeoutError, IndividualReqRespTimeoutError } from '../../errors/reqresp.error.js'; import { MOCK_SUB_PROTOCOL_HANDLERS, MOCK_SUB_PROTOCOL_VALIDATORS, @@ -86,9 +86,27 @@ describe('ReqResp', () => { void nodes[1].req.stop(); void nodes[2].req.stop(); + const loggerSpy = jest.spyOn((nodes[0].req as any).logger, 'debug'); + // send from the first node const res = await nodes[0].req.sendRequest(PING_PROTOCOL, PING_REQUEST); + // We expect the logger to have been called twice with the peer ids citing the inability to connect + expect(loggerSpy).toHaveBeenCalledWith( + expect.stringContaining(`Connection reset: ${nodes[1].p2p.peerId.toString()}`), + { + peerId: nodes[1].p2p.peerId.toString(), + subProtocol: PING_PROTOCOL, + }, + ); + expect(loggerSpy).toHaveBeenCalledWith( + expect.stringContaining(`Connection reset: ${nodes[2].p2p.peerId.toString()}`), + { + peerId: nodes[2].p2p.peerId.toString(), + subProtocol: PING_PROTOCOL, + }, + ); + expect(res?.toBuffer().toString('utf-8')).toEqual('pong'); }); @@ -111,7 +129,7 @@ describe('ReqResp', () => { // Make sure the error message is logged const errorMessage = `Rate limit exceeded for ${PING_PROTOCOL} from ${nodes[0].p2p.peerId.toString()}`; - expect(loggerSpy).toHaveBeenCalledWith(errorMessage); + expect(loggerSpy).toHaveBeenCalledWith(expect.stringContaining(errorMessage)); }); describe('Tx req protocol', () => { @@ -139,6 +157,29 @@ describe('ReqResp', () => { expect(res).toEqual(tx); }); + it('Handle returning empty buffers', async () => { + const tx = mockTx(); + const txHash = tx.getTxHash(); + + const protocolHandlers = MOCK_SUB_PROTOCOL_HANDLERS; + protocolHandlers[TX_REQ_PROTOCOL] = (_message: Buffer): Promise => { + return Promise.resolve(Buffer.alloc(0)); + }; + + nodes = await createNodes(peerManager, 2); + + const spySendRequestToPeer = jest.spyOn(nodes[0].req, 'sendRequestToPeer'); + + await startNodes(nodes, protocolHandlers); + await sleep(500); + await connectToPeers(nodes); + await sleep(500); + + const res = await nodes[0].req.sendRequest(TX_REQ_PROTOCOL, txHash); + expect(spySendRequestToPeer).toHaveBeenCalledTimes(1); + expect(res).toEqual(undefined); + }); + it('Does not crash if tx hash returns undefined', async () => { const tx = mockTx(); const txHash = tx.getTxHash(); @@ -170,7 +211,7 @@ describe('ReqResp', () => { }); // Spy on the logger to make sure the error message is logged - const loggerSpy = jest.spyOn((nodes[0].req as any).logger, 'error'); + const loggerSpy = 
jest.spyOn((nodes[0].req as any).logger, 'debug'); await sleep(500); await connectToPeers(nodes); @@ -183,9 +224,13 @@ describe('ReqResp', () => { // Make sure the error message is logged const peerId = nodes[1].p2p.peerId.toString(); expect(loggerSpy).toHaveBeenCalledWith( - expect.stringMatching(/Error sending request to peer/i), - expect.any(Error), - { peerId, subProtocol: '/aztec/req/tx/0.1.0' }, + `Timeout error: ${ + new IndividualReqRespTimeoutError().message + } | peerId: ${peerId} | subProtocol: ${TX_REQ_PROTOCOL}`, + expect.objectContaining({ + peerId: peerId, + subProtocol: TX_REQ_PROTOCOL, + }), ); // Expect the peer to be penalized for timing out @@ -209,7 +254,7 @@ describe('ReqResp', () => { } // Spy on the logger to make sure the error message is logged - const loggerSpy = jest.spyOn((nodes[0].req as any).logger, 'error'); + const loggerSpy = jest.spyOn((nodes[0].req as any).logger, 'debug'); await sleep(500); await connectToPeers(nodes); diff --git a/yarn-project/p2p/src/service/reqresp/reqresp.ts b/yarn-project/p2p/src/service/reqresp/reqresp.ts index e6794c6fecf..43aa2cfa75d 100644 --- a/yarn-project/p2p/src/service/reqresp/reqresp.ts +++ b/yarn-project/p2p/src/service/reqresp/reqresp.ts @@ -5,10 +5,14 @@ import { executeTimeoutWithCustomError } from '@aztec/foundation/timer'; import { type IncomingStreamData, type PeerId, type Stream } from '@libp2p/interface'; import { pipe } from 'it-pipe'; import { type Libp2p } from 'libp2p'; -import { compressSync, uncompressSync } from 'snappy'; import { type Uint8ArrayList } from 'uint8arraylist'; -import { CollectiveReqRespTimeoutError, IndiviualReqRespTimeoutError } from '../../errors/reqresp.error.js'; +import { + CollectiveReqRespTimeoutError, + IndividualReqRespTimeoutError, + InvalidResponseError, +} from '../../errors/reqresp.error.js'; +import { SnappyTransform } from '../encoding.js'; import { type PeerManager } from '../peer_manager.js'; import { PeerErrorSeverity } from '../peer_scoring.js'; import { type P2PReqRespConfig } from './config.js'; @@ -49,6 +53,8 @@ export class ReqResp { private rateLimiter: RequestResponseRateLimiter; + private snappyTransform: SnappyTransform; + constructor(config: P2PReqRespConfig, protected readonly libp2p: Libp2p, private peerManager: PeerManager) { this.logger = createLogger('p2p:reqresp'); @@ -56,6 +62,7 @@ export class ReqResp { this.individualRequestTimeoutMs = config.individualRequestTimeoutMs; this.rateLimiter = new RequestResponseRateLimiter(peerManager); + this.snappyTransform = new SnappyTransform(); } /** @@ -143,8 +150,7 @@ export class ReqResp { // The response validator handles peer punishment within const isValid = await responseValidator(request, object, peer); if (!isValid) { - this.logger.error(`Invalid response for ${subProtocol} from ${peer.toString()}`); - return undefined; + throw new InvalidResponseError(); } return object; } @@ -159,7 +165,7 @@ export class ReqResp { () => new CollectiveReqRespTimeoutError(), ); } catch (e: any) { - this.logger.error(`${e.message} | subProtocol: ${subProtocol}`); + this.logger.debug(`${e.message} | subProtocol: ${subProtocol}`); return undefined; } } @@ -200,18 +206,14 @@ export class ReqResp { // Open the stream with a timeout const result = await executeTimeoutWithCustomError( - (): Promise => pipe([payload], stream!, this.readMessage), + (): Promise => pipe([payload], stream!, this.readMessage.bind(this)), this.individualRequestTimeoutMs, - () => new IndiviualReqRespTimeoutError(), + () => new 
IndividualReqRespTimeoutError(), ); - await stream.close(); - this.logger.trace(`Stream closed with ${peerId.toString()} for ${subProtocol}`); - return result; } catch (e: any) { - this.logger.error(`Error sending request to peer`, e, { peerId: peerId.toString(), subProtocol }); - this.peerManager.penalizePeer(peerId, PeerErrorSeverity.HighToleranceError); + this.handleResponseError(e, peerId, subProtocol); } finally { if (stream) { try { @@ -224,7 +226,70 @@ export class ReqResp { } } } - return undefined; + } + + /** + * Handle a response error + * + * ReqResp errors are punished differently depending on the severity of the offense + * + * @param e - The error + * @param peerId - The peer id + * @param subProtocol - The sub protocol + * @returns If the error is non-punishable, then undefined is returned, otherwise the peer is penalized + */ + private handleResponseError(e: any, peerId: PeerId, subProtocol: ReqRespSubProtocol): void { + const severity = this.categorizeError(e, peerId, subProtocol); + if (severity) { + this.peerManager.penalizePeer(peerId, severity); + } + } + + /** + * Categorize the error and log it. + */ + private categorizeError(e: any, peerId: PeerId, subProtocol: ReqRespSubProtocol): PeerErrorSeverity | undefined { + // Non-punishable errors + // We do not punish a collective timeout, as the node triggers this interrupt itself, independently of the peer's behaviour + const logTags = { + peerId: peerId.toString(), + subProtocol, + }; + if (e instanceof CollectiveReqRespTimeoutError || e instanceof InvalidResponseError) { + this.logger.debug( + `Non-punishable error: ${e.message} | peerId: ${peerId.toString()} | subProtocol: ${subProtocol}`, + logTags, + ); + return undefined; + } + + // Punishable errors + // Connection reset errors in the networking stack are punished with high tolerance, + // as they just signal an unreliable peer + // We assume that the requesting node has a functioning networking stack.
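+ // (ECONNRESET/EPIPE: the remote closed the socket mid-exchange; ECONNREFUSED: nothing was listening at the peer's address.)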
+ if (e?.code === 'ECONNRESET' || e?.code === 'EPIPE') { + this.logger.debug(`Connection reset: ${peerId.toString()}`, logTags); + return PeerErrorSeverity.HighToleranceError; + } + + if (e?.code === 'ECONNREFUSED') { + this.logger.debug(`Connection refused: ${peerId.toString()}`, logTags); + return PeerErrorSeverity.HighToleranceError; + } + + // Timeout errors are punished with high tolerance, as they can be due to a geographically far away peer or an + // overloaded peer + if (e instanceof IndividualReqRespTimeoutError) { + this.logger.debug( + `Timeout error: ${e.message} | peerId: ${peerId.toString()} | subProtocol: ${subProtocol}`, + logTags, + ); + return PeerErrorSeverity.HighToleranceError; + } + + // Catch-all error + this.logger.error(`Unexpected error sending request to peer`, e, logTags); + return PeerErrorSeverity.HighToleranceError; } /** @@ -235,8 +300,8 @@ for await (const chunk of source) { chunks.push(chunk.subarray()); } - const messageData = chunks.concat(); - return uncompressSync(Buffer.concat(messageData), { asBuffer: true }) as Buffer; + const messageData = Buffer.concat(chunks); + return this.snappyTransform.inboundTransformNoTopic(messageData); } /** @@ -266,6 +331,7 @@ } const handler = this.subProtocolHandlers[protocol]; + const transform = this.snappyTransform; try { await pipe( stream, async function* (source: any) { for await (const chunkList of source) { const msg = Buffer.from(chunkList.subarray()); const response = await handler(msg); - yield new Uint8Array(compressSync(response)); + yield new Uint8Array(transform.outboundTransformNoTopic(response)); } }, stream, diff --git a/yarn-project/prover-node/src/prover-coordination/factory.ts b/yarn-project/prover-node/src/prover-coordination/factory.ts index e8e94f1153a..48194d44c47 100644 --- a/yarn-project/prover-node/src/prover-coordination/factory.ts +++ b/yarn-project/prover-node/src/prover-coordination/factory.ts @@ -1,6 +1,11 @@ import { type ArchiveSource, type Archiver } from '@aztec/archiver'; import { BBCircuitVerifier, TestCircuitVerifier } from '@aztec/bb-prover'; -import { type ProverCoordination, type WorldStateSynchronizer, createAztecNodeClient } from '@aztec/circuit-types'; +import { + P2PClientType, + type ProverCoordination, + type WorldStateSynchronizer, + createAztecNodeClient, +} from '@aztec/circuit-types'; import { createLogger } from '@aztec/foundation/log'; import { type DataStoreConfig } from '@aztec/kv-store/config'; import { createP2PClient } from '@aztec/p2p'; @@ -42,6 +47,7 @@ export async function createProverCoordination( const proofVerifier = config.realProofs ? 
diff --git a/yarn-project/prover-node/src/prover-coordination/factory.ts b/yarn-project/prover-node/src/prover-coordination/factory.ts
index e8e94f1153a..48194d44c47 100644
--- a/yarn-project/prover-node/src/prover-coordination/factory.ts
+++ b/yarn-project/prover-node/src/prover-coordination/factory.ts
@@ -1,6 +1,11 @@
 import { type ArchiveSource, type Archiver } from '@aztec/archiver';
 import { BBCircuitVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
-import { type ProverCoordination, type WorldStateSynchronizer, createAztecNodeClient } from '@aztec/circuit-types';
+import {
+  P2PClientType,
+  type ProverCoordination,
+  type WorldStateSynchronizer,
+  createAztecNodeClient,
+} from '@aztec/circuit-types';
 import { createLogger } from '@aztec/foundation/log';
 import { type DataStoreConfig } from '@aztec/kv-store/config';
 import { createP2PClient } from '@aztec/p2p';
@@ -42,6 +47,7 @@ export async function createProverCoordination(
   const proofVerifier = config.realProofs ? await BBCircuitVerifier.new(config) : new TestCircuitVerifier();

   const p2pClient = await createP2PClient(
+    P2PClientType.Prover,
     config,
     deps.archiver,
     proofVerifier,
diff --git a/yarn-project/prover-node/src/prover-node.test.ts b/yarn-project/prover-node/src/prover-node.test.ts
index 8d09f5174d3..c7b70d6136a 100644
--- a/yarn-project/prover-node/src/prover-node.test.ts
+++ b/yarn-project/prover-node/src/prover-node.test.ts
@@ -7,6 +7,7 @@ import {
   type L2Block,
   type L2BlockSource,
   type MerkleTreeWriteOperations,
+  P2PClientType,
   type ProverCache,
   type ProverCoordination,
   WorldStateRunningState,
@@ -18,13 +19,7 @@ import { Signature } from '@aztec/foundation/eth-signature';
 import { makeBackoff, retry } from '@aztec/foundation/retry';
 import { sleep } from '@aztec/foundation/sleep';
 import { openTmpStore } from '@aztec/kv-store/lmdb';
-import {
-  type BootstrapNode,
-  InMemoryAttestationPool,
-  InMemoryTxPool,
-  MemoryEpochProofQuotePool,
-  P2PClient,
-} from '@aztec/p2p';
+import { type BootstrapNode, InMemoryTxPool, MemoryEpochProofQuotePool, P2PClient } from '@aztec/p2p';
 import { createBootstrapNode, createTestLibP2PService } from '@aztec/p2p/mocks';
 import { type L1Publisher } from '@aztec/sequencer-client';
 import { type PublicProcessorFactory } from '@aztec/simulator';
@@ -299,16 +294,16 @@ describe('prover-node', () => {
   // - The prover node can get the txs it is missing via p2p, or it has them in its mempool
   describe('Using a p2p coordination', () => {
     let bootnode: BootstrapNode;
-    let p2pClient: P2PClient;
-    let otherP2PClient: P2PClient;
+    let p2pClient: P2PClient<P2PClientType.Prover>;
+    let otherP2PClient: P2PClient<P2PClientType.Prover>;

     const createP2PClient = async (bootnodeAddr: string, port: number) => {
       const mempools = {
         txPool: new InMemoryTxPool(telemetryClient),
-        attestationPool: new InMemoryAttestationPool(telemetryClient),
         epochProofQuotePool: new MemoryEpochProofQuotePool(telemetryClient),
       };
       const libp2pService = await createTestLibP2PService(
+        P2PClientType.Prover,
         [bootnodeAddr],
         l2BlockSource,
         worldState,
@@ -317,7 +312,7 @@ describe('prover-node', () => {
         port,
       );
       const kvStore = openTmpStore();
-      return new P2PClient(kvStore, l2BlockSource, mempools, libp2pService, 0);
+      return new P2PClient(P2PClientType.Prover, kvStore, l2BlockSource, mempools, libp2pService, 0);
     };

     beforeEach(async () => {
diff --git a/yarn-project/prover-node/src/prover-node.ts b/yarn-project/prover-node/src/prover-node.ts
index d70ce17bb87..ceabaf00e33 100644
--- a/yarn-project/prover-node/src/prover-node.ts
+++ b/yarn-project/prover-node/src/prover-node.ts
@@ -6,6 +6,7 @@ import {
   type L1ToL2MessageSource,
   type L2Block,
   type L2BlockSource,
+  type P2PClientType,
   type ProverCache,
   type ProverCoordination,
   type ProverNodeApi,
@@ -80,7 +81,7 @@ export class ProverNode implements ClaimsMonitorHandler, EpochMonitorHandler, Pr
   }

   public getP2P() {
-    const asP2PClient = this.coordination as P2P;
+    const asP2PClient = this.coordination as P2P<P2PClientType.Prover>;
     if (typeof asP2PClient.isP2PClient === 'function' && asP2PClient.isP2PClient()) {
       return asP2PClient;
     }
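// Sketch of the client-type threading introduced above: the new first argument to
// createP2PClient / P2PClient selects which mempools a node carries, which is why the
// prover test drops the validator-only attestation pool. The conditional type below is a
// hypothetical illustration of that idea, not the real library types.
import { P2PClientType } from '@aztec/circuit-types';

type ProverMempools = { txPool: unknown; epochProofQuotePool: unknown };
type FullMempools = ProverMempools & { attestationPool: unknown };
type MempoolsFor<T extends P2PClientType> = T extends P2PClientType.Prover ? ProverMempools : FullMempools;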
diff --git a/yarn-project/simulator/src/avm/avm_simulator.test.ts b/yarn-project/simulator/src/avm/avm_simulator.test.ts
index e35b4d2519f..8b6f89fcca5 100644
--- a/yarn-project/simulator/src/avm/avm_simulator.test.ts
+++ b/yarn-project/simulator/src/avm/avm_simulator.test.ts
@@ -8,7 +8,14 @@ import {
   SerializableContractInstance,
 } from '@aztec/circuits.js';
 import { Grumpkin } from '@aztec/circuits.js/barretenberg';
-import { computePublicDataTreeLeafSlot, computeVarArgsHash, siloNullifier } from '@aztec/circuits.js/hash';
+import {
+  computeNoteHashNonce,
+  computePublicDataTreeLeafSlot,
+  computeUniqueNoteHash,
+  computeVarArgsHash,
+  siloNoteHash,
+  siloNullifier,
+} from '@aztec/circuits.js/hash';
 import { makeContractClassPublic, makeContractInstanceFromClassId } from '@aztec/circuits.js/testing';
 import { FunctionSelector } from '@aztec/foundation/abi';
 import { AztecAddress } from '@aztec/foundation/aztec-address';
@@ -66,6 +73,7 @@ import {
   mockGetContractClass,
   mockGetContractInstance,
   mockL1ToL2MessageExists,
+  mockNoteHashCount,
   mockNoteHashExists,
   mockNullifierExists,
   mockStorageRead,
@@ -155,6 +163,7 @@ describe('AVM simulator: transpiled Noir contracts', () => {
     const trace = mock<PublicSideEffectTraceInterface>();
     const nestedTrace = mock<PublicSideEffectTraceInterface>();
+    mockNoteHashCount(trace, 0);
     mockTraceFork(trace, nestedTrace);
     const ephemeralTrees = await AvmEphemeralForest.create(worldStateDB.getMerkleInterface());
     const persistableState = initPersistableStateManager({ worldStateDB, trace, merkleTrees: ephemeralTrees });
@@ -621,13 +630,17 @@ describe('AVM simulator: transpiled Noir contracts', () => {
       const calldata = [value0];
       const context = createContext(calldata);
       const bytecode = getAvmTestContractBytecode('new_note_hash');
+      mockNoteHashCount(trace, 0);
       const results = await new AvmSimulator(context).executeBytecode(bytecode);
       expect(results.reverted).toBe(false);
       expect(results.output).toEqual([]);

       expect(trace.traceNewNoteHash).toHaveBeenCalledTimes(1);
-      expect(trace.traceNewNoteHash).toHaveBeenCalledWith(expect.objectContaining(address), /*noteHash=*/ value0);
+      const siloedNotehash = siloNoteHash(address, value0);
+      const nonce = computeNoteHashNonce(Fr.fromBuffer(context.persistableState.txHash.toBuffer()), 0);
+      const uniqueNoteHash = computeUniqueNoteHash(nonce, siloedNotehash);
+      expect(trace.traceNewNoteHash).toHaveBeenCalledWith(uniqueNoteHash);
     });

     it('Should append a new nullifier correctly', async () => {
diff --git a/yarn-project/simulator/src/avm/fixtures/index.ts b/yarn-project/simulator/src/avm/fixtures/index.ts
index ac84ef7d702..53a259419a8 100644
--- a/yarn-project/simulator/src/avm/fixtures/index.ts
+++ b/yarn-project/simulator/src/avm/fixtures/index.ts
@@ -1,4 +1,4 @@
-import { isNoirCallStackUnresolved } from '@aztec/circuit-types';
+import { TxHash, isNoirCallStackUnresolved } from '@aztec/circuit-types';
 import { GasFees, GlobalVariables, MAX_L2_GAS_PER_TX_PUBLIC_PORTION } from '@aztec/circuits.js';
 import { type FunctionArtifact, FunctionSelector } from '@aztec/foundation/abi';
 import { AztecAddress } from '@aztec/foundation/aztec-address';
@@ -45,6 +45,7 @@ export function initPersistableStateManager(overrides?: {
   nullifiers?: NullifierManager;
   doMerkleOperations?: boolean;
   merkleTrees?: AvmEphemeralForest;
+  txHash?: TxHash;
 }): AvmPersistableStateManager {
   const worldStateDB = overrides?.worldStateDB || mock<WorldStateDB>();
   return new AvmPersistableStateManager(
@@ -54,6 +55,7 @@ export function initPersistableStateManager(overrides?: {
     overrides?.nullifiers || new NullifierManager(worldStateDB),
     overrides?.doMerkleOperations || false,
     overrides?.merkleTrees || mock<AvmEphemeralForest>(),
+    overrides?.txHash || new TxHash(new Fr(27).toBuffer()),
   );
 }
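// The assertions above all follow a single derivation. A sketch of it as a helper
// (the function name is hypothetical; the three hash helpers are the real ones the
// tests import from '@aztec/circuits.js/hash'):
import { computeNoteHashNonce, computeUniqueNoteHash, siloNoteHash } from '@aztec/circuits.js/hash';
import { AztecAddress } from '@aztec/foundation/aztec-address';
import { Fr } from '@aztec/foundation/fields';

function deriveUniqueNoteHash(contract: AztecAddress, rawNoteHash: Fr, txHash: Fr, indexInTx: number): Fr {
  const siloed = siloNoteHash(contract, rawNoteHash); // bind the hash to its emitting contract
  const nonce = computeNoteHashNonce(txHash, indexInTx); // bind it to its position in the tx
  return computeUniqueNoteHash(nonce, siloed); // the leaf actually inserted into the note hash tree
}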
diff --git a/yarn-project/simulator/src/avm/journal/journal.test.ts b/yarn-project/simulator/src/avm/journal/journal.test.ts
index dd50228ab55..9aae7f88889 100644
--- a/yarn-project/simulator/src/avm/journal/journal.test.ts
+++ b/yarn-project/simulator/src/avm/journal/journal.test.ts
@@ -1,5 +1,5 @@
 import { AztecAddress, SerializableContractInstance, computePublicBytecodeCommitment } from '@aztec/circuits.js';
-import { siloNullifier } from '@aztec/circuits.js/hash';
+import { computeNoteHashNonce, computeUniqueNoteHash, siloNoteHash, siloNullifier } from '@aztec/circuits.js/hash';
 import { makeContractClassPublic } from '@aztec/circuits.js/testing';
 import { Fr } from '@aztec/foundation/fields';
@@ -13,6 +13,7 @@ import {
   mockGetContractClass,
   mockGetContractInstance,
   mockL1ToL2MessageExists,
+  mockNoteHashCount,
   mockNoteHashExists,
   mockNullifierExists,
   mockStorageRead,
@@ -80,9 +81,13 @@ describe('journal', () => {
   });

   it('writeNoteHash works', () => {
+    mockNoteHashCount(trace, 1);
     persistableState.writeNoteHash(address, utxo);
     expect(trace.traceNewNoteHash).toHaveBeenCalledTimes(1);
-    expect(trace.traceNewNoteHash).toHaveBeenCalledWith(expect.objectContaining(address), /*noteHash=*/ utxo);
+    const siloedNotehash = siloNoteHash(address, utxo);
+    const nonce = computeNoteHashNonce(Fr.fromBuffer(persistableState.txHash.toBuffer()), 1);
+    const uniqueNoteHash = computeUniqueNoteHash(nonce, siloedNotehash);
+    expect(trace.traceNewNoteHash).toHaveBeenCalledWith(uniqueNoteHash);
   });

   it('checkNullifierExists works for missing nullifiers', async () => {
diff --git a/yarn-project/simulator/src/avm/journal/journal.ts b/yarn-project/simulator/src/avm/journal/journal.ts
index 2d0ab2f7a5f..35d349add3b 100644
--- a/yarn-project/simulator/src/avm/journal/journal.ts
+++ b/yarn-project/simulator/src/avm/journal/journal.ts
@@ -1,4 +1,4 @@
-import { MerkleTreeId } from '@aztec/circuit-types';
+import { MerkleTreeId, type TxHash } from '@aztec/circuit-types';
 import {
   AztecAddress,
   CANONICAL_AUTH_REGISTRY_ADDRESS,
@@ -12,7 +12,13 @@ import {
   ROUTER_ADDRESS,
   SerializableContractInstance,
 } from '@aztec/circuits.js';
-import { computePublicDataTreeLeafSlot, siloNoteHash, siloNullifier } from '@aztec/circuits.js/hash';
+import {
+  computeNoteHashNonce,
+  computePublicDataTreeLeafSlot,
+  computeUniqueNoteHash,
+  siloNoteHash,
+  siloNullifier,
+} from '@aztec/circuits.js/hash';
 import { Fr } from '@aztec/foundation/fields';
 import { jsonStringify } from '@aztec/foundation/json-rpc';
 import { createLogger } from '@aztec/foundation/log';
@@ -55,6 +61,7 @@ export class AvmPersistableStateManager {
     private readonly doMerkleOperations: boolean = false,
     /** Ephemeral forest for merkle tree operations */
     public merkleTrees: AvmEphemeralForest,
+    public readonly txHash: TxHash,
   ) {}

   /**
@@ -65,6 +72,7 @@ export class AvmPersistableStateManager {
     trace: PublicSideEffectTraceInterface,
     pendingSiloedNullifiers: Fr[],
     doMerkleOperations: boolean = false,
+    txHash: TxHash,
   ) {
     const parentNullifiers = NullifierManager.newWithPendingSiloedNullifiers(worldStateDB, pendingSiloedNullifiers);
     const ephemeralForest = await AvmEphemeralForest.create(worldStateDB.getMerkleInterface());
@@ -75,6 +83,7 @@ export class AvmPersistableStateManager {
       /*nullifiers=*/ parentNullifiers.fork(),
       doMerkleOperations,
       ephemeralForest,
+      txHash,
     );
   }

@@ -85,6 +94,7 @@ export class AvmPersistableStateManager {
     worldStateDB: WorldStateDB,
     trace: PublicSideEffectTraceInterface,
     doMerkleOperations: boolean = false,
+    txHash: TxHash,
   ) {
     const ephemeralForest = await AvmEphemeralForest.create(worldStateDB.getMerkleInterface());
     return new AvmPersistableStateManager(
@@ -94,6 +104,7 @@ export class AvmPersistableStateManager {
       /*nullifiers=*/ new NullifierManager(worldStateDB),
       /*doMerkleOperations=*/ doMerkleOperations,
       ephemeralForest,
+      txHash,
     );
   }
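// Every construction path now carries the txHash, including forks, so nonce derivation
// stays consistent across revertible and non-revertible phases. A tiny hypothetical
// mirror of that invariant (not the real class):
class ForkableStateSketch {
  constructor(public readonly txHash: Buffer) {}
  fork(): ForkableStateSketch {
    return new ForkableStateSketch(this.txHash); // children share the parent's immutable txHash
  }
}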
@@ -108,6 +119,7 @@ export class AvmPersistableStateManager {
       this.nullifiers.fork(),
       this.doMerkleOperations,
       this.merkleTrees.fork(),
+      this.txHash,
     );
   }

@@ -290,20 +302,41 @@ export class AvmPersistableStateManager {
   }

   /**
-   * Write a note hash, trace the write.
+   * Write a raw note hash, silo it and make it unique, then trace the write.
    * @param noteHash - the unsiloed note hash to write
    */
   public writeNoteHash(contractAddress: AztecAddress, noteHash: Fr): void {
-    this.log.debug(`noteHashes(${contractAddress}) += @${noteHash}.`);
+    const siloedNoteHash = siloNoteHash(contractAddress, noteHash);
+
+    this.writeSiloedNoteHash(siloedNoteHash);
+  }
+
+  /**
+   * Write a siloed note hash, make it unique, then trace the write.
+   * @param noteHash - the siloed but non-unique note hash to write
+   */
+  public writeSiloedNoteHash(noteHash: Fr): void {
+    const txHash = Fr.fromBuffer(this.txHash.toBuffer());
+    const nonce = computeNoteHashNonce(txHash, this.trace.getNoteHashCount());
+    const uniqueNoteHash = computeUniqueNoteHash(nonce, noteHash);
+
+    this.writeUniqueNoteHash(uniqueNoteHash);
+  }
+
+  /**
+   * Write a unique note hash, trace the write.
+   * @param noteHash - the siloed, unique note hash to write
+   */
+  public writeUniqueNoteHash(noteHash: Fr): void {
+    this.log.debug(`noteHashes += @${noteHash}.`);

     if (this.doMerkleOperations) {
       // Should write a helper for this
       const leafIndex = new Fr(this.merkleTrees.treeMap.get(MerkleTreeId.NOTE_HASH_TREE)!.leafCount);
-      const siloedNoteHash = siloNoteHash(contractAddress, noteHash);
-      const insertionPath = this.merkleTrees.appendNoteHash(siloedNoteHash);
-      this.trace.traceNewNoteHash(contractAddress, noteHash, leafIndex, insertionPath);
+      const insertionPath = this.merkleTrees.appendNoteHash(noteHash);
+      this.trace.traceNewNoteHash(noteHash, leafIndex, insertionPath);
     } else {
-      this.trace.traceNewNoteHash(contractAddress, noteHash);
+      this.trace.traceNewNoteHash(noteHash);
     }
   }
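// The three write entry points above form a funnel: writeNoteHash silos then delegates,
// writeSiloedNoteHash adds the tx-position nonce then delegates, and writeUniqueNoteHash
// appends and traces. A hypothetical dispatch sketch showing which stage each kind of
// input enters at (the interface is narrowed structurally so the snippet stands alone):
import { AztecAddress } from '@aztec/foundation/aztec-address';
import { Fr } from '@aztec/foundation/fields';

interface NoteHashWriter {
  writeNoteHash(contractAddress: AztecAddress, noteHash: Fr): void;
  writeSiloedNoteHash(noteHash: Fr): void;
  writeUniqueNoteHash(noteHash: Fr): void;
}

function writeByStage(sm: NoteHashWriter, contract: AztecAddress, h: Fr, kind: 'raw' | 'siloed' | 'unique') {
  if (kind === 'raw') sm.writeNoteHash(contract, h); // e.g. a raw hash from an AVM opcode
  else if (kind === 'siloed') sm.writeSiloedNoteHash(h); // e.g. revertible note hashes from private
  else sm.writeUniqueNoteHash(h); // e.g. non-revertible note hashes from private
}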
diff --git a/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts b/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts
index f38a335ebd5..af4fd095786 100644
--- a/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts
+++ b/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts
@@ -1,5 +1,5 @@
 import { AztecAddress, Fr } from '@aztec/circuits.js';
-import { siloNullifier } from '@aztec/circuits.js/hash';
+import { computeNoteHashNonce, computeUniqueNoteHash, siloNoteHash, siloNullifier } from '@aztec/circuits.js/hash';

 import { mock } from 'jest-mock-extended';

@@ -10,7 +10,7 @@ import { Field, Uint8, Uint32 } from '../avm_memory_types.js';
 import { InstructionExecutionError, StaticCallAlterationError } from '../errors.js';
 import { initContext, initExecutionEnvironment, initPersistableStateManager } from '../fixtures/index.js';
 import { type AvmPersistableStateManager } from '../journal/journal.js';
-import { mockL1ToL2MessageExists, mockNoteHashExists, mockNullifierExists } from '../test_utils.js';
+import { mockL1ToL2MessageExists, mockNoteHashCount, mockNoteHashExists, mockNullifierExists } from '../test_utils.js';
 import {
   EmitNoteHash,
   EmitNullifier,
@@ -120,10 +120,14 @@ describe('Accrued Substate', () => {
   });

   it('Should append a new note hash correctly', async () => {
+    mockNoteHashCount(trace, 0);
     context.machineState.memory.set(value0Offset, new Field(value0));
     await new EmitNoteHash(/*indirect=*/ 0, /*offset=*/ value0Offset).execute(context);
     expect(trace.traceNewNoteHash).toHaveBeenCalledTimes(1);
-    expect(trace.traceNewNoteHash).toHaveBeenCalledWith(expect.objectContaining(address), /*noteHash=*/ value0);
+    const siloedNotehash = siloNoteHash(address, value0);
+    const nonce = computeNoteHashNonce(Fr.fromBuffer(context.persistableState.txHash.toBuffer()), 0);
+    const uniqueNoteHash = computeUniqueNoteHash(nonce, siloedNotehash);
+    expect(trace.traceNewNoteHash).toHaveBeenCalledWith(uniqueNoteHash);
   });
 });

diff --git a/yarn-project/simulator/src/avm/test_utils.ts b/yarn-project/simulator/src/avm/test_utils.ts
index 48afe3c9e32..27785930567 100644
--- a/yarn-project/simulator/src/avm/test_utils.ts
+++ b/yarn-project/simulator/src/avm/test_utils.ts
@@ -28,6 +28,10 @@ export function mockStorageRead(worldStateDB: WorldStateDB, value: Fr) {
   (worldStateDB as jest.Mocked<WorldStateDB>).storageRead.mockResolvedValue(value);
 }

+export function mockNoteHashCount(mockedTrace: PublicSideEffectTraceInterface, count: number) {
+  (mockedTrace as jest.Mocked<PublicSideEffectTraceInterface>).getNoteHashCount.mockReturnValue(count);
+}
+
 export function mockStorageReadWithMap(worldStateDB: WorldStateDB, mockedStorage: Map<bigint, Fr>) {
   (worldStateDB as jest.Mocked<WorldStateDB>).storageRead.mockImplementation((_address, slot) =>
     Promise.resolve(mockedStorage.get(slot.toBigInt()) ?? Fr.ZERO),
diff --git a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts
index 7ede684befe..a6b34e58e13 100644
--- a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts
+++ b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts
@@ -99,10 +99,10 @@ describe('Enqueued-call Side Effect Trace', () => {
   });

   it('Should trace note hashes', () => {
-    trace.traceNewNoteHash(address, utxo, leafIndex, siblingPath);
+    trace.traceNewNoteHash(utxo, leafIndex, siblingPath);
     expect(trace.getCounter()).toBe(startCounterPlus1);

-    const expected = [new NoteHash(utxo, startCounter).scope(address)];
+    const expected = [new NoteHash(utxo, startCounter)];
     expect(trace.getSideEffects().noteHashes).toEqual(expected);

     const expectedHint = new AvmAppendTreeHint(leafIndex, utxo, siblingPath);
@@ -285,11 +285,9 @@ describe('Enqueued-call Side Effect Trace', () => {

   it('Should enforce maximum number of new note hashes', () => {
     for (let i = 0; i < MAX_NOTE_HASHES_PER_TX; i++) {
-      trace.traceNewNoteHash(AztecAddress.fromNumber(i), new Fr(i), Fr.ZERO, []);
+      trace.traceNewNoteHash(new Fr(i), Fr.ZERO, []);
     }
-    expect(() => trace.traceNewNoteHash(AztecAddress.fromNumber(42), new Fr(42), Fr.ZERO, [])).toThrow(
-      SideEffectLimitReachedError,
-    );
+    expect(() => trace.traceNewNoteHash(new Fr(42), Fr.ZERO, [])).toThrow(SideEffectLimitReachedError);
   });

   it('Should enforce maximum number of new nullifiers', () => {
@@ -339,9 +337,7 @@ describe('Enqueued-call Side Effect Trace', () => {
     expect(() => trace.tracePublicStorageWrite(AztecAddress.fromNumber(42), new Fr(42), new Fr(42), true)).toThrow(
       SideEffectLimitReachedError,
     );
-    expect(() => trace.traceNewNoteHash(AztecAddress.fromNumber(42), new Fr(42), new Fr(42))).toThrow(
-      SideEffectLimitReachedError,
-    );
+    expect(() => trace.traceNewNoteHash(new Fr(42), new Fr(42))).toThrow(SideEffectLimitReachedError);
     expect(() => trace.traceNewNullifier(new Fr(42))).toThrow(SideEffectLimitReachedError);
     expect(() => trace.traceNewL2ToL1Message(AztecAddress.fromNumber(42), new Fr(42), new Fr(42))).toThrow(
       SideEffectLimitReachedError,
@@ -366,7 +362,7 @@ describe('Enqueued-call Side Effect Trace', () => {
       testCounter++;
       nestedTrace.traceNoteHashCheck(address, utxo, leafIndex, existsDefault, []);
       // counter does not increment for note hash checks
-      nestedTrace.traceNewNoteHash(address, utxo, Fr.ZERO, []);
+      nestedTrace.traceNewNoteHash(utxo, Fr.ZERO, []);
       testCounter++;
       nestedTrace.traceNullifierCheck(utxo, true, lowLeafPreimage, Fr.ZERO, []);
       testCounter++;
diff --git a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts
index 5ff5e7427c1..040ebbaaa66 100644
--- a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts
+++ b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts
@@ -42,7 +42,6 @@ import {
   PublicDataWrite,
   ScopedL2ToL1Message,
   ScopedLogHash,
-  type ScopedNoteHash,
   SerializableContractInstance,
   type TreeSnapshots,
 } from '@aztec/circuits.js';
@@ -74,7 +73,7 @@ export type SideEffects = {
   enqueuedCalls: PublicCallRequest[];

   publicDataWrites: PublicDataUpdateRequest[];
-  noteHashes: ScopedNoteHash[];
+  noteHashes: NoteHash[];
   nullifiers: Nullifier[];
   l2ToL1Msgs: ScopedL2ToL1Message[];

@@ -111,7 +110,7 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI
   private publicDataWrites: PublicDataUpdateRequest[] = [];
   private protocolPublicDataWritesLength: number = 0;
   private userPublicDataWritesLength: number = 0;
-  private noteHashes: ScopedNoteHash[] = [];
+  private noteHashes: NoteHash[] = [];
   private nullifiers: Nullifier[] = [];
   private l2ToL1Messages: ScopedL2ToL1Message[] = [];
   private unencryptedLogs: UnencryptedL2Log[] = [];
@@ -194,6 +193,10 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI
     this.sideEffectCounter++;
   }

+  public getNoteHashCount() {
+    return this.previousSideEffectArrayLengths.noteHashes + this.noteHashes.length;
+  }
+
   public tracePublicStorageRead(
     contractAddress: AztecAddress,
     slot: Fr,
@@ -272,19 +275,12 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI
     // NOTE: counter does not increment for note hash checks (because it doesn't rely on pending note hashes)
   }

-  public traceNewNoteHash(
-    contractAddress: AztecAddress,
-    noteHash: Fr,
-    leafIndex: Fr = Fr.zero(),
-    path: Fr[] = emptyNoteHashPath(),
-  ) {
+  public traceNewNoteHash(noteHash: Fr, leafIndex: Fr = Fr.zero(), path: Fr[] = emptyNoteHashPath()) {
     if (this.noteHashes.length + this.previousSideEffectArrayLengths.noteHashes >= MAX_NOTE_HASHES_PER_TX) {
       throw new SideEffectLimitReachedError('note hash', MAX_NOTE_HASHES_PER_TX);
     }

-    // TODO(dbanks12): make unique and silo instead of scoping
-    //const siloedNoteHash = siloNoteHash(contractAddress, noteHash);
-    this.noteHashes.push(new NoteHash(noteHash, this.sideEffectCounter).scope(contractAddress));
+    this.noteHashes.push(new NoteHash(noteHash, this.sideEffectCounter));
     this.log.debug(`NEW_NOTE_HASH cnt: ${this.sideEffectCounter}`);
     this.avmCircuitHints.noteHashWrites.items.push(new AvmAppendTreeHint(leafIndex, noteHash, path));
     this.incrementSideEffectCounter();
diff --git a/yarn-project/simulator/src/public/execution.ts b/yarn-project/simulator/src/public/execution.ts
index e6fe8c17457..cf9832201a2 100644
--- a/yarn-project/simulator/src/public/execution.ts
+++ b/yarn-project/simulator/src/public/execution.ts
@@ -20,7 +20,6 @@ import {
   RevertCode,
   type ScopedL2ToL1Message,
   type ScopedLogHash,
-  type ScopedNoteHash,
   type TreeLeafReadRequest,
 } from '@aztec/circuits.js';
 import { computeVarArgsHash } from '@aztec/circuits.js/hash';
@@ -29,7 +28,7 @@ export interface PublicSideEffects {
   /** The contract storage update requests performed. */
   publicDataWrites: PublicDataUpdateRequest[];
   /** The new note hashes to be inserted into the note hashes tree. */
-  noteHashes: ScopedNoteHash[];
+  noteHashes: NoteHash[];
   /** The new nullifiers to be inserted into the nullifier tree. */
   nullifiers: Nullifier[];
   /** The new l2 to l1 messages generated to be inserted into the messages tree. */
diff --git a/yarn-project/simulator/src/public/public_tx_context.ts b/yarn-project/simulator/src/public/public_tx_context.ts
index dc4807dcb2f..185327d5a87 100644
--- a/yarn-project/simulator/src/public/public_tx_context.ts
+++ b/yarn-project/simulator/src/public/public_tx_context.ts
@@ -91,7 +91,7 @@ export class PublicTxContext {
     const previousAccumulatedDataArrayLengths = new SideEffectArrayLengths(
       /*publicDataWrites*/ 0,
       /*protocolPublicDataWrites*/ 0,
-      countAccumulatedItems(nonRevertibleAccumulatedDataFromPrivate.noteHashes),
+      /*noteHashes*/ 0,
       /*nullifiers=*/ 0,
       countAccumulatedItems(nonRevertibleAccumulatedDataFromPrivate.l2ToL1Msgs),
       /*unencryptedLogsHashes*/ 0,
@@ -102,7 +102,12 @@
     );

     // Transaction level state manager that will be forked for revertible phases.
-    const txStateManager = await AvmPersistableStateManager.create(worldStateDB, enqueuedCallTrace, doMerkleOperations);
+    const txStateManager = await AvmPersistableStateManager.create(
+      worldStateDB,
+      enqueuedCallTrace,
+      doMerkleOperations,
+      fetchTxHash(nonRevertibleAccumulatedDataFromPrivate),
+    );

     const gasSettings = tx.data.constants.txContext.gasSettings;
     const gasUsedByPrivate = tx.data.gasUsed;
@@ -186,12 +191,7 @@
    * @returns The transaction's hash.
    */
   getTxHash(): TxHash {
-    // Private kernel functions are executed client side and for this reason tx hash is already set as first nullifier
-    const firstNullifier = this.nonRevertibleAccumulatedDataFromPrivate.nullifiers[0];
-    if (!firstNullifier || firstNullifier.isZero()) {
-      throw new Error(`Cannot get tx hash since first nullifier is missing`);
-    }
-    return new TxHash(firstNullifier.toBuffer());
+    return fetchTxHash(this.nonRevertibleAccumulatedDataFromPrivate);
   }

   /**
@@ -425,3 +425,12 @@ function applyMaxToAvailableGas(availableGas: Gas) {
     /*l2Gas=*/ Math.min(availableGas.l2Gas, MAX_L2_GAS_PER_TX_PUBLIC_PORTION),
   );
 }
+
+function fetchTxHash(nonRevertibleAccumulatedData: PrivateToPublicAccumulatedData): TxHash {
+  // Private kernel functions are executed client side, so the tx hash is already set as the first nullifier.
+  const firstNullifier = nonRevertibleAccumulatedData.nullifiers[0];
+  if (!firstNullifier || firstNullifier.isZero()) {
+    throw new Error(`Cannot get tx hash since first nullifier is missing`);
+  }
+  return new TxHash(firstNullifier.toBuffer());
+}
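// fetchTxHash above leans on a protocol invariant: the private kernel always emits the
// tx hash as the tx's first non-revertible nullifier. A standalone sketch of the same
// recovery (the function name here is hypothetical):
import { TxHash } from '@aztec/circuit-types';
import { Fr } from '@aztec/foundation/fields';

function txHashFromFirstNullifier(nullifiers: Fr[]): TxHash {
  const first = nullifiers[0];
  if (!first || first.isZero()) {
    throw new Error('Cannot get tx hash since first nullifier is missing');
  }
  return new TxHash(first.toBuffer());
}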
diff --git a/yarn-project/simulator/src/public/public_tx_simulator.ts b/yarn-project/simulator/src/public/public_tx_simulator.ts
index 37921773c48..24bc8089bdc 100644
--- a/yarn-project/simulator/src/public/public_tx_simulator.ts
+++ b/yarn-project/simulator/src/public/public_tx_simulator.ts
@@ -378,6 +378,11 @@ export class PublicTxSimulator {
       );
       }
     }
+    for (const noteHash of context.nonRevertibleAccumulatedDataFromPrivate.noteHashes) {
+      if (!noteHash.isEmpty()) {
+        stateManager.writeUniqueNoteHash(noteHash);
+      }
+    }
   }

   /**
@@ -397,6 +402,12 @@
       );
       }
     }
+    for (const noteHash of context.revertibleAccumulatedDataFromPrivate.noteHashes) {
+      if (!noteHash.isEmpty()) {
+        // Revertible note hashes from private are not hashed with a nonce, since private execution can't know their final position in the tx; only public execution can.
+        stateManager.writeSiloedNoteHash(noteHash);
+      }
+    }
   }

   private async payFee(context: PublicTxContext) {
diff --git a/yarn-project/simulator/src/public/side_effect_trace_interface.ts b/yarn-project/simulator/src/public/side_effect_trace_interface.ts
index d422f5fdb82..a08aa391d13 100644
--- a/yarn-project/simulator/src/public/side_effect_trace_interface.ts
+++ b/yarn-project/simulator/src/public/side_effect_trace_interface.ts
@@ -39,7 +39,8 @@ export interface PublicSideEffectTraceInterface {
     insertionPath?: Fr[],
   ): void;
   traceNoteHashCheck(contractAddress: AztecAddress, noteHash: Fr, leafIndex: Fr, exists: boolean, path?: Fr[]): void;
-  traceNewNoteHash(contractAddress: AztecAddress, noteHash: Fr, leafIndex?: Fr, path?: Fr[]): void;
+  traceNewNoteHash(uniqueNoteHash: Fr, leafIndex?: Fr, path?: Fr[]): void;
+  getNoteHashCount(): number;
   traceNullifierCheck(
     siloedNullifier: Fr,
     exists: boolean,
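// getNoteHashCount() drives nonce assignment: the next note hash's index in the tx is
// everything already traced, including side effects carried in from earlier enqueued
// calls. A minimal mirror of that counting contract (class name hypothetical; the real
// implementation is in enqueued_call_side_effect_trace.ts above):
class CountingTraceSketch {
  private noteHashes: unknown[] = [];
  constructor(private previousNoteHashCount: number) {}
  getNoteHashCount(): number {
    return this.previousNoteHashCount + this.noteHashes.length;
  }
  traceNewNoteHash(uniqueNoteHash: unknown): void {
    this.noteHashes.push(uniqueNoteHash);
  }
}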
diff --git a/yarn-project/simulator/src/public/transitional_adapters.ts b/yarn-project/simulator/src/public/transitional_adapters.ts
index 36f700166b4..d6fcfea0c66 100644
--- a/yarn-project/simulator/src/public/transitional_adapters.ts
+++ b/yarn-project/simulator/src/public/transitional_adapters.ts
@@ -6,7 +6,6 @@ import {
   type GasSettings,
   type GlobalVariables,
   MAX_L2_TO_L1_MSGS_PER_TX,
-  MAX_NOTE_HASHES_PER_TX,
   MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX,
   PrivateToAvmAccumulatedData,
   PrivateToAvmAccumulatedDataArrayLengths,
@@ -19,7 +18,6 @@ import {
   countAccumulatedItems,
   mergeAccumulatedData,
 } from '@aztec/circuits.js';
-import { computeNoteHashNonce, computeUniqueNoteHash, siloNoteHash } from '@aztec/circuits.js/hash';
 import { padArrayEnd } from '@aztec/foundation/collection';
 import { assertLength } from '@aztec/foundation/serialize';

@@ -86,55 +84,6 @@ export function generateAvmCircuitPublicInputs(
     revertibleAccumulatedDataFromPrivate,
   );

-  const txHash = avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.nullifiers[0];
-
-  // Add nonces to revertible note hashes from private. These don't have nonces since we don't know
-  // the final position in the tx until the AVM has executed.
-  // TODO: Use the final position in the tx
-  for (
-    let revertibleIndex = 0;
-    revertibleIndex < avmCircuitPublicInputs.previousRevertibleAccumulatedData.noteHashes.length;
-    revertibleIndex++
-  ) {
-    const noteHash = avmCircuitPublicInputs.previousRevertibleAccumulatedData.noteHashes[revertibleIndex];
-    if (noteHash.isZero()) {
-      continue;
-    }
-    const indexInTx =
-      revertibleIndex + avmCircuitPublicInputs.previousNonRevertibleAccumulatedDataArrayLengths.noteHashes;
-
-    const nonce = computeNoteHashNonce(txHash, indexInTx);
-    const uniqueNoteHash = computeUniqueNoteHash(nonce, noteHash);
-    avmCircuitPublicInputs.previousRevertibleAccumulatedData.noteHashes[revertibleIndex] = uniqueNoteHash;
-  }
-
-  // merge all revertible & non-revertible side effects into output accumulated data
-  const noteHashesFromPrivate = revertCode.isOK()
-    ? mergeAccumulatedData(
-        avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.noteHashes,
-        avmCircuitPublicInputs.previousRevertibleAccumulatedData.noteHashes,
-      )
-    : avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.noteHashes;
-  avmCircuitPublicInputs.accumulatedData.noteHashes = assertLength(
-    mergeAccumulatedData(noteHashesFromPrivate, avmCircuitPublicInputs.accumulatedData.noteHashes),
-    MAX_NOTE_HASHES_PER_TX,
-  );
-
-  // Silo and add nonces for note hashes emitted by the AVM
-  const scopedNoteHashesFromPublic = trace.getSideEffects().noteHashes;
-  for (let i = 0; i < scopedNoteHashesFromPublic.length; i++) {
-    const scopedNoteHash = scopedNoteHashesFromPublic[i];
-    const noteHash = scopedNoteHash.value;
-    if (!noteHash.isZero()) {
-      const noteHashIndexInTx = i + countAccumulatedItems(noteHashesFromPrivate);
-      const nonce = computeNoteHashNonce(txHash, noteHashIndexInTx);
-      const siloedNoteHash = siloNoteHash(scopedNoteHash.contractAddress, noteHash);
-      const uniqueNoteHash = computeUniqueNoteHash(nonce, siloedNoteHash);
-
-      avmCircuitPublicInputs.accumulatedData.noteHashes[noteHashIndexInTx] = uniqueNoteHash;
-    }
-  }
-
   const msgsFromPrivate = revertCode.isOK()
     ? mergeAccumulatedData(
         avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.l2ToL1Msgs,
diff --git a/yarn-project/validator-client/src/factory.ts b/yarn-project/validator-client/src/factory.ts
index 7ea8b09e8d6..3382163f408 100644
--- a/yarn-project/validator-client/src/factory.ts
+++ b/yarn-project/validator-client/src/factory.ts
@@ -1,5 +1,6 @@
 import { EpochCache, type EpochCacheConfig } from '@aztec/epoch-cache';
 import { type EthAddress } from '@aztec/foundation/eth-address';
+import { type DateProvider } from '@aztec/foundation/timer';
 import { type P2P } from '@aztec/p2p';
 import { type TelemetryClient } from '@aztec/telemetry-client';

@@ -11,8 +12,11 @@ import { ValidatorClient } from './validator.js';
 export async function createValidatorClient(
   config: ValidatorClientConfig & EpochCacheConfig,
   rollupAddress: EthAddress,
-  p2pClient: P2P,
-  telemetry: TelemetryClient,
+  deps: {
+    p2pClient: P2P;
+    telemetry: TelemetryClient;
+    dateProvider: DateProvider;
+  },
 ) {
   if (config.disableValidator) {
     return undefined;
@@ -22,7 +26,7 @@ export async function createValidatorClient(
   }

   // Create the epoch cache
-  const epochCache = await EpochCache.create(rollupAddress, /*l1TimestampSource,*/ config);
+  const epochCache = await EpochCache.create(rollupAddress, config, deps);

-  return ValidatorClient.new(config, epochCache, p2pClient, telemetry);
+  return ValidatorClient.new(config, epochCache, deps.p2pClient, deps.telemetry);
 }
diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock
index 2d5cab8dbf8..d478feb7ad3 100644
--- a/yarn-project/yarn.lock
+++ b/yarn-project/yarn.lock
@@ -551,7 +551,6 @@ __metadata:
     "@aztec/world-state": "workspace:^"
     "@jest/globals": ^29.5.0
     "@noble/curves": ^1.0.0
-    "@sinonjs/fake-timers": ^13.0.5
     "@swc/core": ^1.4.11
     "@swc/jest": ^0.2.36
     "@types/fs-extra": ^11.0.2
@@ -4538,7 +4537,7 @@ __metadata:
   languageName: node
   linkType: hard

-"@sinonjs/commons@npm:^3.0.0, @sinonjs/commons@npm:^3.0.1":
+"@sinonjs/commons@npm:^3.0.0":
   version: 3.0.1
   resolution: "@sinonjs/commons@npm:3.0.1"
   dependencies:
@@ -4556,15 +4555,6 @@ __metadata:
   languageName: node
   linkType: hard

-"@sinonjs/fake-timers@npm:^13.0.5":
-  version: 13.0.5
-  resolution: "@sinonjs/fake-timers@npm:13.0.5"
-  dependencies:
-    "@sinonjs/commons": ^3.0.1
-  checksum: b1c6ba87fadb7666d3aa126c9e8b4ac32b2d9e84c9e5fd074aa24cab3c8342fd655459de014b08e603be1e6c24c9f9716d76d6d2a36c50f59bb0091be61601dd
-  languageName: node
-  linkType: hard
-
 "@swc/core-darwin-arm64@npm:1.5.5":
   version: 1.5.5
   resolution: "@swc/core-darwin-arm64@npm:1.5.5"