diff --git a/.noir-sync-commit b/.noir-sync-commit index 1de91bb1a7a..9bbde85e56b 100644 --- a/.noir-sync-commit +++ b/.noir-sync-commit @@ -1 +1 @@ -13856a121125b1ccca15919942081a5d157d280e +68c32b4ffd9b069fe4b119327dbf4018c17ab9d4 diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index 975e115bcf2..e97fcb7665e 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = 9c8cb12a6deec4e427c013cf719b9f714f57ccab - parent = 85c8a3b29c861e61274cc0e33d47ca4aa89c144d + commit = ab3ba56bb526145969e5a615e1a9c17b566c8310 + parent = ec34442fa3e8df0f8f1ef1e4c88df3f1895fc2dd method = merge cmdver = 0.4.6 diff --git a/barretenberg/cpp/src/barretenberg/bb/main.cpp b/barretenberg/cpp/src/barretenberg/bb/main.cpp index 2d9d5d9e382..593a38c5597 100644 --- a/barretenberg/cpp/src/barretenberg/bb/main.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/main.cpp @@ -590,7 +590,7 @@ void prove_tube(const std::string& output_path) } ClientIVC verifier{ builder, input }; - verifier.verify(proof); + ClientIVC::Output client_ivc_rec_verifier_output = verifier.verify(proof); PairingPointAccumulatorIndices current_aggregation_object = stdlib::recursion::init_default_agg_obj_indices(*builder); @@ -599,6 +599,12 @@ void prove_tube(const std::string& output_path) // This is currently just setting the aggregation object to the default one. builder->add_pairing_point_accumulator(current_aggregation_object); + // The tube only calls an IPA recursive verifier once, so we can just add this IPA claim and proof + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1154): We shouldn't add these to the public inputs for + // now since we don't handle them correctly. Uncomment when we start using UltraRollupHonk in the rollup. 
+ // builder->add_ipa_claim(client_ivc_rec_verifier_output.opening_claim.get_witness_indices()); + // builder->ipa_proof = convert_stdlib_proof_to_native(client_ivc_rec_verifier_output.ipa_transcript->proof_data); + using Prover = UltraProver_; using Verifier = UltraVerifier_; Prover tube_prover{ *builder }; @@ -622,8 +628,9 @@ void prove_tube(const std::string& output_path) write_file(tubeAsFieldsVkPath, { data.begin(), data.end() }); info("Native verification of the tube_proof"); - Verifier tube_verifier(tube_verification_key); - bool verified = tube_verifier.verify_proof(tube_proof); + auto ipa_verification_key = std::make_shared>(1 << CONST_ECCVM_LOG_N); + Verifier tube_verifier(tube_verification_key, ipa_verification_key); + bool verified = tube_verifier.verify_proof(tube_proof, builder->ipa_proof); info("Tube proof verification: ", verified); } @@ -1066,7 +1073,7 @@ UltraProver_ compute_valid_prover(const std::string& bytecodePath, using Prover = UltraProver_; bool honk_recursion = false; - if constexpr (IsAnyOf) { + if constexpr (IsAnyOf) { honk_recursion = true; } auto constraint_system = get_constraint_system(bytecodePath, honk_recursion); @@ -1132,14 +1139,22 @@ template bool verify_honk(const std::string& proof_path, { using VerificationKey = Flavor::VerificationKey; using Verifier = UltraVerifier_; - using VerifierCommitmentKey = bb::VerifierCommitmentKey; auto g2_data = get_bn254_g2_data(CRS_PATH); srs::init_crs_factory({}, g2_data); auto proof = from_buffer>(read_file(proof_path)); auto vk = std::make_shared(from_buffer(read_file(vk_path))); - vk->pcs_verification_key = std::make_shared(); - Verifier verifier{ vk }; + vk->pcs_verification_key = std::make_shared>(); + + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1154): Remove this and pass in the IPA proof to the + // verifier. 
+ std::shared_ptr> ipa_verification_key = nullptr; + if constexpr (HasIPAAccumulatorFlavor) { + init_grumpkin_crs(1 << 16); + vk->contains_ipa_claim = false; + ipa_verification_key = std::make_shared>(1 << CONST_ECCVM_LOG_N); + } + Verifier verifier{ vk, ipa_verification_key }; bool verified = verifier.verify_proof(proof); diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp index 9119e17f6ea..8f889fc8b2c 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp @@ -25,7 +25,7 @@ void ClientIVC::instantiate_stdlib_verification_queue( size_t key_idx = 0; for (auto& [proof, vkey, type] : verification_queue) { // Construct stdlib proof directly from the internal native queue data - auto stdlib_proof = bb::convert_proof_to_witness(&circuit, proof); + auto stdlib_proof = bb::convert_native_proof_to_stdlib(&circuit, proof); // Use the provided stdlib vkey if present, otherwise construct one from the internal native queue auto stdlib_vkey = @@ -176,10 +176,11 @@ void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr(circuit, trace_settings); trace_usage_tracker = ExecutionTraceUsageTracker(trace_settings); } else { - proving_key = std::make_shared( - circuit, trace_settings, fold_output.accumulator->proving_key.commitment_key); + proving_key = std::make_shared(circuit, trace_settings); } + proving_key->proving_key.commitment_key = bn254_commitment_key; + vinfo("getting honk vk... 
precomputed?: ", precomputed_vk); // Update the accumulator trace usage based on the present circuit trace_usage_tracker.update(circuit); @@ -261,7 +262,7 @@ HonkProof ClientIVC::construct_and_prove_hiding_circuit() auto stdlib_decider_vk = std::make_shared(&builder, verification_queue[0].honk_verification_key); - auto stdlib_proof = bb::convert_proof_to_witness(&builder, fold_proof); + auto stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, fold_proof); // Perform recursive folding verification of the last folding proof FoldingRecursiveVerifier folding_verifier{ &builder, stdlib_verifier_accumulator, { stdlib_decider_vk } }; @@ -278,7 +279,7 @@ HonkProof ClientIVC::construct_and_prove_hiding_circuit() MergeProof merge_proof = goblin.prove_merge(builder); merge_verification_queue.emplace_back(merge_proof); - auto decider_pk = std::make_shared(builder); + auto decider_pk = std::make_shared(builder, TraceSettings(), bn254_commitment_key); honk_vk = std::make_shared(decider_pk->proving_key); MegaProver prover(decider_pk); @@ -338,6 +339,7 @@ bool ClientIVC::verify(const Proof& proof) HonkProof ClientIVC::decider_prove() const { vinfo("prove decider..."); + fold_output.accumulator->proving_key.commitment_key = bn254_commitment_key; MegaDeciderProver decider_prover(fold_output.accumulator); return decider_prover.construct_proof(); vinfo("finished decider proving."); @@ -352,11 +354,19 @@ HonkProof ClientIVC::decider_prove() const bool ClientIVC::prove_and_verify() { auto start = std::chrono::steady_clock::now(); - auto proof = prove(); + const auto proof = prove(); auto end = std::chrono::steady_clock::now(); auto diff = std::chrono::duration_cast(end - start); vinfo("time to call ClientIVC::prove: ", diff.count(), " ms."); - return verify(proof); + + start = end; + const bool verified = verify(proof); + end = std::chrono::steady_clock::now(); + + diff = std::chrono::duration_cast(end - start); + vinfo("time to verify ClientIVC proof: ", diff.count(), " ms."); 
+ + return verified; } /** diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp index c1389ed814d..c0028a791ff 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp @@ -98,8 +98,6 @@ class ClientIVC { using ProverFoldOutput = FoldingResult; public: - GoblinProver goblin; - ProverFoldOutput fold_output; // prover accumulator and fold proof std::shared_ptr verifier_accumulator; // verifier accumulator @@ -122,11 +120,19 @@ class ClientIVC { // Setting auto_verify_mode = true will cause kernel completion logic to be added to kernels automatically bool auto_verify_mode; + std::shared_ptr bn254_commitment_key; + + GoblinProver goblin; + bool initialized = false; // Is the IVC accumulator initialized ClientIVC(TraceSettings trace_settings = {}, bool auto_verify_mode = false) : trace_settings(trace_settings) , auto_verify_mode(auto_verify_mode) + , bn254_commitment_key(trace_settings.structure.has_value() + ? 
std::make_shared>(trace_settings.dyadic_size()) + : nullptr) + , goblin(bn254_commitment_key) {} void instantiate_stdlib_verification_queue( diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/claim.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/claim.hpp index 129cb4df521..27917a072fa 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/claim.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/claim.hpp @@ -1,7 +1,9 @@ #pragma once #include "barretenberg/commitment_schemes/commitment_key.hpp" +#include "barretenberg/plonk_honk_shared/types/aggregation_object_type.hpp" #include "barretenberg/polynomials/polynomial.hpp" +#include "barretenberg/stdlib/primitives/curves/grumpkin.hpp" namespace bb { /** @@ -51,6 +53,32 @@ template class OpeningClaim { // commitment to univariate polynomial p(X) Commitment commitment; + IPAClaimIndices get_witness_indices() const + requires(std::is_same_v>) + { + return { opening_pair.challenge.binary_basis_limbs[0].element.normalize().witness_index, + opening_pair.challenge.binary_basis_limbs[1].element.normalize().witness_index, + opening_pair.challenge.binary_basis_limbs[2].element.normalize().witness_index, + opening_pair.challenge.binary_basis_limbs[3].element.normalize().witness_index, + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1153): Uncomment this when we turn the + // eval into witnesses. + // opening_pair.evaluation.binary_basis_limbs[0].element.normalize().witness_index, + // opening_pair.evaluation.binary_basis_limbs[1].element.normalize().witness_index, + // opening_pair.evaluation.binary_basis_limbs[2].element.normalize().witness_index, + // opening_pair.evaluation.binary_basis_limbs[3].element.normalize().witness_index, + commitment.x.normalize().witness_index, // no idea if we need these normalize() calls... 
+ commitment.y.normalize().witness_index }; + } + + auto get_native_opening_claim() const + requires(Curve::is_stdlib_type) + { + return OpeningClaim{ + { static_cast(opening_pair.challenge.get_value()), + static_cast(opening_pair.evaluation.get_value()) }, + commitment.get_value() + }; + } /** * @brief inefficiently check that the claim is correct by recomputing the commitment * and evaluating the polynomial in r. diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp index 3a0afa05a06..c3e5bae6705 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp @@ -749,8 +749,8 @@ template class IPA { } /** - * @brief Takes two IPA claims and accumulates them into 1 IPA claim. - * @details We create an IPA accumulator by running the IPA recursive verifier on each claim. Then, we generate challenges, and use these challenges to compute the new accumulator. We also create the accumulated polynomial. + * @brief Takes two IPA claims and accumulates them into 1 IPA claim. Also computes IPA proof for the claim. + * @details We create an IPA accumulator by running the IPA recursive verifier on each claim. Then, we generate challenges, and use these challenges to compute the new accumulator. We also create the accumulated polynomial, and generate the IPA proof for the accumulated claim. * More details are described here: https://hackmd.io/IXoLIPhVT_ej8yhZ_Ehvuw?both. 
* * @param verifier_ck @@ -758,11 +758,12 @@ template class IPA { * @param claim_1 * @param transcript_2 * @param claim_2 - * @return std::pair, Polynomial> + * @return std::pair, HonkProof> */ - static std::pair, Polynomial> accumulate(auto& transcript_1, OpeningClaim claim_1, auto& transcript_2, OpeningClaim claim_2) + static std::pair, HonkProof> accumulate(const std::shared_ptr>& ck, auto& transcript_1, OpeningClaim claim_1, auto& transcript_2, OpeningClaim claim_2) requires Curve::is_stdlib_type { + using NativeCurve = curve::Grumpkin; using Builder = typename Curve::Builder; // Step 1: Run the verifier for each IPA instance VerifierAccumulator pair_1 = reduce_verify(claim_1, transcript_1); @@ -793,7 +794,23 @@ template class IPA { for (Fr u_inv_i : pair_2.u_challenges_inv) { native_u_challenges_inv_2.push_back(bb::fq(u_inv_i.get_value())); } - return {output_claim, create_challenge_poly(uint32_t(pair_1.log_poly_length.get_value()), native_u_challenges_inv_1, uint32_t(pair_2.log_poly_length.get_value()), native_u_challenges_inv_2, fq(alpha.get_value()))}; + + // Compute proof for the claim + auto prover_transcript = std::make_shared(); + const OpeningPair opening_pair{ bb::fq(output_claim.opening_pair.challenge.get_value()), + bb::fq(output_claim.opening_pair.evaluation.get_value()) }; + Polynomial challenge_poly = create_challenge_poly(uint32_t(pair_1.log_poly_length.get_value()), native_u_challenges_inv_1, uint32_t(pair_2.log_poly_length.get_value()), native_u_challenges_inv_2, fq(alpha.get_value())); + + ASSERT(challenge_poly.evaluate(opening_pair.challenge) == opening_pair.evaluation && "Opening claim does not hold for challenge polynomial."); + + IPA::compute_opening_proof(ck, { challenge_poly, opening_pair }, prover_transcript); + + // Since we know this circuit will not have any more IPA claims to accumulate, add IPA Claim to public inputs of circuit and add the proof to the builder. 
+ Builder* builder = r.get_context(); + builder->add_ipa_claim(output_claim.get_witness_indices()); + builder->ipa_proof = prover_transcript->proof_data; + + return {output_claim, prover_transcript->proof_data}; } }; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp index 8b59b2b90b8..fa55d812441 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/ipa_recursive.test.cpp @@ -56,8 +56,8 @@ class IPARecursiveTests : public CommitmentTest { OpeningClaim stdlib_opening_claim{ { stdlib_x, stdlib_eval }, stdlib_comm }; // Construct stdlib verifier transcript - auto recursive_verifier_transcript = - std::make_shared(bb::convert_proof_to_witness(&builder, prover_transcript->proof_data)); + auto recursive_verifier_transcript = std::make_shared( + bb::convert_native_proof_to_stdlib(&builder, prover_transcript->proof_data)); return { recursive_verifier_transcript, stdlib_opening_claim }; } @@ -158,25 +158,21 @@ class IPARecursiveTests : public CommitmentTest { // Creates two IPA accumulators and accumulators from the two claims. Also constructs the accumulated h // polynomial. - auto [output_claim, challenge_poly] = RecursiveIPA::accumulate(transcript_1, claim_1, transcript_2, claim_2); + auto [output_claim, ipa_proof] = + RecursiveIPA::accumulate(this->ck(), transcript_1, claim_1, transcript_2, claim_2); builder.finalize_circuit(/*ensure_nonzero=*/false); info("Circuit with 2 IPA Recursive Verifiers and IPA Accumulation num finalized gates = ", builder.get_num_finalized_gates()); EXPECT_TRUE(CircuitChecker::check(builder)); - // Run the IPA prover on this new accumulated claim. 
- auto prover_transcript = std::make_shared(); const OpeningPair opening_pair{ bb::fq(output_claim.opening_pair.challenge.get_value()), bb::fq(output_claim.opening_pair.evaluation.get_value()) }; Commitment native_comm = output_claim.commitment.get_value(); const OpeningClaim opening_claim{ opening_pair, native_comm }; - NativeIPA::compute_opening_proof(this->ck(), { challenge_poly, opening_pair }, prover_transcript); - - EXPECT_EQ(challenge_poly.evaluate(opening_pair.challenge), opening_pair.evaluation); // Natively verify this proof to check it. - auto verifier_transcript = std::make_shared(prover_transcript->proof_data); + auto verifier_transcript = std::make_shared(ipa_proof); auto result = NativeIPA::reduce_verify(this->vk(), opening_claim, verifier_transcript); EXPECT_TRUE(result); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp index b94b0c95085..97051dc2b08 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp @@ -88,7 +88,7 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) N, RefVector(f_polynomials), RefVector(g_polynomials), u_challenge, commitment_key, prover_transcript); KZG::compute_opening_proof(commitment_key, prover_opening_claims, prover_transcript); Builder builder; - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&builder, prover_transcript->proof_data); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, prover_transcript->proof_data); auto stdlib_verifier_transcript = std::make_shared(stdlib_proof); stdlib_verifier_transcript->template receive_from_prover("Init"); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/zeromorph.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/zeromorph.test.cpp index 
5c6f22d6af4..fb70282a51f 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/zeromorph.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/zeromorph.test.cpp @@ -95,7 +95,7 @@ TEST(ZeroMorphRecursionTest, ProveAndVerifySingle) prover_transcript); Builder builder; - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&builder, prover_transcript->proof_data); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, prover_transcript->proof_data); auto stdlib_verifier_transcript = std::make_shared(stdlib_proof); [[maybe_unused]] auto _ = stdlib_verifier_transcript->template receive_from_prover("Init"); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp index d0eb9d767d6..1cb9bb642bd 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp @@ -299,7 +299,7 @@ void process_plonk_recursion_constraints(Builder& builder, // they want these constants set by keeping the nested aggregation object attached to // the proof as public inputs. As this is the only object that can prepended to the // proof if the proof is above the expected size (with public inputs stripped) - PairingPointAccumPubInputIndices nested_aggregation_object = {}; + PairingPointAccumulatorPubInputIndices nested_aggregation_object = {}; // If the proof has public inputs attached to it, we should handle setting the nested // aggregation object if (constraint.proof.size() > proof_size_no_pub_inputs) { @@ -343,16 +343,9 @@ void process_plonk_recursion_constraints(Builder& builder, // inputs. 
if (!constraint_system.recursion_constraints.empty()) { - // First add the output aggregation object as public inputs - // Set the indices as public inputs because they are no longer being - // created in ACIR - for (const auto& idx : current_output_aggregation_object) { - builder.set_public_input(idx); - } - // Make sure the verification key records the public input indices of the // final recursion output. - builder.set_pairing_point_accumulator(current_output_aggregation_object); + builder.add_pairing_point_accumulator(current_output_aggregation_object); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp index 25d4f90d85a..6e045c395c5 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp @@ -236,7 +236,7 @@ WASM_EXPORT void acir_prove_and_verify_aztec_client(uint8_t const* acir_stack, } // TODO(#7371) dedupe this with the rest of the similar code // TODO(https://github.com/AztecProtocol/barretenberg/issues/1101): remove use of auto_verify_mode - ClientIVC ivc{ { E2E_FULL_TEST_STRUCTURE }, /*auto_verify_mode=*/true }; + ClientIVC ivc{ { CLIENT_IVC_BENCH_STRUCTURE }, /*auto_verify_mode=*/true }; // Accumulate the entire program stack into the IVC // TODO(https://github.com/AztecProtocol/barretenberg/issues/1116): remove manual setting of is_kernel once databus @@ -267,6 +267,10 @@ WASM_EXPORT void acir_prove_and_verify_aztec_client(uint8_t const* acir_stack, bool result = ivc.prove_and_verify(); info("verified?: ", result); + end = std::chrono::steady_clock::now(); + diff = std::chrono::duration_cast(end - start); + vinfo("time to construct, accumulate, prove and verify all circuits: ", diff.count()); + *verified = result; } diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp index 99f1350f920..d95a2f01ad7 100644 
--- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp @@ -104,8 +104,7 @@ void ECCVMProver::execute_relation_check_rounds() gate_challenges[idx] = transcript->template get_challenge("Sumcheck:gate_challenge_" + std::to_string(idx)); } - auto commitment_key = std::make_shared(Flavor::BATCHED_RELATION_PARTIAL_LENGTH); - zk_sumcheck_data = ZKSumcheckData(key->log_circuit_size, transcript, commitment_key); + zk_sumcheck_data = ZKSumcheckData(key->log_circuit_size, transcript, key->commitment_key); sumcheck_output = sumcheck.prove(key->polynomials, relation_parameters, alpha, gate_challenges, zk_sumcheck_data); } diff --git a/barretenberg/cpp/src/barretenberg/examples/join_split/join_split.test.cpp b/barretenberg/cpp/src/barretenberg/examples/join_split/join_split.test.cpp index f56dbf882e7..677e30f198c 100644 --- a/barretenberg/cpp/src/barretenberg/examples/join_split/join_split.test.cpp +++ b/barretenberg/cpp/src/barretenberg/examples/join_split/join_split.test.cpp @@ -33,8 +33,8 @@ using namespace bb::join_split_example::proofs::notes::native; using key_pair = join_split_example::fixtures::grumpkin_key_pair; auto create_account_leaf_data(fr const& account_alias_hash, - grumpkin::g1::affine_element const& owner_key, - grumpkin::g1::affine_element const& signing_key) + bb::grumpkin::g1::affine_element const& owner_key, + bb::grumpkin::g1::affine_element const& signing_key) { return notes::native::account::account_note{ account_alias_hash, owner_key, signing_key }.commit(); } @@ -869,7 +869,7 @@ TEST_P(test_allow_chain_to_other_users_fail, ) { join_split_tx tx = simple_setup(); tx.allow_chain = GetParam(); - tx.output_note[tx.allow_chain - 1].owner = grumpkin::g1::element::random_element(); // i.e. not owned by self. + tx.output_note[tx.allow_chain - 1].owner = bb::grumpkin::g1::element::random_element(); // i.e. not owned by self. 
auto result = sign_and_verify_logic(tx, user.owner); EXPECT_FALSE(result.valid); EXPECT_EQ(result.err, "inter-user chaining disallowed"); @@ -1028,7 +1028,7 @@ TEST_F(join_split_tests, test_total_output_value_larger_than_total_input_value_f TEST_F(join_split_tests, test_different_input_note_owners_fails) { join_split_tx tx = simple_setup({ 1, 2 }); - tx.input_note[0].owner = grumpkin::g1::affine_element::hash_to_curve({ 1 }); + tx.input_note[0].owner = bb::grumpkin::g1::affine_element::hash_to_curve({ 1 }); auto result = sign_and_verify_logic(tx, user.owner); EXPECT_FALSE(result.valid); @@ -1073,7 +1073,7 @@ TEST_F(join_split_tests, test_different_note_account_required_vs_account_require TEST_F(join_split_tests, test_wrong_input_note_owner_fails) { join_split_tx tx = simple_setup(); - tx.input_note[0].owner = grumpkin::g1::element::random_element(); + tx.input_note[0].owner = bb::grumpkin::g1::element::random_element(); tx.input_note[1].owner = tx.input_note[0].owner; auto result = sign_and_verify_logic(tx, user.owner); @@ -1084,8 +1084,8 @@ TEST_F(join_split_tests, test_wrong_input_note_owner_fails) TEST_F(join_split_tests, test_random_output_note_owners) { join_split_tx tx = simple_setup(); - tx.output_note[0].owner = grumpkin::g1::element::random_element(); - tx.output_note[1].owner = grumpkin::g1::element::random_element(); + tx.output_note[0].owner = bb::grumpkin::g1::element::random_element(); + tx.output_note[1].owner = bb::grumpkin::g1::element::random_element(); EXPECT_TRUE(sign_and_verify_logic(tx, user.owner).valid); } @@ -1097,7 +1097,7 @@ TEST_F(join_split_tests, test_random_output_note_owners) TEST_F(join_split_tests, test_wrong_account_private_key_fails) { join_split_tx tx = simple_setup(); - tx.account_private_key = grumpkin::fr::random_element(); + tx.account_private_key = bb::grumpkin::fr::random_element(); auto result = sign_and_verify_logic(tx, user.owner); EXPECT_FALSE(result.valid); diff --git 
a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp index f0b7158e5c6..b170033f7b2 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp @@ -110,7 +110,7 @@ template class ProvingKey_ { public: size_t circuit_size; bool contains_pairing_point_accumulator; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; bb::EvaluationDomain evaluation_domain; std::shared_ptr commitment_key; size_t num_public_inputs; @@ -152,7 +152,7 @@ class VerificationKey_ : public PrecomputedCommitments { using Commitment = typename VerifierCommitmentKey::Commitment; std::shared_ptr pcs_verification_key; bool contains_pairing_point_accumulator = false; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices = {}; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices = {}; uint64_t pub_inputs_offset = 0; bool operator==(const VerificationKey_&) const = default; @@ -323,6 +323,7 @@ template constexpr auto create_tuple_of_arrays_of_values() namespace bb { class UltraFlavor; class UltraFlavorWithZK; +class UltraRollupFlavor; class ECCVMFlavor; class UltraKeccakFlavor; class MegaFlavor; @@ -357,10 +358,10 @@ template concept IsPlonkFlavor = IsAnyOf; template -concept IsUltraPlonkOrHonk = IsAnyOf; +concept IsUltraPlonkOrHonk = IsAnyOf; template -concept IsUltraFlavor = IsAnyOf; +concept IsUltraFlavor = IsAnyOf; template concept IsMegaFlavor = IsAnyOf>; template concept HasDataBus = IsMegaFlavor; +template +concept HasIPAAccumulatorFlavor = IsAnyOf; + template concept IsRecursiveFlavor = IsAnyOf, UltraRecursiveFlavor_, @@ -395,6 +399,7 @@ template concept IsECCVMRecursiveFlavor = IsAnyOf concept IsFoldingFlavor = IsAnyOf op_queue = std::make_shared(); + std::shared_ptr> commitment_key; MergeProof 
merge_proof; GoblinProof goblin_proof; @@ -70,11 +71,12 @@ class GoblinProver { GoblinAccumulationOutput accumulator; // Used only for ACIR methods for now public: - GoblinProver() + GoblinProver(const std::shared_ptr>& bn254_commitment_key = nullptr) { // Mocks the interaction of a first circuit with the op queue due to the inability to currently handle zero // commitments (https://github.com/AztecProtocol/barretenberg/issues/871) which would otherwise appear in the // first round of the merge protocol. To be removed once the issue has been resolved. - GoblinMockCircuits::perform_op_queue_interactions_for_mock_first_circuit(op_queue); + commitment_key = bn254_commitment_key ? bn254_commitment_key : nullptr; + GoblinMockCircuits::perform_op_queue_interactions_for_mock_first_circuit(op_queue, commitment_key); } /** * @brief Construct a MegaHonk proof and a merge proof for the present circuit. @@ -160,7 +162,7 @@ class GoblinProver { merge_proof_exists = true; } - MergeProver merge_prover{ circuit_builder.op_queue }; + MergeProver merge_prover{ circuit_builder.op_queue, commitment_key }; return merge_prover.construct_proof(); }; @@ -209,7 +211,7 @@ class GoblinProver { auto translator_builder = std::make_unique(translation_batching_challenge_v, evaluation_challenge_x, op_queue); - translator_prover = std::make_unique(*translator_builder, transcript); + translator_prover = std::make_unique(*translator_builder, transcript, commitment_key); } { diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp index 2ddcdfb2038..e2d1c598799 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp @@ -121,7 +121,8 @@ class GoblinMockCircuits { * * @param op_queue */ - static void perform_op_queue_interactions_for_mock_first_circuit(std::shared_ptr& op_queue) + static void perform_op_queue_interactions_for_mock_first_circuit( + 
std::shared_ptr& op_queue, std::shared_ptr commitment_key = nullptr) { PROFILE_THIS(); @@ -134,11 +135,12 @@ class GoblinMockCircuits { // Manually compute the op queue transcript commitments (which would normally be done by the merge prover) bb::srs::init_crs_factory("../srs_db/ignition"); - auto commitment_key = CommitmentKey(op_queue->get_current_size()); + auto bn254_commitment_key = + commitment_key ? commitment_key : std::make_shared(op_queue->get_current_size()); std::array op_queue_commitments; size_t idx = 0; for (auto& entry : op_queue->get_aggregate_transcript()) { - op_queue_commitments[idx++] = commitment_key.commit({ 0, entry }); + op_queue_commitments[idx++] = bn254_commitment_key->commit({ 0, entry }); } // Store the commitment data for use by the prover of the next circuit op_queue->set_commitment_data(op_queue_commitments); @@ -212,7 +214,7 @@ class GoblinMockCircuits { // Execute recursive aggregation of function proof auto verification_key = std::make_shared(&builder, function_accum.verification_key); - auto proof = bb::convert_proof_to_witness(&builder, function_accum.proof); + auto proof = bb::convert_native_proof_to_stdlib(&builder, function_accum.proof); RecursiveVerifier verifier1{ &builder, verification_key }; verifier1.verify_proof( proof, stdlib::recursion::init_default_aggregation_state(builder)); @@ -221,7 +223,7 @@ class GoblinMockCircuits { if (!prev_kernel_accum.proof.empty()) { auto verification_key = std::make_shared(&builder, prev_kernel_accum.verification_key); - auto proof = bb::convert_proof_to_witness(&builder, prev_kernel_accum.proof); + auto proof = bb::convert_native_proof_to_stdlib(&builder, prev_kernel_accum.proof); RecursiveVerifier verifier2{ &builder, verification_key }; verifier2.verify_proof( proof, stdlib::recursion::init_default_aggregation_state(builder)); diff --git a/barretenberg/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp 
b/barretenberg/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp index e0a2097adda..eedd49442d0 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp @@ -25,7 +25,7 @@ struct proving_key_data { uint32_t circuit_size; uint32_t num_public_inputs; bool contains_pairing_point_accumulator; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; std::vector memory_read_records; std::vector memory_write_records; #ifdef __wasm__ @@ -60,7 +60,7 @@ struct proving_key { size_t log_circuit_size; size_t num_public_inputs; bool contains_pairing_point_accumulator = false; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; std::vector memory_read_records; // Used by UltraPlonkComposer only; for ROM, RAM reads. std::vector memory_write_records; // Used by UltraPlonkComposer only, for RAM writes. 
diff --git a/barretenberg/cpp/src/barretenberg/plonk/proof_system/verification_key/verification_key.hpp b/barretenberg/cpp/src/barretenberg/plonk/proof_system/verification_key/verification_key.hpp index face2fba637..f3b84e29fc9 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/proof_system/verification_key/verification_key.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk/proof_system/verification_key/verification_key.hpp @@ -18,7 +18,7 @@ struct verification_key_data { uint32_t num_public_inputs; std::map commitments; bool contains_pairing_point_accumulator = false; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; bool is_recursive_circuit = false; // for serialization: update with any new fields @@ -99,7 +99,7 @@ struct verification_key { bb::fr z_pow_n; // ʓ^n (ʓ being the 'evaluation challenge') bool contains_pairing_point_accumulator = false; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; bool is_recursive_circuit = false; diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp index a30a879142b..26edf196c2d 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp @@ -59,8 +59,15 @@ template struct MegaTraceBlockData { }; } - static uint32_t size() { return 0; } - static uint32_t dyadic_size() { return 0; } + size_t size() const + requires std::same_as + { + size_t result{ 0 }; + for (const auto& block_size : get()) { + result += block_size; + } + return static_cast(result); + } bool operator==(const MegaTraceBlockData& other) 
const = default; }; @@ -72,6 +79,15 @@ struct TraceSettings { // The size of the overflow block. Specified separately because it is allowed to be determined at runtime in the // context of VK computation uint32_t overflow_capacity = 0; + + size_t size() const { return structure->size() + static_cast(overflow_capacity); } + + size_t dyadic_size() const + { + const size_t total_size = size(); + const size_t lower_dyadic = 1 << numeric::get_msb(total_size); + return total_size > lower_dyadic ? lower_dyadic << 1 : lower_dyadic; + } }; class MegaTraceBlock : public ExecutionTraceBlock { diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/types/aggregation_object_type.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/types/aggregation_object_type.hpp index 5f9bb79cda6..99608cc626f 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/types/aggregation_object_type.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/types/aggregation_object_type.hpp @@ -7,10 +7,14 @@ namespace bb { // An aggregation state is represented by two G1 affine elements. Each G1 point has // two field element coordinates (x, y). Thus, four base field elements // Four limbs are used when simulating a non-native field using the bigfield class, so 16 total field elements. -constexpr uint32_t PAIRING_POINT_ACCUMULATOR_SIZE = 16; +static constexpr uint32_t PAIRING_POINT_ACCUMULATOR_SIZE = 16; // PairingPointAccumulatorIndices represents an array of 16 witness indices pointing to the nested aggregation object. using PairingPointAccumulatorIndices = std::array; -// PairingPointAccumPubInputIndices represents an array of 16 public input indices pointing to the witness indices of -// the nested aggregation object. -using PairingPointAccumPubInputIndices = std::array; +// PairingPointAccumulatorPubInputIndices represents an array of 16 public input indices pointing to the witness indices +// of the nested aggregation object. 
+using PairingPointAccumulatorPubInputIndices = std::array; + +static constexpr uint32_t IPA_CLAIM_SIZE = 6; +using IPAClaimIndices = std::array; +using IPAClaimPubInputIndices = std::array; } // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp index 292cf11bbd7..11ea759d4f2 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp @@ -60,10 +60,29 @@ void Polynomial::allocate_backing_memory(size_t size, size_t virtual_size, s * * @param size The size of the polynomial. */ -template Polynomial::Polynomial(size_t size, size_t virtual_size, size_t start_index) +template +Polynomial::Polynomial(size_t size, size_t virtual_size, size_t start_index, bool disable_parallelisation) { + PROFILE_THIS_NAME("polynomial allocation with zeroing"); + allocate_backing_memory(size, virtual_size, start_index); - memset(static_cast(coefficients_.backing_memory_.get()), 0, sizeof(Fr) * size); + if (disable_parallelisation) { + // In AVM polynomials are small and already constructed in parallel + memset(static_cast(coefficients_.backing_memory_.get()), 0, sizeof(Fr) * size); + return; + } + + size_t num_threads = calculate_num_threads(size); + size_t range_per_thread = size / num_threads; + size_t leftovers = size - (range_per_thread * num_threads); + + parallel_for(num_threads, [&](size_t j) { + size_t offset = j * range_per_thread; + size_t range = (j == num_threads - 1) ? 
range_per_thread + leftovers : range_per_thread; + ASSERT(offset < size || size == 0); + ASSERT((offset + range) <= size); + memset(static_cast(coefficients_.backing_memory_.get() + offset), 0, sizeof(Fr) * range); + }); } /** @@ -76,6 +95,7 @@ template Polynomial::Polynomial(size_t size, size_t virtual_si template Polynomial::Polynomial(size_t size, size_t virtual_size, size_t start_index, [[maybe_unused]] DontZeroMemory flag) { + PROFILE_THIS_NAME("polynomial allocation without zeroing"); allocate_backing_memory(size, virtual_size, start_index); } diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp index c51597d2276..17d6ab34c61 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp @@ -65,13 +65,11 @@ template class Polynomial { using FF = Fr; enum class DontZeroMemory { FLAG }; - Polynomial(size_t size, size_t virtual_size, size_t start_index = 0); + Polynomial(size_t size, size_t virtual_size, size_t start_index = 0, bool disable_parallelisation = false); // Intended just for plonk, where size == virtual_size always Polynomial(size_t size) - : Polynomial(size, size) - { - PROFILE_THIS(); - } + : Polynomial(size, size){}; + // Constructor that does not initialize values, use with caution to save time. 
Polynomial(size_t size, size_t virtual_size, size_t start_index, DontZeroMemory flag); Polynomial(size_t size, size_t virtual_size, DontZeroMemory flag) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp index 15f7ce40f3d..90a7df0e894 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.cpp @@ -8,7 +8,7 @@ namespace bb::stdlib::recursion::honk { * @todo (https://github.com/AztecProtocol/barretenberg/issues/934): Add logic for accumulating the pairing points * produced by the verifiers (and potentially IPA accumulators for ECCVM verifier) */ -void ClientIVCRecursiveVerifier::verify(const ClientIVC::Proof& proof) +ClientIVCRecursiveVerifier::Output ClientIVCRecursiveVerifier::verify(const ClientIVC::Proof& proof) { // Construct stdlib Mega verification key auto stdlib_mega_vk = @@ -24,7 +24,9 @@ void ClientIVCRecursiveVerifier::verify(const ClientIVC::Proof& proof) // Perform Goblin recursive verification GoblinVerifier goblin_verifier{ builder.get(), verifier_input.goblin_input }; - goblin_verifier.verify(proof.goblin_proof); + GoblinRecursiveVerifierOutput output = goblin_verifier.verify(proof.goblin_proof); + + return output; } } // namespace bb::stdlib::recursion::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp index d6954bb0532..4381b1f5faf 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.hpp @@ -21,6 +21,8 @@ class 
ClientIVCRecursiveVerifier { using Proof = ClientIVC::Proof; using FoldVerifierInput = FoldingVerifier::VerifierInput; using GoblinVerifierInput = GoblinVerifier::VerifierInput; + using Output = GoblinRecursiveVerifierOutput; + struct VerifierInput { std::shared_ptr mega_verification_key; GoblinVerifierInput goblin_input; @@ -30,7 +32,7 @@ class ClientIVCRecursiveVerifier { : builder(builder) , verifier_input(verifier_input){}; - void verify(const ClientIVC::Proof&); + Output verify(const ClientIVC::Proof&); private: std::shared_ptr builder; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp index aff80907915..3c4cc8a4a2c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/client_ivc_verifier/client_ivc_recursive_verifier.test.cpp @@ -16,7 +16,7 @@ class ClientIVCRecursionTests : public testing::Test { using TranslatorVK = GoblinVerifier::TranslatorVerificationKey; using Proof = ClientIVC::Proof; using Flavor = UltraRecursiveFlavor_; - using NativeFlavor = Flavor::NativeFlavor; + using NativeFlavor = UltraRollupFlavor; using UltraRecursiveVerifier = UltraRecursiveVerifier_; static void SetUpTestSuite() @@ -74,6 +74,8 @@ TEST_F(ClientIVCRecursionTests, NativeVerification) */ TEST_F(ClientIVCRecursionTests, Basic) { + using CIVCRecVerifierOutput = ClientIVCRecursiveVerifier::Output; + // Generate a genuine ClientIVC prover output ClientIVC ivc{ {}, /*auto_verify_mode=*/true }; auto [proof, verifier_input] = construct_client_ivc_prover_output(ivc); @@ -83,7 +85,7 @@ TEST_F(ClientIVCRecursionTests, Basic) ClientIVCVerifier verifier{ builder, verifier_input }; // Generate the recursive verification circuit - verifier.verify(proof); + CIVCRecVerifierOutput output = verifier.verify(proof); 
EXPECT_EQ(builder->failed(), false) << builder->err(); @@ -95,6 +97,8 @@ TEST_F(ClientIVCRecursionTests, Basic) TEST_F(ClientIVCRecursionTests, ClientTubeBase) { + using CIVCRecVerifierOutput = ClientIVCRecursiveVerifier::Output; + // Generate a genuine ClientIVC prover output ClientIVC ivc{ {}, /*auto_verify_mode=*/true }; auto [proof, verifier_input] = construct_client_ivc_prover_output(ivc); @@ -104,10 +108,15 @@ TEST_F(ClientIVCRecursionTests, ClientTubeBase) ClientIVCVerifier verifier{ tube_builder, verifier_input }; // Generate the recursive verification circuit - verifier.verify(proof); + CIVCRecVerifierOutput client_ivc_rec_verifier_output = verifier.verify(proof); + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1069): fix this by taking it from the output instead of + // just using default. tube_builder->add_pairing_point_accumulator( stdlib::recursion::init_default_agg_obj_indices(*tube_builder)); + // The tube only calls an IPA recursive verifier once, so we can just add this IPA claim and proof + tube_builder->add_ipa_claim(client_ivc_rec_verifier_output.opening_claim.get_witness_indices()); + tube_builder->ipa_proof = convert_stdlib_proof_to_native(client_ivc_rec_verifier_output.ipa_transcript->proof_data); info("ClientIVC Recursive Verifier: num prefinalized gates = ", tube_builder->num_gates); @@ -116,14 +125,21 @@ TEST_F(ClientIVCRecursionTests, ClientTubeBase) // EXPECT_TRUE(CircuitChecker::check(*tube_builder)); // Construct and verify a proof for the ClientIVC Recursive Verifier circuit - auto proving_key = std::make_shared>(*tube_builder); - UltraProver tube_prover{ proving_key }; + auto proving_key = std::make_shared>(*tube_builder); + UltraProver_ tube_prover{ proving_key }; auto native_tube_proof = tube_prover.construct_proof(); + // Natively verify the tube proof + auto native_vk_with_ipa = std::make_shared(proving_key->proving_key); + auto ipa_verification_key = std::make_shared>(1 << CONST_ECCVM_LOG_N); + UltraVerifier_ 
native_verifier(native_vk_with_ipa, ipa_verification_key); + EXPECT_TRUE(native_verifier.verify_proof(native_tube_proof, tube_prover.proving_key->proving_key.ipa_proof)); + + // Construct a base rollup circuit that recursively verifies the tube proof. Builder base_builder; - auto native_vk = std::make_shared(proving_key->proving_key); + auto native_vk = std::make_shared(proving_key->proving_key); auto vk = std::make_shared(&base_builder, native_vk); - auto tube_proof = bb::convert_proof_to_witness(&base_builder, native_tube_proof); + auto tube_proof = bb::convert_native_proof_to_stdlib(&base_builder, native_tube_proof); UltraRecursiveVerifier base_verifier{ &base_builder, vk }; base_verifier.verify_proof(tube_proof, stdlib::recursion::init_default_aggregation_state(base_builder)); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp index dd1868f2cc6..a9422482854 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp @@ -15,8 +15,11 @@ ECCVMRecursiveVerifier_::ECCVMRecursiveVerifier_( /** * @brief This function verifies an ECCVM Honk proof for given program settings up to sumcheck. 
+ * */ -template void ECCVMRecursiveVerifier_::verify_proof(const ECCVMProof& proof) +template +std::pair, std::shared_ptr::Transcript>> +ECCVMRecursiveVerifier_::verify_proof(const ECCVMProof& proof) { using Curve = typename Flavor::Curve; using Shplemini = ShpleminiVerifier_; @@ -25,8 +28,8 @@ template void ECCVMRecursiveVerifier_::verify_proof(co RelationParameters relation_parameters; - StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof.pre_ipa_proof); - StdlibProof stdlib_ipa_proof = bb::convert_proof_to_witness(builder, proof.ipa_proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof.pre_ipa_proof); + StdlibProof stdlib_ipa_proof = bb::convert_native_proof_to_stdlib(builder, proof.ipa_proof); transcript = std::make_shared(stdlib_proof); ipa_transcript = std::make_shared(stdlib_ipa_proof); @@ -142,10 +145,8 @@ template void ECCVMRecursiveVerifier_::verify_proof(co const OpeningClaim batch_opening_claim = Shplonk::reduce_verification(key->pcs_verification_key->get_g1_identity(), opening_claims, transcript); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1142): Handle this return value correctly. 
- const typename PCS::VerifierAccumulator batched_opening_accumulator = - PCS::reduce_verify(batch_opening_claim, ipa_transcript); ASSERT(sumcheck_verified); + return { batch_opening_claim, ipa_transcript }; } template class ECCVMRecursiveVerifier_>; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp index 58103e9644b..729740aa707 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp @@ -21,7 +21,7 @@ template class ECCVMRecursiveVerifier_ { const std::shared_ptr& native_verifier_key); // TODO(https://github.com/AztecProtocol/barretenberg/issues/991): switch recursive verifiers to StdlibProof - void verify_proof(const ECCVMProof& proof); + std::pair, std::shared_ptr> verify_proof(const ECCVMProof& proof); std::shared_ptr key; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp index 0aae8567845..8c5581e22b6 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp @@ -83,7 +83,8 @@ template class ECCVMRecursiveTests : public ::testing info("ECCVM Recursive Verifier"); OuterBuilder outer_circuit; RecursiveVerifier verifier{ &outer_circuit, verification_key }; - verifier.verify_proof(proof); + auto [opening_claim, ipa_transcript] = verifier.verify_proof(proof); + info("Recursive Verifier: num gates = ", outer_circuit.get_estimated_num_finalized_gates()); // Check for a failure flag in the recursive verifier circuit diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp 
b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp index ea93c1c161d..a852624235a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp @@ -7,14 +7,13 @@ #include "schnorr.hpp" using namespace bb; -using namespace bb::stdlib; using namespace bb::crypto; using Builder = UltraCircuitBuilder; -using bool_ct = bool_t; -using byte_array_ct = byte_array; -using field_ct = field_t; -using witness_ct = witness_t; +using bool_ct = stdlib::bool_t; +using byte_array_ct = stdlib::byte_array; +using field_ct = stdlib::field_t; +using witness_ct = stdlib::witness_t; /** * @test Test circuit verifying a Schnorr signature generated by \see{crypto::schnorr_verify_signature}. @@ -42,10 +41,10 @@ TEST(stdlib_schnorr, schnorr_verify_signature) message_string, account.public_key, signature); EXPECT_EQ(first_result, true); - cycle_group pub_key{ witness_ct(&builder, account.public_key.x), - witness_ct(&builder, account.public_key.y), - false }; - schnorr_signature_bits sig = schnorr_convert_signature(&builder, signature); + stdlib::cycle_group pub_key{ witness_ct(&builder, account.public_key.x), + witness_ct(&builder, account.public_key.y), + false }; + stdlib::schnorr_signature_bits sig = stdlib::schnorr_convert_signature(&builder, signature); byte_array_ct message(&builder, message_string); schnorr_verify_signature(message, pub_key, sig); @@ -84,10 +83,10 @@ TEST(stdlib_schnorr, verify_signature_failure) EXPECT_EQ(native_result, false); // check stdlib verification with account 2 public key fails - cycle_group pub_key2_ct{ witness_ct(&builder, account2.public_key.x), - witness_ct(&builder, account2.public_key.y), - false }; - schnorr_signature_bits sig = schnorr_convert_signature(&builder, signature); + stdlib::cycle_group pub_key2_ct{ witness_ct(&builder, account2.public_key.x), + witness_ct(&builder, account2.public_key.y), + false }; + 
stdlib::schnorr_signature_bits sig = stdlib::schnorr_convert_signature(&builder, signature); byte_array_ct message(&builder, message_string); schnorr_verify_signature(message, pub_key2_ct, sig); @@ -118,10 +117,10 @@ TEST(stdlib_schnorr, schnorr_signature_verification_result) longer_string, account.public_key, signature); EXPECT_EQ(first_result, true); - cycle_group pub_key{ witness_ct(&builder, account.public_key.x), - witness_ct(&builder, account.public_key.y), - false }; - schnorr_signature_bits sig = schnorr_convert_signature(&builder, signature); + stdlib::cycle_group pub_key{ witness_ct(&builder, account.public_key.x), + witness_ct(&builder, account.public_key.y), + false }; + stdlib::schnorr_signature_bits sig = stdlib::schnorr_convert_signature(&builder, signature); byte_array_ct message(&builder, longer_string); bool_ct signature_result = schnorr_signature_verification_result(message, pub_key, sig); EXPECT_EQ(signature_result.witness_bool, true); @@ -161,10 +160,10 @@ TEST(stdlib_schnorr, signature_verification_result_failure) EXPECT_EQ(native_result, false); // check stdlib verification with account 2 public key fails - cycle_group pub_key2_ct{ witness_ct(&builder, account2.public_key.x), - witness_ct(&builder, account2.public_key.y), - false }; - schnorr_signature_bits sig = schnorr_convert_signature(&builder, signature); + stdlib::cycle_group pub_key2_ct{ witness_ct(&builder, account2.public_key.x), + witness_ct(&builder, account2.public_key.y), + false }; + stdlib::schnorr_signature_bits sig = stdlib::schnorr_convert_signature(&builder, signature); byte_array_ct message(&builder, message_string); bool_ct signature_result = schnorr_signature_verification_result(message, pub_key2_ct, sig); EXPECT_EQ(signature_result.witness_bool, false); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp index 8a6980ff4b0..b9bf165bc96 
100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.cpp @@ -8,11 +8,11 @@ namespace bb::stdlib::recursion::honk { * @todo https://github.com/AztecProtocol/barretenberg/issues/934: Add logic for accumulating the pairing points * produced by the translator and merge verifier (and potentially IPA accumulators for ECCVM verifier) */ -void GoblinRecursiveVerifier::verify(const GoblinProof& proof) +GoblinRecursiveVerifierOutput GoblinRecursiveVerifier::verify(const GoblinProof& proof) { // Run the ECCVM recursive verifier ECCVMVerifier eccvm_verifier{ builder, verification_keys.eccvm_verification_key }; - eccvm_verifier.verify_proof(proof.eccvm_proof); + auto [opening_claim, ipa_transcript] = eccvm_verifier.verify_proof(proof.eccvm_proof); // Run the Translator recursive verifier TranslatorVerifier translator_verifier{ builder, @@ -36,5 +36,6 @@ void GoblinRecursiveVerifier::verify(const GoblinProof& proof) MergeVerifier merge_verifier{ builder }; merge_verifier.verify_proof(proof.merge_proof); + return { opening_claim, ipa_transcript }; } } // namespace bb::stdlib::recursion::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp index 92ae344af66..a3c0828ce90 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.hpp @@ -5,6 +5,16 @@ #include "barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.hpp" namespace bb::stdlib::recursion::honk { + +struct GoblinRecursiveVerifierOutput { + using Builder = UltraCircuitBuilder; + using ECCVMFlavor = ECCVMRecursiveFlavor_; + using Curve = grumpkin; + using Transcript = 
bb::BaseTranscript>; + OpeningClaim opening_claim; + std::shared_ptr ipa_transcript; +}; + class GoblinRecursiveVerifier { public: // Goblin Recursive Verifier circuit is using Ultra arithmetisation @@ -35,7 +45,7 @@ class GoblinRecursiveVerifier { * * @todo(https://github.com/AztecProtocol/barretenberg/issues/991): The GoblinProof should aleady be a stdlib proof */ - void verify(const GoblinProof&); + GoblinRecursiveVerifierOutput verify(const GoblinProof&); private: Builder* builder; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp index 80ee773f2df..a7f9512ec9b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp @@ -113,7 +113,7 @@ TEST_F(GoblinRecursiveVerifierTests, ECCVMFailure) // Tamper with the ECCVM proof for (auto& val : proof.eccvm_proof.pre_ipa_proof) { - if (val > 0) { // tamper by finding the tenth non-zero value and incrementing it by 1 + if (val > 0) { // tamper by finding the first non-zero value and incrementing it by 1 // tamper by finding the first non-zero value // and incrementing it by 1 val += 1; @@ -123,8 +123,18 @@ TEST_F(GoblinRecursiveVerifierTests, ECCVMFailure) Builder builder; GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - - EXPECT_DEBUG_DEATH(verifier.verify(proof), "(ipa_relation.get_value.* == -opening_claim.commitment.get_value.*)"); + GoblinRecursiveVerifierOutput goblin_rec_verifier_output = verifier.verify(proof); + + auto crs_factory = + std::make_shared>("../srs_db/grumpkin", 1 << CONST_ECCVM_LOG_N); + auto grumpkin_verifier_commitment_key = + std::make_shared>(1 << CONST_ECCVM_LOG_N, crs_factory); + OpeningClaim native_claim = goblin_rec_verifier_output.opening_claim.get_native_opening_claim(); + auto 
native_ipa_transcript = std::make_shared( + convert_stdlib_proof_to_native(goblin_rec_verifier_output.ipa_transcript->proof_data)); + + EXPECT_FALSE( + IPA::reduce_verify(grumpkin_verifier_commitment_key, native_claim, native_ipa_transcript)); } /** diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/merge_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/merge_recursive_verifier.cpp index 593564fe4ed..6f3ea52555c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/merge_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/merge_recursive_verifier.cpp @@ -19,7 +19,7 @@ std::array::Element, 2> MergeRecursiveVerifier_ stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof); transcript = std::make_shared(stdlib_proof); // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp index 57fc7f27b08..162a890e5ff 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp @@ -19,7 +19,7 @@ std::array DeciderRecursiveVerifier_:: using VerifierCommitments = typename Flavor::VerifierCommitments; using Transcript = typename Flavor::Transcript; - StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof); transcript = std::make_shared(stdlib_proof); VerifierCommitments commitments{ accumulator->verification_key, accumulator->witness_commitments }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp 
b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp index 26c2297b41d..2bd074dfd97 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp @@ -27,7 +27,7 @@ template UltraRecursiveVerifier_::AggregationObject UltraRecursiveVerifier_::verify_proof( const HonkProof& proof, AggregationObject agg_obj) { - StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof); return verify_proof(stdlib_proof, agg_obj); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verification_key/verification_key.hpp b/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verification_key/verification_key.hpp index fc4f6ad3993..5fc139f72ba 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verification_key/verification_key.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verification_key/verification_key.hpp @@ -240,7 +240,7 @@ template struct verification_key { Builder* ctx, const std::vector>& fields, bool inner_proof_contains_pairing_point_accumulator = false, - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices = {}) + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices = {}) { std::vector fields_raw; std::shared_ptr key = std::make_shared(); @@ -446,7 +446,7 @@ template struct verification_key { plonk::PolynomialManifest polynomial_manifest; // Used to check in the circuit if a proof contains any aggregated state. 
bool contains_pairing_point_accumulator = false; - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; size_t program_width = 4; Builder* context; }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.test.cpp index 84a1b22d036..81586173b6c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.test.cpp @@ -203,7 +203,8 @@ template class ProtogalaxyRecursiveTests : public tes auto recursive_decider_vk_1 = std::make_shared(&folding_circuit, decider_vk_1); auto recursive_decider_vk_2 = std::make_shared(&folding_circuit, decider_vk_2->verification_key); - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&folding_circuit, folding_proof.proof); + StdlibProof stdlib_proof = + bb::convert_native_proof_to_stdlib(&folding_circuit, folding_proof.proof); auto verifier = FoldingRecursiveVerifier{ &folding_circuit, recursive_decider_vk_1, { recursive_decider_vk_2 } }; @@ -292,7 +293,8 @@ template class ProtogalaxyRecursiveTests : public tes auto recursive_decider_vk_1 = std::make_shared(&folding_circuit, decider_vk_1); auto recursive_decider_vk_2 = std::make_shared(&folding_circuit, decider_vk_2->verification_key); - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&folding_circuit, folding_proof.proof); + StdlibProof stdlib_proof = + bb::convert_native_proof_to_stdlib(&folding_circuit, folding_proof.proof); auto verifier = FoldingRecursiveVerifier{ &folding_circuit, recursive_decider_vk_1, { recursive_decider_vk_2 } }; @@ -398,7 +400,8 @@ template class ProtogalaxyRecursiveTests : public tes std::make_shared(&folding_circuit, 
verifier_accumulator); auto recursive_decider_vk_2 = std::make_shared(&folding_circuit, verifier_inst->verification_key); - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&folding_circuit, folding_proof.proof); + StdlibProof stdlib_proof = + bb::convert_native_proof_to_stdlib(&folding_circuit, folding_proof.proof); auto verifier = FoldingRecursiveVerifier{ &folding_circuit, recursive_decider_vk_1, { recursive_decider_vk_2 } }; @@ -436,7 +439,8 @@ template class ProtogalaxyRecursiveTests : public tes auto recursive_decider_vk_1 = std::make_shared(&verifier_circuit, honk_vk_1); auto recursive_decider_vk_2 = std::make_shared(&verifier_circuit, honk_vk_2); - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&verifier_circuit, fold_result.proof); + StdlibProof stdlib_proof = + bb::convert_native_proof_to_stdlib(&verifier_circuit, fold_result.proof); auto verifier = FoldingRecursiveVerifier{ &verifier_circuit, recursive_decider_vk_1, { recursive_decider_vk_2 } }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.test.cpp index 0344b1f8835..debefb03a5d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.test.cpp @@ -111,7 +111,7 @@ TEST(RecursiveHonkTranscript, InterfacesMatch) EXPECT_EQ(prover_transcript.get_manifest(), native_transcript.get_manifest()); // Instantiate a stdlib Transcript and perform the same operations - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&builder, proof_data); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, proof_data); StdlibTranscript transcript{ stdlib_proof }; perform_mock_verifier_transcript_operations(transcript); @@ -164,7 +164,7 @@ TEST(RecursiveHonkTranscript, ReturnValuesMatch) auto [native_alpha, native_beta] = native_transcript.template get_challenges("alpha", "beta"); // Perform 
the same operations with the stdlib verifier transcript - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&builder, proof_data); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, proof_data); StdlibTranscript stdlib_transcript{ stdlib_proof }; auto stdlib_scalar = stdlib_transcript.template receive_from_prover("scalar"); auto stdlib_commitment = stdlib_transcript.template receive_from_prover("commitment"); @@ -210,7 +210,7 @@ TEST(RecursiveTranscript, InfinityConsistencyGrumpkin) verifier_transcript.receive_from_prover("infinity"); auto verifier_challenge = verifier_transcript.get_challenge("challenge"); - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&builder, proof_data); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, proof_data); StdlibTranscript stdlib_transcript{ stdlib_proof }; auto stdlib_infinity = stdlib_transcript.receive_from_prover("infinity"); EXPECT_TRUE(stdlib_infinity.is_point_at_infinity().get_value()); @@ -248,7 +248,7 @@ TEST(RecursiveTranscript, InfinityConsistencyBN254) verifier_transcript.receive_from_prover("infinity"); auto verifier_challenge = verifier_transcript.get_challenge("challenge"); - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&builder, proof_data); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&builder, proof_data); StdlibTranscript stdlib_transcript{ stdlib_proof }; auto stdlib_commitment = stdlib_transcript.receive_from_prover("infinity"); EXPECT_TRUE(stdlib_commitment.is_point_at_infinity().get_value()); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp index 2849fdd48b5..ce321b18ea8 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp @@ -66,7 +66,7 @@ std::array TranslatorRecursiveVerifier_ stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof); transcript->load_proof(stdlib_proof); batching_challenge_v = transcript->template get_challenge("Translation:batching_challenge"); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp index 27de41a35d3..1e2ab4beac0 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp @@ -75,7 +75,7 @@ template class TranslatorRecursiveTests : public ::te OuterBuilder outer_circuit; // Mock a previous verifier that would in reality be the ECCVM recursive verifier - StdlibProof stdlib_proof = bb::convert_proof_to_witness(&outer_circuit, fake_inital_proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(&outer_circuit, fake_inital_proof); auto transcript = std::make_shared(stdlib_proof); transcript->template receive_from_prover("init"); diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base.hpp index 642b8f0ba82..28b2c9ad8f6 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base.hpp @@ -37,9 +37,13 @@ template class CircuitBuilderBase { std::map tau; // Public input indices which contain recursive proof information - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; 
+ PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; bool contains_pairing_point_accumulator = false; + // Public input indices which contain the output IPA opening claim + IPAClaimPubInputIndices ipa_claim_public_input_indices; + bool contains_ipa_claim = false; + // We know from the CLI arguments during proving whether a circuit should use a prover which produces // proofs that are friendly to verify in a circuit themselves. A verifier does not need a full circuit // description and should be able to verify a proof with just the verification key and the proof. @@ -206,18 +210,9 @@ template class CircuitBuilderBase { * @param proof_output_witness_indices Witness indices that need to become public and stored as recurisve proof * specific */ - void add_pairing_point_accumulator(const PairingPointAccumulatorIndices& proof_output_witness_indices); + void add_pairing_point_accumulator(const PairingPointAccumulatorIndices& pairing_point_accum_witness_indices); - /** - * TODO: We can remove this and use `add_pairing_point_accumulator` once my question has been addressed - * TODO: using `add_pairing_point_accumulator` also means that we will need to remove the cde which is - * TODO: adding the public_inputs - * @brief Update pairing_point_accumulator_public_input_indices with existing public inputs that represent a - * recursive proof - * - * @param proof_output_witness_indices - */ - void set_pairing_point_accumulator(const PairingPointAccumulatorIndices& proof_output_witness_indices); + void add_ipa_claim(const IPAClaimIndices& ipa_claim_witness_indices); bool failed() const; const std::string& err() const; diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base_impl.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base_impl.hpp index a192a8587b3..e3e4bbcfe9d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base_impl.hpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_builder_base_impl.hpp @@ -237,34 +237,35 @@ void CircuitBuilderBase::assert_valid_variables(const std::vector template void CircuitBuilderBase::add_pairing_point_accumulator( - const PairingPointAccumulatorIndices& proof_output_witness_indices) + const PairingPointAccumulatorIndices& pairing_point_accum_witness_indices) { if (contains_pairing_point_accumulator) { - failure("added recursive proof when one already exists"); + failure("added pairing point accumulator when one already exists"); ASSERT(0); } contains_pairing_point_accumulator = true; size_t i = 0; - for (const auto& idx : proof_output_witness_indices) { + for (const auto& idx : pairing_point_accum_witness_indices) { set_public_input(idx); pairing_point_accumulator_public_input_indices[i] = static_cast(public_inputs.size() - 1); ++i; } } -template -void CircuitBuilderBase::set_pairing_point_accumulator( - const PairingPointAccumulatorIndices& proof_output_witness_indices) +template void CircuitBuilderBase::add_ipa_claim(const IPAClaimIndices& ipa_claim_witness_indices) { - if (contains_pairing_point_accumulator) { - failure("added recursive proof when one already exists"); + if (contains_ipa_claim) { + failure("added IPA claim when one already exists"); ASSERT(0); } - contains_pairing_point_accumulator = true; - for (size_t i = 0; i < proof_output_witness_indices.size(); ++i) { - pairing_point_accumulator_public_input_indices[i] = - get_public_input_index(real_variable_index[proof_output_witness_indices[i]]); + contains_ipa_claim = true; + + size_t i = 0; + for (const auto& idx : ipa_claim_witness_indices) { + set_public_input(idx); + ipa_claim_public_input_indices[i] = static_cast(public_inputs.size() - 1); + ++i; } } diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_simulator.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_simulator.hpp index dc8ed11da34..017c04e8d0d 100644 --- 
a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_simulator.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/circuit_simulator.hpp @@ -191,7 +191,7 @@ class CircuitSimulatorBN254 { [[nodiscard]] bool check_circuit() const { return !_failed; } // Public input indices which contain recursive proof information - PairingPointAccumPubInputIndices pairing_point_accumulator_public_input_indices; + PairingPointAccumulatorPubInputIndices pairing_point_accumulator_public_input_indices; }; class SimulatorCircuitChecker { diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp index e87ebe9ea76..aaac39d7a00 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp @@ -581,11 +581,12 @@ class MegaFlavor { VerificationKey(ProvingKey& proving_key) { set_metadata(proving_key); - if (proving_key.commitment_key == nullptr) { - proving_key.commitment_key = std::make_shared(proving_key.circuit_size); + auto& ck = proving_key.commitment_key; + if (!ck || ck->srs->get_monomial_size() < proving_key.circuit_size) { + ck = std::make_shared(proving_key.circuit_size); } for (auto [polynomial, commitment] : zip_view(proving_key.polynomials.get_precomputed(), this->get_all())) { - commitment = proving_key.commitment_key->commit(polynomial); + commitment = ck->commit(polynomial); } } @@ -625,7 +626,7 @@ class MegaFlavor { const size_t num_public_inputs, const size_t pub_inputs_offset, const bool contains_pairing_point_accumulator, - const PairingPointAccumPubInputIndices& pairing_point_accumulator_public_input_indices, + const PairingPointAccumulatorPubInputIndices& pairing_point_accumulator_public_input_indices, const DatabusPropagationData& databus_propagation_data, const Commitment& q_m, const Commitment& q_c, diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp index 058d6fca95a..572e095f604 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp @@ -330,6 +330,8 @@ class UltraCircuitBuilder_ : public CircuitBuilderBase ipa_proof; + void process_non_native_field_multiplications(); UltraCircuitBuilder_(const size_t size_hint = 0) : CircuitBuilderBase(size_hint) diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp index d8be56ad096..8d69028950f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp @@ -284,7 +284,6 @@ class UltraFlavor { }; }; - public: /** * @brief A field element for each entity of the flavor. These entities represent the prover polynomials * evaluated at one point. 
@@ -468,7 +467,7 @@ class UltraFlavor { const uint64_t num_public_inputs, const uint64_t pub_inputs_offset, const bool contains_pairing_point_accumulator, - const PairingPointAccumPubInputIndices& pairing_point_accumulator_public_input_indices, + const PairingPointAccumulatorPubInputIndices& pairing_point_accumulator_public_input_indices, const Commitment& q_m, const Commitment& q_c, const Commitment& q_l, diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp new file mode 100644 index 00000000000..102b5ca6e63 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp @@ -0,0 +1,170 @@ +#pragma once +#include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" + +namespace bb { + +class UltraRollupFlavor : public bb::UltraFlavor { + public: + using UltraFlavor::UltraFlavor; + class ProvingKey : public UltraFlavor::ProvingKey { + public: + using UltraFlavor::ProvingKey::ProvingKey; + bool contains_ipa_claim; + IPAClaimPubInputIndices ipa_claim_public_input_indices; + HonkProof ipa_proof; + }; + + /** + * @brief The verification key is responsible for storing the commitments to the precomputed (non-witnessk) + * polynomials used by the verifier. + * + * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve + * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our + * circuits. 
+ */ + class VerificationKey : public VerificationKey_, VerifierCommitmentKey> { + public: + bool contains_ipa_claim; + IPAClaimPubInputIndices ipa_claim_public_input_indices; + + bool operator==(const VerificationKey&) const = default; + VerificationKey() = default; + VerificationKey(const size_t circuit_size, const size_t num_public_inputs) + : VerificationKey_(circuit_size, num_public_inputs) + {} + VerificationKey(ProvingKey& proving_key) + : contains_ipa_claim(proving_key.contains_ipa_claim) + , ipa_claim_public_input_indices(proving_key.ipa_claim_public_input_indices) + { + this->pcs_verification_key = std::make_shared(); + this->circuit_size = proving_key.circuit_size; + this->log_circuit_size = numeric::get_msb(this->circuit_size); + this->num_public_inputs = proving_key.num_public_inputs; + this->pub_inputs_offset = proving_key.pub_inputs_offset; + this->contains_pairing_point_accumulator = proving_key.contains_pairing_point_accumulator; + this->pairing_point_accumulator_public_input_indices = + proving_key.pairing_point_accumulator_public_input_indices; + + if (proving_key.commitment_key == nullptr) { + proving_key.commitment_key = std::make_shared(proving_key.circuit_size); + } + for (auto [polynomial, commitment] : zip_view(proving_key.polynomials.get_precomputed(), this->get_all())) { + commitment = proving_key.commitment_key->commit(polynomial); + } + } + // TODO(https://github.com/AztecProtocol/barretenberg/issues/964): Clean the boilerplate + // up. 
+ VerificationKey(const uint64_t circuit_size, + const uint64_t num_public_inputs, + const uint64_t pub_inputs_offset, + const bool contains_pairing_point_accumulator, + const PairingPointAccumulatorPubInputIndices& pairing_point_accumulator_public_input_indices, + const bool contains_ipa_claim, + const IPAClaimPubInputIndices& ipa_claim_public_input_indices, + const Commitment& q_m, + const Commitment& q_c, + const Commitment& q_l, + const Commitment& q_r, + const Commitment& q_o, + const Commitment& q_4, + const Commitment& q_arith, + const Commitment& q_delta_range, + const Commitment& q_elliptic, + const Commitment& q_aux, + const Commitment& q_lookup, + const Commitment& q_poseidon2_external, + const Commitment& q_poseidon2_internal, + const Commitment& sigma_1, + const Commitment& sigma_2, + const Commitment& sigma_3, + const Commitment& sigma_4, + const Commitment& id_1, + const Commitment& id_2, + const Commitment& id_3, + const Commitment& id_4, + const Commitment& table_1, + const Commitment& table_2, + const Commitment& table_3, + const Commitment& table_4, + const Commitment& lagrange_first, + const Commitment& lagrange_last) + : contains_ipa_claim(contains_ipa_claim) + , ipa_claim_public_input_indices(ipa_claim_public_input_indices) + { + this->circuit_size = circuit_size; + this->log_circuit_size = numeric::get_msb(this->circuit_size); + this->num_public_inputs = num_public_inputs; + this->pub_inputs_offset = pub_inputs_offset; + this->contains_pairing_point_accumulator = contains_pairing_point_accumulator; + this->pairing_point_accumulator_public_input_indices = pairing_point_accumulator_public_input_indices; + this->q_m = q_m; + this->q_c = q_c; + this->q_l = q_l; + this->q_r = q_r; + this->q_o = q_o; + this->q_4 = q_4; + this->q_arith = q_arith; + this->q_delta_range = q_delta_range; + this->q_elliptic = q_elliptic; + this->q_aux = q_aux; + this->q_lookup = q_lookup; + this->q_poseidon2_external = q_poseidon2_external; + this->q_poseidon2_internal 
= q_poseidon2_internal; + this->sigma_1 = sigma_1; + this->sigma_2 = sigma_2; + this->sigma_3 = sigma_3; + this->sigma_4 = sigma_4; + this->id_1 = id_1; + this->id_2 = id_2; + this->id_3 = id_3; + this->id_4 = id_4; + this->table_1 = table_1; + this->table_2 = table_2; + this->table_3 = table_3; + this->table_4 = table_4; + this->lagrange_first = lagrange_first; + this->lagrange_last = lagrange_last; + } + + // For serialising and deserialising data + MSGPACK_FIELDS(circuit_size, + log_circuit_size, + num_public_inputs, + pub_inputs_offset, + contains_pairing_point_accumulator, + pairing_point_accumulator_public_input_indices, + contains_ipa_claim, + ipa_claim_public_input_indices, + q_m, + q_c, + q_l, + q_r, + q_o, + q_4, + q_arith, + q_delta_range, + q_elliptic, + q_aux, + q_lookup, + q_poseidon2_external, + q_poseidon2_internal, + sigma_1, + sigma_2, + sigma_3, + sigma_4, + id_1, + id_2, + id_3, + id_4, + table_1, + table_2, + table_3, + table_4, + lagrange_first, + lagrange_last); + }; + + using VerifierCommitments = VerifierCommitments_; +}; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp b/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp index 0dc60e9438f..f778a03c503 100644 --- a/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp +++ b/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp @@ -4,6 +4,7 @@ #include "barretenberg/stdlib_circuit_builders/mega_zk_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_keccak_flavor.hpp" +#include "barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp" namespace bb { template void TraceToPolynomials::populate_public_inputs_block(Builder& builder) @@ -174,6 +175,7 @@ void TraceToPolynomials::add_ecc_op_wires_to_proving_key(Builder& builde template class TraceToPolynomials; template 
class TraceToPolynomials; +template class TraceToPolynomials; template class TraceToPolynomials; template class TraceToPolynomials; template class TraceToPolynomials; diff --git a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp index 06f69bdcb13..fa7853a3d96 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp @@ -441,7 +441,7 @@ template class BaseTranscript { }; template -static bb::StdlibProof convert_proof_to_witness(Builder* builder, const HonkProof& proof) +static bb::StdlibProof convert_native_proof_to_stdlib(Builder* builder, const HonkProof& proof) { bb::StdlibProof result; for (const auto& element : proof) { @@ -450,6 +450,15 @@ static bb::StdlibProof convert_proof_to_witness(Builder* builder, const return result; } +template static bb::HonkProof convert_stdlib_proof_to_native(const StdlibProof& proof) +{ + bb::HonkProof result; + for (const auto& element : proof) { + result.push_back(element.get_value()); + } + return result; +} + using NativeTranscript = BaseTranscript; /////////////////////////////////////////// diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.cpp index 0172648feeb..a106f571648 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.cpp @@ -8,17 +8,18 @@ namespace bb { -TranslatorProver::TranslatorProver(CircuitBuilder& circuit_builder, const std::shared_ptr& transcript) +TranslatorProver::TranslatorProver(CircuitBuilder& circuit_builder, + const std::shared_ptr& transcript, + std::shared_ptr commitment_key) : dyadic_circuit_size(Flavor::compute_dyadic_circuit_size(circuit_builder)) , mini_circuit_dyadic_size(Flavor::compute_mini_circuit_dyadic_size(circuit_builder)) , 
transcript(transcript) + , key(std::make_shared(circuit_builder)) { PROFILE_THIS(); - // Compute total number of gates, dyadic circuit size, etc. - key = std::make_shared(circuit_builder); + key->commitment_key = commitment_key ? commitment_key : std::make_shared(key->circuit_size); compute_witness(circuit_builder); - compute_commitment_key(key->circuit_size); } /** @@ -159,9 +160,8 @@ void TranslatorProver::execute_relation_check_rounds() gate_challenges[idx] = transcript->template get_challenge("Sumcheck:gate_challenge_" + std::to_string(idx)); } - // create masking polynomials for sumcheck round univariates and auxiliary data - auto commitment_key = std::make_shared(Flavor::BATCHED_RELATION_PARTIAL_LENGTH); - zk_sumcheck_data = ZKSumcheckData(key->log_circuit_size, transcript, commitment_key); + // // create masking polynomials for sumcheck round univariates and auxiliary data + zk_sumcheck_data = ZKSumcheckData(key->log_circuit_size, transcript, key->commitment_key); sumcheck_output = sumcheck.prove(key->polynomials, relation_parameters, alpha, gate_challenges, zk_sumcheck_data); } diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.hpp index a8a19591ef8..989596fd02b 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_prover.hpp @@ -28,7 +28,9 @@ class TranslatorProver { size_t dyadic_circuit_size = 0; // final power-of-2 circuit size size_t mini_circuit_dyadic_size = 0; // The size of the small circuit that contains non-range constraint relations - explicit TranslatorProver(CircuitBuilder& circuit_builder, const std::shared_ptr& transcript); + explicit TranslatorProver(CircuitBuilder& circuit_builder, + const std::shared_ptr& transcript, + std::shared_ptr commitment_key = nullptr); void compute_witness(CircuitBuilder& circuit_builder); void 
compute_commitment_key(size_t circuit_size); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp index bdee7b3d8cd..7052c8e831e 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp @@ -57,33 +57,31 @@ template void DeciderProver_::execute_relation_ch */ template void DeciderProver_::execute_pcs_rounds() { - if (proving_key->proving_key.commitment_key == nullptr) { - proving_key->proving_key.commitment_key = - std::make_shared(proving_key->proving_key.circuit_size); - } - vinfo("made commitment key"); using OpeningClaim = ProverOpeningClaim; + auto& ck = proving_key->proving_key.commitment_key; + ck = ck ? ck : std::make_shared(proving_key->proving_key.circuit_size); + OpeningClaim prover_opening_claim; if constexpr (!Flavor::HasZK) { prover_opening_claim = ShpleminiProver_::prove(proving_key->proving_key.circuit_size, proving_key->proving_key.polynomials.get_unshifted(), proving_key->proving_key.polynomials.get_to_be_shifted(), sumcheck_output.challenge, - proving_key->proving_key.commitment_key, + ck, transcript); } else { prover_opening_claim = ShpleminiProver_::prove(proving_key->proving_key.circuit_size, proving_key->proving_key.polynomials.get_unshifted(), proving_key->proving_key.polynomials.get_to_be_shifted(), sumcheck_output.challenge, - proving_key->proving_key.commitment_key, + ck, transcript, zk_sumcheck_data.libra_univariates_monomial, sumcheck_output.claimed_libra_evaluations); } vinfo("executed multivariate-to-univarite reduction"); - PCS::compute_opening_proof(proving_key->proving_key.commitment_key, prover_opening_claim, transcript); + PCS::compute_opening_proof(ck, prover_opening_claim, transcript); vinfo("computed opening proof"); } @@ -110,6 +108,7 @@ template HonkProof DeciderProver_::construct_proo } template class DeciderProver_; +template class DeciderProver_; 
template class DeciderProver_; template class DeciderProver_; template class DeciderProver_; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp index b4f54e9cf96..1d8a7a85424 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp @@ -5,6 +5,7 @@ #include "barretenberg/stdlib_circuit_builders/mega_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/mega_zk_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" +#include "barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp" #include "barretenberg/sumcheck/sumcheck_output.hpp" #include "barretenberg/sumcheck/zk_sumcheck_data.hpp" #include "barretenberg/transcript/transcript.hpp" diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp index 93a00d5c37c..e82b2ccacf9 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.cpp @@ -189,6 +189,7 @@ void DeciderProvingKey_::move_structured_trace_overflow_to_overflow_bloc template class DeciderProvingKey_; template class DeciderProvingKey_; +template class DeciderProvingKey_; template class DeciderProvingKey_; template class DeciderProvingKey_; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp index cb9f954d193..ad9f2eacb99 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp @@ -8,6 +8,7 @@ #include "barretenberg/stdlib_circuit_builders/mega_zk_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" #include 
"barretenberg/stdlib_circuit_builders/ultra_keccak_flavor.hpp" +#include "barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp" #include "barretenberg/trace_to_polynomials/trace_to_polynomials.hpp" namespace bb { @@ -44,12 +45,12 @@ template class DeciderProvingKey_ { std::vector gate_challenges; // The target sum, which is typically nonzero for a ProtogalaxyProver's accmumulator FF target_sum; - size_t final_active_wire_idx{ 0 }; // idx of last non-trivial wire value in the trace + size_t dyadic_circuit_size{ 0 }; // final power-of-2 circuit size DeciderProvingKey_(Circuit& circuit, TraceSettings trace_settings = {}, - std::shared_ptr commitment_key = nullptr) + std::shared_ptr commitment_key = nullptr) : is_structured(trace_settings.structure.has_value()) { PROFILE_THIS_NAME("DeciderProvingKey(Circuit&)"); @@ -96,7 +97,7 @@ template class DeciderProvingKey_ { } { - PROFILE_THIS_NAME("constructing proving key"); + PROFILE_THIS_NAME("allocating proving key"); proving_key = ProvingKey(dyadic_circuit_size, circuit.public_inputs.size(), commitment_key); // If not using structured trace OR if using structured trace but overflow has occurred (overflow block in @@ -104,6 +105,7 @@ template class DeciderProvingKey_ { if ((IsMegaFlavor && !is_structured) || (is_structured && circuit.blocks.has_overflow)) { // Allocate full size polynomials proving_key.polynomials = typename Flavor::ProverPolynomials(dyadic_circuit_size); + vinfo("allocated polynomials object in proving key"); } else { // Allocate only a correct amount of memory for each polynomial // Allocate the wires and selectors polynomials { @@ -188,7 +190,7 @@ template class DeciderProvingKey_ { // Allocate the table polynomials if constexpr (IsUltraFlavor) { for (auto& poly : proving_key.polynomials.get_tables()) { - poly = typename Flavor::Polynomial(max_tables_size, dyadic_circuit_size, table_offset); + poly = Polynomial(max_tables_size, dyadic_circuit_size, table_offset); } } } @@ -196,19 +198,19 @@ 
template class DeciderProvingKey_ { PROFILE_THIS_NAME("allocating sigmas and ids"); for (auto& sigma : proving_key.polynomials.get_sigmas()) { - sigma = typename Flavor::Polynomial(proving_key.circuit_size); + sigma = Polynomial(proving_key.circuit_size); } for (auto& id : proving_key.polynomials.get_ids()) { - id = typename Flavor::Polynomial(proving_key.circuit_size); + id = Polynomial(proving_key.circuit_size); } } { ZoneScopedN("allocating lookup read counts and tags"); // Allocate the read counts and tags polynomials proving_key.polynomials.lookup_read_counts = - typename Flavor::Polynomial(max_tables_size, dyadic_circuit_size, table_offset); + Polynomial(max_tables_size, dyadic_circuit_size, table_offset); proving_key.polynomials.lookup_read_tags = - typename Flavor::Polynomial(max_tables_size, dyadic_circuit_size, table_offset); + Polynomial(max_tables_size, dyadic_circuit_size, table_offset); } { ZoneScopedN("allocating lookup and databus inverses"); @@ -261,6 +263,7 @@ template class DeciderProvingKey_ { /* size=*/dyadic_circuit_size, /*virtual size=*/dyadic_circuit_size, /*start_idx=*/0); } } + vinfo("allocated polynomials object in proving key"); // We can finally set the shifted polynomials now that all of the to_be_shifted polynomials are // defined. 
proving_key.polynomials.set_shifted(); // Ensure shifted wires are set correctly @@ -307,12 +310,17 @@ template class DeciderProvingKey_ { proving_key.public_inputs.emplace_back(proving_key.polynomials.w_r[idx]); } - // Set the recursive proof indices + if constexpr (HasIPAAccumulatorFlavor) { // Set the IPA claim indices + proving_key.ipa_claim_public_input_indices = circuit.ipa_claim_public_input_indices; + proving_key.contains_ipa_claim = circuit.contains_ipa_claim; + proving_key.ipa_proof = circuit.ipa_proof; + } + // Set the pairing point accumulator indices proving_key.pairing_point_accumulator_public_input_indices = circuit.pairing_point_accumulator_public_input_indices; proving_key.contains_pairing_point_accumulator = circuit.contains_pairing_point_accumulator; - if constexpr (IsMegaFlavor) { // Set databus commitment propagation data + if constexpr (HasDataBus) { // Set databus commitment propagation data proving_key.databus_propagation_data = circuit.databus_propagation_data; } auto end = std::chrono::steady_clock::now(); @@ -328,7 +336,6 @@ template class DeciderProvingKey_ { private: static constexpr size_t num_zero_rows = Flavor::has_zero_row ? 
1 : 0; static constexpr size_t NUM_WIRES = Circuit::NUM_WIRES; - size_t dyadic_circuit_size = 0; // final power-of-2 circuit size size_t compute_dyadic_size(Circuit&); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp index 996df320f95..22e93f76360 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp @@ -89,6 +89,7 @@ template bool DeciderVerifier_::verify() template class DeciderVerifier_; template class DeciderVerifier_; +template class DeciderVerifier_; template class DeciderVerifier_; template class DeciderVerifier_; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp index d4bb3526c87..00d3757db44 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp @@ -10,13 +10,14 @@ namespace bb { * */ template -MergeProver_::MergeProver_(const std::shared_ptr& op_queue) +MergeProver_::MergeProver_(const std::shared_ptr& op_queue, + std::shared_ptr commitment_key) : op_queue(op_queue) { // Update internal size data in the op queue that allows for extraction of e.g. previous aggregate transcript op_queue->set_size_data(); - // Get the appropriate commitment based on the updated ultra ops size - pcs_commitment_key = std::make_shared(op_queue->get_current_size()); + pcs_commitment_key = + commitment_key ? 
commitment_key : std::make_shared(op_queue->get_current_size()); } /** diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp index 9499ed5fa01..9a265698c09 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp @@ -27,7 +27,8 @@ template class MergeProver_ { public: std::shared_ptr transcript; - explicit MergeProver_(const std::shared_ptr&); + explicit MergeProver_(const std::shared_ptr& op_queue, + std::shared_ptr commitment_key = nullptr); BB_PROFILE HonkProof construct_proof(); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp index cbea1470ab5..111497e9175 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp @@ -58,8 +58,10 @@ template void OinkProver::prove() // Generate relation separators alphas for sumcheck/combiner computation proving_key->alphas = generate_alphas_round(); +#ifndef __wasm__ // Free the commitment key proving_key->proving_key.commitment_key = nullptr; +#endif } /** @@ -264,6 +266,7 @@ template typename Flavor::RelationSeparator OinkProver; template class OinkProver; +template class OinkProver; template class OinkProver; template class OinkProver; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp index bd7aa34fb88..8196f7f77e7 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp @@ -3,6 +3,7 @@ #include "barretenberg/stdlib_circuit_builders/mega_zk_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_keccak_flavor.hpp" +#include 
"barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp" namespace bb { @@ -160,6 +161,7 @@ template typename Flavor::RelationSeparator OinkVerifier< template class OinkVerifier; template class OinkVerifier; +template class OinkVerifier; template class OinkVerifier; template class OinkVerifier; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp index b96d3d80748..152605af4b6 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp @@ -63,6 +63,7 @@ template HonkProof UltraProver_::construct_proof( template class UltraProver_; template class UltraProver_; +template class UltraProver_; template class UltraProver_; template class UltraProver_; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp index 1ad13e57374..11264c72162 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp @@ -3,6 +3,7 @@ #include "barretenberg/relations/relation_parameters.hpp" #include "barretenberg/stdlib_circuit_builders/mega_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" +#include "barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp" #include "barretenberg/sumcheck/sumcheck_output.hpp" #include "barretenberg/transcript/transcript.hpp" #include "barretenberg/ultra_honk/decider_proving_key.hpp" diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp index d43f4cadb1c..ad8ed0139ee 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp @@ -1,4 +1,5 @@ #include "./ultra_verifier.hpp" +#include 
"barretenberg/commitment_schemes/ipa/ipa.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/transcript/transcript.hpp" #include "barretenberg/ultra_honk/oink_verifier.hpp" @@ -9,7 +10,7 @@ namespace bb { * @brief This function verifies an Ultra Honk proof for a given Flavor. * */ -template bool UltraVerifier_::verify_proof(const HonkProof& proof) +template bool UltraVerifier_::verify_proof(const HonkProof& proof, const HonkProof& ipa_proof) { using FF = typename Flavor::FF; @@ -22,6 +23,40 @@ template bool UltraVerifier_::verify_proof(const HonkP transcript->template get_challenge("Sumcheck:gate_challenge_" + std::to_string(idx))); } + const auto recover_fq_from_public_inputs = [](std::array limbs) { + const uint256_t limb = uint256_t(limbs[0]) + + (uint256_t(limbs[1]) << stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION) + + (uint256_t(limbs[2]) << (stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION * 2)) + + (uint256_t(limbs[3]) << (stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION * 3)); + return fq(limb); + }; + + // Parse out the nested IPA claim using key->ipa_claim_public_input_indices and runs the native IPA verifier. 
+ if constexpr (HasIPAAccumulatorFlavor) { + if (verification_key->verification_key->contains_ipa_claim) { + OpeningClaim ipa_claim; + std::array bigfield_limbs; + for (size_t k = 0; k < 4; k++) { + bigfield_limbs[k] = + verification_key + ->public_inputs[verification_key->verification_key->ipa_claim_public_input_indices[k]]; + } + ipa_claim.opening_pair.challenge = recover_fq_from_public_inputs(bigfield_limbs); + ipa_claim.opening_pair.evaluation = 0; + ipa_claim.commitment = { + verification_key->public_inputs[verification_key->verification_key->ipa_claim_public_input_indices[4]], + verification_key->public_inputs[verification_key->verification_key->ipa_claim_public_input_indices[5]] + }; + + // verify the ipa_proof with this claim + auto ipa_transcript = std::make_shared(ipa_proof); + bool ipa_result = IPA::reduce_verify(ipa_verification_key, ipa_claim, ipa_transcript); + if (!ipa_result) { + return false; + } + } + } + DeciderVerifier decider_verifier{ verification_key, transcript }; return decider_verifier.verify(); @@ -29,6 +64,7 @@ template bool UltraVerifier_::verify_proof(const HonkP template class UltraVerifier_; template class UltraVerifier_; +template class UltraVerifier_; template class UltraVerifier_; template class UltraVerifier_; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp index 7365303b655..effd6a3de94 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp @@ -12,23 +12,27 @@ template class UltraVerifier_ { using FF = typename Flavor::FF; using Commitment = typename Flavor::Commitment; using VerificationKey = typename Flavor::VerificationKey; - using VerifierCommitmentKey = typename Flavor::VerifierCommitmentKey; using Transcript = typename Flavor::Transcript; using DeciderVK = DeciderVerificationKey_; using DeciderVerifier = DeciderVerifier_; public: - 
explicit UltraVerifier_(const std::shared_ptr& verifier_key) + explicit UltraVerifier_( + const std::shared_ptr& verifier_key, + const std::shared_ptr>& ipa_verification_key = nullptr) : verification_key(std::make_shared(verifier_key)) + , ipa_verification_key(ipa_verification_key) {} - bool verify_proof(const HonkProof& proof); + bool verify_proof(const HonkProof& proof, const HonkProof& ipa_proof = {}); std::shared_ptr transcript{ nullptr }; std::shared_ptr verification_key; + std::shared_ptr> ipa_verification_key; }; using UltraVerifier = UltraVerifier_; +using UltraRollupVerifier = UltraVerifier_; using UltraKeccakVerifier = UltraVerifier_; using MegaVerifier = UltraVerifier_; using MegaZKVerifier = UltraVerifier_; diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/generated/circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/generated/circuit_builder.cpp index 6a3d9ae9f17..5f37b4e56b4 100644 --- a/barretenberg/cpp/src/barretenberg/vm/avm/generated/circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/avm/generated/circuit_builder.cpp @@ -51,49 +51,54 @@ AvmCircuitBuilder::ProverPolynomials AvmCircuitBuilder::compute_polynomials() co })); // catch-all with fully formed polynomials - AVM_TRACK_TIME( - "circuit_builder/init_polys_unshifted", ({ - auto unshifted = polys.get_unshifted(); + AVM_TRACK_TIME("circuit_builder/init_polys_unshifted", ({ + auto unshifted = polys.get_unshifted(); - // An array which stores for each column of the trace the smallest size of the - // truncated column containing all non-zero elements. - // It is used to allocate the polynomials without memory overhead for the tail of zeros. - std::array col_nonzero_size{}; + // An array which stores for each column of the trace the smallest size of the + // truncated column containing all non-zero elements. + // It is used to allocate the polynomials without memory overhead for the tail of zeros. + std::array col_nonzero_size{}; - // Computation of size of columns. 
- // Non-parallel version takes 0.5 second for a trace size of 200k rows. - // A parallel version might be considered in the future. - for (size_t i = 0; i < num_rows; i++) { - const auto row = rows[i].as_vector(); - for (size_t col = 0; col < Row::SIZE; col++) { - if (!row[col].is_zero()) { - col_nonzero_size[col] = i + 1; - } - } - } + // Computation of size of columns. + // Non-parallel version takes 0.5 second for a trace size of 200k rows. + // A parallel version might be considered in the future. + for (size_t i = 0; i < num_rows; i++) { + const auto row = rows[i].as_vector(); + for (size_t col = 0; col < Row::SIZE; col++) { + if (!row[col].is_zero()) { + col_nonzero_size[col] = i + 1; + } + } + } - // Set of the labels for derived/inverse polynomials. - const auto derived_labels = polys.get_derived_labels(); - std::set derived_labels_set(derived_labels.begin(), derived_labels.end()); + // Set of the labels for derived/inverse polynomials. + const auto derived_labels = polys.get_derived_labels(); + std::set derived_labels_set(derived_labels.begin(), derived_labels.end()); - bb::parallel_for(num_unshifted, [&](size_t i) { - auto& poly = unshifted[i]; - const auto col_idx = polys_to_cols_unshifted_idx[i]; - size_t col_size = 0; + bb::parallel_for(num_unshifted, [&](size_t i) { + auto& poly = unshifted[i]; + const auto col_idx = polys_to_cols_unshifted_idx[i]; + size_t col_size = 0; - // We fully allocate the inverse polynomials. We leave this potential memory optimization for later. - if (derived_labels_set.contains(labels[i])) { - col_size = num_rows; - } else { - col_size = col_nonzero_size[col_idx]; - } + // We fully allocate the inverse polynomials. We leave this potential memory optimization for + // later. 
+ if (derived_labels_set.contains(labels[i])) { + col_size = num_rows; + } else { + col_size = col_nonzero_size[col_idx]; + } - if (poly.is_empty()) { - // Not set above - poly = Polynomial{ /*memory size*/ col_size, /*largest possible index*/ circuit_subgroup_size }; - } - }); - })); + if (poly.is_empty()) { + // Not set above + poly = Polynomial{ /*memory size*/ + col_size, + /*largest possible index as virtual size*/ circuit_subgroup_size, + /*start_index=*/0, + /*disable parallel initialization=*/true + }; + } + }); + })); AVM_TRACK_TIME( "circuit_builder/set_polys_unshifted", ({ diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/generated/recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/generated/recursive_verifier.cpp index d61e6d7f0be..71518f7b6cf 100644 --- a/barretenberg/cpp/src/barretenberg/vm/avm/generated/recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/avm/generated/recursive_verifier.cpp @@ -48,7 +48,7 @@ template AvmRecursiveVerifier_::AggregationObject AvmRecursiveVerifier_::verify_proof( const HonkProof& proof, const std::vector>& public_inputs_vec_nt, AggregationObject agg_obj) { - StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof); std::vector> public_inputs_ct; public_inputs_ct.reserve(public_inputs_vec_nt.size()); diff --git a/barretenberg/ts/README.md b/barretenberg/ts/README.md index 07101e7b726..53df298aa07 100644 --- a/barretenberg/ts/README.md +++ b/barretenberg/ts/README.md @@ -109,6 +109,33 @@ in size) is loaded and keeps page load times responsive. const { Barretenberg, RawBuffer, Crs } = await import('@aztec/bb.js'); ``` +### Multithreading in browser + +Multithreading in bb.js requires [`SharedArrayBuffer`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer) to be enabled.
It is only enabled in browsers if COOP and COEP headers are set by the server. Read more [here](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer#security_requirements). + +You can configure your server to set these headers for pages that perform proof generation. See [this example project](https://github.com/saleel/gitclaim/blob/main/app/next.config.mjs#L48-L67) that implements multi-threaded browser proving, which contains the below Next.js config: + +```typescript +{ + ... + async headers() { + return [ + { + source: '/:path*', + headers: [ + { key: 'Cross-Origin-Embedder-Policy', value: 'require-corp' }, + { key: 'Cross-Origin-Opener-Policy', value: 'same-origin' }, + ], + }, + ]; + }, +} +``` + +Note that adding COOP and COEP headers will disable loading of external scripts, which might be required by your application. + +You can enable these headers for specific pages that perform proof generation, but this may be challenging, especially in single-page applications. One workaround is to move the proof generation to a separate page, load it in an invisible iframe within your main application, and then use `postMessage` to communicate between the pages for generating proofs. + ## Development Create a symlink to the root script `bb.js-dev` in your path. You can now run the current state of the code from diff --git a/barretenberg/ts/scripts/build_wasm.sh b/barretenberg/ts/scripts/build_wasm.sh index b12348a16e8..983468b05d1 100755 --- a/barretenberg/ts/scripts/build_wasm.sh +++ b/barretenberg/ts/scripts/build_wasm.sh @@ -5,7 +5,9 @@ if [ -z "$SKIP_CPP_BUILD" ]; then # Build the wasms and strip debug symbols. 
cd ../cpp cmake --preset wasm-threads -DCMAKE_MESSAGE_LOG_LEVEL=Warning && cmake --build --preset wasm-threads - cmake --preset wasm -DCMAKE_MESSAGE_LOG_LEVEL=Warning && cmake --build --preset wasm + if [ -z "$SKIP_ST_BUILD" ]; then + cmake --preset wasm -DCMAKE_MESSAGE_LOG_LEVEL=Warning && cmake --build --preset wasm + fi ./scripts/strip-wasm.sh cd ../ts fi diff --git a/barretenberg/ts/src/barretenberg/index.ts b/barretenberg/ts/src/barretenberg/index.ts index 81481db9130..4cd972257b7 100644 --- a/barretenberg/ts/src/barretenberg/index.ts +++ b/barretenberg/ts/src/barretenberg/index.ts @@ -68,8 +68,8 @@ export class Barretenberg extends BarretenbergApi { async initSRSClientIVC(): Promise { // crsPath can be undefined - const crs = await Crs.new(2 ** 21 + 1, this.options.crsPath); - const grumpkinCrs = await GrumpkinCrs.new(2 ** 16 + 1, this.options.crsPath); + const crs = await Crs.new(2 ** 19 + 1, this.options.crsPath); + const grumpkinCrs = await GrumpkinCrs.new(2 ** 14 + 1, this.options.crsPath); // Load CRS into wasm global CRS state. // TODO: Make RawBuffer be default behavior, and have a specific Vector type for when wanting length prefixed. 
diff --git a/bb-pilcom/bb-pil-backend/templates/circuit_builder.cpp.hbs b/bb-pilcom/bb-pil-backend/templates/circuit_builder.cpp.hbs index 5d79f5e0389..537361df77e 100644 --- a/bb-pilcom/bb-pil-backend/templates/circuit_builder.cpp.hbs +++ b/bb-pilcom/bb-pil-backend/templates/circuit_builder.cpp.hbs @@ -89,8 +89,11 @@ namespace bb { if (poly.is_empty()) { // Not set above - poly = Polynomial{ /*memory size*/ col_size, /*largest possible index*/ circuit_subgroup_size }; - } + poly = Polynomial{ /*memory size*/ col_size, + /*largest possible index as virtual size*/ circuit_subgroup_size, + /*start_index=*/0, + /*disable parallel initialization=*/true + }; } }); })); diff --git a/bb-pilcom/bb-pil-backend/templates/recursive_verifier.cpp.hbs b/bb-pilcom/bb-pil-backend/templates/recursive_verifier.cpp.hbs index 3e7d779b523..80e5538a7d1 100644 --- a/bb-pilcom/bb-pil-backend/templates/recursive_verifier.cpp.hbs +++ b/bb-pilcom/bb-pil-backend/templates/recursive_verifier.cpp.hbs @@ -48,7 +48,7 @@ template {{name}}RecursiveVerifier_::AggregationObject {{name}}RecursiveVerifier_::verify_proof( const HonkProof& proof, const std::vector>& public_inputs_vec_nt, AggregationObject agg_obj) { - StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_native_proof_to_stdlib(builder, proof); std::vector> public_inputs_ct; public_inputs_ct.reserve(public_inputs_vec_nt.size()); diff --git a/docs/docs/reference/developer_references/sandbox_reference/cli_reference.md b/docs/docs/reference/developer_references/cli_reference.md similarity index 99% rename from docs/docs/reference/developer_references/sandbox_reference/cli_reference.md rename to docs/docs/reference/developer_references/cli_reference.md index dd5200742bc..1bb2392d3b7 100644 --- a/docs/docs/reference/developer_references/sandbox_reference/cli_reference.md +++ b/docs/docs/reference/developer_references/cli_reference.md @@ -1,6 +1,7 @@ --- title: CLI Reference tags: 
[sandbox] +sidebar_position: 1 --- :::warning diff --git a/docs/docs/reference/developer_references/sandbox_reference/cli_wallet_reference.md b/docs/docs/reference/developer_references/cli_wallet_reference.md similarity index 83% rename from docs/docs/reference/developer_references/sandbox_reference/cli_wallet_reference.md rename to docs/docs/reference/developer_references/cli_wallet_reference.md index 76e094fb0b3..22c1e67e892 100644 --- a/docs/docs/reference/developer_references/sandbox_reference/cli_wallet_reference.md +++ b/docs/docs/reference/developer_references/cli_wallet_reference.md @@ -2,15 +2,16 @@ title: CLI Wallet tags: [sandbox, wallet, cli] keywords: [wallet, cli wallet] +sidebar_position: 2 --- For development, it may be useful to deploy, transact, or create notes in a non-programmatic way. You can use Aztec's CLI Wallet for thing such as: - Deploying contracts - Sending transactions -- Bridging L1 [Fee Juice](../../../protocol-specs/gas-and-fees/fee-juice.md) into Aztec -- Pushing arbitrary [notes](../../../guides/developer_guides/smart_contracts/writing_contracts/notes/index.md) to your PXE -- Creating [authwits](../../../guides/developer_guides/smart_contracts/writing_contracts/authwit.md) +- Bridging L1 [Fee Juice](../../protocol-specs/gas-and-fees/fee-juice.md) into Aztec +- Pushing arbitrary [notes](../../guides/developer_guides/smart_contracts/writing_contracts/notes/index.md) to your PXE +- Creating [authwits](../../guides/developer_guides/smart_contracts/writing_contracts/authwit.md) - Aliasing info and secrets for further usage :::info @@ -67,9 +68,9 @@ $ aztec-wallet deploy-account -f master_yoda ### Deploy -You can deploy a [compiled contract](../../../guides/developer_guides/smart_contracts/how_to_compile_contract.md) to the network. +You can deploy a [compiled contract](../../guides/developer_guides/smart_contracts/how_to_compile_contract.md) to the network. 
-You probably want to look at flags such as `--init` which allows you to specify the [initializer function](../../../guides/developer_guides/smart_contracts/writing_contracts/initializers.md) to call, or `-k` for the [encryption public key](../../../aztec/concepts/accounts/keys.md#incoming-viewing-keys) if the contract is expected to have notes being encrypted to it. +You probably want to look at flags such as `--init` which allows you to specify the [initializer function](../../guides/developer_guides/smart_contracts/writing_contracts/initializers.md) to call, or `-k` for the [encryption public key](../../aztec/concepts/accounts/keys.md#incoming-viewing-keys) if the contract is expected to have notes being encrypted to it. You can pass arguments with the `--arg` flag. @@ -96,7 +97,7 @@ Again, notice how it's not necessary to pass `contracts:jedi_order` as the walle ### Manage authwits -You can use the CLI wallet to quickly generate [Authentication Witnesses](../../../guides/developer_guides/smart_contracts/writing_contracts/authwit.md). These allow you to authorize the caller to execute an action on behalf of an account. They get aliased into the `authwits` type. +You can use the CLI wallet to quickly generate [Authentication Witnesses](../../guides/developer_guides/smart_contracts/writing_contracts/authwit.md). These allow you to authorize the caller to execute an action on behalf of an account. They get aliased into the `authwits` type. ### In private @@ -132,7 +133,7 @@ aztec-wallet simulate --from master_yoda --contract-address jedi_order --args "l ### Profile -Simulates a transaction with profiling enabled. This allows you to get the gate count of each private function in the transaction. Read more about profiling [here](../../../guides/developer_guides/smart_contracts/profiling_transactions.md). +Simulates a transaction with profiling enabled. This allows you to get the gate count of each private function in the transaction. 
Read more about profiling [here](../../guides/developer_guides/smart_contracts/profiling_transactions.md). #### Example @@ -142,7 +143,7 @@ aztec-wallet simulate --profile --from master_yoda --contract-address jedi_order ### Bridge Fee Juice -The wallet provides an easy way to mint the fee-paying asset on L1 and bridging it to L2. We call it Fee Juice and you can read more about it in the [protocol specs](../../../protocol-specs/gas-and-fees/fee-juice.md). +The wallet provides an easy way to mint the fee-paying asset on L1 and bridging it to L2. We call it Fee Juice and you can read more about it in the [protocol specs](../../protocol-specs/gas-and-fees/fee-juice.md). Using the sandbox, there's already a Fee Juice contract that manages this enshrined asset. You can optionally mint more Juice before bridging it. diff --git a/docs/docs/reference/developer_references/debugging.md b/docs/docs/reference/developer_references/debugging.md index 59bd3436f6a..01b8d6d06aa 100644 --- a/docs/docs/reference/developer_references/debugging.md +++ b/docs/docs/reference/developer_references/debugging.md @@ -1,6 +1,6 @@ --- title: Debugging -sidebar_position: 2 +sidebar_position: 4 --- ## Logging in Aztec.nr diff --git a/docs/docs/reference/developer_references/sandbox_reference/cheat_codes.md b/docs/docs/reference/developer_references/sandbox_reference/cheat_codes.md index 72095bf8c31..2f96ebf6165 100644 --- a/docs/docs/reference/developer_references/sandbox_reference/cheat_codes.md +++ b/docs/docs/reference/developer_references/sandbox_reference/cheat_codes.md @@ -1,6 +1,7 @@ --- title: Cheat Codes tags: [sandbox] +sidebar_position: 1 --- import Disclaimer from "@site/src/components/Disclaimers/\_wip_disclaimer.mdx"; diff --git a/docs/docs/reference/developer_references/sandbox_reference/sandbox-reference.md b/docs/docs/reference/developer_references/sandbox_reference/sandbox-reference.md index fedfb1c41b5..82435874756 100644 --- 
a/docs/docs/reference/developer_references/sandbox_reference/sandbox-reference.md +++ b/docs/docs/reference/developer_references/sandbox_reference/sandbox-reference.md @@ -1,6 +1,7 @@ --- title: Sandbox Reference tags: [sandbox] +sidebar_position: 0 --- :::tip diff --git a/docs/docs/reference/developer_references/smart_contract_reference/_category_.json b/docs/docs/reference/developer_references/smart_contract_reference/_category_.json index 968f9bcf0a6..13f5f1ecc0a 100644 --- a/docs/docs/reference/developer_references/smart_contract_reference/_category_.json +++ b/docs/docs/reference/developer_references/smart_contract_reference/_category_.json @@ -1,6 +1,6 @@ { "label": "Smart Contract Reference", - "position": 1, + "position": 3, "collapsible": true, "collapsed": true } diff --git a/docs/docs/reference/index.md b/docs/docs/reference/index.md index 714e945f3b9..2b725e63ad9 100644 --- a/docs/docs/reference/index.md +++ b/docs/docs/reference/index.md @@ -10,7 +10,7 @@ Welcome to the References section! In this section you will find reference mater This page lists popular references. Please see the sidebar for them all. -## Popular +## Popular ### Smart contracts @@ -49,7 +49,7 @@ This page lists popular references. Please see the sidebar for them all. - +

CLI reference

diff --git a/docs/docs/tutorials/codealong/cli_wallet/faceid_wallet.md b/docs/docs/tutorials/codealong/cli_wallet/faceid_wallet.md index fc9b89d6b24..cf2cd0bcde0 100644 --- a/docs/docs/tutorials/codealong/cli_wallet/faceid_wallet.md +++ b/docs/docs/tutorials/codealong/cli_wallet/faceid_wallet.md @@ -5,7 +5,7 @@ keywords: [wallet, cli wallet, faceid] importance: 3 --- -In this tutorial, we will use Apple Mac's Secure Enclave to store the private key, and use it in Aztec's [CLI Wallet](../../../reference/developer_references/sandbox_reference/cli_wallet_reference.md). This enables fully private, native, and seedless account abstraction! +In this tutorial, we will use Apple Mac's Secure Enclave to store the private key, and use it in Aztec's [CLI Wallet](../../../reference/developer_references/cli_wallet_reference.md). This enables fully private, native, and seedless account abstraction! :::warning @@ -92,7 +92,7 @@ aztec-wallet deploy --from accounts:my-wallet token_contract@Token --args accoun You should get prompted to sign with TouchID or password. Once authorized, you should see `Contract stored in database with aliases last & devtoken` ``` -Check [the reference](../../../reference/developer_references/sandbox_reference/cli_wallet_reference.md) for the whole set of commands, but these mean: +Check [the reference](../../../reference/developer_references/cli_wallet_reference.md) for the whole set of commands, but these mean: - --from is the sender: our account `my-wallet`. We use the alias because it's easier than writing the key stored in our Secure Enclave. The wallet resolves the alias and knows where to grab it. 
- token_contract@Token is a shorthand to look in the `target` folder for our contract `token_contract-Token` @@ -111,7 +111,7 @@ aztec-wallet simulate balance_of_public -ca contracts:devtoken --args accounts:n ### What next -In this tutorial, we created an account with the Aztec's [CLI Wallet](../../../reference/developer_references/sandbox_reference/cli_wallet_reference.md), using the Apple Mac's Secure Enclave to store the private key. +In this tutorial, we created an account with the Aztec's [CLI Wallet](../../../reference/developer_references/cli_wallet_reference.md), using the Apple Mac's Secure Enclave to store the private key. You can use a multitude of authentication methods, for example with RSA you could use a passport as a recovery, or even as a signer in a multisig. All of this is based on the account contract. diff --git a/docs/docs/tutorials/codealong/contract_tutorials/token_contract.md b/docs/docs/tutorials/codealong/contract_tutorials/token_contract.md index e272dbe5850..e650379aee4 100644 --- a/docs/docs/tutorials/codealong/contract_tutorials/token_contract.md +++ b/docs/docs/tutorials/codealong/contract_tutorials/token_contract.md @@ -79,8 +79,8 @@ These are functions that have private logic and will be executed on user devices - [`transfer`](#transfer) enables an account to send tokens from their private balance to another account's private balance - [`transfer_in_private`](#transfer_in_private) enables an account to send tokens from another account's private balance to another account's private balance -- [`transfer_to_private`](#transfer_to_private) transfers a specified `amount` from an accounts public balance to a designated recipient. This flow starts in private, but will be completed in public. 
-- [`transfer_to_public`](#transfer_to_public) transfers tokens from a private balance, to a (potentially different account's) public balance +- [`transfer_to_private`](#transfer_to_private) transfers a specified `amount` from an account's public balance to a designated recipient's private balance. This flow starts in private, but will be completed in public. +- [`transfer_to_public`](#transfer_to_public) transfers tokens from the private balance of another account, to a (potentially different account's) public balance - [`mint_to_private`](#mint_to_private) enables an authorized minter to mint tokens to a specified address - [`cancel_authwit`](#cancel_authwit) enables an account to cancel an authorization to spend tokens - [`burn_private`](#burn_private) enables tokens to be burned privately diff --git a/noir-projects/aztec-nr/.gitrepo b/noir-projects/aztec-nr/.gitrepo index 09c4a663409..e23acc7d49c 100644 --- a/noir-projects/aztec-nr/.gitrepo +++ b/noir-projects/aztec-nr/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/aztec-nr branch = master - commit = 4b0d547a81fd1d37cebf1e41a1d8e0d5b8aa13f3 + commit = 4148bced3369d2f4abb68bd74e495cb429c45834 method = merge cmdver = 0.4.6 - parent = a9750f21a000a546dc44cf3b42e5144c88ad6918 + parent = 622635e92b4b0c48ce4fb44656dab422c11290bd diff --git a/noir/noir-repo/compiler/noirc_evaluator/Cargo.toml b/noir/noir-repo/compiler/noirc_evaluator/Cargo.toml index aac07339dd6..e25b5bf855a 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/Cargo.toml +++ b/noir/noir-repo/compiler/noirc_evaluator/Cargo.toml @@ -20,7 +20,6 @@ fxhash.workspace = true iter-extended.workspace = true thiserror.workspace = true num-bigint = "0.4" -num-traits.workspace = true im.workspace = true serde.workspace = true serde_json.workspace = true @@ -33,6 +32,7 @@ cfg-if.workspace = true [dev-dependencies] proptest.workspace = true similar-asserts.workspace = true +num-traits.workspace = true [features] bn254 = 
["noirc_frontend/bn254"] diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/noir/noir-repo/compiler/noirc_evaluator/src/acir/acir_variable.rs similarity index 99% rename from noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/acir/acir_variable.rs index c6e4a261897..6ba072f01a4 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/acir/acir_variable.rs @@ -1,27 +1,18 @@ -use super::big_int::BigIntContext; -use super::generated_acir::{BrilligStdlibFunc, GeneratedAcir, PLACEHOLDER_BRILLIG_INDEX}; -use crate::brillig::brillig_gen::brillig_directive; -use crate::brillig::brillig_ir::artifact::GeneratedBrillig; -use crate::errors::{InternalBug, InternalError, RuntimeError, SsaReport}; -use crate::ssa::acir_gen::{AcirDynamicArray, AcirValue}; -use crate::ssa::ir::dfg::CallStack; -use crate::ssa::ir::types::Type as SsaType; -use crate::ssa::ir::{instruction::Endian, types::NumericType}; -use acvm::acir::circuit::brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}; -use acvm::acir::circuit::opcodes::{ - AcirFunctionId, BlockId, BlockType, ConstantOrWitnessEnum, MemOp, -}; -use acvm::acir::circuit::{AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode}; -use acvm::brillig_vm::{MemoryValue, VMStatus, VM}; -use acvm::BlackBoxFunctionSolver; use acvm::{ - acir::AcirField, acir::{ brillig::Opcode as BrilligOpcode, - circuit::opcodes::FunctionInput, + circuit::{ + brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, + opcodes::{ + AcirFunctionId, BlockId, BlockType, ConstantOrWitnessEnum, FunctionInput, MemOp, + }, + AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode, + }, native_types::{Expression, Witness}, - BlackBoxFunc, + AcirField, BlackBoxFunc, }, + brillig_vm::{MemoryValue, VMStatus, VM}, + BlackBoxFunctionSolver, }; 
use fxhash::FxHashMap as HashMap; use iter_extended::{try_vecmap, vecmap}; @@ -29,6 +20,16 @@ use num_bigint::BigUint; use std::cmp::Ordering; use std::{borrow::Cow, hash::Hash}; +use crate::brillig::brillig_ir::artifact::GeneratedBrillig; +use crate::errors::{InternalBug, InternalError, RuntimeError, SsaReport}; +use crate::ssa::ir::{ + dfg::CallStack, instruction::Endian, types::NumericType, types::Type as SsaType, +}; + +use super::big_int::BigIntContext; +use super::generated_acir::{BrilligStdlibFunc, GeneratedAcir, PLACEHOLDER_BRILLIG_INDEX}; +use super::{brillig_directive, AcirDynamicArray, AcirValue}; + #[derive(Clone, Debug, PartialEq, Eq, Hash)] /// High level Type descriptor for Variables. /// diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/big_int.rs b/noir/noir-repo/compiler/noirc_evaluator/src/acir/big_int.rs similarity index 100% rename from noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/big_int.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/acir/big_int.rs diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs b/noir/noir-repo/compiler/noirc_evaluator/src/acir/brillig_directive.rs similarity index 100% rename from noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/acir/brillig_directive.rs diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/noir/noir-repo/compiler/noirc_evaluator/src/acir/generated_acir.rs similarity index 98% rename from noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/acir/generated_acir.rs index 6b215839f34..91206abe732 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/acir/generated_acir.rs @@ -1,22 +1,24 @@ //! 
`GeneratedAcir` is constructed as part of the `acir_gen` pass to accumulate all of the ACIR //! program as it is being converted from SSA form. -use std::{collections::BTreeMap, u32}; +use std::collections::BTreeMap; -use crate::{ - brillig::{brillig_gen::brillig_directive, brillig_ir::artifact::GeneratedBrillig}, - errors::{InternalError, RuntimeError, SsaReport}, - ssa::ir::{dfg::CallStack, instruction::ErrorType}, -}; use acvm::acir::{ circuit::{ brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, opcodes::{BlackBoxFuncCall, FunctionInput, Opcode as AcirOpcode}, AssertionPayload, BrilligOpcodeLocation, ErrorSelector, OpcodeLocation, }, - native_types::Witness, - BlackBoxFunc, + native_types::{Expression, Witness}, + AcirField, BlackBoxFunc, +}; + +use super::brillig_directive; +use crate::{ + brillig::brillig_ir::artifact::GeneratedBrillig, + errors::{InternalError, RuntimeError, SsaReport}, + ssa::ir::dfg::CallStack, + ErrorType, }; -use acvm::{acir::native_types::Expression, acir::AcirField}; use iter_extended::vecmap; use noirc_errors::debug_info::ProcedureDebugId; @@ -155,7 +157,7 @@ impl GeneratedAcir { /// This means you cannot multiply an infinite amount of `Expression`s together. /// Once the `Expression` goes over degree-2, then it needs to be reduced to a `Witness` /// which has degree-1 in order to be able to continue the multiplication chain. 
- pub(crate) fn create_witness_for_expression(&mut self, expression: &Expression) -> Witness { + fn create_witness_for_expression(&mut self, expression: &Expression) -> Witness { let fresh_witness = self.next_witness_index(); // Create a constraint that sets them to be equal to each other diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs similarity index 98% rename from noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs index 33fdf2abc82..5c7899b5035 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/acir/mod.rs @@ -1,47 +1,58 @@ //! This file holds the pass to convert from Noir's SSA IR to ACIR. -mod acir_ir; +use fxhash::FxHashMap as HashMap; +use im::Vector; use std::collections::{BTreeMap, HashSet}; use std::fmt::Debug; -use self::acir_ir::acir_variable::{AcirContext, AcirType, AcirVar}; -use self::acir_ir::generated_acir::BrilligStdlibFunc; -use super::function_builder::data_bus::DataBus; -use super::ir::dfg::CallStack; -use super::ir::function::FunctionId; -use super::ir::instruction::ConstrainError; -use super::ir::printer::try_to_extract_string_from_error_payload; -use super::{ +use acvm::acir::{ + circuit::{ + brillig::{BrilligBytecode, BrilligFunctionId}, + opcodes::{AcirFunctionId, BlockType}, + AssertionPayload, ErrorSelector, ExpressionWidth, OpcodeLocation, + }, + native_types::Witness, + BlackBoxFunc, +}; +use acvm::{acir::circuit::opcodes::BlockId, acir::AcirField, FieldElement}; +use bn254_blackbox_solver::Bn254BlackBoxSolver; +use iter_extended::{try_vecmap, vecmap}; +use noirc_frontend::monomorphization::ast::InlineType; + +mod acir_variable; +mod big_int; +mod brillig_directive; +mod generated_acir; + +use crate::brillig::{ + brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, + brillig_ir::{ + 
artifact::{BrilligParameter, GeneratedBrillig}, + BrilligContext, + }, + Brillig, +}; +use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; +use crate::ssa::{ + function_builder::data_bus::DataBus, ir::{ - dfg::DataFlowGraph, - function::{Function, RuntimeType}, + dfg::{CallStack, DataFlowGraph}, + function::{Function, FunctionId, RuntimeType}, instruction::{ - Binary, BinaryOp, Instruction, InstructionId, Intrinsic, TerminatorInstruction, + Binary, BinaryOp, ConstrainError, Instruction, InstructionId, Intrinsic, + TerminatorInstruction, }, map::Id, + printer::try_to_extract_string_from_error_payload, types::{NumericType, Type}, value::{Value, ValueId}, }, ssa_gen::Ssa, }; -use crate::brillig::brillig_ir::artifact::{BrilligParameter, GeneratedBrillig}; -use crate::brillig::brillig_ir::BrilligContext; -use crate::brillig::{brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, Brillig}; -use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; -pub(crate) use acir_ir::generated_acir::GeneratedAcir; -use acvm::acir::circuit::opcodes::{AcirFunctionId, BlockType}; -use bn254_blackbox_solver::Bn254BlackBoxSolver; -use noirc_frontend::monomorphization::ast::InlineType; - -use acvm::acir::circuit::brillig::{BrilligBytecode, BrilligFunctionId}; -use acvm::acir::circuit::{AssertionPayload, ErrorSelector, ExpressionWidth, OpcodeLocation}; -use acvm::acir::native_types::Witness; -use acvm::acir::BlackBoxFunc; -use acvm::{acir::circuit::opcodes::BlockId, acir::AcirField, FieldElement}; -use fxhash::FxHashMap as HashMap; -use im::Vector; -use iter_extended::{try_vecmap, vecmap}; -use noirc_frontend::Type as HirType; +use acir_variable::{AcirContext, AcirType, AcirVar}; +use generated_acir::BrilligStdlibFunc; +pub(crate) use generated_acir::GeneratedAcir; +use noirc_frontend::hir_def::types::Type as HirType; #[derive(Default)] struct SharedContext { @@ -772,6 +783,12 @@ impl<'a> Context<'a> { Instruction::IfElse { .. 
} => { unreachable!("IfElse instruction remaining in acir-gen") } + Instruction::MakeArray { elements, typ: _ } => { + let elements = elements.iter().map(|element| self.convert_value(*element, dfg)); + let value = AcirValue::Array(elements.collect()); + let result = dfg.instruction_results(instruction_id)[0]; + self.ssa_values.insert(result, value); + } } self.acir_context.set_call_stack(CallStack::new()); @@ -1562,7 +1579,7 @@ impl<'a> Context<'a> { if !already_initialized { let value = &dfg[array]; match value { - Value::Array { .. } | Value::Instruction { .. } => { + Value::Instruction { .. } => { let value = self.convert_value(array, dfg); let array_typ = dfg.type_of_value(array); let len = if !array_typ.contains_slice_element() { @@ -1605,13 +1622,6 @@ impl<'a> Context<'a> { match array_typ { Type::Array(_, _) | Type::Slice(_) => { match &dfg[array_id] { - Value::Array { array, .. } => { - for (i, value) in array.iter().enumerate() { - flat_elem_type_sizes.push( - self.flattened_slice_size(*value, dfg) + flat_elem_type_sizes[i], - ); - } - } Value::Instruction { .. } | Value::Param { .. } => { // An instruction representing the slice means it has been processed previously during ACIR gen. // Use the previously defined result of an array operation to fetch the internal type information. @@ -1744,13 +1754,6 @@ impl<'a> Context<'a> { fn flattened_slice_size(&mut self, array_id: ValueId, dfg: &DataFlowGraph) -> usize { let mut size = 0; match &dfg[array_id] { - Value::Array { array, .. } => { - // The array is going to be the flattened outer array - // Flattened slice size from SSA value does not need to be multiplied by the len - for value in array { - size += self.flattened_slice_size(*value, dfg); - } - } Value::NumericConstant { .. } => { size += 1; } @@ -1914,10 +1917,6 @@ impl<'a> Context<'a> { Value::NumericConstant { constant, typ } => { AcirValue::Var(self.acir_context.add_constant(*constant), typ.into()) } - Value::Array { array, .. 
} => { - let elements = array.iter().map(|element| self.convert_value(*element, dfg)); - AcirValue::Array(elements.collect()) - } Value::Intrinsic(..) => todo!(), Value::Function(function_id) => { // This conversion is for debugging support only, to allow the @@ -2840,22 +2839,6 @@ impl<'a> Context<'a> { Ok(()) } - /// Given an array value, return the numerical type of its element. - /// Panics if the given value is not an array or has a non-numeric element type. - fn array_element_type(dfg: &DataFlowGraph, value: ValueId) -> AcirType { - match dfg.type_of_value(value) { - Type::Array(elements, _) => { - assert_eq!(elements.len(), 1); - (&elements[0]).into() - } - Type::Slice(elements) => { - assert_eq!(elements.len(), 1); - (&elements[0]).into() - } - _ => unreachable!("Expected array type"), - } - } - /// Convert a Vec into a Vec using the given result ids. /// If the type of a result id is an array, several acir vars are collected into /// a single AcirValue::Array of the same length. 
@@ -2946,9 +2929,9 @@ mod test { use std::collections::BTreeMap; use crate::{ + acir::BrilligStdlibFunc, brillig::Brillig, ssa::{ - acir_gen::acir_ir::generated_acir::BrilligStdlibFunc, function_builder::FunctionBuilder, ir::{function::FunctionId, instruction::BinaryOp, map::Id, types::Type}, }, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs index 313fd65a197..786a03031d6 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs @@ -1,7 +1,6 @@ pub(crate) mod brillig_black_box; pub(crate) mod brillig_block; pub(crate) mod brillig_block_variables; -pub(crate) mod brillig_directive; pub(crate) mod brillig_fn; pub(crate) mod brillig_slice_ops; mod constant_allocation; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 40224e132ab..36e1ee90e11 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -160,13 +160,9 @@ impl<'block> BrilligBlock<'block> { ); } TerminatorInstruction::Return { return_values, .. } => { - let return_registers: Vec<_> = return_values - .iter() - .map(|value_id| { - let return_variable = self.convert_ssa_value(*value_id, dfg); - return_variable.extract_register() - }) - .collect(); + let return_registers = vecmap(return_values, |value_id| { + self.convert_ssa_value(*value_id, dfg).extract_register() + }); self.brillig_context.codegen_return(&return_registers); } } @@ -763,6 +759,43 @@ impl<'block> BrilligBlock<'block> { Instruction::IfElse { .. 
} => { unreachable!("IfElse instructions should not be possible in brillig") } + Instruction::MakeArray { elements: array, typ } => { + let value_id = dfg.instruction_results(instruction_id)[0]; + if !self.variables.is_allocated(&value_id) { + let new_variable = self.variables.define_variable( + self.function_context, + self.brillig_context, + value_id, + dfg, + ); + + // Initialize the variable + match new_variable { + BrilligVariable::BrilligArray(brillig_array) => { + self.brillig_context.codegen_initialize_array(brillig_array); + } + BrilligVariable::BrilligVector(vector) => { + let size = self + .brillig_context + .make_usize_constant_instruction(array.len().into()); + self.brillig_context.codegen_initialize_vector(vector, size, None); + self.brillig_context.deallocate_single_addr(size); + } + _ => unreachable!( + "ICE: Cannot initialize array value created as {new_variable:?}" + ), + }; + + // Write the items + let items_pointer = self + .brillig_context + .codegen_make_array_or_vector_items_pointer(new_variable); + + self.initialize_constant_array(array, typ, dfg, items_pointer); + + self.brillig_context.deallocate_register(items_pointer); + } + } }; let dead_variables = self @@ -1500,46 +1533,6 @@ impl<'block> BrilligBlock<'block> { new_variable } } - Value::Array { array, typ } => { - if self.variables.is_allocated(&value_id) { - self.variables.get_allocation(self.function_context, value_id, dfg) - } else { - let new_variable = self.variables.define_variable( - self.function_context, - self.brillig_context, - value_id, - dfg, - ); - - // Initialize the variable - match new_variable { - BrilligVariable::BrilligArray(brillig_array) => { - self.brillig_context.codegen_initialize_array(brillig_array); - } - BrilligVariable::BrilligVector(vector) => { - let size = self - .brillig_context - .make_usize_constant_instruction(array.len().into()); - self.brillig_context.codegen_initialize_vector(vector, size, None); - 
self.brillig_context.deallocate_single_addr(size); - } - _ => unreachable!( - "ICE: Cannot initialize array value created as {new_variable:?}" - ), - }; - - // Write the items - let items_pointer = self - .brillig_context - .codegen_make_array_or_vector_items_pointer(new_variable); - - self.initialize_constant_array(array, typ, dfg, items_pointer); - - self.brillig_context.deallocate_register(items_pointer); - - new_variable - } - } Value::Function(_) => { // For the debugger instrumentation we want to allow passing // around values representing function pointers, even though diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs index f9ded224b33..61ca20be2f5 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs @@ -89,8 +89,7 @@ impl ConstantAllocation { } if let Some(terminator_instruction) = block.terminator() { terminator_instruction.for_each_value(|value_id| { - let variables = collect_variables_of_value(value_id, &func.dfg); - for variable in variables { + if let Some(variable) = collect_variables_of_value(value_id, &func.dfg) { record_if_constant(block_id, variable, InstructionLocation::Terminator); } }); @@ -166,7 +165,7 @@ impl ConstantAllocation { } pub(crate) fn is_constant_value(id: ValueId, dfg: &DataFlowGraph) -> bool { - matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. } | Value::Array { .. }) + matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. 
}) } /// For a given function, finds all the blocks that are within loops diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs index a18461bc0cd..87165c36dff 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs @@ -45,32 +45,19 @@ fn find_back_edges( } /// Collects the underlying variables inside a value id. It might be more than one, for example in constant arrays that are constructed with multiple vars. -pub(crate) fn collect_variables_of_value(value_id: ValueId, dfg: &DataFlowGraph) -> Vec { +pub(crate) fn collect_variables_of_value( + value_id: ValueId, + dfg: &DataFlowGraph, +) -> Option { let value_id = dfg.resolve(value_id); let value = &dfg[value_id]; match value { - Value::Instruction { .. } | Value::Param { .. } => { - vec![value_id] - } - // Literal arrays are constants, but might use variable values to initialize. - Value::Array { array, .. } => { - let mut value_ids = vec![value_id]; - - array.iter().for_each(|item_id| { - let underlying_ids = collect_variables_of_value(*item_id, dfg); - value_ids.extend(underlying_ids); - }); - - value_ids - } - Value::NumericConstant { .. } => { - vec![value_id] + Value::Instruction { .. } | Value::Param { .. } | Value::NumericConstant { .. } => { + Some(value_id) } // Functions are not variables in a defunctionalized SSA. Only constant function values should appear. - Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) => { - vec![] - } + Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) 
=> None, } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs index a0e2a500e20..599c05fc0e8 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs @@ -47,6 +47,7 @@ impl BrilligContext< let destinations_of_temp = movements_map.remove(first_source).unwrap(); movements_map.insert(temp_register, destinations_of_temp); } + // After removing loops we should have a DAG with each node having only one ancestor (but could have multiple successors) // Now we should be able to move the registers just by performing a DFS on the movements map let heads: Vec<_> = movements_map @@ -54,6 +55,7 @@ impl BrilligContext< .filter(|source| !destinations_set.contains(source)) .copied() .collect(); + for head in heads { self.perform_movements(&movements_map, head); } } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs b/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs index 5f0c7a5bbb8..8127e3d03ef 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs @@ -5,11 +5,9 @@ pub mod errors; -// SSA code to create the SSA based IR -// for functions and execute different optimizations. -pub mod ssa; - +mod acir; pub mod brillig; +pub mod ssa; pub use ssa::create_program; pub use ssa::ir::instruction::ErrorType; @@ -31,3 +29,22 @@ pub(crate) fn trim_leading_whitespace_from_lines(src: &str) -> String { } result } + +/// Trim comments from the lines, i.e. content starting with `//`. 
+#[cfg(test)] +pub(crate) fn trim_comments_from_lines(src: &str) -> String { + let mut result = String::new(); + let mut first = true; + for line in src.lines() { + if !first { + result.push('\n'); + } + if let Some(comment) = line.find("//") { + result.push_str(line[..comment].trim_end()); + } else { + result.push_str(line); + } + first = false; + } + result +} diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs index 5e2c0f0827d..9e11441caf4 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs @@ -31,19 +31,17 @@ use noirc_errors::debug_info::{DebugFunctions, DebugInfo, DebugTypes, DebugVaria use noirc_frontend::ast::Visibility; use noirc_frontend::{hir_def::function::FunctionSignature, monomorphization::ast::Program}; +use ssa_gen::Ssa; use tracing::{span, Level}; -use self::{ - acir_gen::{Artifacts, GeneratedAcir}, - ssa_gen::Ssa, -}; +use crate::acir::{Artifacts, GeneratedAcir}; -mod acir_gen; mod checks; pub(super) mod function_builder; pub mod ir; mod opt; -mod parser; +#[cfg(test)] +pub(crate) mod parser; pub mod ssa_gen; pub struct SsaEvaluatorOptions { @@ -96,28 +94,28 @@ pub(crate) fn optimize_into_acir( .run_pass(Ssa::remove_paired_rc, "After Removing Paired rc_inc & rc_decs:") .run_pass(Ssa::separate_runtime, "After Runtime Separation:") .run_pass(Ssa::resolve_is_unconstrained, "After Resolving IsUnconstrained:") - .run_pass(|ssa| ssa.inline_functions(options.inliner_aggressiveness), "After Inlining:") + .run_pass(|ssa| ssa.inline_functions(options.inliner_aggressiveness), "After Inlining (1st):") // Run mem2reg with the CFG separated into blocks - .run_pass(Ssa::mem2reg, "After Mem2Reg:") - .run_pass(Ssa::simplify_cfg, "After Simplifying:") + .run_pass(Ssa::mem2reg, "After Mem2Reg (1st):") + .run_pass(Ssa::simplify_cfg, "After Simplifying (1st):") .run_pass(Ssa::as_slice_optimization, "After `as_slice` optimization") 
.try_run_pass( Ssa::evaluate_static_assert_and_assert_constant, "After `static_assert` and `assert_constant`:", )? .try_run_pass(Ssa::unroll_loops_iteratively, "After Unrolling:")? - .run_pass(Ssa::simplify_cfg, "After Simplifying:") + .run_pass(Ssa::simplify_cfg, "After Simplifying (2nd):") .run_pass(Ssa::flatten_cfg, "After Flattening:") .run_pass(Ssa::remove_bit_shifts, "After Removing Bit Shifts:") // Run mem2reg once more with the flattened CFG to catch any remaining loads/stores - .run_pass(Ssa::mem2reg, "After Mem2Reg:") + .run_pass(Ssa::mem2reg, "After Mem2Reg (2nd):") // Run the inlining pass again to handle functions with `InlineType::NoPredicates`. // Before flattening is run, we treat functions marked with the `InlineType::NoPredicates` as an entry point. // This pass must come immediately following `mem2reg` as the succeeding passes // may create an SSA which inlining fails to handle. .run_pass( |ssa| ssa.inline_functions_with_no_predicates(options.inliner_aggressiveness), - "After Inlining:", + "After Inlining (2nd):", ) .run_pass(Ssa::remove_if_else, "After Remove IfElse:") .run_pass(Ssa::fold_constants, "After Constant Folding:") diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs deleted file mode 100644 index 090d5bb0a83..00000000000 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub(crate) mod acir_variable; -pub(crate) mod big_int; -pub(crate) mod generated_acir; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs index 90eb79ccb69..cf884c98be9 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs @@ 
-191,7 +191,8 @@ impl Context { | Instruction::Load { .. } | Instruction::Not(..) | Instruction::Store { .. } - | Instruction::Truncate { .. } => { + | Instruction::Truncate { .. } + | Instruction::MakeArray { .. } => { self.value_sets.push(instruction_arguments_and_results); } @@ -247,8 +248,7 @@ impl Context { Value::ForeignFunction(..) => { panic!("Should not be able to reach foreign function from non-brillig functions, {func_id} in function {}", function.name()); } - Value::Array { .. } - | Value::Instruction { .. } + Value::Instruction { .. } | Value::NumericConstant { .. } | Value::Param { .. } => { panic!("At the point we are running disconnect there shouldn't be any other values as arguments") diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs index 5a62e9c8e9a..e4a2eeb8c22 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use crate::ssa::ir::{types::Type, value::ValueId}; +use crate::ssa::ir::{function::RuntimeType, types::Type, value::ValueId}; use acvm::FieldElement; use fxhash::FxHashMap as HashMap; use noirc_frontend::ast; @@ -100,7 +100,8 @@ impl DataBus { ) -> DataBus { let mut call_data_args = Vec::new(); for call_data_item in call_data { - let array_id = call_data_item.databus.expect("Call data should have an array id"); + // databus can be None if `main` is a brillig function + let Some(array_id) = call_data_item.databus else { continue }; let call_data_id = call_data_item.call_data_id.expect("Call data should have a user id"); call_data_args.push(CallData { array_id, call_data_id, index_map: call_data_item.map }); @@ -161,13 +162,11 @@ impl FunctionBuilder { } let len = databus.values.len(); - let array = if len > 0 { - let array = self - 
.array_constant(databus.values, Type::Array(Arc::new(vec![Type::field()]), len)); - Some(array) - } else { - None - }; + let array = (len > 0 && matches!(self.current_function.runtime(), RuntimeType::Acir(_))) + .then(|| { + let array_type = Type::Array(Arc::new(vec![Type::field()]), len); + self.insert_make_array(databus.values, array_type) + }); DataBusBuilder { index: 0, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index 63a9453a430..0479f8da0b7 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -137,11 +137,6 @@ impl FunctionBuilder { self.numeric_constant(value.into(), Type::length_type()) } - /// Insert an array constant into the current function with the given element values. - pub(crate) fn array_constant(&mut self, elements: im::Vector, typ: Type) -> ValueId { - self.current_function.dfg.make_array(elements, typ) - } - /// Returns the type of the given value. pub(crate) fn type_of_value(&self, value: ValueId) -> Type { self.current_function.dfg.type_of_value(value) @@ -356,6 +351,17 @@ impl FunctionBuilder { self.insert_instruction(Instruction::EnableSideEffectsIf { condition }, None); } + /// Insert a `make_array` instruction to create a new array or slice. + /// Returns the new array value. Expects `typ` to be an array or slice type. + pub(crate) fn insert_make_array( + &mut self, + elements: im::Vector, + typ: Type, + ) -> ValueId { + assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); + self.insert_instruction(Instruction::MakeArray { elements, typ }, None).first() + } + /// Terminates the current block with the given terminator instruction /// if the current block does not already have a terminator instruction. 
fn terminate_block_with(&mut self, terminator: TerminatorInstruction) { @@ -511,7 +517,6 @@ mod tests { instruction::{Endian, Intrinsic}, map::Id, types::Type, - value::Value, }; use super::FunctionBuilder; @@ -533,10 +538,7 @@ mod tests { let call_results = builder.insert_call(to_bits_id, vec![input, length], result_types).into_owned(); - let slice = match &builder.current_function.dfg[call_results[0]] { - Value::Array { array, .. } => array, - _ => panic!(), - }; + let slice = builder.current_function.dfg.get_array_constant(call_results[0]).unwrap().0; assert_eq!(slice[0], one); assert_eq!(slice[1], one); assert_eq!(slice[2], one); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dfg.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dfg.rs index 2be9ffa9afa..e3f3f33682b 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dfg.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dfg.rs @@ -266,12 +266,6 @@ impl DataFlowGraph { id } - /// Create a new constant array value from the given elements - pub(crate) fn make_array(&mut self, array: im::Vector, typ: Type) -> ValueId { - assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); - self.make_value(Value::Array { array, typ }) - } - /// Gets or creates a ValueId for the given FunctionId. pub(crate) fn import_function(&mut self, function: FunctionId) -> ValueId { if let Some(existing) = self.functions.get(&function) { @@ -458,8 +452,11 @@ impl DataFlowGraph { /// Otherwise, this returns None. pub(crate) fn get_array_constant(&self, value: ValueId) -> Option<(im::Vector, Type)> { match &self.values[self.resolve(value)] { + Value::Instruction { instruction, .. 
} => match &self.instructions[*instruction] { + Instruction::MakeArray { elements, typ } => Some((elements.clone(), typ.clone())), + _ => None, + }, // Arrays are shared, so cloning them is cheap - Value::Array { array, typ } => Some((array.clone(), typ.clone())), _ => None, } } @@ -522,8 +519,13 @@ impl DataFlowGraph { /// True if the given ValueId refers to a (recursively) constant value pub(crate) fn is_constant(&self, argument: ValueId) -> bool { match &self[self.resolve(argument)] { - Value::Instruction { .. } | Value::Param { .. } => false, - Value::Array { array, .. } => array.iter().all(|element| self.is_constant(*element)), + Value::Param { .. } => false, + Value::Instruction { instruction, .. } => match &self[*instruction] { + Instruction::MakeArray { elements, .. } => { + elements.iter().all(|element| self.is_constant(*element)) + } + _ => false, + }, _ => true, } } @@ -575,6 +577,7 @@ impl std::ops::IndexMut for DataFlowGraph { // The result of calling DataFlowGraph::insert_instruction can // be a list of results or a single ValueId if the instruction was simplified // to an existing value. +#[derive(Debug)] pub(crate) enum InsertInstructionResult<'dfg> { /// Results is the standard case containing the instruction id and the results of that instruction. Results(InstructionId, &'dfg [ValueId]), diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dom.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dom.rs index 94f7a405c05..c1a7f14e0d1 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dom.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/dom.rs @@ -86,7 +86,7 @@ impl DominatorTree { /// /// This function panics if either of the blocks are unreachable. /// - /// An instruction is considered to dominate itself. + /// A block is considered to dominate itself. 
pub(crate) fn dominates(&mut self, block_a_id: BasicBlockId, block_b_id: BasicBlockId) -> bool { if let Some(res) = self.cache.get(&(block_a_id, block_b_id)) { return *res; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs index e8245ff6036..b1233e3063e 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs @@ -46,6 +46,14 @@ impl RuntimeType { | RuntimeType::Brillig(InlineType::NoPredicates) ) } + + pub(crate) fn is_brillig(&self) -> bool { + matches!(self, RuntimeType::Brillig(_)) + } + + pub(crate) fn is_acir(&self) -> bool { + matches!(self, RuntimeType::Acir(_)) + } } /// A function holds a list of instructions. diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs index 991ff22c902..5e133072067 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs @@ -18,15 +18,26 @@ pub(crate) struct FunctionInserter<'f> { pub(crate) function: &'f mut Function, values: HashMap, + /// Map containing repeat array constants so that we do not initialize a new /// array unnecessarily. An extra tuple field is included as part of the key to /// distinguish between array/slice types. - const_arrays: HashMap<(im::Vector, Type), ValueId>, + /// + /// This is optional since caching arrays relies on the inserter inserting strictly + /// in control-flow order. Otherwise, if arrays later in the program are cached first, + /// they may be refered to by instructions earlier in the program. + array_cache: Option, + + /// If this pass is loop unrolling, store the block before the loop to optionally + /// hoist any make_array instructions up to after they are retrieved from the `array_cache`. 
+ pre_loop: Option, } +pub(crate) type ArrayCache = HashMap, HashMap>; + impl<'f> FunctionInserter<'f> { pub(crate) fn new(function: &'f mut Function) -> FunctionInserter<'f> { - Self { function, values: HashMap::default(), const_arrays: HashMap::default() } + Self { function, values: HashMap::default(), array_cache: None, pre_loop: None } } /// Resolves a ValueId to its new, updated value. @@ -36,27 +47,7 @@ impl<'f> FunctionInserter<'f> { value = self.function.dfg.resolve(value); match self.values.get(&value) { Some(value) => self.resolve(*value), - None => match &self.function.dfg[value] { - super::value::Value::Array { array, typ } => { - let array = array.clone(); - let typ = typ.clone(); - let new_array: im::Vector = - array.iter().map(|id| self.resolve(*id)).collect(); - - if let Some(fetched_value) = - self.const_arrays.get(&(new_array.clone(), typ.clone())) - { - return *fetched_value; - }; - - let new_array_clone = new_array.clone(); - let new_id = self.function.dfg.make_array(new_array, typ.clone()); - self.values.insert(value, new_id); - self.const_arrays.insert((new_array_clone, typ), new_id); - new_id - } - _ => value, - }, + None => value, } } @@ -80,6 +71,7 @@ impl<'f> FunctionInserter<'f> { } } + /// Get an instruction and make sure all the values in it are freshly resolved. pub(crate) fn map_instruction(&mut self, id: InstructionId) -> (Instruction, CallStack) { ( self.function.dfg[id].clone().map_values(|id| self.resolve(id)), @@ -122,7 +114,7 @@ impl<'f> FunctionInserter<'f> { &mut self, instruction: Instruction, id: InstructionId, - block: BasicBlockId, + mut block: BasicBlockId, call_stack: CallStack, ) -> InsertInstructionResult { let results = self.function.dfg.instruction_results(id); @@ -132,6 +124,30 @@ impl<'f> FunctionInserter<'f> { .requires_ctrl_typevars() .then(|| vecmap(&results, |result| self.function.dfg.type_of_value(*result))); + // Large arrays can lead to OOM panics if duplicated from being unrolled in loops. 
+ // To prevent this, try to reuse the same ID for identical arrays instead of inserting + // another MakeArray instruction. Note that this assumes the function inserter is inserting + // in control-flow order. Otherwise we could refer to ValueIds defined later in the program. + let make_array = if let Instruction::MakeArray { elements, typ } = &instruction { + if self.array_is_constant(elements) { + if let Some(fetched_value) = self.get_cached_array(elements, typ) { + assert_eq!(results.len(), 1); + self.values.insert(results[0], fetched_value); + return InsertInstructionResult::SimplifiedTo(fetched_value); + } + + // Hoist constant arrays out of the loop and cache their value + if let Some(pre_loop) = self.pre_loop { + block = pre_loop; + } + Some((elements.clone(), typ.clone())) + } else { + None + } + } else { + None + }; + let new_results = self.function.dfg.insert_instruction_and_results( instruction, block, @@ -139,10 +155,54 @@ impl<'f> FunctionInserter<'f> { call_stack, ); + // Cache an array in the fresh_array_cache if array caching is enabled. + // The fresh cache isn't used for deduplication until an external pass confirms we + // pass a sequence point and all blocks that may be before the current insertion point + // are finished. 
+ if let Some((elements, typ)) = make_array { + Self::cache_array(&mut self.array_cache, elements, typ, new_results.first()); + } + Self::insert_new_instruction_results(&mut self.values, &results, &new_results); new_results } + fn get_cached_array(&self, elements: &im::Vector, typ: &Type) -> Option { + self.array_cache.as_ref()?.get(elements)?.get(typ).copied() + } + + fn cache_array( + arrays: &mut Option, + elements: im::Vector, + typ: Type, + result_id: ValueId, + ) { + if let Some(arrays) = arrays { + arrays.entry(elements).or_default().insert(typ, result_id); + } + } + + fn array_is_constant(&self, elements: &im::Vector) -> bool { + elements.iter().all(|element| self.function.dfg.is_constant(*element)) + } + + pub(crate) fn set_array_cache( + &mut self, + new_cache: Option, + pre_loop: BasicBlockId, + ) { + self.array_cache = new_cache; + self.pre_loop = Some(pre_loop); + } + + /// Finish this inserter, returning its array cache merged with the fresh array cache. + /// Since this consumes the inserter this assumes we're at a sequence point where all + /// predecessor blocks to the current block are finished. Since this is true, the fresh + /// array cache can be merged with the existing array cache. + pub(crate) fn into_array_cache(self) -> Option { + self.array_cache + } + /// Modify the values HashMap to remember the mapping between an instruction result's previous /// ValueId (from the source_function) and its new ValueId in the destination function. pub(crate) fn insert_new_instruction_results( diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 2b40bccca7b..936dc854c51 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -278,6 +278,12 @@ pub(crate) enum Instruction { else_condition: ValueId, else_value: ValueId, }, + + /// Creates a new array or slice. 
+ /// + /// `typ` should be an array or slice type with an element type + /// matching each of the `elements` values' types. + MakeArray { elements: im::Vector, typ: Type }, } impl Instruction { @@ -290,7 +296,9 @@ impl Instruction { pub(crate) fn result_type(&self) -> InstructionResultType { match self { Instruction::Binary(binary) => binary.result_type(), - Instruction::Cast(_, typ) => InstructionResultType::Known(typ.clone()), + Instruction::Cast(_, typ) | Instruction::MakeArray { typ, .. } => { + InstructionResultType::Known(typ.clone()) + } Instruction::Not(value) | Instruction::Truncate { value, .. } | Instruction::ArraySet { array: value, .. } @@ -344,6 +352,9 @@ impl Instruction { // We can deduplicate these instructions if we know the predicate is also the same. Constrain(..) | RangeCheck { .. } => deduplicate_with_predicate, + // This should never be side-effectful + MakeArray { .. } => true, + // These can have different behavior depending on the EnableSideEffectsIf context. // Replacing them with a similar instruction potentially enables replacing an instruction // with one that was disabled. See @@ -381,7 +392,8 @@ impl Instruction { | Load { .. } | ArrayGet { .. } | IfElse { .. } - | ArraySet { .. } => true, + | ArraySet { .. } + | MakeArray { .. } => true, Constrain(..) | Store { .. } @@ -444,7 +456,8 @@ impl Instruction { | Instruction::Store { .. } | Instruction::IfElse { .. } | Instruction::IncrementRc { .. } - | Instruction::DecrementRc { .. } => false, + | Instruction::DecrementRc { .. } + | Instruction::MakeArray { .. 
} => false, } } @@ -519,6 +532,10 @@ impl Instruction { else_value: f(*else_value), } } + Instruction::MakeArray { elements, typ } => Instruction::MakeArray { + elements: elements.iter().copied().map(f).collect(), + typ: typ.clone(), + }, } } @@ -579,6 +596,11 @@ impl Instruction { f(*else_condition); f(*else_value); } + Instruction::MakeArray { elements, typ: _ } => { + for element in elements { + f(*element); + } + } } } @@ -634,20 +656,28 @@ impl Instruction { None } } - Instruction::ArraySet { array, index, value, .. } => { - let array_const = dfg.get_array_constant(*array); - let index_const = dfg.get_numeric_constant(*index); - if let (Some((array, element_type)), Some(index)) = (array_const, index_const) { + Instruction::ArraySet { array: array_id, index: index_id, value, .. } => { + let array = dfg.get_array_constant(*array_id); + let index = dfg.get_numeric_constant(*index_id); + if let (Some((array, _element_type)), Some(index)) = (array, index) { let index = index.try_to_u32().expect("Expected array index to fit in u32") as usize; if index < array.len() { - let new_array = dfg.make_array(array.update(index, *value), element_type); - return SimplifiedTo(new_array); + let elements = array.update(index, *value); + let typ = dfg.type_of_value(*array_id); + let instruction = Instruction::MakeArray { elements, typ }; + let new_array = dfg.insert_instruction_and_results( + instruction, + block, + Option::None, + call_stack.clone(), + ); + return SimplifiedTo(new_array.first()); } } - try_optimize_array_set_from_previous_get(dfg, *array, *index, *value) + try_optimize_array_set_from_previous_get(dfg, *array_id, *index_id, *value) } Instruction::Truncate { value, bit_size, max_bit_size } => { if bit_size == max_bit_size { @@ -760,6 +790,7 @@ impl Instruction { None } } + Instruction::MakeArray { .. 
} => None, } } } @@ -803,13 +834,13 @@ fn try_optimize_array_get_from_previous_set( return SimplifyResult::None; } } + Instruction::MakeArray { elements: array, typ: _ } => { + elements = Some(array.clone()); + break; + } _ => return SimplifyResult::None, } } - Value::Array { array, typ: _ } => { - elements = Some(array.clone()); - break; - } _ => return SimplifyResult::None, } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 9dbd2c56993..e1e967b9a43 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -60,7 +60,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, 2, limb_count, dfg) + constant_to_radix(endian, field, 2, limb_count, dfg, block, call_stack) } else { SimplifyResult::None } @@ -77,7 +77,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, radix, limb_count, dfg) + constant_to_radix(endian, field, radix, limb_count, dfg, block, call_stack) } else { SimplifyResult::None } @@ -109,7 +109,8 @@ pub(super) fn simplify_call( let slice_length_value = array.len() / elements_size; let slice_length = dfg.make_constant(slice_length_value.into(), Type::length_type()); - let new_slice = dfg.make_array(array, Type::Slice(inner_element_types)); + let new_slice = + make_array(dfg, array, Type::Slice(inner_element_types), block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![slice_length, new_slice]) } else { SimplifyResult::None @@ -129,7 +130,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, 
slice, element_type, block, call_stack); return SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]); } @@ -154,7 +155,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } else { SimplifyResult::None @@ -196,7 +197,7 @@ pub(super) fn simplify_call( results.push(new_slice_length); - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); // The slice is the last item returned for pop_front results.push(new_slice); @@ -227,7 +228,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } else { SimplifyResult::None @@ -260,7 +261,7 @@ pub(super) fn simplify_call( results.push(slice.remove(index)); } - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); results.insert(0, new_slice); let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Sub, block); @@ -317,7 +318,9 @@ pub(super) fn simplify_call( SimplifyResult::None } } - Intrinsic::BlackBox(bb_func) => simplify_black_box_func(bb_func, arguments, dfg), + Intrinsic::BlackBox(bb_func) => { + simplify_black_box_func(bb_func, arguments, dfg, block, call_stack) + } Intrinsic::AsField => { let instruction = Instruction::Cast( arguments[0], @@ -350,7 +353,7 @@ pub(super) fn simplify_call( Intrinsic::IsUnconstrained => SimplifyResult::None, Intrinsic::DerivePedersenGenerators => { if let Some(Type::Array(_, len)) = ctrl_typevars.unwrap().first() { - 
simplify_derive_generators(dfg, arguments, *len as u32) + simplify_derive_generators(dfg, arguments, *len as u32, block, call_stack) } else { unreachable!("Derive Pedersen Generators must return an array"); } @@ -419,7 +422,7 @@ fn simplify_slice_push_back( } let slice_size = slice.len(); let element_size = element_type.element_size(); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, &call_stack); let set_last_slice_value_instr = Instruction::ArraySet { array: new_slice, @@ -505,6 +508,8 @@ fn simplify_black_box_func( bb_func: BlackBoxFunc, arguments: &[ValueId], dfg: &mut DataFlowGraph, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { cfg_if::cfg_if! { if #[cfg(feature = "bn254")] { @@ -514,8 +519,12 @@ fn simplify_black_box_func( } }; match bb_func { - BlackBoxFunc::Blake2s => simplify_hash(dfg, arguments, acvm::blackbox_solver::blake2s), - BlackBoxFunc::Blake3 => simplify_hash(dfg, arguments, acvm::blackbox_solver::blake3), + BlackBoxFunc::Blake2s => { + simplify_hash(dfg, arguments, acvm::blackbox_solver::blake2s, block, call_stack) + } + BlackBoxFunc::Blake3 => { + simplify_hash(dfg, arguments, acvm::blackbox_solver::blake3, block, call_stack) + } BlackBoxFunc::Keccakf1600 => { if let Some((array_input, _)) = dfg.get_array_constant(arguments[0]) { if array_is_constant(dfg, &array_input) { @@ -533,8 +542,14 @@ fn simplify_black_box_func( const_input.try_into().expect("Keccakf1600 input should have length of 25"), ) .expect("Rust solvable black box function should not fail"); - let state_values = vecmap(state, |x| FieldElement::from(x as u128)); - let result_array = make_constant_array(dfg, state_values, Type::unsigned(64)); + let state_values = state.iter().map(|x| FieldElement::from(*x as u128)); + let result_array = make_constant_array( + dfg, + state_values, + Type::unsigned(64), + block, + call_stack, + ); SimplifyResult::SimplifiedTo(result_array) } else { 
SimplifyResult::None @@ -544,7 +559,7 @@ fn simplify_black_box_func( } } BlackBoxFunc::Poseidon2Permutation => { - blackbox::simplify_poseidon2_permutation(dfg, solver, arguments) + blackbox::simplify_poseidon2_permutation(dfg, solver, arguments, block, call_stack) } BlackBoxFunc::EcdsaSecp256k1 => blackbox::simplify_signature( dfg, @@ -557,8 +572,12 @@ fn simplify_black_box_func( acvm::blackbox_solver::ecdsa_secp256r1_verify, ), - BlackBoxFunc::MultiScalarMul => SimplifyResult::None, - BlackBoxFunc::EmbeddedCurveAdd => blackbox::simplify_ec_add(dfg, solver, arguments), + BlackBoxFunc::MultiScalarMul => { + blackbox::simplify_msm(dfg, solver, arguments, block, call_stack) + } + BlackBoxFunc::EmbeddedCurveAdd => { + blackbox::simplify_ec_add(dfg, solver, arguments, block, call_stack) + } BlackBoxFunc::SchnorrVerify => blackbox::simplify_schnorr_verify(dfg, solver, arguments), BlackBoxFunc::BigIntAdd @@ -585,23 +604,47 @@ fn simplify_black_box_func( } } -fn make_constant_array(dfg: &mut DataFlowGraph, results: Vec, typ: Type) -> ValueId { - let result_constants = vecmap(results, |element| dfg.make_constant(element, typ.clone())); +fn make_constant_array( + dfg: &mut DataFlowGraph, + results: impl Iterator, + typ: Type, + block: BasicBlockId, + call_stack: &CallStack, +) -> ValueId { + let result_constants: im::Vector<_> = + results.map(|element| dfg.make_constant(element, typ.clone())).collect(); let typ = Type::Array(Arc::new(vec![typ]), result_constants.len()); - dfg.make_array(result_constants.into(), typ) + make_array(dfg, result_constants, typ, block, call_stack) +} + +fn make_array( + dfg: &mut DataFlowGraph, + elements: im::Vector, + typ: Type, + block: BasicBlockId, + call_stack: &CallStack, +) -> ValueId { + let instruction = Instruction::MakeArray { elements, typ }; + let call_stack = call_stack.clone(); + dfg.insert_instruction_and_results(instruction, block, None, call_stack).first() } fn make_constant_slice( dfg: &mut DataFlowGraph, results: Vec, typ: 
Type, + block: BasicBlockId, + call_stack: &CallStack, ) -> (ValueId, ValueId) { let result_constants = vecmap(results, |element| dfg.make_constant(element, typ.clone())); let typ = Type::Slice(Arc::new(vec![typ])); let length = FieldElement::from(result_constants.len() as u128); - (dfg.make_constant(length, Type::length_type()), dfg.make_array(result_constants.into(), typ)) + let length = dfg.make_constant(length, Type::length_type()); + + let slice = make_array(dfg, result_constants.into(), typ, block, call_stack); + (length, slice) } /// Returns a slice (represented by a tuple (len, slice)) of constants corresponding to the limbs of the radix decomposition. @@ -611,6 +654,8 @@ fn constant_to_radix( radix: u32, limb_count: u32, dfg: &mut DataFlowGraph, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { let bit_size = u32::BITS - (radix - 1).leading_zeros(); let radix_big = BigUint::from(radix); @@ -631,7 +676,13 @@ fn constant_to_radix( if endian == Endian::Big { limbs.reverse(); } - let result_array = make_constant_array(dfg, limbs, Type::unsigned(bit_size)); + let result_array = make_constant_array( + dfg, + limbs.into_iter(), + Type::unsigned(bit_size), + block, + call_stack, + ); SimplifyResult::SimplifiedTo(result_array) } } @@ -656,6 +707,8 @@ fn simplify_hash( dfg: &mut DataFlowGraph, arguments: &[ValueId], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match dfg.get_array_constant(arguments[0]) { Some((input, _)) if array_is_constant(dfg, &input) => { @@ -664,9 +717,10 @@ fn simplify_hash( let hash = hash_function(&input_bytes) .expect("Rust solvable black box function should not fail"); - let hash_values = vecmap(hash, |byte| FieldElement::from_be_bytes_reduce(&[byte])); + let hash_values = hash.iter().map(|byte| FieldElement::from_be_bytes_reduce(&[*byte])); - let result_array = make_constant_array(dfg, hash_values, Type::unsigned(8)); + 
let result_array = + make_constant_array(dfg, hash_values, Type::unsigned(8), block, call_stack); SimplifyResult::SimplifiedTo(result_array) } _ => SimplifyResult::None, @@ -725,6 +779,8 @@ fn simplify_derive_generators( dfg: &mut DataFlowGraph, arguments: &[ValueId], num_generators: u32, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { if arguments.len() == 2 { let domain_separator_string = dfg.get_array_constant(arguments[0]); @@ -754,8 +810,8 @@ fn simplify_derive_generators( results.push(is_infinite); } let len = results.len(); - let result = - dfg.make_array(results.into(), Type::Array(vec![Type::field()].into(), len)); + let typ = Type::Array(vec![Type::field()].into(), len); + let result = make_array(dfg, results.into(), typ, block, call_stack); SimplifyResult::SimplifiedTo(result) } else { SimplifyResult::None diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs index 3881646d5e4..4f2a31e2fb0 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs @@ -1,8 +1,13 @@ +use std::sync::Arc; + use acvm::{acir::AcirField, BlackBoxFunctionSolver, BlackBoxResolutionError, FieldElement}; -use iter_extended::vecmap; use crate::ssa::ir::{ - dfg::DataFlowGraph, instruction::SimplifyResult, types::Type, value::ValueId, + basic_block::BasicBlockId, + dfg::{CallStack, DataFlowGraph}, + instruction::{Instruction, SimplifyResult}, + types::Type, + value::ValueId, }; use super::{array_is_constant, make_constant_array, to_u8_vec}; @@ -11,6 +16,8 @@ pub(super) fn simplify_ec_add( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match ( dfg.get_numeric_constant(arguments[0]), @@ -39,13 +46,76 @@ pub(super) fn 
simplify_ec_add( return SimplifyResult::None; }; - let result_array = make_constant_array( - dfg, - vec![result_x, result_y, result_is_infinity], - Type::field(), - ); + let result_x = dfg.make_constant(result_x, Type::field()); + let result_y = dfg.make_constant(result_y, Type::field()); + let result_is_infinity = dfg.make_constant(result_is_infinity, Type::bool()); - SimplifyResult::SimplifiedTo(result_array) + let typ = Type::Array(Arc::new(vec![Type::field()]), 3); + + let elements = im::vector![result_x, result_y, result_is_infinity]; + let instruction = Instruction::MakeArray { elements, typ }; + let result_array = + dfg.insert_instruction_and_results(instruction, block, None, call_stack.clone()); + + SimplifyResult::SimplifiedTo(result_array.first()) + } + _ => SimplifyResult::None, + } +} + +pub(super) fn simplify_msm( + dfg: &mut DataFlowGraph, + solver: impl BlackBoxFunctionSolver, + arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, +) -> SimplifyResult { + // TODO: Handle MSMs where a subset of the terms are constant. 
+ match (dfg.get_array_constant(arguments[0]), dfg.get_array_constant(arguments[1])) { + (Some((points, _)), Some((scalars, _))) => { + let Some(points) = points + .into_iter() + .map(|id| dfg.get_numeric_constant(id)) + .collect::>>() + else { + return SimplifyResult::None; + }; + + let Some(scalars) = scalars + .into_iter() + .map(|id| dfg.get_numeric_constant(id)) + .collect::>>() + else { + return SimplifyResult::None; + }; + + let mut scalars_lo = Vec::new(); + let mut scalars_hi = Vec::new(); + for (i, scalar) in scalars.into_iter().enumerate() { + if i % 2 == 0 { + scalars_lo.push(scalar); + } else { + scalars_hi.push(scalar); + } + } + + let Ok((result_x, result_y, result_is_infinity)) = + solver.multi_scalar_mul(&points, &scalars_lo, &scalars_hi) + else { + return SimplifyResult::None; + }; + + let result_x = dfg.make_constant(result_x, Type::field()); + let result_y = dfg.make_constant(result_y, Type::field()); + let result_is_infinity = dfg.make_constant(result_is_infinity, Type::bool()); + + let elements = im::vector![result_x, result_y, result_is_infinity]; + let typ = Type::Array(Arc::new(vec![Type::field()]), 3); + let instruction = Instruction::MakeArray { elements, typ }; + let result_array = + dfg.insert_instruction_and_results(instruction, block, None, call_stack.clone()); + + SimplifyResult::SimplifiedTo(result_array.first()) } _ => SimplifyResult::None, } @@ -55,6 +125,8 @@ pub(super) fn simplify_poseidon2_permutation( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match (dfg.get_array_constant(arguments[0]), dfg.get_numeric_constant(arguments[1])) { (Some((state, _)), Some(state_length)) if array_is_constant(dfg, &state) => { @@ -74,7 +146,9 @@ pub(super) fn simplify_poseidon2_permutation( return SimplifyResult::None; }; - let result_array = make_constant_array(dfg, new_state, Type::field()); + let new_state = new_state.into_iter(); + 
let typ = Type::field(); + let result_array = make_constant_array(dfg, new_state, typ, block, call_stack); SimplifyResult::SimplifiedTo(result_array) } @@ -119,6 +193,8 @@ pub(super) fn simplify_hash( dfg: &mut DataFlowGraph, arguments: &[ValueId], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match dfg.get_array_constant(arguments[0]) { Some((input, _)) if array_is_constant(dfg, &input) => { @@ -127,9 +203,10 @@ pub(super) fn simplify_hash( let hash = hash_function(&input_bytes) .expect("Rust solvable black box function should not fail"); - let hash_values = vecmap(hash, |byte| FieldElement::from_be_bytes_reduce(&[byte])); + let hash_values = hash.iter().map(|byte| FieldElement::from_be_bytes_reduce(&[*byte])); - let result_array = make_constant_array(dfg, hash_values, Type::unsigned(8)); + let u8_type = Type::unsigned(8); + let result_array = make_constant_array(dfg, hash_values, u8_type, block, call_stack); SimplifyResult::SimplifiedTo(result_array) } _ => SimplifyResult::None, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/mod.rs similarity index 100% rename from noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/mod.rs diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/post_order.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/post_order.rs index 94ff96ba1d7..398ce887b96 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/post_order.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/post_order.rs @@ -22,7 +22,7 @@ impl PostOrder { } impl PostOrder { - /// Allocate and compute a function's block post-order. Pos + /// Allocate and compute a function's block post-order. 
pub(crate) fn with_function(func: &Function) -> Self { PostOrder(Self::compute_post_order(func)) } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/printer.rs index 3bbe14f866a..c44e7d8a388 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -69,17 +69,6 @@ fn value(function: &Function, id: ValueId) -> String { } Value::Function(id) => id.to_string(), Value::Intrinsic(intrinsic) => intrinsic.to_string(), - Value::Array { array, typ } => { - let elements = vecmap(array, |element| value(function, *element)); - let element_types = &typ.clone().element_types(); - let element_types_str = - element_types.iter().map(|typ| typ.to_string()).collect::>().join(", "); - if element_types.len() == 1 { - format!("[{}] of {}", elements.join(", "), element_types_str) - } else { - format!("[{}] of ({})", elements.join(", "), element_types_str) - } - } Value::Param { .. } | Value::Instruction { .. } | Value::ForeignFunction(_) => { id.to_string() } @@ -230,6 +219,18 @@ fn display_instruction_inner( "if {then_condition} then {then_value} else if {else_condition} then {else_value}" ) } + Instruction::MakeArray { elements, typ } => { + write!(f, "make_array [")?; + + for (i, element) in elements.iter().enumerate() { + if i != 0 { + write!(f, ", ")?; + } + write!(f, "{}", show(*element))?; + } + + writeln!(f, "] : {typ}") + } } } @@ -253,13 +254,9 @@ pub(crate) fn try_to_extract_string_from_error_payload( (is_string_type && (values.len() == 1)) .then_some(()) .and_then(|()| { - let Value::Array { array: values, .. 
} = &dfg[values[0]] else { - return None; - }; - let fields: Option> = - values.iter().map(|value_id| dfg.get_numeric_constant(*value_id)).collect(); - - fields + let (values, _) = &dfg.get_array_constant(values[0])?; + let values = values.iter().map(|value_id| dfg.get_numeric_constant(*value_id)); + values.collect::>>() }) .map(|fields| { fields diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/value.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/value.rs index 795d45c75e9..ef494200308 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/value.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/value.rs @@ -36,9 +36,6 @@ pub(crate) enum Value { /// This Value originates from a numeric constant NumericConstant { constant: FieldElement, typ: Type }, - /// Represents a constant array value - Array { array: im::Vector, typ: Type }, - /// This Value refers to a function in the IR. /// Functions always have the type Type::Function. /// If the argument or return types are needed, users should retrieve @@ -63,7 +60,6 @@ impl Value { Value::Instruction { typ, .. } => typ, Value::Param { typ, .. } => typ, Value::NumericConstant { typ, .. } => typ, - Value::Array { typ, .. } => typ, Value::Function { .. } => &Type::Function, Value::Intrinsic { .. } => &Type::Function, Value::ForeignFunction { .. 
} => &Type::Function, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/array_set.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/array_set.rs index 865a1e31eb3..96de22600a4 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/array_set.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/array_set.rs @@ -199,29 +199,31 @@ mod tests { let src = " brillig(inline) fn main f0 { b0(): - v1 = allocate -> &mut [Field; 5] - store [[Field 0, Field 0, Field 0, Field 0, Field 0] of Field, [Field 0, Field 0, Field 0, Field 0, Field 0] of Field] of [Field; 5] at v1 - v6 = allocate -> &mut [Field; 5] - store [[Field 0, Field 0, Field 0, Field 0, Field 0] of Field, [Field 0, Field 0, Field 0, Field 0, Field 0] of Field] of [Field; 5] at v6 + v2 = make_array [Field 0, Field 0, Field 0, Field 0, Field 0] : [Field; 5] + v3 = make_array [v2, v2] : [[Field; 5]; 2] + v4 = allocate -> &mut [Field; 5] + store v3 at v4 + v5 = allocate -> &mut [Field; 5] + store v3 at v5 jmp b1(u32 0) b1(v0: u32): - v12 = lt v0, u32 5 - jmpif v12 then: b3, else: b2 + v8 = lt v0, u32 5 + jmpif v8 then: b3, else: b2 b3(): - v13 = eq v0, u32 5 - jmpif v13 then: b4, else: b5 + v9 = eq v0, u32 5 + jmpif v9 then: b4, else: b5 b4(): - v14 = load v1 -> [[Field; 5]; 2] - store v14 at v6 + v10 = load v4 -> [[Field; 5]; 2] + store v10 at v5 jmp b5() b5(): - v15 = load v1 -> [[Field; 5]; 2] - v16 = array_get v15, index Field 0 -> [Field; 5] - v18 = array_set v16, index v0, value Field 20 - v19 = array_set v15, index v0, value v18 - store v19 at v1 - v21 = add v0, u32 1 - jmp b1(v21) + v11 = load v4 -> [[Field; 5]; 2] + v12 = array_get v11, index Field 0 -> [Field; 5] + v14 = array_set v12, index v0, value Field 20 + v15 = array_set v11, index v0, value v14 + store v15 at v4 + v17 = add v0, u32 1 + jmp b1(v17) b2(): return } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs 
b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index de8e5b25926..b421e925bbf 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -484,7 +484,8 @@ mod test { acir(inline) fn main f0 { b0(v0: Field): v2 = add v0, Field 1 - return [v2] of Field + v3 = make_array [v2] : [Field; 1] + return v3 } "; let ssa = Ssa::from_str(src).unwrap(); @@ -595,16 +596,17 @@ mod test { // the other is not. If one is removed, it is possible e.g. v4 is replaced with v2 which // is disabled (only gets from index 0) and thus returns the wrong result. let src = " - acir(inline) fn main f0 { - b0(v0: u1, v1: u64): - enable_side_effects v0 - v5 = array_get [Field 0, Field 1] of Field, index v1 -> Field - v6 = not v0 - enable_side_effects v6 - v8 = array_get [Field 0, Field 1] of Field, index v1 -> Field - return - } - "; + acir(inline) fn main f0 { + b0(v0: u1, v1: u64): + enable_side_effects v0 + v4 = make_array [Field 0, Field 1] : [Field; 2] + v5 = array_get v4, index v1 -> Field + v6 = not v0 + enable_side_effects v6 + v7 = array_get v4, index v1 -> Field + return + } + "; let ssa = Ssa::from_str(src).unwrap(); // Expected output is unchanged @@ -682,12 +684,12 @@ mod test { let zero = builder.numeric_constant(0u128, Type::unsigned(64)); let typ = Type::Array(Arc::new(vec![Type::unsigned(64)]), 25); - let array_contents = vec![ + let array_contents = im::vector![ v0, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, ]; - let array1 = builder.array_constant(array_contents.clone().into(), typ.clone()); - let array2 = builder.array_constant(array_contents.into(), typ.clone()); + let array1 = builder.insert_make_array(array_contents.clone(), typ.clone()); + let array2 = builder.insert_make_array(array_contents, typ.clone()); assert_eq!(array1, array2, "arrays 
were assigned different value ids"); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/die.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/die.rs index f28b076a5f9..666a8e32246 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/die.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/die.rs @@ -192,29 +192,11 @@ impl Context { }); } - /// Inspects a value recursively (as it could be an array) and marks all comprised instruction - /// results as used. + /// Inspects a value and marks all instruction results as used. fn mark_used_instruction_results(&mut self, dfg: &DataFlowGraph, value_id: ValueId) { let value_id = dfg.resolve(value_id); - match &dfg[value_id] { - Value::Instruction { .. } => { - self.used_values.insert(value_id); - } - Value::Array { array, .. } => { - self.used_values.insert(value_id); - for elem in array { - self.mark_used_instruction_results(dfg, *elem); - } - } - Value::Param { .. } => { - self.used_values.insert(value_id); - } - Value::NumericConstant { .. } => { - self.used_values.insert(value_id); - } - _ => { - // Does not comprise of any instruction results - } + if matches!(&dfg[value_id], Value::Instruction { .. } | Value::Param { .. 
}) { + self.used_values.insert(value_id); } } @@ -740,10 +722,11 @@ mod test { fn keep_inc_rc_on_borrowed_array_store() { // acir(inline) fn main f0 { // b0(): + // v1 = make_array [u32 0, u32 0] // v2 = allocate - // inc_rc [u32 0, u32 0] - // store [u32 0, u32 0] at v2 - // inc_rc [u32 0, u32 0] + // inc_rc v1 + // store v1 at v2 + // inc_rc v1 // jmp b1() // b1(): // v3 = load v2 @@ -756,11 +739,11 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let zero = builder.numeric_constant(0u128, Type::unsigned(32)); let array_type = Type::Array(Arc::new(vec![Type::unsigned(32)]), 2); - let array = builder.array_constant(vector![zero, zero], array_type.clone()); + let v1 = builder.insert_make_array(vector![zero, zero], array_type.clone()); let v2 = builder.insert_allocate(array_type.clone()); - builder.increment_array_reference_count(array); - builder.insert_store(v2, array); - builder.increment_array_reference_count(array); + builder.increment_array_reference_count(v1); + builder.insert_store(v2, v1); + builder.increment_array_reference_count(v1); let b1 = builder.insert_block(); builder.terminate_with_jmp(b1, vec![]); @@ -775,14 +758,14 @@ mod test { let main = ssa.main(); // The instruction count never includes the terminator instruction - assert_eq!(main.dfg[main.entry_block()].instructions().len(), 4); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 5); assert_eq!(main.dfg[b1].instructions().len(), 2); // We expect the output to be unchanged let ssa = ssa.dead_instruction_elimination(); let main = ssa.main(); - assert_eq!(main.dfg[main.entry_block()].instructions().len(), 4); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 5); assert_eq!(main.dfg[b1].instructions().len(), 2); } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index db2d96aac81..a2b8e20d20f 100644 --- 
a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -826,8 +826,8 @@ impl<'f> Context<'f> { } Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) => { let points_array_idx = if matches!( - self.inserter.function.dfg[arguments[0]], - Value::Array { .. } + self.inserter.function.dfg.type_of_value(arguments[0]), + Type::Array { .. } ) { 0 } else { @@ -835,15 +835,15 @@ impl<'f> Context<'f> { // which means the array is the second argument 1 }; - let (array_with_predicate, array_typ) = self - .apply_predicate_to_msm_argument( - arguments[points_array_idx], - condition, - call_stack.clone(), - ); - - arguments[points_array_idx] = - self.inserter.function.dfg.make_array(array_with_predicate, array_typ); + let (elements, typ) = self.apply_predicate_to_msm_argument( + arguments[points_array_idx], + condition, + call_stack.clone(), + ); + + let instruction = Instruction::MakeArray { elements, typ }; + let array = self.insert_instruction(instruction, call_stack); + arguments[points_array_idx] = array; Instruction::Call { func, arguments } } _ => Instruction::Call { func, arguments }, @@ -866,7 +866,7 @@ impl<'f> Context<'f> { ) -> (im::Vector, Type) { let array_typ; let mut array_with_predicate = im::Vector::new(); - if let Value::Array { array, typ } = &self.inserter.function.dfg[argument] { + if let Some((array, typ)) = &self.inserter.function.dfg.get_array_constant(argument) { array_typ = typ.clone(); for (i, value) in array.clone().iter().enumerate() { if i % 3 == 2 { @@ -1259,56 +1259,41 @@ mod test { // }; // } // - // // Translates to the following before the flattening pass: - // fn main f2 { - // b0(v0: u1): - // jmpif v0 then: b1, else: b2 - // b1(): - // v2 = allocate - // store Field 0 at v2 - // v4 = load v2 - // jmp b2() - // b2(): - // return - // } + // Translates to the following before the flattening pass: + let src = " + acir(inline) fn main f0 { + 
b0(v0: u1): + jmpif v0 then: b1, else: b2 + b1(): + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = load v1 -> Field + jmp b2() + b2(): + return + }"; // The bug is that the flattening pass previously inserted a load // before the first store to allocate, which loaded an uninitialized value. // In this test we assert the ordering is strictly Allocate then Store then Load. - let main_id = Id::test_new(0); - let mut builder = FunctionBuilder::new("main".into(), main_id); - - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - - let v0 = builder.add_parameter(Type::bool()); - builder.terminate_with_jmpif(v0, b1, b2); - - builder.switch_to_block(b1); - let v2 = builder.insert_allocate(Type::field()); - let zero = builder.field_constant(0u128); - builder.insert_store(v2, zero); - let _v4 = builder.insert_load(v2, Type::field()); - builder.terminate_with_jmp(b2, vec![]); - - builder.switch_to_block(b2); - builder.terminate_with_return(vec![]); - - let ssa = builder.finish().flatten_cfg(); - let main = ssa.main(); + let ssa = Ssa::from_str(src).unwrap(); + let flattened_ssa = ssa.flatten_cfg(); // Now assert that there is not a load between the allocate and its first store // The Expected IR is: - // - // fn main f2 { - // b0(v0: u1): - // enable_side_effects v0 - // v6 = allocate - // store Field 0 at v6 - // v7 = load v6 - // v8 = not v0 - // enable_side_effects u1 1 - // return - // } + let expected = " + acir(inline) fn main f0 { + b0(v0: u1): + enable_side_effects v0 + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = load v1 -> Field + v4 = not v0 + enable_side_effects u1 1 + return + } + "; + + let main = flattened_ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let find_instruction = |predicate: fn(&Instruction) -> bool| { @@ -1321,6 +1306,8 @@ mod test { assert!(allocate_index < store_index); assert!(store_index < load_index); + + assert_normalized_ssa_equals(flattened_ssa, expected); } /// Work 
backwards from an instruction to find all the constant values @@ -1416,29 +1403,22 @@ mod test { // } let main_id = Id::test_new(1); let mut builder = FunctionBuilder::new("main".into(), main_id); - builder.insert_block(); // b0 let b1 = builder.insert_block(); let b2 = builder.insert_block(); let b3 = builder.insert_block(); - let element_type = Arc::new(vec![Type::unsigned(8)]); let array_type = Type::Array(element_type.clone(), 2); let array = builder.add_parameter(array_type); - let zero = builder.numeric_constant(0_u128, Type::unsigned(8)); - let v5 = builder.insert_array_get(array, zero, Type::unsigned(8)); let v6 = builder.insert_cast(v5, Type::unsigned(32)); let i_two = builder.numeric_constant(2_u128, Type::unsigned(32)); let v8 = builder.insert_binary(v6, BinaryOp::Mod, i_two); let v9 = builder.insert_cast(v8, Type::bool()); - let v10 = builder.insert_allocate(Type::field()); builder.insert_store(v10, zero); - builder.terminate_with_jmpif(v9, b1, b2); - builder.switch_to_block(b1); let one = builder.field_constant(1_u128); let v5b = builder.insert_cast(v5, Type::field()); @@ -1446,21 +1426,17 @@ mod test { let v14 = builder.insert_cast(v13, Type::unsigned(8)); builder.insert_store(v10, v14); builder.terminate_with_jmp(b3, vec![]); - builder.switch_to_block(b2); builder.insert_store(v10, zero); builder.terminate_with_jmp(b3, vec![]); - builder.switch_to_block(b3); let v_true = builder.numeric_constant(true, Type::bool()); let v12 = builder.insert_binary(v9, BinaryOp::Eq, v_true); builder.insert_constrain(v12, v_true, None); builder.terminate_with_return(vec![]); - let ssa = builder.finish(); let flattened_ssa = ssa.flatten_cfg(); let main = flattened_ssa.main(); - // Now assert that there is not an always-false constraint after flattening: let mut constrain_count = 0; for instruction in main.dfg[main.entry_block()].instructions() { diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs 
b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs index ef208588718..ddc8b0bfe6b 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs @@ -26,25 +26,23 @@ impl<'a> SliceCapacityTracker<'a> { ) { match instruction { Instruction::ArrayGet { array, .. } => { - let array_typ = self.dfg.type_of_value(*array); - let array_value = &self.dfg[*array]; - if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() - { - // Initial insertion into the slice sizes map - // Any other insertions should only occur if the value is already - // a part of the map. - self.compute_slice_capacity(*array, slice_sizes); + if let Some((_, array_type)) = self.dfg.get_array_constant(*array) { + if array_type.contains_slice_element() { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } } } Instruction::ArraySet { array, value, .. } => { - let array_typ = self.dfg.type_of_value(*array); - let array_value = &self.dfg[*array]; - if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() - { - // Initial insertion into the slice sizes map - // Any other insertions should only occur if the value is already - // a part of the map. - self.compute_slice_capacity(*array, slice_sizes); + if let Some((_, array_type)) = self.dfg.get_array_constant(*array) { + if array_type.contains_slice_element() { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. 
+ self.compute_slice_capacity(*array, slice_sizes); + } } let value_typ = self.dfg.type_of_value(*value); @@ -161,7 +159,7 @@ impl<'a> SliceCapacityTracker<'a> { array_id: ValueId, slice_sizes: &mut HashMap, ) { - if let Value::Array { array, typ } = &self.dfg[array_id] { + if let Some((array, typ)) = self.dfg.get_array_constant(array_id) { // Compiler sanity check assert!(!typ.is_nested_slice(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); if let Type::Slice(_) = typ { diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 75ee57dd4fa..bee58278aa8 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -202,7 +202,9 @@ impl<'a> ValueMerger<'a> { } } - self.dfg.make_array(merged, typ) + let instruction = Instruction::MakeArray { elements: merged, typ }; + let call_stack = self.call_stack.clone(); + self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() } fn merge_slice_values( @@ -276,7 +278,9 @@ impl<'a> ValueMerger<'a> { } } - self.dfg.make_array(merged, typ) + let instruction = Instruction::MakeArray { elements: merged, typ }; + let call_stack = self.call_stack.clone(); + self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() } /// Construct a dummy value to be attached to the smaller of two slices being merged. 
@@ -296,7 +300,11 @@ impl<'a> ValueMerger<'a> { array.push_back(self.make_slice_dummy_data(typ)); } } - self.dfg.make_array(array, typ.clone()) + let instruction = Instruction::MakeArray { elements: array, typ: typ.clone() }; + let call_stack = self.call_stack.clone(); + self.dfg + .insert_instruction_and_results(instruction, self.block, None, call_stack) + .first() } Type::Slice(_) => { // TODO(#3188): Need to update flattening to use true user facing length of slices diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs index 2eb0f2eda0f..f91487fd73e 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs @@ -476,10 +476,6 @@ impl<'function> PerFunctionContext<'function> { Value::ForeignFunction(function) => { self.context.builder.import_foreign_function(function) } - Value::Array { array, typ } => { - let elements = array.iter().map(|value| self.translate_value(*value)).collect(); - self.context.builder.array_constant(elements, typ.clone()) - } }; self.values.insert(id, new_value); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index a052abc5e16..0690dbbf204 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -171,9 +171,7 @@ impl<'f> PerFunctionContext<'f> { let block_params = self.inserter.function.dfg.block_parameters(*block_id); per_func_block_params.extend(block_params.iter()); let terminator = self.inserter.function.dfg[*block_id].unwrap_terminator(); - terminator.for_each_value(|value| { - self.recursively_add_values(value, &mut all_terminator_values); - }); + terminator.for_each_value(|value| all_terminator_values.insert(value)); } // If we never load from an address within a function we can 
remove all stores to that address. @@ -268,15 +266,6 @@ impl<'f> PerFunctionContext<'f> { .collect() } - fn recursively_add_values(&self, value: ValueId, set: &mut HashSet) { - set.insert(value); - if let Some((elements, _)) = self.inserter.function.dfg.get_array_constant(value) { - for array_element in elements { - self.recursively_add_values(array_element, set); - } - } - } - /// The value of each reference at the start of the given block is the unification /// of the value of the same reference at the end of its predecessor blocks. fn find_starting_references(&mut self, block: BasicBlockId) -> Block { @@ -426,8 +415,6 @@ impl<'f> PerFunctionContext<'f> { let address = self.inserter.function.dfg.resolve(*address); let value = self.inserter.function.dfg.resolve(*value); - self.check_array_aliasing(references, value); - // If there was another store to this instruction without any (unremoved) loads or // function calls in-between, we can remove the previous store. if let Some(last_store) = references.last_stores.get(&address) { @@ -512,24 +499,22 @@ impl<'f> PerFunctionContext<'f> { } self.mark_all_unknown(arguments, references); } - _ => (), - } - } - - /// If `array` is an array constant that contains reference types, then insert each element - /// as a potential alias to the array itself. 
- fn check_array_aliasing(&self, references: &mut Block, array: ValueId) { - if let Some((elements, typ)) = self.inserter.function.dfg.get_array_constant(array) { - if Self::contains_references(&typ) { - // TODO: Check if type directly holds references or holds arrays that hold references - let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); - references.expressions.insert(array, expr.clone()); - let aliases = references.aliases.entry(expr).or_default(); - - for element in elements { - aliases.insert(element); + Instruction::MakeArray { elements, typ } => { + // If `array` is an array constant that contains reference types, then insert each element + // as a potential alias to the array itself. + if Self::contains_references(typ) { + let array = self.inserter.function.dfg.instruction_results(instruction)[0]; + + let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); + references.expressions.insert(array, expr.clone()); + let aliases = references.aliases.entry(expr).or_default(); + + for element in elements { + aliases.insert(*element); + } } } + _ => (), } } @@ -634,10 +619,11 @@ mod tests { // fn func() { // b0(): // v0 = allocate - // store [Field 1, Field 2] in v0 - // v1 = load v0 - // v2 = array_get v1, index 1 - // return v2 + // v1 = make_array [Field 1, Field 2] + // store v1 in v0 + // v2 = load v0 + // v3 = array_get v2, index 1 + // return v3 // } let func_id = Id::test_new(0); @@ -648,12 +634,12 @@ mod tests { let element_type = Arc::new(vec![Type::field()]); let array_type = Type::Array(element_type, 2); - let array = builder.array_constant(vector![one, two], array_type.clone()); + let v1 = builder.insert_make_array(vector![one, two], array_type.clone()); - builder.insert_store(v0, array); - let v1 = builder.insert_load(v0, array_type); - let v2 = builder.insert_array_get(v1, one, Type::field()); - builder.terminate_with_return(vec![v2]); + builder.insert_store(v0, v1); + let v2 = builder.insert_load(v0, 
array_type); + let v3 = builder.insert_array_get(v2, one, Type::field()); + builder.terminate_with_return(vec![v3]); let ssa = builder.finish().mem2reg().fold_constants(); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs index 098f62bceba..10e86c6601a 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -35,13 +35,14 @@ pub(crate) fn assert_normalized_ssa_equals(mut ssa: super::Ssa, expected: &str) panic!("`expected` argument of `assert_ssa_equals` is not valid SSA:\n{:?}", err); } - use crate::{ssa::Ssa, trim_leading_whitespace_from_lines}; + use crate::{ssa::Ssa, trim_comments_from_lines, trim_leading_whitespace_from_lines}; ssa.normalize_ids(); let ssa = ssa.to_string(); let ssa = trim_leading_whitespace_from_lines(&ssa); let expected = trim_leading_whitespace_from_lines(expected); + let expected = trim_comments_from_lines(&expected); if ssa != expected { println!("Expected:\n~~~\n{expected}\n~~~\nGot:\n~~~\n{ssa}\n~~~"); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs index 6914bf87c5d..a5b60fb5fcd 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs @@ -179,19 +179,6 @@ impl IdMaps { Value::NumericConstant { constant, typ } => { new_function.dfg.make_constant(*constant, typ.clone()) } - Value::Array { array, typ } => { - if let Some(value) = self.values.get(&old_value) { - return *value; - } - - let array = array - .iter() - .map(|value| self.map_value(new_function, old_function, *value)) - .collect(); - let new_value = new_function.dfg.make_array(array, typ.clone()); - self.values.insert(old_value, new_value); - new_value - } Value::Intrinsic(intrinsic) => 
new_function.dfg.import_intrinsic(*intrinsic), Value::ForeignFunction(name) => new_function.dfg.import_foreign_function(name), } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/rc.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/rc.rs index c3606ac4311..ffe4ada39b7 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/rc.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/rc.rs @@ -197,7 +197,8 @@ mod test { // inc_rc v0 // inc_rc v0 // dec_rc v0 - // return [v0] + // v1 = make_array [v0] + // return v1 // } let main_id = Id::test_new(0); let mut builder = FunctionBuilder::new("foo".into(), main_id); @@ -211,8 +212,8 @@ mod test { builder.insert_dec_rc(v0); let outer_array_type = Type::Array(Arc::new(vec![inner_array_type]), 1); - let array = builder.array_constant(vec![v0].into(), outer_array_type); - builder.terminate_with_return(vec![array]); + let v1 = builder.insert_make_array(vec![v0].into(), outer_array_type); + builder.terminate_with_return(vec![v1]); let ssa = builder.finish().remove_paired_rc(); let main = ssa.main(); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs index 012f6e6b27d..0517f9ef89f 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs @@ -145,7 +145,8 @@ impl Context { | RangeCheck { .. } | IfElse { .. } | IncrementRc { .. } - | DecrementRc { .. } => false, + | DecrementRc { .. } + | MakeArray { .. } => false, EnableSideEffectsIf { .. } | ArrayGet { .. 
} diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 267dc6a3c20..89f1b2b2d7d 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -7,16 +7,20 @@ //! b. If we have previously modified any of the blocks in the loop, //! restart from step 1 to refresh the context. //! c. If not, try to unroll the loop. If successful, remember the modified -//! blocks. If unsuccessfully either error if the abort_on_error flag is set, +//! blocks. If unsuccessful either error if the abort_on_error flag is set, //! or otherwise remember that the loop failed to unroll and leave it unmodified. //! //! Note that this pass also often creates superfluous jmp instructions in the -//! program that will need to be removed by a later simplify cfg pass. -//! Note also that unrolling is skipped for Brillig runtime and as a result -//! we remove reference count instructions because they are only used by Brillig bytecode +//! program that will need to be removed by a later simplify CFG pass. +//! +//! Note also that unrolling is skipped for Brillig runtime, unless the loops are deemed +//! sufficiently small that inlining can be done without increasing the bytecode. +//! +//! When unrolling ACIR code, we remove reference count instructions because they are +//! only used by Brillig bytecode. 
use std::collections::HashSet; -use acvm::acir::AcirField; +use acvm::{acir::AcirField, FieldElement}; use crate::{ errors::RuntimeError, @@ -26,9 +30,9 @@ use crate::{ cfg::ControlFlowGraph, dfg::{CallStack, DataFlowGraph}, dom::DominatorTree, - function::{Function, RuntimeType}, - function_inserter::FunctionInserter, - instruction::{Instruction, TerminatorInstruction}, + function::Function, + function_inserter::{ArrayCache, FunctionInserter}, + instruction::{Binary, BinaryOp, Instruction, InstructionId, TerminatorInstruction}, post_order::PostOrder, value::ValueId, }, @@ -42,16 +46,9 @@ impl Ssa { /// This meta-pass will keep trying to unroll loops and simplifying the SSA until no more errors are found. #[tracing::instrument(level = "trace", skip(ssa))] pub(crate) fn unroll_loops_iteratively(mut ssa: Ssa) -> Result { - let acir_functions = ssa.functions.iter_mut().filter(|(_, func)| { - // Loop unrolling in brillig can lead to a code explosion currently. This can - // also be true for ACIR, but we have no alternative to unrolling in ACIR. - // Brillig also generally prefers smaller code rather than faster code. 
- !matches!(func.runtime(), RuntimeType::Brillig(_)) - }); - - for (_, function) in acir_functions { + for (_, function) in ssa.functions.iter_mut() { // Try to unroll loops first: - let mut unroll_errors = function.try_to_unroll_loops(); + let mut unroll_errors = function.try_unroll_loops(); // Keep unrolling until no more errors are found while !unroll_errors.is_empty() { @@ -66,21 +63,24 @@ impl Ssa { function.mem2reg(); // Unroll again - unroll_errors = function.try_to_unroll_loops(); + unroll_errors = function.try_unroll_loops(); // If we didn't manage to unroll any more loops, exit if unroll_errors.len() >= prev_unroll_err_count { return Err(unroll_errors.swap_remove(0)); } } } - Ok(ssa) } } impl Function { - fn try_to_unroll_loops(&mut self) -> Vec { - find_all_loops(self).unroll_each_loop(self) + // Loop unrolling in brillig can lead to a code explosion currently. + // This can also be true for ACIR, but we have no alternative to unrolling in ACIR. + // Brillig also generally prefers smaller code rather than faster code, + // so we only attempt to unroll small loops, which we decide on a case-by-case basis. + fn try_unroll_loops(&mut self) -> Vec { + Loops::find_all(self).unroll_each(self) } } @@ -94,7 +94,7 @@ struct Loop { back_edge_start: BasicBlockId, /// All the blocks contained within the loop, including `header` and `back_edge_start`. - pub(crate) blocks: HashSet, + blocks: HashSet, } struct Loops { @@ -107,60 +107,88 @@ struct Loops { cfg: ControlFlowGraph, } -/// Find a loop in the program by finding a node that dominates any predecessor node. -/// The edge where this happens will be the back-edge of the loop. 
-fn find_all_loops(function: &Function) -> Loops { - let cfg = ControlFlowGraph::with_function(function); - let post_order = PostOrder::with_function(function); - let mut dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order); - - let mut loops = vec![]; - - for (block, _) in function.dfg.basic_blocks_iter() { - // These reachable checks wouldn't be needed if we only iterated over reachable blocks - if dom_tree.is_reachable(block) { - for predecessor in cfg.predecessors(block) { - if dom_tree.is_reachable(predecessor) && dom_tree.dominates(block, predecessor) { - // predecessor -> block is the back-edge of a loop - loops.push(find_blocks_in_loop(block, predecessor, &cfg)); +impl Loops { + /// Find a loop in the program by finding a node that dominates any predecessor node. + /// The edge where this happens will be the back-edge of the loop. + /// + /// For example consider the following SSA of a basic loop: + /// ```text + /// main(): + /// v0 = ... start ... + /// v1 = ... end ... + /// jmp loop_entry(v0) + /// loop_entry(i: Field): + /// v2 = lt i v1 + /// jmpif v2, then: loop_body, else: loop_end + /// loop_body(): + /// v3 = ... body ... + /// v4 = add 1, i + /// jmp loop_entry(v4) + /// loop_end(): + /// ``` + /// + /// The CFG will look something like this: + /// ```text + /// main + /// ↓ + /// loop_entry ←---↰ + /// ↓ ↘ | + /// loop_end loop_body + /// ``` + /// `loop_entry` has two predecessors: `main` and `loop_body`, and it dominates `loop_body`. 
+ fn find_all(function: &Function) -> Self { + let cfg = ControlFlowGraph::with_function(function); + let post_order = PostOrder::with_function(function); + let mut dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order); + + let mut loops = vec![]; + + for (block, _) in function.dfg.basic_blocks_iter() { + // These reachable checks wouldn't be needed if we only iterated over reachable blocks + if dom_tree.is_reachable(block) { + for predecessor in cfg.predecessors(block) { + // In the above example, we're looking for when `block` is `loop_entry` and `predecessor` is `loop_body`. + if dom_tree.is_reachable(predecessor) && dom_tree.dominates(block, predecessor) + { + // predecessor -> block is the back-edge of a loop + loops.push(Loop::find_blocks_in_loop(block, predecessor, &cfg)); + } } } } - } - // Sort loops by block size so that we unroll the larger, outer loops of nested loops first. - // This is needed because inner loops may use the induction variable from their outer loops in - // their loop range. - loops.sort_by_key(|loop_| loop_.blocks.len()); + // Sort loops by block size so that we unroll the larger, outer loops of nested loops first. + // This is needed because inner loops may use the induction variable from their outer loops in + // their loop range. We will start popping loops from the back. + loops.sort_by_key(|loop_| loop_.blocks.len()); - Loops { - failed_to_unroll: HashSet::new(), - yet_to_unroll: loops, - modified_blocks: HashSet::new(), - cfg, + Self { + failed_to_unroll: HashSet::new(), + yet_to_unroll: loops, + modified_blocks: HashSet::new(), + cfg, + } } -} -impl Loops { /// Unroll all loops within a given function. /// Any loops which fail to be unrolled (due to using non-constant indices) will be unmodified. 
- fn unroll_each_loop(mut self, function: &mut Function) -> Vec { + fn unroll_each(mut self, function: &mut Function) -> Vec { let mut unroll_errors = vec![]; while let Some(next_loop) = self.yet_to_unroll.pop() { + if function.runtime().is_brillig() && !next_loop.is_small_loop(function, &self.cfg) { + continue; + } // If we've previously modified a block in this loop we need to refresh the context. // This happens any time we have nested loops. if next_loop.blocks.iter().any(|block| self.modified_blocks.contains(block)) { - let mut new_context = find_all_loops(function); - new_context.failed_to_unroll = self.failed_to_unroll; - return unroll_errors - .into_iter() - .chain(new_context.unroll_each_loop(function)) - .collect(); + let mut new_loops = Self::find_all(function); + new_loops.failed_to_unroll = self.failed_to_unroll; + return unroll_errors.into_iter().chain(new_loops.unroll_each(function)).collect(); } // Don't try to unroll the loop again if it is known to fail if !self.failed_to_unroll.contains(&next_loop.header) { - match unroll_loop(function, &self.cfg, &next_loop) { + match next_loop.unroll(function, &self.cfg) { Ok(_) => self.modified_blocks.extend(next_loop.blocks), Err(call_stack) => { self.failed_to_unroll.insert(next_loop.header); @@ -173,73 +201,522 @@ impl Loops { } } -/// Return each block that is in a loop starting in the given header block. -/// Expects back_edge_start -> header to be the back edge of the loop. -fn find_blocks_in_loop( - header: BasicBlockId, - back_edge_start: BasicBlockId, - cfg: &ControlFlowGraph, -) -> Loop { - let mut blocks = HashSet::new(); - blocks.insert(header); - - let mut insert = |block, stack: &mut Vec| { - if !blocks.contains(&block) { - blocks.insert(block); - stack.push(block); +impl Loop { + /// Return each block that is in a loop starting in the given header block. + /// Expects back_edge_start -> header to be the back edge of the loop. 
+ fn find_blocks_in_loop( + header: BasicBlockId, + back_edge_start: BasicBlockId, + cfg: &ControlFlowGraph, + ) -> Self { + let mut blocks = HashSet::new(); + blocks.insert(header); + + let mut insert = |block, stack: &mut Vec| { + if !blocks.contains(&block) { + blocks.insert(block); + stack.push(block); + } + }; + + // Starting from the back edge of the loop, each predecessor of this block until + // the header is within the loop. + let mut stack = vec![]; + insert(back_edge_start, &mut stack); + + while let Some(block) = stack.pop() { + for predecessor in cfg.predecessors(block) { + insert(predecessor, &mut stack); + } } - }; - // Starting from the back edge of the loop, each predecessor of this block until - // the header is within the loop. - let mut stack = vec![]; - insert(back_edge_start, &mut stack); + Self { header, back_edge_start, blocks } + } + + /// Find the lower bound of the loop in the pre-header and return it + /// if it's a numeric constant, which it will be if the previous SSA + /// steps managed to inline it. + /// + /// Consider the following example of a `for i in 0..4` loop: + /// ```text + /// brillig(inline) fn main f0 { + /// b0(v0: u32): // Pre-header + /// ... + /// jmp b1(u32 0) // Lower-bound + /// b1(v1: u32): // Induction variable + /// v5 = lt v1, u32 4 + /// jmpif v5 then: b3, else: b2 + /// ``` + fn get_const_lower_bound( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result, CallStack> { + let pre_header = self.get_pre_header(function, cfg)?; + let jump_value = get_induction_variable(function, pre_header)?; + Ok(function.dfg.get_numeric_constant(jump_value)) + } - while let Some(block) = stack.pop() { - for predecessor in cfg.predecessors(block) { - insert(predecessor, &mut stack); + /// Find the upper bound of the loop in the loop header and return it + /// if it's a numeric constant, which it will be if the previous SSA + /// steps managed to inline it. 
+ /// + /// Consider the following example of a `for i in 0..4` loop: + /// ```text + /// brillig(inline) fn main f0 { + /// b0(v0: u32): + /// ... + /// jmp b1(u32 0) + /// b1(v1: u32): // Loop header + /// v5 = lt v1, u32 4 // Upper bound + /// jmpif v5 then: b3, else: b2 + /// ``` + fn get_const_upper_bound(&self, function: &Function) -> Option { + let block = &function.dfg[self.header]; + let instructions = block.instructions(); + assert_eq!( + instructions.len(), + 1, + "The header should just compare the induction variable and jump" + ); + match &function.dfg[instructions[0]] { + Instruction::Binary(Binary { lhs: _, operator: BinaryOp::Lt, rhs }) => { + function.dfg.get_numeric_constant(*rhs) + } + Instruction::Binary(Binary { lhs: _, operator: BinaryOp::Eq, rhs }) => { + // `for i in 0..1` is turned into: + // b1(v0: u32): + // v12 = eq v0, u32 0 + // jmpif v12 then: b3, else: b2 + function.dfg.get_numeric_constant(*rhs).map(|c| c + FieldElement::one()) + } + other => panic!("Unexpected instruction in header: {other:?}"), } } - Loop { header, back_edge_start, blocks } -} + /// Get the lower and upper bounds of the loop if both are constant numeric values. + fn get_const_bounds( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result, CallStack> { + let Some(lower) = self.get_const_lower_bound(function, cfg)? else { + return Ok(None); + }; + let Some(upper) = self.get_const_upper_bound(function) else { + return Ok(None); + }; + Ok(Some((lower, upper))) + } + + /// Unroll a single loop in the function. + /// Returns Ok(()) if it succeeded, Err(callstack) if it failed, + /// where the callstack indicates the location of the instruction + /// that could not be processed, or empty if such information was + /// not available. 
+ /// + /// Consider this example: + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// jmp loop_entry(v0) + /// loop_entry(i: Field): + /// v2 = lt i v1 + /// jmpif v2, then: loop_body, else: loop_end + /// ``` + /// + /// The first step is to unroll the header by recognizing that jump condition + /// is a constant, which means it will go to `loop_body`: + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// v2 = lt v0 v1 + /// // jmpif v2, then: loop_body, else: loop_end + /// jmp dest: loop_body + /// ``` + /// + /// Following that we unroll the loop body, which is the next source, replace + /// the induction variable with the new value created in the body, and have + /// another go at the header. + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// v2 = lt v0 v1 + /// v3 = ... body ... + /// v4 = add 1, 0 + /// jmp loop_entry(v4) + /// ``` + /// + /// At the end we reach a point where the condition evaluates to 0 and we jump to the end. + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// v2 = lt 0 + /// v3 = ... body ... + /// v4 = add 1, v0 + /// v5 = lt v4 v1 + /// v6 = ... body ... + /// v7 = add v4, 1 + /// v8 = lt v5 v1 + /// jmp loop_end + /// ``` + /// + /// When e.g. `v8 = lt v5 v1` cannot be evaluated to a constant, the loop signals by returning `Err` + /// that a few SSA passes are required to evaluate and simplify these values. + fn unroll(&self, function: &mut Function, cfg: &ControlFlowGraph) -> Result<(), CallStack> { + let mut unroll_into = self.get_pre_header(function, cfg)?; + let mut jump_value = get_induction_variable(function, unroll_into)?; + let mut array_cache = Some(ArrayCache::default()); + + while let Some(mut context) = self.unroll_header(function, unroll_into, jump_value)? { + // The inserter's array cache must be explicitly enabled. This is to + // confirm that we're inserting in insertion order. This is true here since: + // 1. We have a fresh inserter for each loop + // 2. 
Each loop is unrolled in iteration order + // + // Within a loop we do not insert in insertion order. This is fine however since the + // array cache is buffered with a separate fresh_array_cache which collects arrays + // but does not deduplicate. When we later call `into_array_cache`, that will merge + // the fresh cache in with the old one so that each iteration of the loop can cache + // from previous iterations but not the current iteration. + context.inserter.set_array_cache(array_cache, unroll_into); + (unroll_into, jump_value, array_cache) = context.unroll_loop_iteration(); + } + + Ok(()) + } + + /// The loop pre-header is the block that comes before the loop begins. Generally a header block + /// is expected to have 2 predecessors: the pre-header and the final block of the loop which jumps + /// back to the beginning. Other predecessors can come from `break` or `continue`. + fn get_pre_header( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result { + let mut pre_header = cfg + .predecessors(self.header) + .filter(|predecessor| *predecessor != self.back_edge_start) + .collect::>(); + + if function.runtime().is_acir() { + assert_eq!(pre_header.len(), 1); + Ok(pre_header.remove(0)) + } else if pre_header.len() == 1 { + Ok(pre_header.remove(0)) + } else { + // We can come back into the header from multiple blocks, so we can't unroll this. + Err(CallStack::new()) + } + } + + /// Unrolls the header block of the loop. This is the block that dominates all other blocks in the + /// loop and contains the jmpif instruction that lets us know if we should continue looping. + /// Returns Some(iteration context) if we should perform another iteration. 
+ fn unroll_header<'a>( + &'a self, + function: &'a mut Function, + unroll_into: BasicBlockId, + induction_value: ValueId, + ) -> Result>, CallStack> { + // We insert into a fresh block first and move instructions into the unroll_into block later + // only once we verify the jmpif instruction has a constant condition. If it does not, we can + // just discard this fresh block and leave the loop unmodified. + let fresh_block = function.dfg.make_block(); + + let mut context = LoopIteration::new(function, self, fresh_block, self.header); + let source_block = &context.dfg()[context.source_block]; + assert_eq!(source_block.parameters().len(), 1, "Expected only 1 argument in loop header"); + + // Insert the current value of the loop induction variable into our context. + let first_param = source_block.parameters()[0]; + context.inserter.try_map_value(first_param, induction_value); + // Copy over all instructions and a fresh terminator. + context.inline_instructions_from_block(); + // Mutate the terminator if possible so that it points at the iteration block. + match context.dfg()[fresh_block].unwrap_terminator() { + TerminatorInstruction::JmpIf { condition, then_destination, else_destination, call_stack } => { + let condition = *condition; + let next_blocks = context.handle_jmpif(condition, *then_destination, *else_destination, call_stack.clone()); + + // If there is only 1 next block the jmpif evaluated to a single known block. + // This is the expected case and lets us know if we should loop again or not. + if next_blocks.len() == 1 { + context.dfg_mut().inline_block(fresh_block, unroll_into); + + // The fresh block is gone now so we're committing to insert into the original + // unroll_into block from now on. 
+ context.insert_block = unroll_into; + + // In the last iteration, `handle_jmpif` will have replaced `context.source_block` + // with the `else_destination`, that is, the `loop_end`, which signals that we + // have no more loops to unroll, because that block was not part of the loop itself, + // ie. it wasn't between `loop_header` and `loop_body`. Otherwise we have the `loop_body` + // in `source_block` and can unroll that into the destination. + Ok(self.blocks.contains(&context.source_block).then_some(context)) + } else { + // If this case is reached the loop either uses non-constant indices or we need + // another pass, such as mem2reg to resolve them to constants. + Err(context.inserter.function.dfg.get_value_call_stack(condition)) + } + } + other => unreachable!("Expected loop header to terminate in a JmpIf to the loop body, but found {other:?} instead"), + } + } + + /// Find all reference values which were allocated before the pre-header. + /// + /// These are accessible inside the loop body, and they can be involved + /// in load/store operations that could be eliminated if we unrolled the + /// body into the pre-header. 
+ /// + /// Consider this loop: + /// ```text + /// let mut sum = 0; + /// let mut arr = &[]; + /// for i in 0..3 { + /// sum = sum + i; + /// arr.push_back(sum) + /// } + /// sum + /// ``` + /// + /// The SSA has a load+store for the `sum` and a load+push for the `arr`: + /// ```text + /// b0(v0: u32): + /// v2 = allocate -> &mut u32 // reference allocated for `sum` + /// store u32 0 at v2 // initial value for `sum` + /// v4 = allocate -> &mut u32 // reference allocated for the length of `arr` + /// store u32 0 at v4 // initial length of `arr` + /// inc_rc [] of u32 // storage for `arr` + /// v6 = allocate -> &mut [u32] // reference allocated to point at the storage of `arr` + /// store [] of u32 at v6 // initial value for the storage of `arr` + /// jmp b1(u32 0) // start looping from 0 + /// b1(v1: u32): // `i` induction variable + /// v8 = lt v1, u32 3 // loop until 3 + /// jmpif v8 then: b3, else: b2 + /// b3(): + /// v11 = load v2 -> u32 // load `sum` + /// v12 = add v11, v1 // add `i` to `sum` + /// store v12 at v2 // store updated `sum` + /// v13 = load v4 -> u32 // load length of `arr` + /// v14 = load v6 -> [u32] // load storage of `arr` + /// v16, v17 = call slice_push_back(v13, v14, v12) -> (u32, [u32]) // builtin to push, will store to storage and length references + /// v19 = add v1, u32 1 // increase `arr` + /// jmp b1(v19) // back-edge of the loop + /// b2(): // after the loop + /// v9 = load v2 -> u32 // read final value of `sum` + /// ``` + /// + /// We won't always find load _and_ store ops (e.g. the push above doesn't come with a store), + /// but it's likely that mem2reg could eliminate a lot of the loads we can find, so we can + /// use this as an approximation of the gains we would see. + fn find_pre_header_reference_values( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result, CallStack> { + // We need to traverse blocks from the pre-header up to the block entry point. 
+ let pre_header = self.get_pre_header(function, cfg)?; + let function_entry = function.entry_block(); + + // The algorithm in `find_blocks_in_loop` expects to collect the blocks between the header and the back-edge of the loop, + // but technically works the same if we go from the pre-header up to the function entry as well. + let blocks = Self::find_blocks_in_loop(function_entry, pre_header, cfg).blocks; + + // Collect allocations in all blocks above the header. + let allocations = blocks.iter().flat_map(|b| { + function.dfg[*b] + .instructions() + .iter() + .filter(|i| matches!(&function.dfg[**i], Instruction::Allocate)) + .map(|i| { + // Get the value into which the allocation was stored. + function.dfg.instruction_results(*i)[0] + }) + }); + + // Collect reference parameters of the function itself. + let params = + function.parameters().iter().filter(|p| function.dfg.value_is_reference(**p)).copied(); + + Ok(params.chain(allocations).collect()) + } + + /// Count the number of load and store instructions of specific variables in the loop. + /// + /// Returns `(loads, stores)` in case we want to differentiate in the estimates. + fn count_loads_and_stores( + &self, + function: &Function, + refs: &HashSet, + ) -> (usize, usize) { + let mut loads = 0; + let mut stores = 0; + for block in &self.blocks { + for instruction in function.dfg[*block].instructions() { + match &function.dfg[*instruction] { + Instruction::Load { address } if refs.contains(address) => { + loads += 1; + } + Instruction::Store { address, .. } if refs.contains(address) => { + stores += 1; + } + _ => {} + } + } + } + (loads, stores) + } + + /// Count the number of instructions in the loop, including the terminating jumps. 
+ fn count_all_instructions(&self, function: &Function) -> usize { + self.blocks + .iter() + .map(|block| { + let block = &function.dfg[*block]; + block.instructions().len() + block.terminator().map(|_| 1).unwrap_or_default() + }) + .sum() + } -/// Unroll a single loop in the function. -/// Returns Err(()) if it failed to unroll and Ok(()) otherwise. -fn unroll_loop( - function: &mut Function, - cfg: &ControlFlowGraph, - loop_: &Loop, -) -> Result<(), CallStack> { - let mut unroll_into = get_pre_header(cfg, loop_); - let mut jump_value = get_induction_variable(function, unroll_into)?; + /// Count the number of increments to the induction variable. + /// It should be one, but it can be duplicated. + /// The increment should be in the block where the back-edge was found. + fn count_induction_increments(&self, function: &Function) -> usize { + let back = &function.dfg[self.back_edge_start]; + let header = &function.dfg[self.header]; + let induction_var = header.parameters()[0]; + + back.instructions().iter().filter(|instruction| { + let instruction = &function.dfg[**instruction]; + matches!(instruction, Instruction::Binary(Binary { lhs, operator: BinaryOp::Add, rhs: _ }) if *lhs == induction_var) + }).count() + } - while let Some(context) = unroll_loop_header(function, loop_, unroll_into, jump_value)? { - let (last_block, last_value) = context.unroll_loop_iteration(); - unroll_into = last_block; - jump_value = last_value; + /// Decide if this loop is small enough that it can be inlined in a way that the number + /// of unrolled instructions times the number of iterations would result in smaller bytecode + /// than if we keep the loops with their overheads. 
+ fn is_small_loop(&self, function: &Function, cfg: &ControlFlowGraph) -> bool { + self.boilerplate_stats(function, cfg).map(|s| s.is_small()).unwrap_or_default() } - Ok(()) + /// Collect boilerplate stats if we can figure out the upper and lower bounds of the loop, + /// and the loop doesn't have multiple back-edges from breaks and continues. + fn boilerplate_stats( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Option { + let Ok(Some((lower, upper))) = self.get_const_bounds(function, cfg) else { + return None; + }; + let Some(lower) = lower.try_to_u64() else { + return None; + }; + let Some(upper) = upper.try_to_u64() else { + return None; + }; + let Ok(refs) = self.find_pre_header_reference_values(function, cfg) else { + return None; + }; + let (loads, stores) = self.count_loads_and_stores(function, &refs); + let increments = self.count_induction_increments(function); + let all_instructions = self.count_all_instructions(function); + + Some(BoilerplateStats { + iterations: (upper - lower) as usize, + loads, + stores, + increments, + all_instructions, + }) + } } -/// The loop pre-header is the block that comes before the loop begins. Generally a header block -/// is expected to have 2 predecessors: the pre-header and the final block of the loop which jumps -/// back to the beginning. -fn get_pre_header(cfg: &ControlFlowGraph, loop_: &Loop) -> BasicBlockId { - let mut pre_header = cfg - .predecessors(loop_.header) - .filter(|predecessor| *predecessor != loop_.back_edge_start) - .collect::>(); - - assert_eq!(pre_header.len(), 1); - pre_header.remove(0) +/// All the instructions in the following example are boilerplate: +/// ```text +/// brillig(inline) fn main f0 { +/// b0(v0: u32): +/// ... +/// jmp b1(u32 0) +/// b1(v1: u32): +/// v5 = lt v1, u32 4 +/// jmpif v5 then: b3, else: b2 +/// b3(): +/// ... +/// v11 = add v1, u32 1 +/// jmp b1(v11) +/// b2(): +/// ... 
+/// } +/// ``` +#[derive(Debug)] +struct BoilerplateStats { + /// Number of iterations in the loop. + iterations: usize, + /// Number of loads pre-header references in the loop. + loads: usize, + /// Number of stores into pre-header references in the loop. + stores: usize, + /// Number of increments to the induction variable (might be duplicated). + increments: usize, + /// Number of instructions in the loop, including boilerplate, + /// but excluding the boilerplate which is outside the loop. + all_instructions: usize, +} + +impl BoilerplateStats { + /// Instruction count if we leave the loop as-is. + /// It's the instructions in the loop, plus the one to kick it off in the pre-header. + fn baseline_instructions(&self) -> usize { + self.all_instructions + 1 + } + + /// Estimated number of _useful_ instructions, which is the ones in the loop + /// minus all in-loop boilerplate. + fn useful_instructions(&self) -> usize { + // Two jumps + plus the comparison with the upper bound + let boilerplate = 3; + // Be conservative and only assume that mem2reg gets rid of load followed by store. + // NB we have not checked that these are actual pairs. + let load_and_store = self.loads.min(self.stores) * 2; + self.all_instructions - self.increments - load_and_store - boilerplate + } + + /// Estimated number of instructions if we unroll the loop. + fn unrolled_instructions(&self) -> usize { + self.useful_instructions() * self.iterations + } + + /// A small loop is where if we unroll it into the pre-header then considering the + /// number of iterations we still end up with a smaller bytecode than if we leave + /// the blocks in tact with all the boilerplate involved in jumping, and the extra + /// reference access instructions. + fn is_small(&self) -> bool { + self.unrolled_instructions() < self.baseline_instructions() + } } /// Return the induction value of the current iteration of the loop, from the given block's jmp arguments. 
/// /// Expects the current block to terminate in `jmp h(N)` where h is the loop header and N is -/// a Field value. +/// a Field value. Returns an `Err` if this isn't the case. +/// +/// Consider the following example: +/// ```text +/// main(): +/// v0 = ... start ... +/// v1 = ... end ... +/// jmp loop_entry(v0) +/// loop_entry(i: Field): +/// ... +/// ``` +/// We're looking for the terminating jump of the `main` predecessor of `loop_entry`. fn get_induction_variable(function: &Function, block: BasicBlockId) -> Result { match function.dfg[block].terminator() { Some(TerminatorInstruction::Jmp { arguments, call_stack: location, .. }) => { @@ -260,54 +737,6 @@ fn get_induction_variable(function: &Function, block: BasicBlockId) -> Result( - function: &'a mut Function, - loop_: &'a Loop, - unroll_into: BasicBlockId, - induction_value: ValueId, -) -> Result>, CallStack> { - // We insert into a fresh block first and move instructions into the unroll_into block later - // only once we verify the jmpif instruction has a constant condition. If it does not, we can - // just discard this fresh block and leave the loop unmodified. - let fresh_block = function.dfg.make_block(); - - let mut context = LoopIteration::new(function, loop_, fresh_block, loop_.header); - let source_block = &context.dfg()[context.source_block]; - assert_eq!(source_block.parameters().len(), 1, "Expected only 1 argument in loop header"); - - // Insert the current value of the loop induction variable into our context. 
- let first_param = source_block.parameters()[0]; - context.inserter.try_map_value(first_param, induction_value); - context.inline_instructions_from_block(); - - match context.dfg()[fresh_block].unwrap_terminator() { - TerminatorInstruction::JmpIf { condition, then_destination, else_destination, call_stack } => { - let condition = *condition; - let next_blocks = context.handle_jmpif(condition, *then_destination, *else_destination, call_stack.clone()); - - // If there is only 1 next block the jmpif evaluated to a single known block. - // This is the expected case and lets us know if we should loop again or not. - if next_blocks.len() == 1 { - context.dfg_mut().inline_block(fresh_block, unroll_into); - - // The fresh block is gone now so we're committing to insert into the original - // unroll_into block from now on. - context.insert_block = unroll_into; - - Ok(loop_.blocks.contains(&context.source_block).then_some(context)) - } else { - // If this case is reached the loop either uses non-constant indices or we need - // another pass, such as mem2reg to resolve them to constants. - Err(context.inserter.function.dfg.get_value_call_stack(condition)) - } - } - other => unreachable!("Expected loop header to terminate in a JmpIf to the loop body, but found {other:?} instead"), - } -} - /// The context object for each loop iteration. /// Notably each loop iteration maps each loop block to a fresh, unrolled block. struct LoopIteration<'f> { @@ -357,7 +786,7 @@ impl<'f> LoopIteration<'f> { /// It is expected the terminator instructions are set up to branch into an empty block /// for further unrolling. When the loop is finished this will need to be mutated to /// jump to the end of the loop instead. 
- fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId) { + fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId, Option) { let mut next_blocks = self.unroll_loop_block(); while let Some(block) = next_blocks.pop() { @@ -369,14 +798,20 @@ impl<'f> LoopIteration<'f> { next_blocks.append(&mut blocks); } } + // After having unrolled all blocks in the loop body, we must know how to get back to the header; + // this is also the block into which we have to unroll into next. + let (end_block, induction_value) = self + .induction_value + .expect("Expected to find the induction variable by end of loop iteration"); - self.induction_value - .expect("Expected to find the induction variable by end of loop iteration") + (end_block, induction_value, self.inserter.into_array_cache()) } /// Unroll a single block in the current iteration of the loop fn unroll_loop_block(&mut self) -> Vec { let mut next_blocks = self.unroll_loop_block_helper(); + // Guarantee that the next blocks we set up to be unrolled, are actually part of the loop, + // which we recorded while inlining the instructions of the blocks already processed. next_blocks.retain(|block| { let b = self.get_original_block(*block); self.loop_.blocks.contains(&b) @@ -386,6 +821,7 @@ impl<'f> LoopIteration<'f> { /// Unroll a single block in the current iteration of the loop fn unroll_loop_block_helper(&mut self) -> Vec { + // Copy instructions from the loop body to the unroll destination, replacing the terminator. self.inline_instructions_from_block(); self.visited_blocks.insert(self.source_block); @@ -403,6 +839,7 @@ impl<'f> LoopIteration<'f> { ), TerminatorInstruction::Jmp { destination, arguments, call_stack: _ } => { if self.get_original_block(*destination) == self.loop_.header { + // We found the back-edge of the loop. 
assert_eq!(arguments.len(), 1); self.induction_value = Some((self.insert_block, arguments[0])); } @@ -414,7 +851,10 @@ impl<'f> LoopIteration<'f> { /// Find the next branch(es) to take from a jmpif terminator and return them. /// If only one block is returned, it means the jmpif condition evaluated to a known - /// constant and we can safely take only the given branch. + /// constant and we can safely take only the given branch. In this case the method + /// also replaces the terminator of the insert block (a.k.a fresh block) to be a `Jmp`, + /// and changes the source block in the context for the next iteration to be the + /// destination indicated by the constant condition (ie. the `then` or the `else`). fn handle_jmpif( &mut self, condition: ValueId, @@ -460,10 +900,13 @@ impl<'f> LoopIteration<'f> { } } + /// Find the original ID of a block that replaced it. fn get_original_block(&self, block: BasicBlockId) -> BasicBlockId { self.original_blocks.get(&block).copied().unwrap_or(block) } + /// Copy over instructions from the source into the insert block, + /// while simplifying instructions and keeping track of original block IDs. fn inline_instructions_from_block(&mut self) { let source_block = &self.dfg()[self.source_block]; let instructions = source_block.instructions().to_vec(); @@ -472,23 +915,31 @@ impl<'f> LoopIteration<'f> { // instances of the induction variable or any values that were changed as a result // of the new induction variable value. for instruction in instructions { - // Skip reference count instructions since they are only used for brillig, and brillig code is not unrolled - if !matches!( - self.dfg()[instruction], - Instruction::IncrementRc { .. } | Instruction::DecrementRc { .. } - ) { - self.inserter.push_instruction(instruction, self.insert_block); + // Reference counting is only used by Brillig, ACIR doesn't need them. 
+ if self.inserter.function.runtime().is_acir() && self.is_refcount(instruction) { + continue; } + self.inserter.push_instruction(instruction, self.insert_block); } let mut terminator = self.dfg()[self.source_block] .unwrap_terminator() .clone() .map_values(|value| self.inserter.resolve(value)); + // Replace the blocks in the terminator with fresh one with the same parameters, + // while remembering which were the original block IDs. terminator.mutate_blocks(|block| self.get_or_insert_block(block)); self.inserter.function.dfg.set_block_terminator(self.insert_block, terminator); } + /// Is the instruction an `Rc`? + fn is_refcount(&self, instruction: InstructionId) -> bool { + matches!( + self.dfg()[instruction], + Instruction::IncrementRc { .. } | Instruction::DecrementRc { .. } + ) + } + fn dfg(&self) -> &DataFlowGraph { &self.inserter.function.dfg } @@ -500,22 +951,19 @@ impl<'f> LoopIteration<'f> { #[cfg(test)] mod tests { - use crate::{ - errors::RuntimeError, - ssa::{ - function_builder::FunctionBuilder, - ir::{instruction::BinaryOp, map::Id, types::Type}, - }, - }; + use acvm::FieldElement; + + use crate::errors::RuntimeError; + use crate::ssa::{ir::value::ValueId, opt::assert_normalized_ssa_equals, Ssa}; - use super::Ssa; + use super::{BoilerplateStats, Loops}; /// Tries to unroll all loops in each SSA function. /// If any loop cannot be unrolled, it is left as-is or in a partially unrolled state. 
- fn try_to_unroll_loops(mut ssa: Ssa) -> (Ssa, Vec) { + fn try_unroll_loops(mut ssa: Ssa) -> (Ssa, Vec) { let mut errors = vec![]; for function in ssa.functions.values_mut() { - errors.extend(function.try_to_unroll_loops()); + errors.extend(function.try_unroll_loops()); } (ssa, errors) } @@ -529,166 +977,406 @@ mod tests { // } // } // } - // - // fn main f0 { - // b0(): - // jmp b1(Field 0) - // b1(v0: Field): // header of outer loop - // v1 = lt v0, Field 3 - // jmpif v1, then: b2, else: b3 - // b2(): - // jmp b4(Field 0) - // b4(v2: Field): // header of inner loop - // v3 = lt v2, Field 4 - // jmpif v3, then: b5, else: b6 - // b5(): - // v4 = add v0, v2 - // v5 = lt Field 10, v4 - // constrain v5 - // v6 = add v2, Field 1 - // jmp b4(v6) - // b6(): // end of inner loop - // v7 = add v0, Field 1 - // jmp b1(v7) - // b3(): // end of outer loop - // return Field 0 - // } - let main_id = Id::test_new(0); - - // Compiling main - let mut builder = FunctionBuilder::new("main".into(), main_id); - - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); - let b4 = builder.insert_block(); - let b5 = builder.insert_block(); - let b6 = builder.insert_block(); - - let v0 = builder.add_block_parameter(b1, Type::field()); - let v2 = builder.add_block_parameter(b4, Type::field()); - - let zero = builder.field_constant(0u128); - let one = builder.field_constant(1u128); - let three = builder.field_constant(3u128); - let four = builder.field_constant(4u128); - let ten = builder.field_constant(10u128); - - builder.terminate_with_jmp(b1, vec![zero]); - - // b1 - builder.switch_to_block(b1); - let v1 = builder.insert_binary(v0, BinaryOp::Lt, three); - builder.terminate_with_jmpif(v1, b2, b3); - - // b2 - builder.switch_to_block(b2); - builder.terminate_with_jmp(b4, vec![zero]); - - // b3 - builder.switch_to_block(b3); - builder.terminate_with_return(vec![zero]); - - // b4 - builder.switch_to_block(b4); - let v3 = 
builder.insert_binary(v2, BinaryOp::Lt, four); - builder.terminate_with_jmpif(v3, b5, b6); - - // b5 - builder.switch_to_block(b5); - let v4 = builder.insert_binary(v0, BinaryOp::Add, v2); - let v5 = builder.insert_binary(ten, BinaryOp::Lt, v4); - builder.insert_constrain(v5, one, None); - let v6 = builder.insert_binary(v2, BinaryOp::Add, one); - builder.terminate_with_jmp(b4, vec![v6]); - - // b6 - builder.switch_to_block(b6); - let v7 = builder.insert_binary(v0, BinaryOp::Add, one); - builder.terminate_with_jmp(b1, vec![v7]); - - let ssa = builder.finish(); - assert_eq!(ssa.main().reachable_blocks().len(), 7); - - // Expected output: - // - // fn main f0 { - // b0(): - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // jmp b23() - // b23(): - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // jmp b27() - // b27(): - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // jmp b31() - // b31(): - // jmp b3() - // b3(): - // return Field 0 - // } + let src = " + acir(inline) fn main f0 { + b0(): + jmp b1(Field 0) + b1(v0: Field): // header of outer loop + v1 = lt v0, Field 3 + jmpif v1 then: b2, else: b3 + b2(): + jmp b4(Field 0) + b4(v2: Field): // header of inner loop + v3 = lt v2, Field 4 + jmpif v3 then: b5, else: b6 + b5(): + v4 = add v0, v2 + v5 = lt Field 10, v4 + constrain v5 == Field 1 + v6 = add v2, Field 1 + jmp b4(v6) + b6(): // end of inner loop + v7 = add v0, Field 1 + jmp b1(v7) + b3(): // end of outer loop + return Field 0 + } + "; + let ssa = Ssa::from_str(src).unwrap(); + + let expected = " + acir(inline) fn main f0 { + b0(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b1() + b1(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b2() + b2(): + constrain u1 0 == Field 1 + constrain 
u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b3() + b3(): + jmp b4() + b4(): + return Field 0 + } + "; + // The final block count is not 1 because unrolling creates some unnecessary jmps. // If a simplify cfg pass is ran afterward, the expected block count will be 1. - let (ssa, errors) = try_to_unroll_loops(ssa); + let (ssa, errors) = try_unroll_loops(ssa); assert_eq!(errors.len(), 0, "All loops should be unrolled"); assert_eq!(ssa.main().reachable_blocks().len(), 5); + + assert_normalized_ssa_equals(ssa, expected); } // Test that the pass can still be run on loops which fail to unroll properly #[test] fn fail_to_unroll_loop() { - // fn main f0 { - // b0(v0: Field): - // jmp b1(v0) - // b1(v1: Field): - // v2 = lt v1, 5 - // jmpif v2, then: b2, else: b3 - // b2(): - // v3 = add v1, Field 1 - // jmp b1(v3) - // b3(): - // return Field 0 - // } - let main_id = Id::test_new(0); - let mut builder = FunctionBuilder::new("main".into(), main_id); + let src = " + acir(inline) fn main f0 { + b0(v0: Field): + jmp b1(v0) + b1(v1: Field): + v2 = lt v1, Field 5 + jmpif v2 then: b2, else: b3 + b2(): + v3 = add v1, Field 1 + jmp b1(v3) + b3(): + return Field 0 + } + "; + let ssa = Ssa::from_str(src).unwrap(); - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); + // Sanity check + assert_eq!(ssa.main().reachable_blocks().len(), 4); - let v0 = builder.add_parameter(Type::field()); - let v1 = builder.add_block_parameter(b1, Type::field()); + // Expected that we failed to unroll the loop + let (_, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 1, "Expected to fail to unroll loop"); + } - builder.terminate_with_jmp(b1, vec![v0]); + #[test] + fn test_get_const_bounds() { + let ssa = brillig_unroll_test_case(); + let function = ssa.main(); + let loops = Loops::find_all(function); + assert_eq!(loops.yet_to_unroll.len(), 1); + + let (lower, upper) = loops.yet_to_unroll[0] + 
.get_const_bounds(function, &loops.cfg) + .expect("should find bounds") + .expect("bounds are numeric const"); + + assert_eq!(lower, FieldElement::from(0u32)); + assert_eq!(upper, FieldElement::from(4u32)); + } - builder.switch_to_block(b1); - let five = builder.field_constant(5u128); - let v2 = builder.insert_binary(v1, BinaryOp::Lt, five); - builder.terminate_with_jmpif(v2, b2, b3); + #[test] + fn test_find_pre_header_reference_values() { + let ssa = brillig_unroll_test_case(); + let function = ssa.main(); + let mut loops = Loops::find_all(function); + let loop0 = loops.yet_to_unroll.pop().unwrap(); + + let refs = loop0.find_pre_header_reference_values(function, &loops.cfg).unwrap(); + assert_eq!(refs.len(), 1); + assert!(refs.contains(&ValueId::new(2))); + + let (loads, stores) = loop0.count_loads_and_stores(function, &refs); + assert_eq!(loads, 1); + assert_eq!(stores, 1); + + let all = loop0.count_all_instructions(function); + assert_eq!(all, 7); + } - builder.switch_to_block(b2); - let one = builder.field_constant(1u128); - let v3 = builder.insert_binary(v1, BinaryOp::Add, one); - builder.terminate_with_jmp(b1, vec![v3]); + #[test] + fn test_boilerplate_stats() { + let ssa = brillig_unroll_test_case(); + let stats = loop0_stats(&ssa); + assert_eq!(stats.iterations, 4); + assert_eq!(stats.all_instructions, 2 + 5); // Instructions in b1 and b3 + assert_eq!(stats.increments, 1); + assert_eq!(stats.loads, 1); + assert_eq!(stats.stores, 1); + assert_eq!(stats.useful_instructions(), 1); // Adding to sum + assert_eq!(stats.baseline_instructions(), 8); + assert!(stats.is_small()); + } - builder.switch_to_block(b3); - let zero = builder.field_constant(0u128); - builder.terminate_with_return(vec![zero]); + #[test] + fn test_boilerplate_stats_6470() { + let ssa = brillig_unroll_test_case_6470(3); + let stats = loop0_stats(&ssa); + assert_eq!(stats.iterations, 3); + assert_eq!(stats.all_instructions, 2 + 8); // Instructions in b1 and b3 + assert_eq!(stats.increments, 2); 
+ assert_eq!(stats.loads, 1); + assert_eq!(stats.stores, 1); + assert_eq!(stats.useful_instructions(), 3); // array get, add, array set + assert_eq!(stats.baseline_instructions(), 11); + assert!(stats.is_small()); + } - let ssa = builder.finish(); - assert_eq!(ssa.main().reachable_blocks().len(), 4); + /// Test that we can unroll a small loop. + #[test] + fn test_brillig_unroll_small_loop() { + let ssa = brillig_unroll_test_case(); + + // Expectation taken by compiling the Noir program as ACIR, + // ie. by removing the `unconstrained` from `main`. + let expected = " + brillig(inline) fn main f0 { + b0(v0: u32): + v1 = allocate -> &mut u32 + store u32 0 at v1 + v3 = load v1 -> u32 + store v3 at v1 + v4 = load v1 -> u32 + v6 = add v4, u32 1 + store v6 at v1 + v7 = load v1 -> u32 + v9 = add v7, u32 2 + store v9 at v1 + v10 = load v1 -> u32 + v12 = add v10, u32 3 + store v12 at v1 + jmp b1() + b1(): + v13 = load v1 -> u32 + v14 = eq v13, v0 + constrain v13 == v0 + return + } + "; - // Expected that we failed to unroll the loop - let (_, errors) = try_to_unroll_loops(ssa); - assert_eq!(errors.len(), 1, "Expected to fail to unroll loop"); + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_eq!(ssa.main().reachable_blocks().len(), 2, "The loop should be unrolled"); + + assert_normalized_ssa_equals(ssa, expected); + } + + /// Test that we can unroll the loop in the ticket if we don't have too many iterations. + #[test] + fn test_brillig_unroll_6470_small() { + // Few enough iterations so that we can perform the unroll. + let ssa = brillig_unroll_test_case_6470(3); + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_eq!(ssa.main().reachable_blocks().len(), 2, "The loop should be unrolled"); + + // The IDs are shifted by one compared to what the ACIR version printed. 
+ let expected = " + brillig(inline) fn main f0 { + b0(v0: [u64; 6]): + inc_rc v0 + v2 = make_array [u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] : [u64; 6] + inc_rc v2 + v3 = allocate -> &mut [u64; 6] + store v2 at v3 + v4 = load v3 -> [u64; 6] + v6 = array_get v0, index u32 0 -> u64 + v8 = add v6, u64 1 + v9 = array_set v4, index u32 0, value v8 + store v9 at v3 + v10 = load v3 -> [u64; 6] + v12 = array_get v0, index u32 1 -> u64 + v13 = add v12, u64 1 + v14 = array_set v10, index u32 1, value v13 + store v14 at v3 + v15 = load v3 -> [u64; 6] + v17 = array_get v0, index u32 2 -> u64 + v18 = add v17, u64 1 + v19 = array_set v15, index u32 2, value v18 + store v19 at v3 + jmp b1() + b1(): + v20 = load v3 -> [u64; 6] + dec_rc v0 + return v20 + } + "; + assert_normalized_ssa_equals(ssa, expected); + } + + /// Test that with more iterations it's not unrolled. + #[test] + fn test_brillig_unroll_6470_large() { + // More iterations than it can unroll + let parse_ssa = || brillig_unroll_test_case_6470(6); + let ssa = parse_ssa(); + let stats = loop0_stats(&ssa); + assert!(!stats.is_small(), "the loop should be considered large"); + + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_normalized_ssa_equals(ssa, parse_ssa().to_string().as_str()); + } + + /// Test that `break` and `continue` stop unrolling without any panic. 
+ #[test] + fn test_brillig_unroll_break_and_continue() { + // unconstrained fn main() { + // let mut count = 0; + // for i in 0..10 { + // if i == 2 { + // continue; + // } + // if i == 5 { + // break; + // } + // count += 1; + // } + // assert(count == 4); + // } + let src = " + brillig(inline) fn main f0 { + b0(): + v1 = allocate -> &mut Field + store Field 0 at v1 + jmp b1(u32 0) + b1(v0: u32): + v5 = lt v0, u32 10 + jmpif v5 then: b2, else: b6 + b2(): + v7 = eq v0, u32 2 + jmpif v7 then: b7, else: b3 + b7(): + v18 = add v0, u32 1 + jmp b1(v18) + b3(): + v9 = eq v0, u32 5 + jmpif v9 then: b5, else: b4 + b5(): + jmp b6() + b6(): + v15 = load v1 -> Field + v17 = eq v15, Field 4 + constrain v15 == Field 4 + return + b4(): + v10 = load v1 -> Field + v12 = add v10, Field 1 + store v12 at v1 + v14 = add v0, u32 1 + jmp b1(v14) + } + "; + let ssa = Ssa::from_str(src).unwrap(); + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_normalized_ssa_equals(ssa, src); + } + + /// Simple test loop: + /// ```text + /// unconstrained fn main(sum: u32) { + /// assert(loop(0, 4) == sum); + /// } + /// + /// fn loop(from: u32, to: u32) -> u32 { + /// let mut sum = 0; + /// for i in from..to { + /// sum = sum + i; + /// } + /// sum + /// } + /// ``` + /// We can check what the ACIR unrolling behavior would be by + /// removing the `unconstrained` from the `main` function and + /// compiling the program with `nargo --test-program . compile --show-ssa`. 
+ fn brillig_unroll_test_case() -> Ssa { + let src = " + // After `static_assert` and `assert_constant`: + brillig(inline) fn main f0 { + b0(v0: u32): + v2 = allocate -> &mut u32 + store u32 0 at v2 + jmp b1(u32 0) + b1(v1: u32): + v5 = lt v1, u32 4 + jmpif v5 then: b3, else: b2 + b3(): + v8 = load v2 -> u32 + v9 = add v8, v1 + store v9 at v2 + v11 = add v1, u32 1 + jmp b1(v11) + b2(): + v6 = load v2 -> u32 + v7 = eq v6, v0 + constrain v6 == v0 + return + } + "; + Ssa::from_str(src).unwrap() + } + + /// Test case from #6470: + /// ```text + /// unconstrained fn __validate_gt_remainder(a_u60: [u64; 6]) -> [u64; 6] { + /// let mut result_u60: [u64; 6] = [0; 6]; + /// + /// for i in 0..6 { + /// result_u60[i] = a_u60[i] + 1; + /// } + /// + /// result_u60 + /// } + /// ``` + /// The `num_iterations` parameter can be used to make it more costly to inline. + fn brillig_unroll_test_case_6470(num_iterations: usize) -> Ssa { + let src = format!( + " + // After `static_assert` and `assert_constant`: + brillig(inline) fn main f0 {{ + b0(v0: [u64; 6]): + inc_rc v0 + v3 = make_array [u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] : [u64; 6] + inc_rc v3 + v4 = allocate -> &mut [u64; 6] + store v3 at v4 + jmp b1(u32 0) + b1(v1: u32): + v7 = lt v1, u32 {num_iterations} + jmpif v7 then: b3, else: b2 + b3(): + v9 = load v4 -> [u64; 6] + v10 = array_get v0, index v1 -> u64 + v12 = add v10, u64 1 + v13 = array_set v9, index v1, value v12 + v15 = add v1, u32 1 + store v13 at v4 + v16 = add v1, u32 1 // duplicate + jmp b1(v16) + b2(): + v8 = load v4 -> [u64; 6] + dec_rc v0 + return v8 + }} + " + ); + Ssa::from_str(&src).unwrap() + } + + // Boilerplate stats of the first loop in the SSA. 
+ fn loop0_stats(ssa: &Ssa) -> BoilerplateStats { + let function = ssa.main(); + let mut loops = Loops::find_all(function); + let loop0 = loops.yet_to_unroll.pop().expect("there should be a loop"); + loop0.boilerplate_stats(function, &loops.cfg).expect("there should be stats") } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/ast.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/ast.rs index f8fe8c68a98..a34b7fd70d3 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/ast.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/ast.rs @@ -104,6 +104,11 @@ pub(crate) enum ParsedInstruction { value: ParsedValue, typ: Type, }, + MakeArray { + target: Identifier, + elements: Vec, + typ: Type, + }, Not { target: Identifier, value: ParsedValue, @@ -131,9 +136,8 @@ pub(crate) enum ParsedTerminator { Return(Vec), } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum ParsedValue { NumericConstant { constant: FieldElement, typ: Type }, - Array { values: Vec, typ: Type }, Variable(Identifier), } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs index 2a94a4fd1eb..552ac0781c7 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs @@ -1,7 +1,5 @@ use std::collections::HashMap; -use im::Vector; - use crate::ssa::{ function_builder::FunctionBuilder, ir::{basic_block::BasicBlockId, function::FunctionId, value::ValueId}, @@ -27,7 +25,11 @@ struct Translator { /// Maps block names to their IDs blocks: HashMap>, - /// Maps variable names to their IDs + /// Maps variable names to their IDs. + /// + /// This is necessary because the SSA we parse might have undergone some + /// passes already which replaced some of the original IDs. The translator + /// will recreate the SSA step by step, which can result in a new ID layout. 
variables: HashMap>, } @@ -213,6 +215,14 @@ impl Translator { let value = self.translate_value(value)?; self.builder.increment_array_reference_count(value); } + ParsedInstruction::MakeArray { target, elements, typ } => { + let elements = elements + .into_iter() + .map(|element| self.translate_value(element)) + .collect::>()?; + let value_id = self.builder.insert_make_array(elements, typ); + self.define_variable(target, value_id)?; + } ParsedInstruction::Load { target, value, typ } => { let value = self.translate_value(value)?; let value_id = self.builder.insert_load(value, typ); @@ -255,13 +265,6 @@ impl Translator { ParsedValue::NumericConstant { constant, typ } => { Ok(self.builder.numeric_constant(constant, typ)) } - ParsedValue::Array { values, typ } => { - let mut translated_values = Vector::new(); - for value in values { - translated_values.push_back(self.translate_value(value)?); - } - Ok(self.builder.array_constant(translated_values, typ)) - } ParsedValue::Variable(identifier) => self.lookup_variable(identifier), } } @@ -308,7 +311,13 @@ impl Translator { } fn finish(self) -> Ssa { - self.builder.finish() + let mut ssa = self.builder.finish(); + // Normalize the IDs so we have a better chance of matching the SSA we parsed + // after the step-by-step reconstruction done during translation. This assumes + // that the SSA we parsed was printed by the `SsaBuilder`, which normalizes + // before each print. 
+ ssa.normalize_ids(); + ssa } fn current_function_id(&self) -> FunctionId { diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/mod.rs similarity index 97% rename from noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs rename to noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/mod.rs index 11d43284786..2db2c636a8f 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/mod.rs @@ -429,6 +429,15 @@ impl<'a> Parser<'a> { return Ok(ParsedInstruction::Load { target, value, typ }); } + if self.eat_keyword(Keyword::MakeArray)? { + self.eat_or_error(Token::LeftBracket)?; + let elements = self.parse_comma_separated_values()?; + self.eat_or_error(Token::RightBracket)?; + self.eat_or_error(Token::Colon)?; + let typ = self.parse_type()?; + return Ok(ParsedInstruction::MakeArray { target, elements, typ }); + } + if self.eat_keyword(Keyword::Not)? { let value = self.parse_value_or_error()?; return Ok(ParsedInstruction::Not { target, value }); @@ -557,10 +566,6 @@ impl<'a> Parser<'a> { return Ok(Some(value)); } - if let Some(value) = self.parse_array_value()? { - return Ok(Some(value)); - } - if let Some(identifier) = self.eat_identifier()? { return Ok(Some(ParsedValue::Variable(identifier))); } @@ -590,23 +595,6 @@ impl<'a> Parser<'a> { } } - fn parse_array_value(&mut self) -> ParseResult> { - if self.eat(Token::LeftBracket)? { - let values = self.parse_comma_separated_values()?; - self.eat_or_error(Token::RightBracket)?; - self.eat_or_error(Token::Keyword(Keyword::Of))?; - let types = self.parse_types()?; - let types_len = types.len(); - let values_len = values.len(); - Ok(Some(ParsedValue::Array { - typ: Type::Array(Arc::new(types), values_len / types_len), - values, - })) - } else { - Ok(None) - } - } - fn parse_types(&mut self) -> ParseResult> { if self.eat(Token::LeftParen)? 
{ let types = self.parse_comma_separated_types()?; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs index 9205353151e..60d398bf9d5 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -54,33 +54,36 @@ fn test_return_integer() { } #[test] -fn test_return_array() { +fn test_make_array() { let src = " acir(inline) fn main f0 { b0(): - return [Field 1] of Field + v1 = make_array [Field 1] : [Field; 1] + return v1 } "; assert_ssa_roundtrip(src); } #[test] -fn test_return_empty_array() { +fn test_make_empty_array() { let src = " acir(inline) fn main f0 { b0(): - return [] of Field + v0 = make_array [] : [Field; 0] + return v0 } "; assert_ssa_roundtrip(src); } #[test] -fn test_return_composite_array() { +fn test_make_composite_array() { let src = " acir(inline) fn main f0 { b0(): - return [Field 1, Field 2] of (Field, Field) + v2 = make_array [Field 1, Field 2] : [(Field, Field); 1] + return v2 } "; assert_ssa_roundtrip(src); @@ -103,8 +106,8 @@ fn test_multiple_blocks_and_jmp() { acir(inline) fn main f0 { b0(): jmp b1(Field 1) - b1(v1: Field): - return v1 + b1(v0: Field): + return v0 } "; assert_ssa_roundtrip(src); @@ -115,11 +118,11 @@ fn test_jmpif() { let src = " acir(inline) fn main f0 { b0(v0: Field): - jmpif v0 then: b1, else: b2 - b1(): - return + jmpif v0 then: b2, else: b1 b2(): return + b1(): + return } "; assert_ssa_roundtrip(src); @@ -151,7 +154,9 @@ fn test_call_multiple_return_values() { } acir(inline) fn foo f1 { b0(): - return [Field 1, Field 2, Field 3] of Field, [Field 4] of Field + v3 = make_array [Field 1, Field 2, Field 3] : [Field; 3] + v5 = make_array [Field 4] : [Field; 1] + return v3, v5 } "; assert_ssa_roundtrip(src); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs 
index d648f58de41..f663879e899 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs @@ -136,6 +136,7 @@ pub(crate) enum Keyword { Jmpif, Load, Lt, + MakeArray, MaxBitSize, Mod, Mul, @@ -190,6 +191,7 @@ impl Keyword { "jmpif" => Keyword::Jmpif, "load" => Keyword::Load, "lt" => Keyword::Lt, + "make_array" => Keyword::MakeArray, "max_bit_size" => Keyword::MaxBitSize, "mod" => Keyword::Mod, "mul" => Keyword::Mul, @@ -248,6 +250,7 @@ impl Display for Keyword { Keyword::Jmpif => write!(f, "jmpif"), Keyword::Load => write!(f, "load"), Keyword::Lt => write!(f, "lt"), + Keyword::MakeArray => write!(f, "make_array"), Keyword::MaxBitSize => write!(f, "max_bit_size"), Keyword::Mod => write!(f, "mod"), Keyword::Mul => write!(f, "mul"), diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 96e779482a4..c50f0a7f45c 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -291,7 +291,7 @@ impl<'a> FunctionContext<'a> { }); } - self.builder.array_constant(array, typ).into() + self.builder.insert_make_array(array, typ).into() } fn codegen_block(&mut self, block: &[Expression]) -> Result { @@ -466,6 +466,7 @@ impl<'a> FunctionContext<'a> { /// /// For example, the loop `for i in start .. end { body }` is codegen'd as: /// + /// ```text /// v0 = ... codegen start ... /// v1 = ... codegen end ... /// br loop_entry(v0) @@ -478,6 +479,7 @@ impl<'a> FunctionContext<'a> { /// br loop_entry(v4) /// loop_end(): /// ... This is the current insert point after codegen_for finishes ... 
+ /// ``` fn codegen_for(&mut self, for_expr: &ast::For) -> Result { let loop_entry = self.builder.insert_block(); let loop_body = self.builder.insert_block(); @@ -529,6 +531,7 @@ impl<'a> FunctionContext<'a> { /// /// For example, the expression `if cond { a } else { b }` is codegen'd as: /// + /// ```text /// v0 = ... codegen cond ... /// brif v0, then: then_block, else: else_block /// then_block(): @@ -539,16 +542,19 @@ impl<'a> FunctionContext<'a> { /// br end_if(v2) /// end_if(v3: ?): // Type of v3 matches the type of a and b /// ... This is the current insert point after codegen_if finishes ... + /// ``` /// /// As another example, the expression `if cond { a }` is codegen'd as: /// + /// ```text /// v0 = ... codegen cond ... - /// brif v0, then: then_block, else: end_block + /// brif v0, then: then_block, else: end_if /// then_block: /// v1 = ... codegen a ... /// br end_if() /// end_if: // No block parameter is needed. Without an else, the unit value is always returned. /// ... This is the current insert point after codegen_if finishes ... + /// ``` fn codegen_if(&mut self, if_expr: &ast::If) -> Result { let condition = self.codegen_non_tuple_expression(&if_expr.condition)?; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/ast/traits.rs b/noir/noir-repo/compiler/noirc_frontend/src/ast/traits.rs index 723df775b1e..475e3ff1be9 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/ast/traits.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/ast/traits.rs @@ -24,6 +24,7 @@ pub struct NoirTrait { pub items: Vec>, pub attributes: Vec, pub visibility: ItemVisibility, + pub is_alias: bool, } /// Any declaration inside the body of a trait that a user is required to @@ -77,6 +78,9 @@ pub struct NoirTraitImpl { pub where_clause: Vec, pub items: Vec>, + + /// true if generated at compile-time, e.g. 
from a trait alias + pub is_synthetic: bool, } /// Represents a simple trait constraint such as `where Foo: TraitY` @@ -130,12 +134,19 @@ impl Display for TypeImpl { } } +// TODO: display where clauses (follow-up issue) impl Display for NoirTrait { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let generics = vecmap(&self.generics, |generic| generic.to_string()); let generics = if generics.is_empty() { "".into() } else { generics.join(", ") }; write!(f, "trait {}{}", self.name, generics)?; + + if self.is_alias { + let bounds = vecmap(&self.bounds, |bound| bound.to_string()).join(" + "); + return write!(f, " = {};", bounds); + } + if !self.bounds.is_empty() { let bounds = vecmap(&self.bounds, |bound| bound.to_string()).join(" + "); write!(f, ": {}", bounds)?; @@ -222,6 +233,11 @@ impl Display for TraitBound { impl Display for NoirTraitImpl { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // Synthetic NoirTraitImpl's don't get printed + if self.is_synthetic { + return Ok(()); + } + write!(f, "impl")?; if !self.impl_generics.is_empty() { write!( diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/errors.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/errors.rs index a63601a4280..a6b6120986e 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/errors.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/errors.rs @@ -208,7 +208,7 @@ pub enum TypeCheckError { #[derive(Debug, Clone, PartialEq, Eq)] pub struct NoMatchingImplFoundError { - constraints: Vec<(Type, String)>, + pub(crate) constraints: Vec<(Type, String)>, pub span: Span, } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/parser/errors.rs b/noir/noir-repo/compiler/noirc_frontend/src/parser/errors.rs index 63471efac43..bcb4ce1c616 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/parser/errors.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/parser/errors.rs @@ -95,6 +95,8 @@ pub enum 
ParserErrorReason { AssociatedTypesNotAllowedInPaths, #[error("Associated types are not allowed on a method call")] AssociatedTypesNotAllowedInMethodCalls, + #[error("Empty trait alias")] + EmptyTraitAlias, #[error( "Wrong number of arguments for attribute `{}`. Expected {}, found {}", name, diff --git a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/impls.rs b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/impls.rs index 9215aec2742..8e6b3bae0e9 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/impls.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/impls.rs @@ -113,6 +113,7 @@ impl<'a> Parser<'a> { let object_type = self.parse_type_or_error(); let where_clause = self.parse_where_clause(); let items = self.parse_trait_impl_body(); + let is_synthetic = false; NoirTraitImpl { impl_generics, @@ -121,6 +122,7 @@ impl<'a> Parser<'a> { object_type, where_clause, items, + is_synthetic, } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/item.rs b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/item.rs index 4fbcd7abac5..ce712b559d8 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/item.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/item.rs @@ -1,3 +1,5 @@ +use iter_extended::vecmap; + use crate::{ parser::{labels::ParsingRuleLabel, Item, ItemKind}, token::{Keyword, Token}, @@ -13,31 +15,32 @@ impl<'a> Parser<'a> { } pub(crate) fn parse_module_items(&mut self, nested: bool) -> Vec { - self.parse_many("items", without_separator(), |parser| { + self.parse_many_to_many("items", without_separator(), |parser| { parser.parse_module_item_in_list(nested) }) } - fn parse_module_item_in_list(&mut self, nested: bool) -> Option { + fn parse_module_item_in_list(&mut self, nested: bool) -> Vec { loop { // We only break out of the loop on `}` if we are inside a `mod { ..` if nested && self.at(Token::RightBrace) { - return None; + return vec![]; } // We always 
break on EOF (we don't error because if we are inside `mod { ..` // the outer parsing logic will error instead) if self.at_eof() { - return None; + return vec![]; } - let Some(item) = self.parse_item() else { + let parsed_items = self.parse_item(); + if parsed_items.is_empty() { // If we couldn't parse an item we check which token we got match self.token.token() { Token::RightBrace if nested => { - return None; + return vec![]; } - Token::EOF => return None, + Token::EOF => return vec![], _ => (), } @@ -47,7 +50,7 @@ impl<'a> Parser<'a> { continue; }; - return Some(item); + return parsed_items; } } @@ -85,15 +88,19 @@ impl<'a> Parser<'a> { } /// Item = OuterDocComments ItemKind - fn parse_item(&mut self) -> Option { + fn parse_item(&mut self) -> Vec { let start_span = self.current_token_span; let doc_comments = self.parse_outer_doc_comments(); - let kind = self.parse_item_kind()?; + let kinds = self.parse_item_kind(); let span = self.span_since(start_span); - Some(Item { kind, span, doc_comments }) + vecmap(kinds, |kind| Item { kind, span, doc_comments: doc_comments.clone() }) } + /// This method returns one 'ItemKind' in the majority of cases. + /// The current exception is when parsing a trait alias, + /// which returns both the trait and the impl. 
+ /// /// ItemKind /// = InnerAttribute /// | Attributes Modifiers @@ -106,9 +113,9 @@ impl<'a> Parser<'a> { /// | TypeAlias /// | Function /// ) - fn parse_item_kind(&mut self) -> Option { + fn parse_item_kind(&mut self) -> Vec { if let Some(kind) = self.parse_inner_attribute() { - return Some(ItemKind::InnerAttribute(kind)); + return vec![ItemKind::InnerAttribute(kind)]; } let start_span = self.current_token_span; @@ -122,78 +129,81 @@ impl<'a> Parser<'a> { self.comptime_mutable_and_unconstrained_not_applicable(modifiers); let use_tree = self.parse_use_tree(); - return Some(ItemKind::Import(use_tree, modifiers.visibility)); + return vec![ItemKind::Import(use_tree, modifiers.visibility)]; } if let Some(is_contract) = self.eat_mod_or_contract() { self.comptime_mutable_and_unconstrained_not_applicable(modifiers); - return Some(self.parse_mod_or_contract(attributes, is_contract, modifiers.visibility)); + return vec![self.parse_mod_or_contract(attributes, is_contract, modifiers.visibility)]; } if self.eat_keyword(Keyword::Struct) { self.comptime_mutable_and_unconstrained_not_applicable(modifiers); - return Some(ItemKind::Struct(self.parse_struct( + return vec![ItemKind::Struct(self.parse_struct( attributes, modifiers.visibility, start_span, - ))); + ))]; } if self.eat_keyword(Keyword::Impl) { self.comptime_mutable_and_unconstrained_not_applicable(modifiers); - return Some(match self.parse_impl() { + return vec![match self.parse_impl() { Impl::Impl(type_impl) => ItemKind::Impl(type_impl), Impl::TraitImpl(noir_trait_impl) => ItemKind::TraitImpl(noir_trait_impl), - }); + }]; } if self.eat_keyword(Keyword::Trait) { self.comptime_mutable_and_unconstrained_not_applicable(modifiers); - return Some(ItemKind::Trait(self.parse_trait( - attributes, - modifiers.visibility, - start_span, - ))); + let (noir_trait, noir_impl) = + self.parse_trait(attributes, modifiers.visibility, start_span); + let mut output = vec![ItemKind::Trait(noir_trait)]; + if let Some(noir_impl) = noir_impl 
{ + output.push(ItemKind::TraitImpl(noir_impl)); + } + + return output; } if self.eat_keyword(Keyword::Global) { self.unconstrained_not_applicable(modifiers); - return Some(ItemKind::Global( + return vec![ItemKind::Global( self.parse_global( attributes, modifiers.comptime.is_some(), modifiers.mutable.is_some(), ), modifiers.visibility, - )); + )]; } if self.eat_keyword(Keyword::Type) { self.comptime_mutable_and_unconstrained_not_applicable(modifiers); - return Some(ItemKind::TypeAlias( + return vec![ItemKind::TypeAlias( self.parse_type_alias(modifiers.visibility, start_span), - )); + )]; } if self.eat_keyword(Keyword::Fn) { self.mutable_not_applicable(modifiers); - return Some(ItemKind::Function(self.parse_function( + return vec![ItemKind::Function(self.parse_function( attributes, modifiers.visibility, modifiers.comptime.is_some(), modifiers.unconstrained.is_some(), false, // allow_self - ))); + ))]; } - None + vec![] } fn eat_mod_or_contract(&mut self) -> Option { diff --git a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/parse_many.rs b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/parse_many.rs index ea4dfe97122..be156eb1618 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/parse_many.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/parse_many.rs @@ -18,6 +18,19 @@ impl<'a> Parser<'a> { self.parse_many_return_trailing_separator_if_any(items, separated_by, f).0 } + /// parse_many, where the given function `f` may return multiple results + pub(super) fn parse_many_to_many( + &mut self, + items: &'static str, + separated_by: SeparatedBy, + f: F, + ) -> Vec + where + F: FnMut(&mut Parser<'a>) -> Vec, + { + self.parse_many_to_many_return_trailing_separator_if_any(items, separated_by, f).0 + } + /// Same as parse_many, but returns a bool indicating whether a trailing separator was found. 
pub(super) fn parse_many_return_trailing_separator_if_any( &mut self, @@ -27,6 +40,26 @@ impl<'a> Parser<'a> { ) -> (Vec, bool) where F: FnMut(&mut Parser<'a>) -> Option, + { + let f = |x: &mut Parser<'a>| { + if let Some(result) = f(x) { + vec![result] + } else { + vec![] + } + }; + self.parse_many_to_many_return_trailing_separator_if_any(items, separated_by, f) + } + + /// Same as parse_many, but returns a bool indicating whether a trailing separator was found. + fn parse_many_to_many_return_trailing_separator_if_any( + &mut self, + items: &'static str, + separated_by: SeparatedBy, + mut f: F, + ) -> (Vec, bool) + where + F: FnMut(&mut Parser<'a>) -> Vec, { let mut elements: Vec = Vec::new(); let mut trailing_separator = false; @@ -38,12 +71,13 @@ impl<'a> Parser<'a> { } let start_span = self.current_token_span; - let Some(element) = f(self) else { + let mut new_elements = f(self); + if new_elements.is_empty() { if let Some(end) = &separated_by.until { self.eat(end.clone()); } break; - }; + } if let Some(separator) = &separated_by.token { if !trailing_separator && !elements.is_empty() { @@ -51,7 +85,7 @@ impl<'a> Parser<'a> { } } - elements.push(element); + elements.append(&mut new_elements); trailing_separator = if let Some(separator) = &separated_by.token { self.eat(separator.clone()) diff --git a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/traits.rs b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/traits.rs index fead6a34c82..e03b629e9ea 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/traits.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/parser/parser/traits.rs @@ -1,9 +1,14 @@ +use iter_extended::vecmap; + use noirc_errors::Span; -use crate::ast::{Documented, ItemVisibility, NoirTrait, Pattern, TraitItem, UnresolvedType}; +use crate::ast::{ + Documented, GenericTypeArg, GenericTypeArgs, ItemVisibility, NoirTrait, Path, Pattern, + TraitItem, UnresolvedGeneric, UnresolvedTraitConstraint, UnresolvedType, +}; use 
crate::{ ast::{Ident, UnresolvedTypeData}, - parser::{labels::ParsingRuleLabel, ParserErrorReason}, + parser::{labels::ParsingRuleLabel, NoirTraitImpl, ParserErrorReason}, token::{Attribute, Keyword, SecondaryAttribute, Token}, }; @@ -12,34 +17,117 @@ use super::Parser; impl<'a> Parser<'a> { /// Trait = 'trait' identifier Generics ( ':' TraitBounds )? WhereClause TraitBody + /// | 'trait' identifier Generics '=' TraitBounds WhereClause ';' pub(crate) fn parse_trait( &mut self, attributes: Vec<(Attribute, Span)>, visibility: ItemVisibility, start_span: Span, - ) -> NoirTrait { + ) -> (NoirTrait, Option) { let attributes = self.validate_secondary_attributes(attributes); let Some(name) = self.eat_ident() else { self.expected_identifier(); - return empty_trait(attributes, visibility, self.span_since(start_span)); + let noir_trait = empty_trait(attributes, visibility, self.span_since(start_span)); + let no_implicit_impl = None; + return (noir_trait, no_implicit_impl); }; let generics = self.parse_generics(); - let bounds = if self.eat_colon() { self.parse_trait_bounds() } else { Vec::new() }; - let where_clause = self.parse_where_clause(); - let items = self.parse_trait_body(); - NoirTrait { + // Trait aliases: + // trait Foo<..> = A + B + E where ..; + let (bounds, where_clause, items, is_alias) = if self.eat_assign() { + let bounds = self.parse_trait_bounds(); + + if bounds.is_empty() { + self.push_error(ParserErrorReason::EmptyTraitAlias, self.previous_token_span); + } + + let where_clause = self.parse_where_clause(); + let items = Vec::new(); + if !self.eat_semicolon() { + self.expected_token(Token::Semicolon); + } + + let is_alias = true; + (bounds, where_clause, items, is_alias) + } else { + let bounds = if self.eat_colon() { self.parse_trait_bounds() } else { Vec::new() }; + let where_clause = self.parse_where_clause(); + let items = self.parse_trait_body(); + let is_alias = false; + (bounds, where_clause, items, is_alias) + }; + + let span = 
self.span_since(start_span); + + let noir_impl = is_alias.then(|| { + let object_type_ident = Ident::new("#T".to_string(), span); + let object_type_path = Path::from_ident(object_type_ident.clone()); + let object_type_generic = UnresolvedGeneric::Variable(object_type_ident); + + let is_synthesized = true; + let object_type = UnresolvedType { + typ: UnresolvedTypeData::Named(object_type_path, vec![].into(), is_synthesized), + span, + }; + + let mut impl_generics = generics.clone(); + impl_generics.push(object_type_generic); + + let trait_name = Path::from_ident(name.clone()); + let trait_generics: GenericTypeArgs = vecmap(generics.clone(), |generic| { + let is_synthesized = true; + let generic_type = UnresolvedType { + typ: UnresolvedTypeData::Named( + Path::from_ident(generic.ident().clone()), + vec![].into(), + is_synthesized, + ), + span, + }; + + GenericTypeArg::Ordered(generic_type) + }) + .into(); + + // bounds from trait + let mut where_clause = where_clause.clone(); + for bound in bounds.clone() { + where_clause.push(UnresolvedTraitConstraint { + typ: object_type.clone(), + trait_bound: bound, + }); + } + + let items = vec![]; + let is_synthetic = true; + + NoirTraitImpl { + impl_generics, + trait_name, + trait_generics, + object_type, + where_clause, + items, + is_synthetic, + } + }); + + let noir_trait = NoirTrait { name, generics, bounds, where_clause, - span: self.span_since(start_span), + span, items, attributes, visibility, - } + is_alias, + }; + + (noir_trait, noir_impl) } /// TraitBody = '{' ( OuterDocComments TraitItem )* '}' @@ -188,28 +276,51 @@ fn empty_trait( items: Vec::new(), attributes, visibility, + is_alias: false, } } #[cfg(test)] mod tests { use crate::{ - ast::{NoirTrait, TraitItem}, + ast::{NoirTrait, NoirTraitImpl, TraitItem}, parser::{ - parser::{parse_program, tests::expect_no_errors}, + parser::{parse_program, tests::expect_no_errors, ParserErrorReason}, ItemKind, }, }; - fn parse_trait_no_errors(src: &str) -> NoirTrait { + fn 
parse_trait_opt_impl_no_errors(src: &str) -> (NoirTrait, Option) { let (mut module, errors) = parse_program(src); expect_no_errors(&errors); - assert_eq!(module.items.len(), 1); - let item = module.items.remove(0); + let (item, impl_item) = if module.items.len() == 2 { + let item = module.items.remove(0); + let impl_item = module.items.remove(0); + (item, Some(impl_item)) + } else { + assert_eq!(module.items.len(), 1); + let item = module.items.remove(0); + (item, None) + }; let ItemKind::Trait(noir_trait) = item.kind else { panic!("Expected trait"); }; - noir_trait + let noir_trait_impl = impl_item.map(|impl_item| { + let ItemKind::TraitImpl(noir_trait_impl) = impl_item.kind else { + panic!("Expected impl"); + }; + noir_trait_impl + }); + (noir_trait, noir_trait_impl) + } + + fn parse_trait_with_impl_no_errors(src: &str) -> (NoirTrait, NoirTraitImpl) { + let (noir_trait, noir_trait_impl) = parse_trait_opt_impl_no_errors(src); + (noir_trait, noir_trait_impl.expect("expected a NoirTraitImpl")) + } + + fn parse_trait_no_errors(src: &str) -> NoirTrait { + parse_trait_opt_impl_no_errors(src).0 } #[test] @@ -220,6 +331,15 @@ mod tests { assert!(noir_trait.generics.is_empty()); assert!(noir_trait.where_clause.is_empty()); assert!(noir_trait.items.is_empty()); + assert!(!noir_trait.is_alias); + } + + #[test] + fn parse_empty_trait_alias() { + let src = "trait Foo = ;"; + let (_module, errors) = parse_program(src); + assert_eq!(errors.len(), 2); + assert_eq!(errors[1].reason(), Some(ParserErrorReason::EmptyTraitAlias).as_ref()); } #[test] @@ -230,6 +350,50 @@ mod tests { assert_eq!(noir_trait.generics.len(), 2); assert!(noir_trait.where_clause.is_empty()); assert!(noir_trait.items.is_empty()); + assert!(!noir_trait.is_alias); + } + + #[test] + fn parse_trait_alias_with_generics() { + let src = "trait Foo = Bar + Baz;"; + let (noir_trait_alias, noir_trait_impl) = parse_trait_with_impl_no_errors(src); + assert_eq!(noir_trait_alias.name.to_string(), "Foo"); + 
assert_eq!(noir_trait_alias.generics.len(), 2); + assert_eq!(noir_trait_alias.bounds.len(), 2); + assert_eq!(noir_trait_alias.bounds[0].to_string(), "Bar"); + assert_eq!(noir_trait_alias.bounds[1].to_string(), "Baz"); + assert!(noir_trait_alias.where_clause.is_empty()); + assert!(noir_trait_alias.items.is_empty()); + assert!(noir_trait_alias.is_alias); + + assert_eq!(noir_trait_impl.trait_name.to_string(), "Foo"); + assert_eq!(noir_trait_impl.impl_generics.len(), 3); + assert_eq!(noir_trait_impl.trait_generics.ordered_args.len(), 2); + assert_eq!(noir_trait_impl.where_clause.len(), 2); + assert_eq!(noir_trait_alias.bounds.len(), 2); + assert_eq!(noir_trait_alias.bounds[0].to_string(), "Bar"); + assert_eq!(noir_trait_alias.bounds[1].to_string(), "Baz"); + assert!(noir_trait_impl.items.is_empty()); + assert!(noir_trait_impl.is_synthetic); + + // Equivalent to + let src = "trait Foo: Bar + Baz {}"; + let noir_trait = parse_trait_no_errors(src); + assert_eq!(noir_trait.name.to_string(), noir_trait_alias.name.to_string()); + assert_eq!(noir_trait.generics.len(), noir_trait_alias.generics.len()); + assert_eq!(noir_trait.bounds.len(), noir_trait_alias.bounds.len()); + assert_eq!(noir_trait.bounds[0].to_string(), noir_trait_alias.bounds[0].to_string()); + assert_eq!(noir_trait.where_clause.is_empty(), noir_trait_alias.where_clause.is_empty()); + assert_eq!(noir_trait.items.is_empty(), noir_trait_alias.items.is_empty()); + assert!(!noir_trait.is_alias); + } + + #[test] + fn parse_empty_trait_alias_with_generics() { + let src = "trait Foo = ;"; + let (_module, errors) = parse_program(src); + assert_eq!(errors.len(), 2); + assert_eq!(errors[1].reason(), Some(ParserErrorReason::EmptyTraitAlias).as_ref()); } #[test] @@ -240,6 +404,54 @@ mod tests { assert_eq!(noir_trait.generics.len(), 2); assert_eq!(noir_trait.where_clause.len(), 1); assert!(noir_trait.items.is_empty()); + assert!(!noir_trait.is_alias); + } + + #[test] + fn parse_trait_alias_with_where_clause() { + let src = 
"trait Foo = Bar + Baz where A: Z;"; + let (noir_trait_alias, noir_trait_impl) = parse_trait_with_impl_no_errors(src); + assert_eq!(noir_trait_alias.name.to_string(), "Foo"); + assert_eq!(noir_trait_alias.generics.len(), 2); + assert_eq!(noir_trait_alias.bounds.len(), 2); + assert_eq!(noir_trait_alias.bounds[0].to_string(), "Bar"); + assert_eq!(noir_trait_alias.bounds[1].to_string(), "Baz"); + assert_eq!(noir_trait_alias.where_clause.len(), 1); + assert!(noir_trait_alias.items.is_empty()); + assert!(noir_trait_alias.is_alias); + + assert_eq!(noir_trait_impl.trait_name.to_string(), "Foo"); + assert_eq!(noir_trait_impl.impl_generics.len(), 3); + assert_eq!(noir_trait_impl.trait_generics.ordered_args.len(), 2); + assert_eq!(noir_trait_impl.where_clause.len(), 3); + assert_eq!(noir_trait_impl.where_clause[0].to_string(), "A: Z"); + assert_eq!(noir_trait_impl.where_clause[1].to_string(), "#T: Bar"); + assert_eq!(noir_trait_impl.where_clause[2].to_string(), "#T: Baz"); + assert!(noir_trait_impl.items.is_empty()); + assert!(noir_trait_impl.is_synthetic); + + // Equivalent to + let src = "trait Foo: Bar + Baz where A: Z {}"; + let noir_trait = parse_trait_no_errors(src); + assert_eq!(noir_trait.name.to_string(), noir_trait_alias.name.to_string()); + assert_eq!(noir_trait.generics.len(), noir_trait_alias.generics.len()); + assert_eq!(noir_trait.bounds.len(), noir_trait_alias.bounds.len()); + assert_eq!(noir_trait.bounds[0].to_string(), noir_trait_alias.bounds[0].to_string()); + assert_eq!(noir_trait.where_clause.len(), noir_trait_alias.where_clause.len()); + assert_eq!( + noir_trait.where_clause[0].to_string(), + noir_trait_alias.where_clause[0].to_string() + ); + assert_eq!(noir_trait.items.is_empty(), noir_trait_alias.items.is_empty()); + assert!(!noir_trait.is_alias); + } + + #[test] + fn parse_empty_trait_alias_with_where_clause() { + let src = "trait Foo = where A: Z;"; + let (_module, errors) = parse_program(src); + assert_eq!(errors.len(), 2); + 
assert_eq!(errors[1].reason(), Some(ParserErrorReason::EmptyTraitAlias).as_ref()); } #[test] @@ -253,6 +465,7 @@ mod tests { panic!("Expected type"); }; assert_eq!(name.to_string(), "Elem"); + assert!(!noir_trait.is_alias); } #[test] @@ -268,6 +481,7 @@ mod tests { assert_eq!(name.to_string(), "x"); assert_eq!(typ.to_string(), "Field"); assert_eq!(default_value.unwrap().to_string(), "1"); + assert!(!noir_trait.is_alias); } #[test] @@ -281,6 +495,7 @@ mod tests { panic!("Expected function"); }; assert!(body.is_none()); + assert!(!noir_trait.is_alias); } #[test] @@ -294,6 +509,7 @@ mod tests { panic!("Expected function"); }; assert!(body.is_some()); + assert!(!noir_trait.is_alias); } #[test] @@ -306,5 +522,39 @@ mod tests { assert_eq!(noir_trait.bounds[1].to_string(), "Baz"); assert_eq!(noir_trait.to_string(), "trait Foo: Bar + Baz {\n}"); + assert!(!noir_trait.is_alias); + } + + #[test] + fn parse_trait_alias() { + let src = "trait Foo = Bar + Baz;"; + let (noir_trait_alias, noir_trait_impl) = parse_trait_with_impl_no_errors(src); + assert_eq!(noir_trait_alias.bounds.len(), 2); + + assert_eq!(noir_trait_alias.bounds[0].to_string(), "Bar"); + assert_eq!(noir_trait_alias.bounds[1].to_string(), "Baz"); + + assert_eq!(noir_trait_alias.to_string(), "trait Foo = Bar + Baz;"); + assert!(noir_trait_alias.is_alias); + + assert_eq!(noir_trait_impl.trait_name.to_string(), "Foo"); + assert_eq!(noir_trait_impl.impl_generics.len(), 1); + assert_eq!(noir_trait_impl.trait_generics.ordered_args.len(), 0); + assert_eq!(noir_trait_impl.where_clause.len(), 2); + assert_eq!(noir_trait_impl.where_clause[0].to_string(), "#T: Bar"); + assert_eq!(noir_trait_impl.where_clause[1].to_string(), "#T: Baz"); + assert!(noir_trait_impl.items.is_empty()); + assert!(noir_trait_impl.is_synthetic); + + // Equivalent to + let src = "trait Foo: Bar + Baz {}"; + let noir_trait = parse_trait_no_errors(src); + assert_eq!(noir_trait.name.to_string(), noir_trait_alias.name.to_string()); + 
assert_eq!(noir_trait.generics.len(), noir_trait_alias.generics.len()); + assert_eq!(noir_trait.bounds.len(), noir_trait_alias.bounds.len()); + assert_eq!(noir_trait.bounds[0].to_string(), noir_trait_alias.bounds[0].to_string()); + assert_eq!(noir_trait.where_clause.is_empty(), noir_trait_alias.where_clause.is_empty()); + assert_eq!(noir_trait.items.is_empty(), noir_trait_alias.items.is_empty()); + assert!(!noir_trait.is_alias); } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/tests/traits.rs b/noir/noir-repo/compiler/noirc_frontend/src/tests/traits.rs index 0adf5c90bea..811a32bab86 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/tests/traits.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/tests/traits.rs @@ -1,5 +1,6 @@ use crate::hir::def_collector::dc_crate::CompilationError; use crate::hir::resolution::errors::ResolverError; +use crate::hir::type_check::TypeCheckError; use crate::tests::{get_program_errors, get_program_with_maybe_parser_errors}; use super::assert_no_errors; @@ -320,6 +321,251 @@ fn regression_6314_double_inheritance() { assert_no_errors(src); } +#[test] +fn trait_alias_single_member() { + let src = r#" + trait Foo { + fn foo(self) -> Self; + } + + trait Baz = Foo; + + impl Foo for Field { + fn foo(self) -> Self { self } + } + + fn baz(x: T) -> T where T: Baz { + x.foo() + } + + fn main() { + let x: Field = 0; + let _ = baz(x); + } + "#; + assert_no_errors(src); +} + +#[test] +fn trait_alias_two_members() { + let src = r#" + pub trait Foo { + fn foo(self) -> Self; + } + + pub trait Bar { + fn bar(self) -> Self; + } + + pub trait Baz = Foo + Bar; + + fn baz(x: T) -> T where T: Baz { + x.foo().bar() + } + + impl Foo for Field { + fn foo(self) -> Self { + self + 1 + } + } + + impl Bar for Field { + fn bar(self) -> Self { + self + 2 + } + } + + fn main() { + assert(0.foo().bar() == baz(0)); + }"#; + + assert_no_errors(src); +} + +#[test] +fn trait_alias_polymorphic_inheritance() { + let src = r#" + trait Foo { + fn foo(self) 
-> Self; + } + + trait Bar { + fn bar(self) -> T; + } + + trait Baz = Foo + Bar; + + fn baz(x: T) -> U where T: Baz { + x.foo().bar() + } + + impl Foo for Field { + fn foo(self) -> Self { + self + 1 + } + } + + impl Bar for Field { + fn bar(self) -> bool { + true + } + } + + fn main() { + assert(0.foo().bar() == baz(0)); + }"#; + + assert_no_errors(src); +} + +// TODO(https://github.com/noir-lang/noir/issues/6467): currently fails with the +// same errors as the desugared version +#[test] +fn trait_alias_polymorphic_where_clause() { + let src = r#" + trait Foo { + fn foo(self) -> Self; + } + + trait Bar { + fn bar(self) -> T; + } + + trait Baz { + fn baz(self) -> bool; + } + + trait Qux = Foo + Bar where T: Baz; + + fn qux(x: T) -> bool where T: Qux { + x.foo().bar().baz() + } + + impl Foo for Field { + fn foo(self) -> Self { + self + 1 + } + } + + impl Bar for Field { + fn bar(self) -> bool { + true + } + } + + impl Baz for bool { + fn baz(self) -> bool { + self + } + } + + fn main() { + assert(0.foo().bar().baz() == qux(0)); + } + "#; + + // TODO(https://github.com/noir-lang/noir/issues/6467) + // assert_no_errors(src); + let errors = get_program_errors(src); + assert_eq!(errors.len(), 2); + + match &errors[0].0 { + CompilationError::TypeError(TypeCheckError::UnresolvedMethodCall { + method_name, .. 
+ }) => { + assert_eq!(method_name, "baz"); + } + other => { + panic!("expected UnresolvedMethodCall, but found {:?}", other); + } + } + + match &errors[1].0 { + CompilationError::TypeError(TypeCheckError::NoMatchingImplFound(err)) => { + assert_eq!(err.constraints.len(), 2); + assert_eq!(err.constraints[0].1, "Baz"); + assert_eq!(err.constraints[1].1, "Qux<_>"); + } + other => { + panic!("expected NoMatchingImplFound, but found {:?}", other); + } + } +} + +// TODO(https://github.com/noir-lang/noir/issues/6467): currently failing, so +// this just tests that the trait alias has an equivalent error to the expected +// desugared version +#[test] +fn trait_alias_with_where_clause_has_equivalent_errors() { + let src = r#" + trait Bar { + fn bar(self) -> Self; + } + + trait Baz { + fn baz(self) -> bool; + } + + trait Qux: Bar where T: Baz {} + + impl Qux for U where + U: Bar, + T: Baz, + {} + + pub fn qux(x: T, _: U) -> bool where U: Qux { + x.baz() + } + + fn main() {} + "#; + + let alias_src = r#" + trait Bar { + fn bar(self) -> Self; + } + + trait Baz { + fn baz(self) -> bool; + } + + trait Qux = Bar where T: Baz; + + pub fn qux(x: T, _: U) -> bool where U: Qux { + x.baz() + } + + fn main() {} + "#; + + let errors = get_program_errors(src); + let alias_errors = get_program_errors(alias_src); + + assert_eq!(errors.len(), 1); + assert_eq!(alias_errors.len(), 1); + + match (&errors[0].0, &alias_errors[0].0) { + ( + CompilationError::TypeError(TypeCheckError::UnresolvedMethodCall { + method_name, + object_type, + .. + }), + CompilationError::TypeError(TypeCheckError::UnresolvedMethodCall { + method_name: alias_method_name, + object_type: alias_object_type, + .. 
+ }), + ) => { + assert_eq!(method_name, alias_method_name); + assert_eq!(object_type, alias_object_type); + } + other => { + panic!("expected UnresolvedMethodCall, but found {:?}", other); + } + } +} + #[test] fn removes_assumed_parent_traits_after_function_ends() { let src = r#" diff --git a/noir/noir-repo/docs/docs/noir/concepts/traits.md b/noir/noir-repo/docs/docs/noir/concepts/traits.md index 9da00a77587..b6c0a886eb0 100644 --- a/noir/noir-repo/docs/docs/noir/concepts/traits.md +++ b/noir/noir-repo/docs/docs/noir/concepts/traits.md @@ -490,12 +490,95 @@ trait CompSciStudent: Programmer + Student { } ``` +### Trait Aliases + +Similar to the proposed Rust feature for [trait aliases](https://github.com/rust-lang/rust/blob/4d215e2426d52ca8d1af166d5f6b5e172afbff67/src/doc/unstable-book/src/language-features/trait-alias.md), +Noir supports aliasing one or more traits and using those aliases wherever +traits would normally be used. + +```rust +trait Foo { + fn foo(self) -> Self; +} + +trait Bar { + fn bar(self) -> Self; +} + +// Equivalent to: +// trait Baz: Foo + Bar {} +// +// impl Baz for T where T: Foo + Bar {} +trait Baz = Foo + Bar; + +// We can use `Baz` to refer to `Foo + Bar` +fn baz(x: T) -> T where T: Baz { + x.foo().bar() +} +``` + +#### Generic Trait Aliases + +Trait aliases can also be generic by placing the generic arguments after the +trait name. These generics are in scope of every item within the trait alias. + +```rust +trait Foo { + fn foo(self) -> Self; +} + +trait Bar { + fn bar(self) -> T; +} + +// Equivalent to: +// trait Baz: Foo + Bar {} +// +// impl Baz for U where U: Foo + Bar {} +trait Baz = Foo + Bar; +``` + +#### Trait Alias Where Clauses + +Trait aliases support where clauses to add trait constraints to any of their +generic arguments, e.g. ensuring `T: Baz` for a trait alias `Qux`. 
+ +```rust +trait Foo { + fn foo(self) -> Self; +} + +trait Bar { + fn bar(self) -> T; +} + +trait Baz { + fn baz(self) -> bool; +} + +// Equivalent to: +// trait Qux: Foo + Bar where T: Baz {} +// +// impl Qux for U where +// U: Foo + Bar, +// T: Baz, +// {} +trait Qux = Foo + Bar where T: Baz; +``` + +Note that while trait aliases support where clauses, +the equivalent traits can fail due to [#6467](https://github.com/noir-lang/noir/issues/6467) + ### Visibility -By default, like functions, traits are private to the module they exist in. You can use `pub` -to make the trait public or `pub(crate)` to make it public to just its crate: +By default, like functions, traits and trait aliases are private to the module +they exist in. You can use `pub` to make the trait public or `pub(crate)` to make +it public to just its crate: ```rust // This trait is now public pub trait Trait {} -``` \ No newline at end of file + +// This trait alias is now public +pub trait Baz = Foo + Bar; +``` diff --git a/noir/noir-repo/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml b/noir/noir-repo/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml new file mode 100644 index 00000000000..9c9bd8de04a --- /dev/null +++ b/noir/noir-repo/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "embedded_curve_msm_simplification" +type = "bin" +authors = [""] + +[dependencies] diff --git a/noir/noir-repo/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr new file mode 100644 index 00000000000..e5aaa0f4d15 --- /dev/null +++ b/noir/noir-repo/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr @@ -0,0 +1,12 @@ +fn main() { + let pub_x = 0x0000000000000000000000000000000000000000000000000000000000000001; + let pub_y = 
0x0000000000000002cf135e7506a45d632d270d45f1181294833fc48d823f272c; + + let g1_y = 17631683881184975370165255887551781615748388533673675138860; + let g1 = std::embedded_curve_ops::EmbeddedCurvePoint { x: 1, y: g1_y, is_infinite: false }; + let scalar = std::embedded_curve_ops::EmbeddedCurveScalar { lo: 1, hi: 0 }; + // Test that multi_scalar_mul correctly derives the public key + let res = std::embedded_curve_ops::multi_scalar_mul([g1], [scalar]); + assert(res.x == pub_x); + assert(res.y == pub_y); +} diff --git a/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/Nargo.toml b/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/Nargo.toml new file mode 100644 index 00000000000..f7076311e1d --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "sha256_brillig_performance_regression" +type = "bin" +authors = [""] +compiler_version = ">=0.33.0" + +[dependencies] diff --git a/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/Prover.toml b/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/Prover.toml new file mode 100644 index 00000000000..5bb7f354257 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/Prover.toml @@ -0,0 +1,16 @@ +input_amount = "1" +minimum_output_amount = "2" +secret_hash_for_L1_to_l2_message = "3" +uniswap_fee_tier = "4" + +[aztec_recipient] +inner = "5" + +[caller_on_L1] +inner = "6" + +[input_asset_bridge_portal_address] +inner = "7" + +[output_asset_bridge_portal_address] +inner = "8" diff --git a/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/src/main.nr b/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/src/main.nr new file mode 100644 index 00000000000..42cc6d4ff3b --- /dev/null +++ 
b/noir/noir-repo/test_programs/execution_success/sha256_brillig_performance_regression/src/main.nr @@ -0,0 +1,104 @@ +// Performance regression extracted from an aztec protocol contract. + +unconstrained fn main( + input_asset_bridge_portal_address: EthAddress, + input_amount: Field, + uniswap_fee_tier: Field, + output_asset_bridge_portal_address: EthAddress, + minimum_output_amount: Field, + aztec_recipient: AztecAddress, + secret_hash_for_L1_to_l2_message: Field, + caller_on_L1: EthAddress, +) -> pub Field { + let mut hash_bytes = [0; 260]; // 8 fields of 32 bytes each + 4 bytes fn selector + let input_token_portal_bytes: [u8; 32] = + input_asset_bridge_portal_address.to_field().to_be_bytes(); + let in_amount_bytes: [u8; 32] = input_amount.to_be_bytes(); + let uniswap_fee_tier_bytes: [u8; 32] = uniswap_fee_tier.to_be_bytes(); + let output_token_portal_bytes: [u8; 32] = + output_asset_bridge_portal_address.to_field().to_be_bytes(); + let amount_out_min_bytes: [u8; 32] = minimum_output_amount.to_be_bytes(); + let aztec_recipient_bytes: [u8; 32] = aztec_recipient.to_field().to_be_bytes(); + let secret_hash_for_L1_to_l2_message_bytes: [u8; 32] = + secret_hash_for_L1_to_l2_message.to_be_bytes(); + let caller_on_L1_bytes: [u8; 32] = caller_on_L1.to_field().to_be_bytes(); + + // The purpose of including the following selector is to make the message unique to that specific call. Note that + // it has nothing to do with calling the function. 
+ let selector = comptime { + std::hash::keccak256( + "swap_public(address,uint256,uint24,address,uint256,bytes32,bytes32,address)".as_bytes(), + 75, + ) + }; + + hash_bytes[0] = selector[0]; + hash_bytes[1] = selector[1]; + hash_bytes[2] = selector[2]; + hash_bytes[3] = selector[3]; + + for i in 0..32 { + hash_bytes[i + 4] = input_token_portal_bytes[i]; + hash_bytes[i + 36] = in_amount_bytes[i]; + hash_bytes[i + 68] = uniswap_fee_tier_bytes[i]; + hash_bytes[i + 100] = output_token_portal_bytes[i]; + hash_bytes[i + 132] = amount_out_min_bytes[i]; + hash_bytes[i + 164] = aztec_recipient_bytes[i]; + hash_bytes[i + 196] = secret_hash_for_L1_to_l2_message_bytes[i]; + hash_bytes[i + 228] = caller_on_L1_bytes[i]; + } + + let content_hash = sha256_to_field(hash_bytes); + content_hash +} + +// Convert a 32 byte array to a field element by truncating the final byte +pub fn field_from_bytes_32_trunc(bytes32: [u8; 32]) -> Field { + // Convert it to a field element + let mut v = 1; + let mut high = 0 as Field; + let mut low = 0 as Field; + + for i in 0..15 { + // covers bytes 16..30 (31 is truncated and ignored) + low = low + (bytes32[15 + 15 - i] as Field) * v; + v = v * 256; + // covers bytes 0..14 + high = high + (bytes32[14 - i] as Field) * v; + } + // covers byte 15 + low = low + (bytes32[15] as Field) * v; + + low + high * v +} + +pub fn sha256_to_field(bytes_to_hash: [u8; N]) -> Field { + let sha256_hashed = std::hash::sha256(bytes_to_hash); + let hash_in_a_field = field_from_bytes_32_trunc(sha256_hashed); + + hash_in_a_field +} + +pub trait ToField { + fn to_field(self) -> Field; +} + +pub struct EthAddress { + inner: Field, +} + +impl ToField for EthAddress { + fn to_field(self) -> Field { + self.inner + } +} + +pub struct AztecAddress { + pub inner: Field, +} + +impl ToField for AztecAddress { + fn to_field(self) -> Field { + self.inner + } +} diff --git a/noir/noir-repo/tooling/nargo_fmt/build.rs b/noir/noir-repo/tooling/nargo_fmt/build.rs index 
47bb375f7d1..bd2db5f5b18 100644 --- a/noir/noir-repo/tooling/nargo_fmt/build.rs +++ b/noir/noir-repo/tooling/nargo_fmt/build.rs @@ -47,7 +47,10 @@ fn generate_formatter_tests(test_file: &mut File, test_data_dir: &Path) { .join("\n"); let output_source_path = outputs_dir.join(file_name).display().to_string(); - let output_source = std::fs::read_to_string(output_source_path.clone()).unwrap(); + let output_source = + std::fs::read_to_string(output_source_path.clone()).unwrap_or_else(|_| { + panic!("expected output source at {:?} was not found", &output_source_path) + }); write!( test_file, diff --git a/noir/noir-repo/tooling/nargo_fmt/src/formatter/trait_impl.rs b/noir/noir-repo/tooling/nargo_fmt/src/formatter/trait_impl.rs index 73d9a61b3d4..b31da8a4101 100644 --- a/noir/noir-repo/tooling/nargo_fmt/src/formatter/trait_impl.rs +++ b/noir/noir-repo/tooling/nargo_fmt/src/formatter/trait_impl.rs @@ -10,6 +10,11 @@ use super::Formatter; impl<'a> Formatter<'a> { pub(super) fn format_trait_impl(&mut self, trait_impl: NoirTraitImpl) { + // skip synthetic trait impl's, e.g. 
generated from trait aliases + if trait_impl.is_synthetic { + return; + } + let has_where_clause = !trait_impl.where_clause.is_empty(); self.write_indentation(); diff --git a/noir/noir-repo/tooling/nargo_fmt/src/formatter/traits.rs b/noir/noir-repo/tooling/nargo_fmt/src/formatter/traits.rs index 9a6b84c6537..1f192be471e 100644 --- a/noir/noir-repo/tooling/nargo_fmt/src/formatter/traits.rs +++ b/noir/noir-repo/tooling/nargo_fmt/src/formatter/traits.rs @@ -15,9 +15,18 @@ impl<'a> Formatter<'a> { self.write_identifier(noir_trait.name); self.format_generics(noir_trait.generics); + if noir_trait.is_alias { + self.write_space(); + self.write_token(Token::Assign); + } + if !noir_trait.bounds.is_empty() { self.skip_comments_and_whitespace(); - self.write_token(Token::Colon); + + if !noir_trait.is_alias { + self.write_token(Token::Colon); + } + self.write_space(); for (index, trait_bound) in noir_trait.bounds.into_iter().enumerate() { @@ -34,6 +43,12 @@ impl<'a> Formatter<'a> { self.format_where_clause(noir_trait.where_clause, true); } + // aliases have ';' in lieu of '{ items }' + if noir_trait.is_alias { + self.write_semicolon(); + return; + } + self.write_space(); self.write_left_brace(); if noir_trait.items.is_empty() { diff --git a/noir/noir-repo/tooling/nargo_fmt/tests/expected/trait_alias.nr b/noir/noir-repo/tooling/nargo_fmt/tests/expected/trait_alias.nr new file mode 100644 index 00000000000..926f3160279 --- /dev/null +++ b/noir/noir-repo/tooling/nargo_fmt/tests/expected/trait_alias.nr @@ -0,0 +1,86 @@ +trait Foo { + fn foo(self) -> Self; +} + +trait Baz = Foo; + +impl Foo for Field { + fn foo(self) -> Self { + self + } +} + +fn baz(x: T) -> T +where + T: Baz, +{ + x.foo() +} + +pub trait Foo_2 { + fn foo_2(self) -> Self; +} + +pub trait Bar_2 { + fn bar_2(self) -> Self; +} + +pub trait Baz_2 = Foo_2 + Bar_2; + +fn baz_2(x: T) -> T +where + T: Baz_2, +{ + x.foo_2().bar_2() +} + +impl Foo_2 for Field { + fn foo_2(self) -> Self { + self + 1 + } +} + +impl Bar_2 for 
Field { + fn bar_2(self) -> Self { + self + 2 + } +} + +trait Foo_3 { + fn foo_3(self) -> Self; +} + +trait Bar_3 { + fn bar_3(self) -> T; +} + +trait Baz_3 = Foo_3 + Bar_3; + +fn baz_3(x: T) -> U +where + T: Baz_3, +{ + x.foo_3().bar_3() +} + +impl Foo_3 for Field { + fn foo_3(self) -> Self { + self + 1 + } +} + +impl Bar_3 for Field { + fn bar_3(self) -> bool { + true + } +} + +fn main() { + let x: Field = 0; + let _ = baz(x); + + assert(0.foo_2().bar_2() == baz_2(0)); + + assert(0.foo_3().bar_3() == baz_3(0)); +} + diff --git a/noir/noir-repo/tooling/nargo_fmt/tests/input/trait_alias.nr b/noir/noir-repo/tooling/nargo_fmt/tests/input/trait_alias.nr new file mode 100644 index 00000000000..53ae756795b --- /dev/null +++ b/noir/noir-repo/tooling/nargo_fmt/tests/input/trait_alias.nr @@ -0,0 +1,78 @@ +trait Foo { + fn foo(self) -> Self; +} + +trait Baz = Foo; + +impl Foo for Field { + fn foo(self) -> Self { self } +} + +fn baz(x: T) -> T where T: Baz { + x.foo() +} + + +pub trait Foo_2 { + fn foo_2(self) -> Self; +} + +pub trait Bar_2 { + fn bar_2(self) -> Self; +} + +pub trait Baz_2 = Foo_2 + Bar_2; + +fn baz_2(x: T) -> T where T: Baz_2 { + x.foo_2().bar_2() +} + +impl Foo_2 for Field { + fn foo_2(self) -> Self { + self + 1 + } +} + +impl Bar_2 for Field { + fn bar_2(self) -> Self { + self + 2 + } +} + + +trait Foo_3 { + fn foo_3(self) -> Self; +} + +trait Bar_3 { + fn bar_3(self) -> T; +} + +trait Baz_3 = Foo_3 + Bar_3; + +fn baz_3(x: T) -> U where T: Baz_3 { + x.foo_3().bar_3() +} + +impl Foo_3 for Field { + fn foo_3(self) -> Self { + self + 1 + } +} + +impl Bar_3 for Field { + fn bar_3(self) -> bool { + true + } +} + + +fn main() { + let x: Field = 0; + let _ = baz(x); + + assert(0.foo_2().bar_2() == baz_2(0)); + + assert(0.foo_3().bar_3() == baz_3(0)); +} + diff --git a/spartan/aztec-network/files/config/config-prover-env.sh b/spartan/aztec-network/files/config/config-prover-env.sh index 4ee7106cb73..11c4ad5aef2 100644 --- 
a/spartan/aztec-network/files/config/config-prover-env.sh +++ b/spartan/aztec-network/files/config/config-prover-env.sh @@ -1,11 +1,9 @@ -#!/bin/sh +#!/bin/bash set -eu -alias aztec='node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js' - # Pass the bootnode url as an argument # Ask the bootnode for l1 contract addresses -output=$(aztec get-node-info -u $1) +output=$(node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js get-node-info -u $1) echo "$output" @@ -22,7 +20,7 @@ governance_proposer_address=$(echo "$output" | grep -oP 'GovernanceProposer Addr governance_address=$(echo "$output" | grep -oP 'Governance Address: \K0x[a-fA-F0-9]{40}') # Write the addresses to a file in the shared volume -cat < /shared/contracts.env +cat < /shared/contracts/contracts.env export BOOTSTRAP_NODES=$boot_node_enr export ROLLUP_CONTRACT_ADDRESS=$rollup_address export REGISTRY_CONTRACT_ADDRESS=$registry_address @@ -36,4 +34,4 @@ export GOVERNANCE_PROPOSER_CONTRACT_ADDRESS=$governance_proposer_address export GOVERNANCE_CONTRACT_ADDRESS=$governance_address EOF -cat /shared/contracts.env +cat /shared/contracts/contracts.env diff --git a/spartan/aztec-network/files/config/config-validator-env.sh b/spartan/aztec-network/files/config/config-validator-env.sh index 174482492c4..71d03fbbc98 100644 --- a/spartan/aztec-network/files/config/config-validator-env.sh +++ b/spartan/aztec-network/files/config/config-validator-env.sh @@ -1,11 +1,10 @@ -#!/bin/sh +#!/bin/bash set -eu -alias aztec='node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js' # Pass the bootnode url as an argument # Ask the bootnode for l1 contract addresses -output=$(aztec get-node-info -u $1) +output=$(node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js get-node-info -u $1) echo "$output" @@ -28,7 +27,7 @@ private_key=$(jq -r ".[$INDEX]" /app/config/keys.json) # Write the addresses to a file in the shared volume -cat < /shared/contracts.env +cat < /shared/contracts/contracts.env 
export BOOTSTRAP_NODES=$boot_node_enr export ROLLUP_CONTRACT_ADDRESS=$rollup_address export REGISTRY_CONTRACT_ADDRESS=$registry_address @@ -45,4 +44,4 @@ export L1_PRIVATE_KEY=$private_key export SEQ_PUBLISHER_PRIVATE_KEY=$private_key EOF -cat /shared/contracts.env \ No newline at end of file +cat /shared/contracts/contracts.env diff --git a/spartan/aztec-network/files/config/deploy-l1-contracts.sh b/spartan/aztec-network/files/config/deploy-l1-contracts.sh index 66cc107f251..4d976821f04 100644 --- a/spartan/aztec-network/files/config/deploy-l1-contracts.sh +++ b/spartan/aztec-network/files/config/deploy-l1-contracts.sh @@ -1,9 +1,8 @@ -#!/bin/sh +#!/bin/bash set -exu CHAIN_ID=$1 -alias aztec='node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js' # Use default account, it is funded on our dev machine export PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" @@ -12,9 +11,9 @@ export PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4 output="" # if INIT_VALIDATORS is true, then we need to pass the validators flag to the deploy-l1-contracts command if [ "$INIT_VALIDATORS" = "true" ]; then - output=$(aztec deploy-l1-contracts --validators $2 --l1-chain-id $CHAIN_ID) + output=$(node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js deploy-l1-contracts --validators $2 --l1-chain-id $CHAIN_ID) else - output=$(aztec deploy-l1-contracts --l1-chain-id $CHAIN_ID) + output=$(node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js deploy-l1-contracts --l1-chain-id $CHAIN_ID) fi echo "$output" @@ -32,7 +31,7 @@ governance_proposer_address=$(echo "$output" | grep -oP 'GovernanceProposer Addr governance_address=$(echo "$output" | grep -oP 'Governance Address: \K0x[a-fA-F0-9]{40}') # Write the addresses to a file in the shared volume -cat < /shared/contracts.env +cat < /shared/contracts/contracts.env export ROLLUP_CONTRACT_ADDRESS=$rollup_address export REGISTRY_CONTRACT_ADDRESS=$registry_address 
export INBOX_CONTRACT_ADDRESS=$inbox_address @@ -45,4 +44,4 @@ export GOVERNANCE_PROPOSER_CONTRACT_ADDRESS=$governance_proposer_address export GOVERNANCE_CONTRACT_ADDRESS=$governance_address EOF -cat /shared/contracts.env +cat /shared/contracts/contracts.env diff --git a/spartan/aztec-network/files/config/setup-p2p-addresses.sh b/spartan/aztec-network/files/config/setup-p2p-addresses.sh new file mode 100644 index 00000000000..f4b2afce6f2 --- /dev/null +++ b/spartan/aztec-network/files/config/setup-p2p-addresses.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +POD_NAME=$(echo $HOSTNAME) + +if [ "${NETWORK_PUBLIC}" = "true" ]; then + # First try treating HOSTNAME as a pod name + NODE_NAME=$(kubectl get pod $POD_NAME -n ${NAMESPACE} -o jsonpath='{.spec.nodeName}' 2>/dev/null) + + # If that fails, HOSTNAME might be the node name itself + if [ $? -ne 0 ]; then + echo "Could not find pod $POD_NAME, assuming $POD_NAME is the node name" + NODE_NAME=$POD_NAME + fi + + EXTERNAL_IP=$(kubectl get node $NODE_NAME -o jsonpath='{.status.addresses[?(@.type=="ExternalIP")].address}') + + if [ -z "$EXTERNAL_IP" ]; then + echo "Warning: Could not find ExternalIP, falling back to InternalIP" + EXTERNAL_IP=$(kubectl get node $NODE_NAME -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + fi + + TCP_ADDR="${EXTERNAL_IP}:${P2P_TCP_PORT}" + UDP_ADDR="${EXTERNAL_IP}:${P2P_UDP_PORT}" + +else + # Get pod IP for non-public networks + POD_IP=$(hostname -i) + TCP_ADDR="${POD_IP}:${P2P_TCP_PORT}" + UDP_ADDR="${POD_IP}:${P2P_UDP_PORT}" +fi + +# Write addresses to file for sourcing +echo "export P2P_TCP_ANNOUNCE_ADDR=${TCP_ADDR}" > /shared/p2p/p2p-addresses +echo "export P2P_TCP_LISTEN_ADDR=0.0.0.0:${P2P_TCP_PORT}" >> /shared/p2p/p2p-addresses +echo "export P2P_UDP_ANNOUNCE_ADDR=${UDP_ADDR}" >> /shared/p2p/p2p-addresses +echo "export P2P_UDP_LISTEN_ADDR=0.0.0.0:${P2P_UDP_PORT}" >> /shared/p2p/p2p-addresses + +echo "P2P addresses configured:" +cat /shared/p2p/p2p-addresses \ No newline at end of 
file diff --git a/spartan/aztec-network/files/config/setup-service-addresses.sh b/spartan/aztec-network/files/config/setup-service-addresses.sh new file mode 100644 index 00000000000..4594b7a7740 --- /dev/null +++ b/spartan/aztec-network/files/config/setup-service-addresses.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +set -ex + +# Function to get pod and node details +get_service_address() { + local SERVICE_LABEL=$1 + local PORT=$2 + local MAX_RETRIES=30 + local RETRY_INTERVAL=2 + local attempt=1 + + # Get pod name + while [ $attempt -le $MAX_RETRIES ]; do + POD_NAME=$(kubectl get pods -n ${NAMESPACE} -l app=${SERVICE_LABEL} -o jsonpath='{.items[0].metadata.name}') + if [ -n "$POD_NAME" ]; then + break + fi + echo "Attempt $attempt: Waiting for ${SERVICE_LABEL} pod to be available..." >&2 + sleep $RETRY_INTERVAL + attempt=$((attempt + 1)) + done + + if [ -z "$POD_NAME" ]; then + echo "Error: Failed to get ${SERVICE_LABEL} pod name after $MAX_RETRIES attempts" >&2 + return 1 + fi + echo "Pod name: [${POD_NAME}]" >&2 + + # Get node name + attempt=1 + NODE_NAME="" + while [ $attempt -le $MAX_RETRIES ]; do + NODE_NAME=$(kubectl get pod ${POD_NAME} -n ${NAMESPACE} -o jsonpath='{.spec.nodeName}') + if [ -n "$NODE_NAME" ]; then + break + fi + echo "Attempt $attempt: Waiting for node name to be available..." 
>&2 + sleep $RETRY_INTERVAL + attempt=$((attempt + 1)) + done + + if [ -z "$NODE_NAME" ]; then + echo "Error: Failed to get node name after $MAX_RETRIES attempts" >&2 + return 1 + fi + echo "Node name: ${NODE_NAME}" >&2 + + # Get the node's external IP + NODE_IP=$(kubectl get node ${NODE_NAME} -o jsonpath='{.status.addresses[?(@.type=="ExternalIP")].address}') + echo "Node IP: ${NODE_IP}" >&2 + echo "http://${NODE_IP}:${PORT}" +} + +# Configure Ethereum address +if [ "${ETHEREUM_EXTERNAL_HOST}" != "" ]; then + ETHEREUM_ADDR="${ETHEREUM_EXTERNAL_HOST}" +elif [ "${NETWORK_PUBLIC}" = "true" ]; then + ETHEREUM_ADDR=$(get_service_address "ethereum" "${ETHEREUM_PORT}") +else + ETHEREUM_ADDR="http://${SERVICE_NAME}-ethereum.${NAMESPACE}:${ETHEREUM_PORT}" +fi + +# Configure Boot Node address +if [ "${BOOT_NODE_EXTERNAL_HOST}" != "" ]; then + BOOT_NODE_ADDR="${BOOT_NODE_EXTERNAL_HOST}" +elif [ "${NETWORK_PUBLIC}" = "true" ]; then + BOOT_NODE_ADDR=$(get_service_address "boot-node" "${BOOT_NODE_PORT}") +else + BOOT_NODE_ADDR="http://${SERVICE_NAME}-boot-node.${NAMESPACE}:${BOOT_NODE_PORT}" +fi + +# Configure Prover Node address +if [ "${PROVER_NODE_EXTERNAL_HOST}" != "" ]; then + PROVER_NODE_ADDR="${PROVER_NODE_EXTERNAL_HOST}" +elif [ "${NETWORK_PUBLIC}" = "true" ]; then + PROVER_NODE_ADDR=$(get_service_address "prover-node" "${PROVER_NODE_PORT}") +else + PROVER_NODE_ADDR="http://${SERVICE_NAME}-prover-node.${NAMESPACE}:${PROVER_NODE_PORT}" +fi + + +# Write addresses to file for sourcing +echo "export ETHEREUM_HOST=${ETHEREUM_ADDR}" >> /shared/config/service-addresses +echo "export BOOT_NODE_HOST=${BOOT_NODE_ADDR}" >> /shared/config/service-addresses +echo "export PROVER_NODE_HOST=${PROVER_NODE_ADDR}" >> /shared/config/service-addresses +echo "Addresses configured:" +cat /shared/config/service-addresses diff --git a/spartan/aztec-network/templates/_helpers.tpl b/spartan/aztec-network/templates/_helpers.tpl index 33f8dda0671..8afb0c4636d 100644 --- 
a/spartan/aztec-network/templates/_helpers.tpl +++ b/spartan/aztec-network/templates/_helpers.tpl @@ -50,37 +50,19 @@ app.kubernetes.io/name: {{ include "aztec-network.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} -{{- define "aztec-network.ethereumHost" -}} -{{- if .Values.ethereum.externalHost -}} -http://{{ .Values.ethereum.externalHost }}:{{ .Values.ethereum.service.port }} -{{- else -}} -http://{{ include "aztec-network.fullname" . }}-ethereum.{{ .Release.Namespace }}:{{ .Values.ethereum.service.port }} -{{- end -}} -{{- end -}} + {{- define "aztec-network.pxeUrl" -}} -{{- if .Values.pxe.externalHost -}} -http://{{ .Values.pxe.externalHost }}:{{ .Values.pxe.service.port }} -{{- else -}} -http://{{ include "aztec-network.fullname" . }}-pxe.{{ .Release.Namespace }}:{{ .Values.pxe.service.port }} -{{- end -}} +http://{{ include "aztec-network.fullname" . }}-pxe.{{ .Release.Namespace }}:{{ .Values.pxe.service.nodePort }} {{- end -}} {{- define "aztec-network.bootNodeUrl" -}} -{{- if .Values.bootNode.externalTcpHost -}} -http://{{ .Values.bootNode.externalTcpHost }}:{{ .Values.bootNode.service.nodePort }} -{{- else -}} http://{{ include "aztec-network.fullname" . }}-boot-node-0.{{ include "aztec-network.fullname" . }}-boot-node.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.bootNode.service.nodePort }} {{- end -}} -{{- end -}} {{- define "aztec-network.validatorUrl" -}} -{{- if .Values.validator.externalTcpHost -}} -http://{{ .Values.validator.externalTcpHost }}:{{ .Values.validator.service.nodePort }} -{{- else -}} http://{{ include "aztec-network.fullname" . }}-validator.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.validator.service.nodePort }} {{- end -}} -{{- end -}} {{- define "aztec-network.metricsHost" -}} http://{{ include "aztec-network.fullname" . }}-metrics.{{ .Release.Namespace }} @@ -123,3 +105,89 @@ http://{{ include "aztec-network.fullname" . 
}}-metrics.{{ .Release.Namespace }} {{- end -}} {{- end -}} {{- end -}} + +{{/* +P2P Setup Container +*/}} +{{- define "aztec-network.p2pSetupContainer" -}} +- name: setup-p2p-addresses + image: bitnami/kubectl + command: + - /bin/sh + - -c + - | + cp /scripts/setup-p2p-addresses.sh /tmp/setup-p2p-addresses.sh && \ + chmod +x /tmp/setup-p2p-addresses.sh && \ + /tmp/setup-p2p-addresses.sh + env: + - name: NETWORK_PUBLIC + value: "{{ .Values.network.public }}" + - name: NAMESPACE + value: {{ .Release.Namespace }} + - name: P2P_TCP_PORT + value: "{{ .Values.validator.service.p2pTcpPort }}" + - name: P2P_UDP_PORT + value: "{{ .Values.validator.service.p2pUdpPort }}" + volumeMounts: + - name: scripts + mountPath: /scripts + - name: p2p-addresses + mountPath: /shared/p2p +{{- end -}} + +{{/* +Service Address Setup Container +*/}} +{{- define "aztec-network.serviceAddressSetupContainer" -}} +- name: setup-service-addresses + image: bitnami/kubectl + command: + - /bin/bash + - -c + - | + cp /scripts/setup-service-addresses.sh /tmp/setup-service-addresses.sh && \ + chmod +x /tmp/setup-service-addresses.sh && \ + /tmp/setup-service-addresses.sh + env: + - name: NETWORK_PUBLIC + value: "{{ .Values.network.public }}" + - name: NAMESPACE + value: {{ .Release.Namespace }} + - name: EXTERNAL_ETHEREUM_HOST + value: "{{ .Values.ethereum.externalHost }}" + - name: ETHEREUM_PORT + value: "{{ .Values.ethereum.service.port }}" + - name: EXTERNAL_BOOT_NODE_HOST + value: "{{ .Values.bootNode.externalHost }}" + - name: BOOT_NODE_PORT + value: "{{ .Values.bootNode.service.nodePort }}" + - name: EXTERNAL_PROVER_NODE_HOST + value: "{{ .Values.proverNode.externalHost }}" + - name: PROVER_NODE_PORT + value: "{{ .Values.proverNode.service.nodePort }}" + - name: SERVICE_NAME + value: {{ include "aztec-network.fullname" . 
}} + volumeMounts: + - name: scripts + mountPath: /scripts + - name: config + mountPath: /shared/config +{{- end -}} + +{{/** +Anti-affinity when running in public network mode +*/}} +{{- define "aztec-network.publicAntiAffinity" -}} +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - validator + - boot-node + - prover + topologyKey: "kubernetes.io/hostname" +{{- end -}} diff --git a/spartan/aztec-network/templates/boot-node.yaml b/spartan/aztec-network/templates/boot-node.yaml index 5f29df22010..0643646c8a0 100644 --- a/spartan/aztec-network/templates/boot-node.yaml +++ b/spartan/aztec-network/templates/boot-node.yaml @@ -17,16 +17,25 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: boot-node spec: + {{- if .Values.network.public }} + hostNetwork: true + {{- include "aztec-network.publicAntiAffinity" . | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "aztec-network.fullname" . }}-node initContainers: + {{- include "aztec-network.p2pSetupContainer" . | nindent 8 }} + {{- include "aztec-network.serviceAddressSetupContainer" . | nindent 8 }} - name: wait-for-ethereum - image: {{ .Values.images.curl.image }} + image: {{ .Values.images.aztec.image }} command: - - /bin/sh + - /bin/bash - -c - | + source /shared/config/service-addresses + echo "Awaiting ethereum node at ${ETHEREUM_HOST}" until curl -s -X POST -H 'Content-Type: application/json' \ -d '{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":67}' \ - {{ include "aztec-network.ethereumHost" . }} | grep -q reth; do + ${ETHEREUM_HOST} | grep -q reth; do echo "Waiting for Ethereum node..." sleep 5 done @@ -38,25 +47,31 @@ spec: done echo "OpenTelemetry collector is ready!" 
{{- end }} + volumeMounts: + - name: config + mountPath: /shared/config {{- if .Values.bootNode.deployContracts }} - - name: deploy-contracts + - name: deploy-l1-contracts image: {{ .Values.images.aztec.image }} command: [ - "/bin/sh", + "/bin/bash", "-c", - "cp /scripts/deploy-contracts.sh /tmp/deploy-contracts.sh && chmod +x /tmp/deploy-contracts.sh && /tmp/deploy-contracts.sh {{ .Values.ethereum.chainId }} \"{{ join "," .Values.validator.validatorAddresses }}\"" + "cp /scripts/deploy-l1-contracts.sh /tmp/deploy-l1-contracts.sh && \ + chmod +x /tmp/deploy-l1-contracts.sh && \ + source /shared/config/service-addresses && \ + /tmp/deploy-l1-contracts.sh {{ .Values.ethereum.chainId }} \"{{ join "," .Values.validator.validatorAddresses }}\"" ] volumeMounts: - name: scripts-output - mountPath: /shared + mountPath: /shared/contracts + - name: config + mountPath: /shared/config - name: scripts mountPath: /scripts env: - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . 
| quote }} - name: INIT_VALIDATORS - value: {{ not .Values.validator.external | quote }} + value: "true" - name: ETHEREUM_SLOT_DURATION value: "{{ .Values.ethereum.blockTime }}" - name: AZTEC_SLOT_DURATION @@ -70,12 +85,15 @@ spec: - name: boot-node image: {{ .Values.images.aztec.image }} command: - # sleep to allow dns name to be resolvable - [ - "/bin/bash", - "-c", - "sleep 30 && source /shared/contracts.env && env && node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --node --archiver --sequencer --pxe", - ] + - /bin/bash + - -c + - | + sleep 30 && \ + source /shared/contracts/contracts.env && \ + source /shared/p2p/p2p-addresses && \ + source /shared/config/service-addresses && \ + env && \ + node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --node --archiver --sequencer --pxe startupProbe: httpGet: path: /status @@ -91,20 +109,24 @@ spec: timeoutSeconds: 30 failureThreshold: 3 volumeMounts: - {{- if .Values.bootNode.deployContracts }} + - name: p2p-addresses + mountPath: /shared/p2p + - name: config + mountPath: /shared/config + {{- if .Values.bootNode.deployContracts }} - name: scripts-output - mountPath: /shared - {{- else }} + mountPath: /shared/contracts + {{- else }} - name: contracts-env - mountPath: /shared/contracts.env + mountPath: /shared/contracts/contracts.env subPath: contracts.env - {{- end }} + {{- end }} env: - name: POD_IP valueFrom: fieldRef: fieldPath: status.podIP - - name: PORT + - name: AZTEC_PORT value: "{{ .Values.bootNode.service.nodePort }}" - name: LOG_LEVEL value: "{{ .Values.bootNode.logLevel }}" @@ -112,8 +134,6 @@ spec: value: "1" - name: DEBUG value: "{{ .Values.bootNode.debug }}" - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . 
| quote }} - name: P2P_ENABLED value: "{{ .Values.bootNode.p2p.enabled }}" - name: COINBASE @@ -126,22 +146,6 @@ spec: value: "{{ .Values.bootNode.sequencer.maxSecondsBetweenBlocks }}" - name: SEQ_MIN_TX_PER_BLOCK value: "{{ .Values.bootNode.sequencer.minTxsPerBlock }}" - - name: P2P_TCP_ANNOUNCE_ADDR - {{- if .Values.bootNode.externalTcpHost }} - value: "{{ .Values.bootNode.externalTcpHost }}:{{ .Values.bootNode.service.p2pTcpPort }}" - {{- else }} - value: "$(POD_IP):{{ .Values.bootNode.service.p2pTcpPort }}" - {{- end }} - - name: P2P_UDP_ANNOUNCE_ADDR - {{- if .Values.bootNode.externalUdpHost }} - value: "{{ .Values.bootNode.externalUdpHost }}:{{ .Values.bootNode.service.p2pUdpPort }}" - {{- else }} - value: "$(POD_IP):{{ .Values.bootNode.service.p2pUdpPort }}" - {{- end }} - - name: P2P_TCP_LISTEN_ADDR - value: "0.0.0.0:{{ .Values.bootNode.service.p2pTcpPort }}" - - name: P2P_UDP_LISTEN_ADDR - value: "0.0.0.0:{{ .Values.bootNode.service.p2pUdpPort }}" - name: VALIDATOR_PRIVATE_KEY value: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" - name: OTEL_RESOURCE_ATTRIBUTES @@ -170,10 +174,14 @@ spec: resources: {{- toYaml .Values.bootNode.resources | nindent 12 }} volumes: + - name: p2p-addresses + emptyDir: {} + - name: config + emptyDir: {} {{- if .Values.bootNode.deployContracts }} - name: scripts configMap: - name: {{ include "aztec-network.fullname" . }}-deploy-contracts-script + name: {{ include "aztec-network.fullname" . }}-scripts - name: scripts-output emptyDir: {} {{- else }} @@ -181,18 +189,7 @@ spec: configMap: name: {{ include "aztec-network.fullname" . }}-contracts-env {{- end }} -{{- if .Values.bootNode.deployContracts }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "aztec-network.fullname" . }}-deploy-contracts-script - labels: - {{- include "aztec-network.labels" . 
| nindent 4 }} -data: - deploy-contracts.sh: | - {{ .Files.Get "files/config/deploy-l1-contracts.sh" | nindent 4 }} -{{- else }} +{{- if not .Values.bootNode.deployContracts }} --- apiVersion: v1 kind: ConfigMap @@ -209,6 +206,7 @@ data: export FEE_JUICE_CONTRACT_ADDRESS={{ .Values.bootNode.contracts.feeJuiceAddress }} export FEE_JUICE_PORTAL_CONTRACT_ADDRESS={{ .Values.bootNode.contracts.feeJuicePortalAddress }} {{- end }} +{{if not .Values.network.public }} --- # Headless service for StatefulSet DNS entries apiVersion: v1 @@ -230,43 +228,4 @@ spec: protocol: UDP - port: {{ .Values.bootNode.service.nodePort }} name: node ---- -{{if .Values.network.public }} -apiVersion: v1 -kind: Service -metadata: - name: boot-node-lb-tcp - labels: - {{- include "aztec-network.labels" . | nindent 4 }} -spec: - type: LoadBalancer - selector: - {{- include "aztec-network.selectorLabels" . | nindent 4 }} - app: boot-node - ports: - - port: {{ .Values.bootNode.service.p2pTcpPort }} - name: p2p-tpc - - port: {{ .Values.bootNode.service.nodePort }} - name: node ---- -apiVersion: v1 -kind: Service -metadata: - name: boot-node-lb-udp - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" - service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" - labels: - {{- include "aztec-network.labels" . | nindent 4 }} -spec: - type: LoadBalancer - selector: - {{- include "aztec-network.selectorLabels" . 
| nindent 4 }} - app: boot-node - ports: - - port: {{ .Values.bootNode.service.p2pUdpPort }} - name: p2p-udp - protocol: UDP ---- {{ end }} diff --git a/spartan/aztec-network/templates/deploy-l1-verifier.yaml b/spartan/aztec-network/templates/deploy-l1-verifier.yaml index 486db8d24ca..cab6f8a78ab 100644 --- a/spartan/aztec-network/templates/deploy-l1-verifier.yaml +++ b/spartan/aztec-network/templates/deploy-l1-verifier.yaml @@ -1,4 +1,4 @@ -{{- if .Values.network.setupL2Contracts }} +{{- if and .Values.network.setupL2Contracts .Values.jobs.deployL1Verifier.enable }} apiVersion: batch/v1 kind: Job metadata: @@ -13,6 +13,12 @@ spec: app: deploy-l1-verifier spec: restartPolicy: OnFailure + volumes: + - name: config + emptyDir: {} + - name: scripts + configMap: + name: {{ include "aztec-network.fullname" . }}-scripts containers: - name: deploy-l1-verifier image: {{ .Values.images.aztec.image }} @@ -21,38 +27,69 @@ spec: - -c - | set -e + # Install kubectl + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ - [ $ENABLE = "true" ] || exit 0 + # Set up kubeconfig using service account credentials + export KUBECONFIG=/tmp/kubeconfig + kubectl config set-cluster default --server=https://kubernetes.default.svc --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubectl config set-credentials default --token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + kubectl config set-context default --cluster=default --user=default + kubectl config use-context default - until curl -s -X GET "$AZTEC_NODE_URL/status"; do - echo "Waiting for Aztec node $AZTEC_NODE_URL..." 
+ cp /scripts/setup-service-addresses.sh /tmp/setup-service-addresses.sh + chmod +x /tmp/setup-service-addresses.sh + /tmp/setup-service-addresses.sh + source /shared/config/service-addresses + + until curl -s -X GET "$BOOT_NODE_HOST/status"; do + echo "Waiting for Aztec node $BOOT_NODE_HOST..." sleep 5 done echo "Boot node is ready!" export ROLLUP_CONTRACT_ADDRESS=$(curl -X POST -H 'Content-Type: application/json' \ -d '{"jsonrpc":"2.0","method":"node_getL1ContractAddresses","params":[],"id":1}' \ - "$AZTEC_NODE_URL" \ + "$BOOT_NODE_HOST" \ | jq -r '.result.rollupAddress.value') echo "Rollup contract address: $ROLLUP_CONTRACT_ADDRESS" node /usr/src/yarn-project/aztec/dest/bin/index.js deploy-l1-verifier --verifier real echo "L1 verifier deployed" env: - - name: ENABLE - value: {{ .Values.jobs.deployL1Verifier.enable | quote }} - name: NODE_NO_WARNINGS value: "1" - name: DEBUG value: "aztec:*" - name: LOG_LEVEL value: "debug" - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . | quote }} - name: L1_CHAIN_ID value: {{ .Values.ethereum.chainId | quote }} - name: PRIVATE_KEY value: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" - - name: AZTEC_NODE_URL - value: {{ include "aztec-network.bootNodeUrl" . | quote }} + - name: NETWORK_PUBLIC + value: "{{ .Values.network.public }}" + - name: NAMESPACE + value: {{ .Release.Namespace }} + - name: EXTERNAL_ETHEREUM_HOST + value: "{{ .Values.ethereum.externalHost }}" + - name: ETHEREUM_PORT + value: "{{ .Values.ethereum.service.port }}" + - name: EXTERNAL_BOOT_NODE_HOST + value: "{{ .Values.bootNode.externalHost }}" + - name: BOOT_NODE_PORT + value: "{{ .Values.bootNode.service.nodePort }}" + - name: EXTERNAL_PROVER_NODE_HOST + value: "{{ .Values.proverNode.externalHost }}" + - name: PROVER_NODE_PORT + value: "{{ .Values.proverNode.service.nodePort }}" + - name: SERVICE_NAME + value: {{ include "aztec-network.fullname" . 
}} + volumeMounts: + - name: config + mountPath: /shared/config + - name: scripts + mountPath: /scripts {{ end }} diff --git a/spartan/aztec-network/templates/prover-agent.yaml b/spartan/aztec-network/templates/prover-agent.yaml index f929daa4b79..04f58284d21 100644 --- a/spartan/aztec-network/templates/prover-agent.yaml +++ b/spartan/aztec-network/templates/prover-agent.yaml @@ -17,20 +17,32 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: prover-agent spec: + serviceAccountName: {{ include "aztec-network.fullname" . }}-node {{- if .Values.proverAgent.nodeSelector }} nodeSelector: {{- toYaml .Values.proverAgent.nodeSelector | nindent 8 }} {{- end }} + {{- if .Values.network.public }} + hostNetwork: true + {{- end }} + volumes: + - name: config + emptyDir: {} + - name: scripts + configMap: + name: {{ include "aztec-network.fullname" . }}-scripts initContainers: + {{- include "aztec-network.serviceAddressSetupContainer" . | nindent 8 }} - name: wait-for-prover-node - image: {{ .Values.images.curl.image }} + image: {{ .Values.images.aztec.image }} command: - - /bin/sh + - /bin/bash - -c - | - until curl -s -X POST "$PROVER_JOB_SOURCE_URL/status"; do - echo "Waiting for Prover node $PROVER_JOB_SOURCE_URL ..." + source /shared/config/service-addresses + until curl -s -X POST ${PROVER_NODE_HOST}/status; do + echo "Waiting for Prover node ${PROVER_NODE_HOST} ..." sleep 5 done echo "Prover node is ready!" @@ -41,18 +53,26 @@ spec: done echo "OpenTelemetry collector is ready!" {{- end }} - env: - - name: PROVER_JOB_SOURCE_URL - value: "http://{{ include "aztec-network.fullname" . 
}}-prover-node.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proverNode.service.nodePort }}" + volumeMounts: + - name: config + mountPath: /shared/config containers: - name: prover-agent image: "{{ .Values.images.aztec.image }}" imagePullPolicy: {{ .Values.images.aztec.pullPolicy }} + volumeMounts: + - name: config + mountPath: /shared/config command: - "/bin/bash" - "-c" - - "node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover" + - | + source /shared/config/service-addresses && \ + PROVER_JOB_SOURCE_URL=${PROVER_NODE_HOST} \ + node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover env: + - name: AZTEC_PORT + value: "{{ .Values.proverAgent.service.nodePort }}" - name: LOG_LEVEL value: "{{ .Values.proverAgent.logLevel }}" - name: LOG_JSON @@ -61,8 +81,6 @@ spec: value: "{{ .Values.proverAgent.debug }}" - name: PROVER_REAL_PROOFS value: "{{ .Values.proverAgent.realProofs }}" - - name: PROVER_JOB_SOURCE_URL - value: "http://{{ include "aztec-network.fullname" . }}-prover-node.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proverNode.service.nodePort }}" - name: PROVER_AGENT_ENABLED value: "true" - name: PROVER_AGENT_CONCURRENCY diff --git a/spartan/aztec-network/templates/prover-node.yaml b/spartan/aztec-network/templates/prover-node.yaml index ff11cbf1ee3..6b7506149a2 100644 --- a/spartan/aztec-network/templates/prover-node.yaml +++ b/spartan/aztec-network/templates/prover-node.yaml @@ -17,16 +17,24 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: prover-node spec: + {{- if .Values.network.public }} + hostNetwork: true + {{- include "aztec-network.publicAntiAffinity" . | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "aztec-network.fullname" . }}-node initContainers: + {{- include "aztec-network.serviceAddressSetupContainer" . | nindent 8 }} + {{- include "aztec-network.p2pSetupContainer" . 
| nindent 8 }} - name: wait-for-services - image: {{ .Values.images.curl.image }} + image: {{ .Values.images.aztec.image }} command: - - /bin/sh + - /bin/bash - -c - | + source /shared/config/service-addresses until curl -s -X POST -H 'Content-Type: application/json' \ -d '{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":67}' \ - {{ include "aztec-network.ethereumHost" . }} | grep -q reth; do + ${ETHEREUM_HOST} | grep -q reth; do echo "Waiting for Ethereum node..." sleep 5 done @@ -38,26 +46,31 @@ spec: done echo "OpenTelemetry collector is ready!" {{- end }} - until curl --head --silent {{ include "aztec-network.bootNodeUrl" . }}/status; do + until curl --head --silent $BOOT_NODE_HOST/status; do echo "Waiting for boot node..." sleep 5 done echo "Boot node is ready!" + volumeMounts: + - name: config + mountPath: /shared/config - name: configure-prover-env image: "{{ .Values.images.aztec.image }}" imagePullPolicy: {{ .Values.images.aztec.pullPolicy }} command: - - "/bin/sh" + - "/bin/bash" - "-c" - - "cp /scripts/configure-prover-env.sh /tmp/configure-prover-env.sh && chmod +x /tmp/configure-prover-env.sh && /tmp/configure-prover-env.sh {{ include "aztec-network.bootNodeUrl" . }}" + - "cp /scripts/configure-prover-env.sh /tmp/configure-prover-env.sh && \ + chmod +x /tmp/configure-prover-env.sh && \ + source /shared/config/service-addresses && \ + /tmp/configure-prover-env.sh ${BOOT_NODE_HOST}" volumeMounts: - - name: shared-volume - mountPath: /shared + - name: contracts-env + mountPath: /shared/contracts - name: scripts mountPath: /scripts - env: - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . 
| quote }} + - name: config + mountPath: /shared/config containers: - name: prover-node @@ -66,16 +79,25 @@ spec: command: - "/bin/bash" - "-c" - - "source /shared/contracts.env && env && node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover-node --archiver" + - | + source /shared/contracts/contracts.env && \ + source /shared/p2p/p2p-addresses && \ + source /shared/config/service-addresses && \ + env && \ + node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover-node --archiver volumeMounts: - - name: shared-volume - mountPath: /shared + - name: contracts-env + mountPath: /shared/contracts + - name: p2p-addresses + mountPath: /shared/p2p + - name: config + mountPath: /shared/config env: - name: POD_IP valueFrom: fieldRef: fieldPath: status.podIP - - name: PORT + - name: AZTEC_PORT value: "{{ .Values.proverNode.service.nodePort }}" - name: LOG_LEVEL value: "{{ .Values.proverNode.logLevel }}" @@ -83,8 +105,6 @@ spec: value: "1" - name: DEBUG value: "{{ .Values.proverNode.debug }}" - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . 
| quote }} - name: PROVER_REAL_PROOFS value: "{{ .Values.proverNode.realProofs }}" - name: PROVER_AGENT_ENABLED @@ -106,18 +126,6 @@ spec: value: "{{ .Values.ethereum.chainId }}" - name: P2P_ENABLED value: "{{ .Values.proverNode.p2pEnabled }}" - - name: P2P_TCP_ANNOUNCE_ADDR - {{- if .Values.proverNode.externalTcpHost }} - value: "{{ .Values.proverNode.externalTcpHost }}:{{ .Values.proverNode.service.p2pTcpPort }}" - {{- else }} - value: "$(POD_IP):{{ .Values.proverNode.service.p2pTcpPort }}" - {{- end }} - - name: P2P_UDP_ANNOUNCE_ADDR - {{- if .Values.proverNode.externalUdpHost }} - value: "{{ .Values.proverNode.externalUdpHost }}:{{ .Values.proverNode.service.p2pUdpPort }}" - {{- else }} - value: "$(POD_IP):{{ .Values.proverNode.service.p2pUdpPort }}" - {{- end }} - name: P2P_TCP_LISTEN_ADDR value: "0.0.0.0:{{ .Values.proverNode.service.p2pTcpPort }}" - name: P2P_UDP_LISTEN_ADDR @@ -140,7 +148,13 @@ spec: volumes: - name: scripts configMap: - name: {{ include "aztec-network.fullname" . }}-configure-prover-env + name: {{ include "aztec-network.fullname" . }}-scripts + - name: contracts-env + emptyDir: {} + - name: p2p-addresses + emptyDir: {} + - name: config + emptyDir: {} volumeClaimTemplates: - metadata: name: shared-volume @@ -151,16 +165,7 @@ spec: resources: requests: storage: {{ .Values.proverNode.storage }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "aztec-network.fullname" . }}-configure-prover-env - labels: - {{- include "aztec-network.labels" . | nindent 4 }} -data: - configure-prover-env.sh: | - {{ .Files.Get "files/config/config-prover-env.sh" | nindent 4 }} +{{if not .Values.network.public }} --- apiVersion: v1 kind: Service @@ -181,43 +186,4 @@ spec: - port: {{ .Values.proverNode.service.p2pUdpPort }} name: p2p-udp protocol: UDP ---- -{{if .Values.proverNode.public }} -apiVersion: v1 -kind: Service -metadata: - name: prover-node-lb-tcp - labels: - {{- include "aztec-network.labels" . 
| nindent 4 }} -spec: - type: LoadBalancer - selector: - {{- include "aztec-network.selectorLabels" . | nindent 4 }} - app: prover-node - ports: - - port: {{ .Values.proverNode.service.nodePort }} - name: node - - port: {{ .Values.proverNode.service.p2pTcpPort }} - name: p2p-tcp ---- -apiVersion: v1 -kind: Service -metadata: - name: prover-node-lb-udp - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" - service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" - labels: - {{- include "aztec-network.labels" . | nindent 4 }} -spec: - type: LoadBalancer - selector: - {{- include "aztec-network.selectorLabels" . | nindent 4 }} - app: prover-node - ports: - - port: {{ .Values.proverNode.service.p2pUdpPort }} - name: p2p-udp - protocol: UDP ---- {{ end }} diff --git a/spartan/aztec-network/templates/pxe.yaml b/spartan/aztec-network/templates/pxe.yaml index d229227e2c8..94a8a87886c 100644 --- a/spartan/aztec-network/templates/pxe.yaml +++ b/spartan/aztec-network/templates/pxe.yaml @@ -16,17 +16,36 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: pxe spec: + serviceAccountName: {{ include "aztec-network.fullname" . }}-node + {{- if .Values.network.public }} + hostNetwork: true + {{- end }} + volumes: + - name: config + emptyDir: {} + - name: scripts + configMap: + name: {{ include "aztec-network.fullname" . }}-scripts + - name: scripts-output + emptyDir: {} initContainers: + {{- include "aztec-network.serviceAddressSetupContainer" . | nindent 8 }} - name: wait-for-boot-node image: {{ .Values.images.curl.image }} command: - /bin/sh - -c - | - until curl --head --silent {{ include "aztec-network.bootNodeUrl" . }}/status; do + source /shared/config/service-addresses + until curl --head --silent ${BOOT_NODE_HOST}/status; do echo "Waiting for boot node..." 
sleep 5 done + volumeMounts: + - name: config + mountPath: /shared/config + {{- if not .Values.network.public }} + # We only need to wait for the validator service if the network is not public - name: wait-for-validator-service image: {{ .Values.images.curl.image }} command: @@ -37,19 +56,30 @@ spec: echo "Waiting for validator service..." sleep 5 done + {{- end }} containers: - name: pxe image: "{{ .Values.images.aztec.image }}" + volumeMounts: + - name: config + mountPath: /shared/config command: - "/bin/bash" - "-c" - - > + - | + source /shared/config/service-addresses + {{- if .Values.network.public }} + # If the network is public, we need to use the boot node URL + export AZTEC_NODE_URL=${BOOT_NODE_HOST} + {{- else }} + # If the network is not public, we can use the validator URL + export AZTEC_NODE_URL={{ include "aztec-network.validatorUrl" . }} + {{- end }} + echo "AZTEC_NODE_URL=${AZTEC_NODE_URL}" node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --pxe env: - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . | quote }} - - name: AZTEC_NODE_URL - value: {{ include "aztec-network.validatorUrl" . 
| quote }} + - name: AZTEC_PORT + value: "{{ .Values.pxe.service.nodePort }}" - name: LOG_JSON value: "1" - name: LOG_LEVEL @@ -60,7 +90,7 @@ spec: value: "{{ .Values.pxe.proverEnabled }}" ports: - name: http - containerPort: {{ .Values.pxe.service.port }} + containerPort: {{ .Values.pxe.service.nodePort }} protocol: TCP readinessProbe: exec: @@ -70,7 +100,7 @@ spec: - | curl -s -X POST -H 'content-type: application/json' \ -d '{"jsonrpc":"2.0","method":"pxe_isGlobalStateSynchronized","params":[],"id":67}' \ - 127.0.0.1:{{ .Values.pxe.service.port }} | grep -q '"result":true' + 127.0.0.1:{{ .Values.pxe.service.nodePort }} | grep -q '"result":true' initialDelaySeconds: {{ .Values.pxe.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.pxe.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.pxe.readinessProbe.timeoutSeconds }} @@ -92,8 +122,8 @@ spec: app: pxe ports: - protocol: TCP - port: {{ .Values.pxe.service.port }} - targetPort: {{ .Values.pxe.service.targetPort }} + port: {{ .Values.pxe.service.nodePort }} + targetPort: {{ .Values.pxe.service.nodePort }} {{- if and (eq .Values.pxe.service.type "NodePort") .Values.pxe.service.nodePort }} nodePort: {{ .Values.pxe.service.nodePort }} {{- end }} @@ -103,6 +133,10 @@ apiVersion: v1 kind: Service metadata: name: pxe-lb + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" + service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" labels: {{- include "aztec-network.labels" . 
| nindent 4 }} spec: @@ -112,8 +146,8 @@ spec: app: pxe ports: - protocol: TCP - port: {{ .Values.pxe.service.port }} - targetPort: {{ .Values.pxe.service.targetPort }} + port: {{ .Values.pxe.service.nodePort }} + targetPort: {{ .Values.pxe.service.nodePort }} {{- if and (eq .Values.pxe.service.type "NodePort") .Values.pxe.service.nodePort }} nodePort: {{ .Values.pxe.service.nodePort }} {{- end }} diff --git a/spartan/aztec-network/templates/rbac.yaml b/spartan/aztec-network/templates/rbac.yaml new file mode 100644 index 00000000000..a0e8e68cd11 --- /dev/null +++ b/spartan/aztec-network/templates/rbac.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "aztec-network.fullname" . }}-node + labels: + {{- include "aztec-network.labels" . | nindent 4 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aztec-network.fullname" . }}-node + labels: + {{- include "aztec-network.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: ["services", "pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aztec-network.fullname" . }}-node + labels: + {{- include "aztec-network.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "aztec-network.fullname" . }}-node +subjects: +- kind: ServiceAccount + name: {{ include "aztec-network.fullname" . }}-node +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "aztec-network.fullname" . }}-node + labels: + {{- include "aztec-network.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "aztec-network.fullname" . }}-node + labels: + {{- include "aztec-network.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "aztec-network.fullname" . }}-node +subjects: +- kind: ServiceAccount + name: {{ include "aztec-network.fullname" . }}-node + namespace: {{ .Release.Namespace }} diff --git a/spartan/aztec-network/templates/reth.yaml b/spartan/aztec-network/templates/reth.yaml index 7312bab7ad5..d6230ecf0ad 100644 --- a/spartan/aztec-network/templates/reth.yaml +++ b/spartan/aztec-network/templates/reth.yaml @@ -16,6 +16,9 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: ethereum spec: + {{- if .Values.network.public }} + hostNetwork: true + {{- end }} containers: - name: ethereum image: "{{ .Values.images.reth.image }}" @@ -39,21 +42,6 @@ spec: mountPath: /data - name: genesis mountPath: /genesis - # readinessProbe: - # exec: - # command: - # - sh - # - -c - # - | - # wget -qO- --post-data='{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":67}' \ - # --header='Content-Type: application/json' \ - # 127.0.0.1:{{ .Values.ethereum.service.port }} \ - # | grep -q 'reth' - # initialDelaySeconds: {{ .Values.ethereum.readinessProbe.initialDelaySeconds }} - # periodSeconds: {{ .Values.ethereum.readinessProbe.periodSeconds }} - # timeoutSeconds: {{ .Values.ethereum.readinessProbe.timeoutSeconds }} - # successThreshold: {{ .Values.ethereum.readinessProbe.successThreshold }} - # failureThreshold: {{ .Values.ethereum.readinessProbe.failureThreshold }} resources: {{- toYaml .Values.ethereum.resources | nindent 12 }} volumes: @@ -63,6 +51,7 @@ spec: - name: genesis configMap: name: {{ include "aztec-network.fullname" . 
}}-reth-genesis +{{if not .Values.network.public }} --- apiVersion: v1 kind: Service @@ -82,26 +71,6 @@ spec: {{- if and (eq .Values.ethereum.service.type "NodePort") .Values.ethereum.service.nodePort }} nodePort: {{ .Values.ethereum.service.nodePort }} {{- end }} ---- -{{if .Values.network.public }} -apiVersion: v1 -kind: Service -metadata: - name: ethereum-lb - labels: - {{- include "aztec-network.labels" . | nindent 4 }} -spec: - type: LoadBalancer - selector: - {{- include "aztec-network.selectorLabels" . | nindent 4 }} - app: ethereum - ports: - - protocol: TCP - port: {{ .Values.ethereum.service.port }} - targetPort: {{ .Values.ethereum.service.targetPort }} - {{- if and (eq .Values.ethereum.service.type "NodePort") .Values.ethereum.service.nodePort }} - nodePort: {{ .Values.ethereum.service.nodePort }} - {{- end }} {{ end }} --- apiVersion: v1 diff --git a/spartan/aztec-network/templates/scripts-configmap.yaml b/spartan/aztec-network/templates/scripts-configmap.yaml new file mode 100644 index 00000000000..bc86aabbd36 --- /dev/null +++ b/spartan/aztec-network/templates/scripts-configmap.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "aztec-network.fullname" . }}-scripts + labels: + {{- include "aztec-network.labels" . 
| nindent 4 }} +data: + setup-service-addresses.sh: | + {{ .Files.Get "files/config/setup-service-addresses.sh" | nindent 4 }} + setup-p2p-addresses.sh: | + {{ .Files.Get "files/config/setup-p2p-addresses.sh" | nindent 4 }} + configure-validator-env.sh: | + {{ .Files.Get "files/config/config-validator-env.sh" | nindent 4 }} + configure-prover-env.sh: | + {{ .Files.Get "files/config/config-prover-env.sh" | nindent 4 }} + deploy-l1-contracts.sh: | + {{ .Files.Get "files/config/deploy-l1-contracts.sh" | nindent 4 }} diff --git a/spartan/aztec-network/templates/setup-l2-contracts.yaml b/spartan/aztec-network/templates/setup-l2-contracts.yaml index df05ffd20cc..56cf8fc57f2 100644 --- a/spartan/aztec-network/templates/setup-l2-contracts.yaml +++ b/spartan/aztec-network/templates/setup-l2-contracts.yaml @@ -13,16 +13,46 @@ spec: app: setup-l2-contracts spec: restartPolicy: OnFailure + serviceAccountName: {{ include "aztec-network.fullname" . }}-node + volumes: + - name: scripts + configMap: + name: {{ include "aztec-network.fullname" . 
}}-scripts + - name: config + emptyDir: {} containers: - name: setup-l2-contracts image: {{ .Values.images.aztec.image }} + volumeMounts: + - name: config + mountPath: /shared/config + - name: scripts + mountPath: /scripts command: - /bin/bash - -c - | + # Install kubectl + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ + + # Set up kubeconfig using service account credentials + export KUBECONFIG=/tmp/kubeconfig + kubectl config set-cluster default --server=https://kubernetes.default.svc --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubectl config set-credentials default --token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + kubectl config set-context default --cluster=default --user=default + kubectl config use-context default + + cp /scripts/setup-service-addresses.sh /tmp/setup-service-addresses.sh + chmod +x /tmp/setup-service-addresses.sh + /tmp/setup-service-addresses.sh + source /shared/config/service-addresses + export AZTEC_NODE_URL=$BOOT_NODE_HOST + export PXE_URL=$BOOT_NODE_HOST until curl -s -X POST -H 'content-type: application/json' \ -d '{"jsonrpc":"2.0","method":"pxe_getNodeInfo","params":[],"id":67}' \ - {{ include "aztec-network.pxeUrl" . }} | grep -q '"enr:-'; do + $PXE_URL | grep -q '"enr:-'; do echo "Waiting for PXE service..." sleep 5 done @@ -31,10 +61,26 @@ spec: node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js setup-protocol-contracts --skipProofWait --l1-chain-id {{ .Values.ethereum.chainId }} echo "L2 contracts initialized" env: - - name: PXE_URL - value: {{ include "aztec-network.bootNodeUrl" . 
| quote }} - name: DEBUG value: "aztec:*" - name: LOG_LEVEL value: "debug" + - name: NETWORK_PUBLIC + value: "{{ .Values.network.public }}" + - name: NAMESPACE + value: {{ .Release.Namespace }} + - name: EXTERNAL_ETHEREUM_HOST + value: "{{ .Values.ethereum.externalHost }}" + - name: ETHEREUM_PORT + value: "{{ .Values.ethereum.service.port }}" + - name: EXTERNAL_BOOT_NODE_HOST + value: "{{ .Values.bootNode.externalHost }}" + - name: BOOT_NODE_PORT + value: "{{ .Values.bootNode.service.nodePort }}" + - name: EXTERNAL_PROVER_NODE_HOST + value: "{{ .Values.proverNode.externalHost }}" + - name: PROVER_NODE_PORT + value: "{{ .Values.proverNode.service.nodePort }}" + - name: SERVICE_NAME + value: {{ include "aztec-network.fullname" . }} {{ end }} diff --git a/spartan/aztec-network/templates/transaction-bot.yaml b/spartan/aztec-network/templates/transaction-bot.yaml index 9f1239fcc2b..cd5b88a13bd 100644 --- a/spartan/aztec-network/templates/transaction-bot.yaml +++ b/spartan/aztec-network/templates/transaction-bot.yaml @@ -17,38 +17,64 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: bot spec: + {{- if .Values.network.public }} + hostNetwork: true + {{- end }} + serviceAccountName: {{ include "aztec-network.fullname" . }}-node + volumes: + - name: config + emptyDir: {} + - name: scripts + configMap: + name: {{ include "aztec-network.fullname" . }}-scripts + - name: scripts-output + emptyDir: {} initContainers: + {{- include "aztec-network.serviceAddressSetupContainer" . | nindent 8 }} - name: wait-for-aztec-node image: "{{ .Values.images.curl.image }}" - env: - - name: AZTEC_NODE_URL - {{- if .Values.bot.nodeUrl }} - value: "{{ .Values.bot.nodeUrl }}" - {{- else }} - value: {{ include "aztec-network.validatorUrl" . 
| quote }} - {{- end }} command: - /bin/sh - -c - | - until curl -s $(AZTEC_NODE_URL)/status; do echo waiting for aztec-node; sleep 2; done + source /shared/config/service-addresses + {{- if .Values.bot.nodeUrl }} + export AZTEC_NODE_URL={{ .Values.bot.nodeUrl }} + {{- else if .Values.network.public }} + export AZTEC_NODE_URL=${BOOT_NODE_HOST} + {{- else }} + export AZTEC_NODE_URL={{ include "aztec-network.validatorUrl" . }} + {{- end }} + echo "AZTEC_NODE_URL=${AZTEC_NODE_URL}" + until curl -s ${AZTEC_NODE_URL}/status; do echo waiting for aztec-node; sleep 2; done + volumeMounts: + - name: config + mountPath: /shared/config containers: - name: transaction-bot image: "{{ .Values.images.aztec.image }}" + volumeMounts: + - name: config + mountPath: /shared/config + - name: scripts + mountPath: /scripts command: - "/bin/bash" - "-c" - - > - node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --pxe --bot - env: - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . | quote }} - - name: AZTEC_NODE_URL + - | + source /shared/config/service-addresses {{- if .Values.bot.nodeUrl }} - value: "{{ .Values.bot.nodeUrl }}" + export AZTEC_NODE_URL={{ .Values.bot.nodeUrl }} + {{- else if .Values.network.public }} + export AZTEC_NODE_URL=${BOOT_NODE_HOST} {{- else }} - value: {{ include "aztec-network.validatorUrl" . | quote }} + export AZTEC_NODE_URL={{ include "aztec-network.validatorUrl" . 
}} {{- end }} + echo "AZTEC_NODE_URL=${AZTEC_NODE_URL}" + node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --pxe --bot + env: + - name: AZTEC_PORT + value: "{{ .Values.bot.service.nodePort }}" - name: LOG_JSON value: "1" - name: LOG_LEVEL @@ -77,7 +103,7 @@ spec: value: "{{ .Values.bot.stopIfUnhealthy }}" ports: - name: http - containerPort: {{ .Values.bot.service.port }} + containerPort: {{ .Values.bot.service.nodePort }} protocol: TCP readinessProbe: exec: @@ -87,7 +113,7 @@ spec: - | curl -s -X POST -H 'content-type: application/json' \ -d '{"jsonrpc":"2.0","method":"pxe_getNodeInfo","params":[],"id":67}' \ - 127.0.0.1:{{ .Values.bot.service.port }} > /tmp/probe_output.txt && \ + 127.0.0.1:{{ .Values.bot.service.nodePort }} > /tmp/probe_output.txt && \ cat /tmp/probe_output.txt && \ grep -q '"enr:-' /tmp/probe_output.txt initialDelaySeconds: {{ .Values.bot.readinessProbe.initialDelaySeconds }} @@ -111,8 +137,8 @@ spec: app: bot ports: - protocol: TCP - port: {{ .Values.bot.service.port }} - targetPort: {{ .Values.bot.service.targetPort }} + port: {{ .Values.bot.service.nodePort }} + targetPort: {{ .Values.bot.service.nodePort }} {{- if and (eq .Values.bot.service.type "NodePort") .Values.bot.service.nodePort }} nodePort: {{ .Values.bot.service.nodePort }} {{- end }} diff --git a/spartan/aztec-network/templates/validator.yaml b/spartan/aztec-network/templates/validator.yaml index d9c3dd9a21b..f5a2fb8ce54 100644 --- a/spartan/aztec-network/templates/validator.yaml +++ b/spartan/aztec-network/templates/validator.yaml @@ -18,17 +18,25 @@ spec: {{- include "aztec-network.selectorLabels" . | nindent 8 }} app: validator spec: + {{- if .Values.network.public }} + hostNetwork: true + {{- include "aztec-network.publicAntiAffinity" . | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "aztec-network.fullname" . }}-node initContainers: + {{- include "aztec-network.p2pSetupContainer" . 
| nindent 8 }} + {{- include "aztec-network.serviceAddressSetupContainer" . | nindent 8 }} - name: wait-for-services - image: {{ .Values.images.curl.image }} + image: {{ .Values.images.aztec.image }} command: - - /bin/sh + - /bin/bash - -c - | + source /shared/config/service-addresses # First check ethereum node until curl -s -X POST -H 'Content-Type: application/json' \ -d '{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":67}' \ - {{ include "aztec-network.ethereumHost" . }} | grep -q reth; do + $ETHEREUM_HOST | grep -q reth; do echo "Waiting for Ethereum node..." sleep 5 done @@ -44,59 +52,47 @@ spec: if [ "{{ .Values.validator.dynamicBootNode }}" = "true" ]; then # Get the list of pod IPs for the validator service - MAX_ATTEMPTS=3 - for i in $(seq 0 $(({{ .Values.validator.replicas }} - 1))); do - PEER_IP="{{ include "aztec-network.fullname" . }}-validator-${i}.{{ include "aztec-network.fullname" . }}-validator" - echo "Checking ${PEER_IP} for /status" - for attempt in $(seq 1 $MAX_ATTEMPTS); do - if curl --silent --head --fail "http://${PEER_IP}:{{ .Values.validator.service.nodePort }}/status" > /dev/null; then - echo "Found responsive peer at ${PEER_IP}" - # the PXE has its node set to the the validator service. - # and that's all we need to know to bootstrap, - # since it will get a good node ENR from whatever node the PXE connects to. - echo "{{ include "aztec-network.pxeUrl" . }}" > /shared/pxe_url - break 2 - fi - sleep 2 - done - done - if [ ! -f /shared/pxe_url ]; then - echo "No responsive peers found after multiple attempts, exiting." - exit 1 - fi + echo "{{ include "aztec-network.pxeUrl" . }}" > /shared/pxe/pxe_url else - until curl --silent --head --fail "{{ include "aztec-network.bootNodeUrl" . }}/status" > /dev/null; do + until curl --silent --head --fail "${BOOT_NODE_HOST}/status" > /dev/null; do echo "Waiting for boot node..." sleep 5 done echo "Boot node is ready!" - echo "{{ include "aztec-network.bootNodeUrl" . 
}}" > /shared/pxe_url + echo "${BOOT_NODE_HOST}" > /shared/pxe/pxe_url fi volumeMounts: - - name: shared-volume - mountPath: /shared + - name: pxe-url + mountPath: /shared/pxe + - name: scripts + mountPath: /scripts + - name: config + mountPath: /shared/config - name: configure-validator-env image: "{{ .Values.images.aztec.image }}" imagePullPolicy: {{ .Values.images.aztec.pullPolicy }} command: - - "/bin/sh" + - "/bin/bash" - "-c" - | + source /shared/config/service-addresses && \ cp /scripts/configure-validator-env.sh /tmp/configure-validator-env.sh && \ chmod +x /tmp/configure-validator-env.sh && \ - /tmp/configure-validator-env.sh "$(cat /shared/pxe_url)" + /tmp/configure-validator-env.sh "$(cat /shared/pxe/pxe_url)" volumeMounts: - - name: shared-volume - mountPath: /shared + - name: contracts-env + mountPath: /shared/contracts + - name: pxe-url + mountPath: /shared/pxe - name: scripts mountPath: /scripts - name: validator-keys mountPath: /app/config readOnly: true + - name: config + mountPath: /shared/config env: - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . 
| quote }} - name: POD_NAME valueFrom: fieldRef: @@ -108,7 +104,13 @@ spec: command: - "/bin/bash" - "-c" - - "sleep 10 && source /shared/contracts.env && env && node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --node --archiver --sequencer" + - | + sleep 10 && \ + source /shared/contracts/contracts.env && \ + source /shared/p2p/p2p-addresses && \ + source /shared/config/service-addresses && \ + env && \ + node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --node --archiver --sequencer startupProbe: httpGet: path: /status @@ -126,14 +128,18 @@ spec: timeoutSeconds: 30 failureThreshold: 3 volumeMounts: - - name: shared-volume - mountPath: /shared + - name: contracts-env + mountPath: /shared/contracts + - name: p2p-addresses + mountPath: /shared/p2p + - name: config + mountPath: /shared/config env: - name: POD_IP valueFrom: fieldRef: fieldPath: status.podIP - - name: PORT + - name: AZTEC_PORT value: "{{ .Values.validator.service.nodePort }}" - name: LOG_LEVEL value: "{{ .Values.validator.logLevel }}" @@ -141,8 +147,6 @@ spec: value: "1" - name: DEBUG value: "{{ .Values.validator.debug }}" - - name: ETHEREUM_HOST - value: {{ include "aztec-network.ethereumHost" . 
| quote }} - name: P2P_ENABLED value: "{{ .Values.validator.p2p.enabled }}" - name: VALIDATOR_DISABLED @@ -157,22 +161,6 @@ spec: value: "{{ .Values.validator.sequencer.enforceTimeTable }}" - name: L1_CHAIN_ID value: "{{ .Values.ethereum.chainId }}" - - name: P2P_TCP_ANNOUNCE_ADDR - {{- if .Values.validator.externalTcpHost }} - value: "{{ .Values.validator.externalTcpHost }}:{{ .Values.validator.service.p2pTcpPort }}" - {{- else }} - value: "$(POD_IP):{{ .Values.validator.service.p2pTcpPort }}" - {{- end }} - - name: P2P_UDP_ANNOUNCE_ADDR - {{- if .Values.validator.externalUdpHost }} - value: "{{ .Values.validator.externalUdpHost }}:{{ .Values.validator.service.p2pUdpPort }}" - {{- else }} - value: "$(POD_IP):{{ .Values.validator.service.p2pUdpPort }}" - {{- end }} - - name: P2P_TCP_LISTEN_ADDR - value: "0.0.0.0:{{ .Values.validator.service.p2pTcpPort }}" - - name: P2P_UDP_LISTEN_ADDR - value: "0.0.0.0:{{ .Values.validator.service.p2pUdpPort }}" - name: OTEL_RESOURCE_ATTRIBUTES value: service.name={{ .Release.Name }},service.namespace={{ .Release.Namespace }},service.version={{ .Chart.AppVersion }},environment={{ .Values.environment | default "production" }} - name: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT @@ -199,24 +187,21 @@ spec: volumes: - name: scripts configMap: - name: {{ include "aztec-network.fullname" . }}-configure-validator-env + name: {{ include "aztec-network.fullname" . }}-scripts - name: validator-keys configMap: name: {{ include "aztec-network.fullname" . }}-validator-keys - - name: shared-volume + - name: contracts-env + emptyDir: {} + - name: p2p-addresses + emptyDir: {} + - name: pxe-url + emptyDir: {} + - name: config emptyDir: {} --- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "aztec-network.fullname" . }}-configure-validator-env - labels: - {{- include "aztec-network.labels" . 
| nindent 4 }} -data: - configure-validator-env.sh: | - {{ .Files.Get "files/config/config-validator-env.sh" | nindent 4 }} ---- -# Headless service for StatefulSet DNS entries +# If this is not a public network, create a headless service for StatefulSet DNS entries +{{ if not .Values.network.public }} apiVersion: v1 kind: Service metadata: @@ -238,55 +223,4 @@ spec: - port: {{ .Values.validator.service.nodePort }} name: node protocol: TCP ---- -{{if .Values.network.public }} -{{- range $i, $e := until (int .Values.validator.replicas) }} -# Service template for TCP load balancers -apiVersion: v1 -kind: Service -metadata: - name: validator-{{ $i }}-lb-tcp - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" - service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" - labels: - {{- include "aztec-network.labels" $ | nindent 4 }} -spec: - type: LoadBalancer - selector: - statefulset.kubernetes.io/pod-name: {{ include "aztec-network.fullname" $ }}-validator-{{ $i }} - {{- include "aztec-network.selectorLabels" $ | nindent 4 }} - app: validator - ports: - - port: {{ $.Values.validator.service.p2pTcpPort }} - name: p2p-tcp - protocol: TCP - - port: {{ $.Values.validator.service.nodePort }} - name: node - protocol: TCP ---- -# Service template for UDP load balancers -apiVersion: v1 -kind: Service -metadata: - name: validator-{{ $i }}-lb-udp - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" - service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" - labels: - {{- include "aztec-network.labels" $ | nindent 4 }} -spec: - type: LoadBalancer - selector: - statefulset.kubernetes.io/pod-name: {{ include "aztec-network.fullname" $ }}-validator-{{ $i }} - {{- include "aztec-network.selectorLabels" $ | nindent 4 }} - app: validator - ports: - - port: {{ 
$.Values.validator.service.p2pUdpPort }} - name: p2p-udp - protocol: UDP ---- -{{- end }} {{ end }} diff --git a/spartan/aztec-network/values.yaml b/spartan/aztec-network/values.yaml index d962a22d1aa..17b879a88e5 100644 --- a/spartan/aztec-network/values.yaml +++ b/spartan/aztec-network/values.yaml @@ -2,6 +2,12 @@ nameOverride: "" fullnameOverride: "" network: + # If true, pods will use host networking. + # This is to ensure that nodes are individually addressable from the outside. + # Under the current configuration, this also means that there must be a unique + # physical node in the cluster for each pod that participates in peer-to-peer. + # I.e. the sum of the number of validator, boot node, and prover nodes must be + # less than the number of physical nodes in the cluster. public: false setupL2Contracts: true @@ -29,8 +35,7 @@ aztec: epochProofClaimWindow: 13 # in L2 slots bootNode: - externalTcpHost: "" - externalUdpHost: "" + externalHost: "" replicas: 1 service: p2pTcpPort: 40400 @@ -71,8 +76,6 @@ validator: # This cannot be used when the network first starts up. # But it must be used if the boot node is killed, and the validator is restarted. 
dynamicBootNode: false - externalTcpHost: "" - externalUdpHost: "" replicas: 1 validatorKeys: - 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 @@ -83,7 +86,7 @@ validator: p2pUdpPort: 40400 nodePort: 8080 logLevel: "debug" - debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:l2_block_stream,-aztec:world-state:database" + debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" sequencer: maxSecondsBetweenBlocks: 0 minTxsPerBlock: 1 @@ -104,9 +107,7 @@ validator: cpu: "200m" proverNode: - public: false - externalTcpHost: "" - externalUdpHost: "" + externalHost: "" replicas: 1 p2pEnabled: true service: @@ -125,15 +126,12 @@ proverNode: pxe: proverEnabled: false - externalHost: "" logLevel: "debug" proverEnable: false debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" replicas: 1 service: - port: 8080 - targetPort: 8080 - nodePort: "" + nodePort: 8081 readinessProbe: initialDelaySeconds: 5 periodSeconds: 10 @@ -164,9 +162,7 @@ bot: stopIfUnhealthy: true service: type: ClusterIP - port: 8080 - targetPort: 8080 - nodePort: "" + nodePort: 8082 readinessProbe: initialDelaySeconds: 5 periodSeconds: 10 @@ -205,13 +201,18 @@ ethereum: storage: "80Gi" proverAgent: + service: + nodePort: 8083 enabled: true replicas: 1 + logLevel: "debug" debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" realProofs: false concurrency: 1 bb: hardwareConcurrency: "" + nodeSelector: {} + resources: {} jobs: deployL1Verifier: diff --git a/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts b/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts index 9253a99fede..c974942d422 100644 --- 
a/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts +++ b/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts @@ -38,12 +38,18 @@ export function describeArchiverDataStore(testName: string, getStore: () => Arch [5, 2, () => blocks.slice(4, 6)], ]; + const makeL1Published = (block: L2Block, l1BlockNumber: number): L1Published => ({ + data: block, + l1: { + blockNumber: BigInt(l1BlockNumber), + blockHash: `0x${l1BlockNumber}`, + timestamp: BigInt(l1BlockNumber * 1000), + }, + }); + beforeEach(() => { store = getStore(); - blocks = times(10, i => ({ - data: L2Block.random(i + 1), - l1: { blockNumber: BigInt(i + 10), blockHash: `0x${i}`, timestamp: BigInt(i * 1000) }, - })); + blocks = times(10, i => makeL1Published(L2Block.random(i + 1), i + 10)); }); describe('addBlocks', () => { @@ -69,6 +75,21 @@ export function describeArchiverDataStore(testName: string, getStore: () => Arch expect(await store.getSynchedL2BlockNumber()).toBe(blockNumber - 1); expect(await store.getBlocks(blockNumber, 1)).toEqual([]); }); + + it('can unwind multiple empty blocks', async () => { + const emptyBlocks = times(10, i => makeL1Published(L2Block.random(i + 1, 0), i + 10)); + await store.addBlocks(emptyBlocks); + expect(await store.getSynchedL2BlockNumber()).toBe(10); + + await store.unwindBlocks(10, 3); + expect(await store.getSynchedL2BlockNumber()).toBe(7); + expect((await store.getBlocks(1, 10)).map(b => b.data.number)).toEqual([1, 2, 3, 4, 5, 6, 7]); + }); + + it('refuses to unwind blocks if the tip is not the last block', async () => { + await store.addBlocks(blocks); + await expect(store.unwindBlocks(5, 1)).rejects.toThrow(/can only unwind blocks from the tip/i); + }); }); describe('getBlocks', () => { diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts index 85bbacc0369..91ae9d578c2 100644 --- 
a/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts +++ b/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts @@ -20,7 +20,7 @@ export class BlockStore { /** Map block number to block data */ #blocks: AztecMap; - /** Map block body hash to block body */ + /** Map block hash to block body */ #blockBodies: AztecMap; /** Stores L1 block number in which the last processed L2 block was included */ @@ -72,7 +72,7 @@ export class BlockStore { void this.#txIndex.set(tx.txHash.toString(), [block.data.number, i]); }); - void this.#blockBodies.set(block.data.body.getTxsEffectsHash().toString('hex'), block.data.body.toBuffer()); + void this.#blockBodies.set(block.data.hash().toString(), block.data.body.toBuffer()); } void this.#lastSynchedL1Block.set(blocks[blocks.length - 1].l1.blockNumber); @@ -92,7 +92,7 @@ export class BlockStore { return this.db.transaction(() => { const last = this.getSynchedL2BlockNumber(); if (from != last) { - throw new Error(`Can only remove from the tip`); + throw new Error(`Can only unwind blocks from the tip (requested ${from} but current tip is ${last})`); } for (let i = 0; i < blocksToUnwind; i++) { @@ -106,7 +106,9 @@ export class BlockStore { block.data.body.txEffects.forEach(tx => { void this.#txIndex.delete(tx.txHash.toString()); }); - void this.#blockBodies.delete(block.data.body.getTxsEffectsHash().toString('hex')); + const blockHash = block.data.hash().toString(); + void this.#blockBodies.delete(blockHash); + this.#log.debug(`Unwound block ${blockNumber} ${blockHash}`); } return true; @@ -154,10 +156,12 @@ export class BlockStore { private getBlockFromBlockStorage(blockStorage: BlockStorage) { const header = Header.fromBuffer(blockStorage.header); const archive = AppendOnlyTreeSnapshot.fromBuffer(blockStorage.archive); - - const blockBodyBuffer = this.#blockBodies.get(header.contentCommitment.txsEffectsHash.toString('hex')); + const blockHash = header.hash().toString(); + const blockBodyBuffer = 
this.#blockBodies.get(blockHash); if (blockBodyBuffer === undefined) { - throw new Error('Body could not be retrieved'); + throw new Error( + `Could not retrieve body for block ${header.globalVariables.blockNumber.toNumber()} ${blockHash}`, + ); } const body = Body.fromBuffer(blockBodyBuffer); diff --git a/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts b/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts index 7f5418f79a6..33d8a09128d 100644 --- a/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts +++ b/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts @@ -201,7 +201,7 @@ export class MemoryArchiverStore implements ArchiverDataStore { public async unwindBlocks(from: number, blocksToUnwind: number): Promise { const last = await this.getSynchedL2BlockNumber(); if (from != last) { - throw new Error(`Can only remove the tip`); + throw new Error(`Can only unwind blocks from the tip (requested ${from} but current tip is ${last})`); } const stopAt = from - blocksToUnwind; diff --git a/yarn-project/bb-prover/src/avm_proving.test.ts b/yarn-project/bb-prover/src/avm_proving.test.ts index 6923c705762..b5580a4d97f 100644 --- a/yarn-project/bb-prover/src/avm_proving.test.ts +++ b/yarn-project/bb-prover/src/avm_proving.test.ts @@ -1,41 +1,16 @@ -import { - AvmCircuitInputs, - AvmCircuitPublicInputs, - Gas, - GlobalVariables, - type PublicFunction, - PublicKeys, - SerializableContractInstance, - VerificationKeyData, -} from '@aztec/circuits.js'; -import { makeContractClassPublic, makeContractInstanceFromClassId } from '@aztec/circuits.js/testing'; -import { AztecAddress } from '@aztec/foundation/aztec-address'; -import { Fr, Point } from '@aztec/foundation/fields'; +import { VerificationKeyData } from '@aztec/circuits.js'; +import { Fr } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; -import { openTmpStore } 
from '@aztec/kv-store/utils'; -import { AvmSimulator, PublicSideEffectTrace, type WorldStateDB } from '@aztec/simulator'; -import { - getAvmTestContractBytecode, - getAvmTestContractFunctionSelector, - initContext, - initExecutionEnvironment, - initPersistableStateManager, - resolveAvmTestContractAssertionMessage, -} from '@aztec/simulator/avm/fixtures'; -import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; -import { MerkleTrees } from '@aztec/world-state'; +import { simulateAvmTestContractGenerateCircuitInputs } from '@aztec/simulator/public/fixtures'; -import { mock } from 'jest-mock-extended'; import fs from 'node:fs/promises'; import { tmpdir } from 'node:os'; import path from 'path'; import { type BBSuccess, BB_RESULT, generateAvmProof, verifyAvmProof } from './bb/execute.js'; -import { getPublicInputs } from './test/test_avm.js'; import { extractAvmVkData } from './verification_key/verification_key_data.js'; const TIMEOUT = 180_000; -const TIMESTAMP = new Fr(99833); describe('AVM WitGen, proof generation and verification', () => { it( @@ -50,77 +25,8 @@ describe('AVM WitGen, proof generation and verification', () => { ); }); -/************************************************************************ - * Helpers - ************************************************************************/ - -/** - * If assertionErrString is set, we expect a (non exceptional halting) revert due to a failing assertion and - * we check that the revert reason error contains this string. However, the circuit must correctly prove the - * execution. 
- */ -const proveAndVerifyAvmTestContract = async ( - functionName: string, - calldata: Fr[] = [], - assertionErrString?: string, -) => { - const startSideEffectCounter = 0; - const functionSelector = getAvmTestContractFunctionSelector(functionName); - calldata = [functionSelector.toField(), ...calldata]; - const globals = GlobalVariables.empty(); - globals.timestamp = TIMESTAMP; - - const worldStateDB = mock(); - // - // Top level contract call - const bytecode = getAvmTestContractBytecode('public_dispatch'); - const fnSelector = getAvmTestContractFunctionSelector('public_dispatch'); - const publicFn: PublicFunction = { bytecode, selector: fnSelector }; - const contractClass = makeContractClassPublic(0, publicFn); - const contractInstance = makeContractInstanceFromClassId(contractClass.id); - - // The values here should match those in `avm_simulator.test.ts` - const instanceGet = new SerializableContractInstance({ - version: 1, - salt: new Fr(0x123), - deployer: new AztecAddress(new Fr(0x456)), - contractClassId: new Fr(0x789), - initializationHash: new Fr(0x101112), - publicKeys: new PublicKeys( - new Point(new Fr(0x131415), new Fr(0x161718), false), - new Point(new Fr(0x192021), new Fr(0x222324), false), - new Point(new Fr(0x252627), new Fr(0x282930), false), - new Point(new Fr(0x313233), new Fr(0x343536), false), - ), - }).withAddress(contractInstance.address); - - worldStateDB.getContractInstance - .mockResolvedValueOnce(contractInstance) - .mockResolvedValueOnce(instanceGet) // test gets deployer - .mockResolvedValueOnce(instanceGet) // test gets class id - .mockResolvedValueOnce(instanceGet) // test gets init hash - .mockResolvedValue(contractInstance); - worldStateDB.getContractClass.mockResolvedValue(contractClass); - - const storageValue = new Fr(5); - worldStateDB.storageRead.mockResolvedValue(Promise.resolve(storageValue)); - - const trace = new PublicSideEffectTrace(startSideEffectCounter); - const telemetry = new NoopTelemetryClient(); - const 
merkleTrees = await (await MerkleTrees.new(openTmpStore(), telemetry)).fork(); - worldStateDB.getMerkleInterface.mockReturnValue(merkleTrees); - const persistableState = initPersistableStateManager({ worldStateDB, trace, merkleTrees, doMerkleOperations: true }); - const environment = initExecutionEnvironment({ - functionSelector, - calldata, - globals, - address: contractInstance.address, - }); - const context = initContext({ env: environment, persistableState }); - - worldStateDB.getBytecode.mockResolvedValue(bytecode); - - const startGas = new Gas(context.machineState.gasLeft.daGas, context.machineState.gasLeft.l2Gas); +async function proveAndVerifyAvmTestContract(functionName: string, calldata: Fr[] = []) { + const avmCircuitInputs = await simulateAvmTestContractGenerateCircuitInputs(functionName, calldata); const internalLogger = createDebugLogger('aztec:avm-proving-test'); const logger = (msg: string, _data?: any) => internalLogger.verbose(msg); @@ -129,39 +35,11 @@ const proveAndVerifyAvmTestContract = async ( const bbPath = path.resolve('../../barretenberg/cpp/build/bin/bb'); const bbWorkingDirectory = await fs.mkdtemp(path.join(tmpdir(), 'bb-')); - // First we simulate (though it's not needed in this simple case). - const simulator = new AvmSimulator(context); - const avmResult = await simulator.execute(); - - if (assertionErrString == undefined) { - expect(avmResult.reverted).toBe(false); - } else { - // Explicit revert when an assertion failed. 
- expect(avmResult.reverted).toBe(true); - expect(avmResult.revertReason).toBeDefined(); - expect(resolveAvmTestContractAssertionMessage(functionName, avmResult.revertReason!, avmResult.output)).toContain( - assertionErrString, - ); - } - - const pxResult = trace.toPublicFunctionCallResult( - environment, - startGas, - /*bytecode=*/ simulator.getBytecode()!, - avmResult.finalize(), - functionName, - ); - - const avmCircuitInputs = new AvmCircuitInputs( - functionName, - /*calldata=*/ context.environment.calldata, - /*publicInputs=*/ getPublicInputs(pxResult), - /*avmHints=*/ pxResult.avmCircuitHints, - /*output*/ AvmCircuitPublicInputs.empty(), - ); - // Then we prove. const proofRes = await generateAvmProof(bbPath, bbWorkingDirectory, avmCircuitInputs, logger); + if (proofRes.status === BB_RESULT.FAILURE) { + internalLogger.error(`Proof generation failed: ${proofRes.reason}`); + } expect(proofRes.status).toEqual(BB_RESULT.SUCCESS); // Then we test VK extraction and serialization. @@ -173,4 +51,4 @@ const proveAndVerifyAvmTestContract = async ( const rawVkPath = path.join(succeededRes.vkPath!, 'vk'); const verificationRes = await verifyAvmProof(bbPath, succeededRes.proofPath!, rawVkPath, logger); expect(verificationRes.status).toBe(BB_RESULT.SUCCESS); -}; +} diff --git a/yarn-project/bb-prover/src/prover/bb_private_kernel_prover.ts b/yarn-project/bb-prover/src/prover/bb_private_kernel_prover.ts index 4de354a0b56..7f16b0292fd 100644 --- a/yarn-project/bb-prover/src/prover/bb_private_kernel_prover.ts +++ b/yarn-project/bb-prover/src/prover/bb_private_kernel_prover.ts @@ -338,7 +338,7 @@ export class BBNativePrivateKernelProver implements PrivateKernelProver { ); if (vkResult.status === BB_RESULT.FAILURE) { - this.log.error(`Failed to generate proof for ${circuitType}${dbgCircuitName}: ${vkResult.reason}`); + this.log.error(`Failed to generate verification key for ${circuitType}${dbgCircuitName}: ${vkResult.reason}`); throw new Error(vkResult.reason); } diff --git 
a/yarn-project/bb-prover/src/prover/bb_prover.ts b/yarn-project/bb-prover/src/prover/bb_prover.ts index d4db2fbd376..f737b093a38 100644 --- a/yarn-project/bb-prover/src/prover/bb_prover.ts +++ b/yarn-project/bb-prover/src/prover/bb_prover.ts @@ -555,7 +555,7 @@ export class BBNativeRollupProver implements ServerCircuitProver { const provingResult = await generateTubeProof(this.config.bbBinaryPath, bbWorkingDirectory, logger.verbose); if (provingResult.status === BB_RESULT.FAILURE) { - logger.error(`Failed to generate proof for tube proof: ${provingResult.reason}`); + logger.error(`Failed to generate proof for tube circuit: ${provingResult.reason}`); throw new ProvingError(provingResult.reason, provingResult, provingResult.retry); } return provingResult; diff --git a/yarn-project/circuit-types/src/interfaces/archiver.test.ts b/yarn-project/circuit-types/src/interfaces/archiver.test.ts index 6e9dacdd574..36947324e24 100644 --- a/yarn-project/circuit-types/src/interfaces/archiver.test.ts +++ b/yarn-project/circuit-types/src/interfaces/archiver.test.ts @@ -230,7 +230,7 @@ describe('ArchiverApiSchema', () => { it('addContractArtifact', async () => { await context.client.addContractArtifact(AztecAddress.random(), artifact); - }); + }, 20_000); it('getContract', async () => { const address = AztecAddress.random(); diff --git a/yarn-project/circuit-types/src/interfaces/aztec-node.test.ts b/yarn-project/circuit-types/src/interfaces/aztec-node.test.ts index 7e33bc9400d..5b32cdd1d1e 100644 --- a/yarn-project/circuit-types/src/interfaces/aztec-node.test.ts +++ b/yarn-project/circuit-types/src/interfaces/aztec-node.test.ts @@ -207,7 +207,7 @@ describe('AztecNodeApiSchema', () => { it('addContractArtifact', async () => { await context.client.addContractArtifact(AztecAddress.random(), artifact); - }); + }, 20_000); it('getLogs(Encrypted)', async () => { const response = await context.client.getLogs(1, 1, LogType.ENCRYPTED); diff --git 
a/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts b/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts index bb411de60c8..46437e0da40 100644 --- a/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts +++ b/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts @@ -10,6 +10,8 @@ import { } from '@aztec/foundation/serialize'; import { type FieldsOf } from '@aztec/foundation/types'; +import { inspect } from 'util'; + import { MAX_ENQUEUED_CALLS_PER_CALL, MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_CALL, @@ -324,4 +326,66 @@ export class PublicCircuitPublicInputs { reader.readField(), ); } + + [inspect.custom]() { + return `PublicCircuitPublicInputs { + callContext: ${inspect(this.callContext)}, + argsHash: ${inspect(this.argsHash)}, + returnsHash: ${inspect(this.returnsHash)}, + noteHashReadRequests: [${this.noteHashReadRequests + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + nullifierReadRequests: [${this.nullifierReadRequests + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + nullifierNonExistentReadRequests: [${this.nullifierNonExistentReadRequests + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + l1ToL2MsgReadRequests: [${this.l1ToL2MsgReadRequests + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + contractStorageUpdateRequests: [${this.contractStorageUpdateRequests + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + contractStorageReads: [${this.contractStorageReads + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + publicCallRequests: [${this.publicCallRequests + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + noteHashes: [${this.noteHashes + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + nullifiers: [${this.nullifiers + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + l2ToL1Msgs: [${this.l2ToL1Msgs + 
.filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + startSideEffectCounter: ${inspect(this.startSideEffectCounter)}, + endSideEffectCounter: ${inspect(this.endSideEffectCounter)}, + startSideEffectCounter: ${inspect(this.startSideEffectCounter)}, + unencryptedLogsHashes: [${this.unencryptedLogsHashes + .filter(x => !x.isEmpty()) + .map(h => inspect(h)) + .join(', ')}]}, + historicalHeader: ${inspect(this.historicalHeader)}, + globalVariables: ${inspect(this.globalVariables)}, + proverAddress: ${inspect(this.proverAddress)}, + revertCode: ${inspect(this.revertCode)}, + startGasLeft: ${inspect(this.startGasLeft)}, + endGasLeft: ${inspect(this.endGasLeft)}, + transactionFee: ${inspect(this.transactionFee)}, + }`; + } } diff --git a/yarn-project/end-to-end/scripts/network_test.sh b/yarn-project/end-to-end/scripts/network_test.sh index 8c9890f4657..6789c4a75b8 100755 --- a/yarn-project/end-to-end/scripts/network_test.sh +++ b/yarn-project/end-to-end/scripts/network_test.sh @@ -167,7 +167,7 @@ docker run --rm --network=host \ -e SPARTAN_DIR="/usr/src/spartan" \ -e NAMESPACE="$NAMESPACE" \ -e HOST_PXE_PORT=$PXE_PORT \ - -e CONTAINER_PXE_PORT=8080 \ + -e CONTAINER_PXE_PORT=8081 \ -e HOST_ETHEREUM_PORT=$ANVIL_PORT \ -e CONTAINER_ETHEREUM_PORT=8545 \ -e DEBUG="aztec:*" \ diff --git a/yarn-project/end-to-end/src/spartan/4epochs.test.ts b/yarn-project/end-to-end/src/spartan/4epochs.test.ts index 29f24e42f56..feef5c9f243 100644 --- a/yarn-project/end-to-end/src/spartan/4epochs.test.ts +++ b/yarn-project/end-to-end/src/spartan/4epochs.test.ts @@ -30,13 +30,13 @@ describe('token transfer test', () => { beforeAll(async () => { if (isK8sConfig(config)) { await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: config.NAMESPACE, containerPort: config.CONTAINER_PXE_PORT, hostPort: config.HOST_PXE_PORT, }); await startPortForward({ - resource: 'svc/spartan-aztec-network-ethereum', + 
resource: `svc/${config.INSTANCE_NAME}-aztec-network-ethereum`, namespace: config.NAMESPACE, containerPort: config.CONTAINER_ETHEREUM_PORT, hostPort: config.HOST_ETHEREUM_PORT, diff --git a/yarn-project/end-to-end/src/spartan/gating-passive.test.ts b/yarn-project/end-to-end/src/spartan/gating-passive.test.ts index 03726310982..8623027e7de 100644 --- a/yarn-project/end-to-end/src/spartan/gating-passive.test.ts +++ b/yarn-project/end-to-end/src/spartan/gating-passive.test.ts @@ -41,13 +41,13 @@ describe('a test that passively observes the network in the presence of network it('survives network chaos', async () => { await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: NAMESPACE, containerPort: CONTAINER_PXE_PORT, hostPort: HOST_PXE_PORT, }); await startPortForward({ - resource: 'svc/spartan-aztec-network-ethereum', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-ethereum`, namespace: NAMESPACE, containerPort: CONTAINER_ETHEREUM_PORT, hostPort: HOST_ETHEREUM_PORT, diff --git a/yarn-project/end-to-end/src/spartan/proving.test.ts b/yarn-project/end-to-end/src/spartan/proving.test.ts index fa1d1e73807..8681f17601c 100644 --- a/yarn-project/end-to-end/src/spartan/proving.test.ts +++ b/yarn-project/end-to-end/src/spartan/proving.test.ts @@ -19,7 +19,7 @@ describe('proving test', () => { let PXE_URL; if (isK8sConfig(config)) { proc = await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: config.NAMESPACE, containerPort: config.CONTAINER_PXE_PORT, hostPort: config.HOST_PXE_PORT, diff --git a/yarn-project/end-to-end/src/spartan/reorg.test.ts b/yarn-project/end-to-end/src/spartan/reorg.test.ts index 8421e7387c5..c315fe05def 100644 --- a/yarn-project/end-to-end/src/spartan/reorg.test.ts +++ b/yarn-project/end-to-end/src/spartan/reorg.test.ts @@ -47,13 +47,13 @@ describe('reorg test', () => { 
it('survives a reorg', async () => { await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: NAMESPACE, containerPort: CONTAINER_PXE_PORT, hostPort: HOST_PXE_PORT, }); await startPortForward({ - resource: 'svc/spartan-aztec-network-ethereum', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-ethereum`, namespace: NAMESPACE, containerPort: CONTAINER_ETHEREUM_PORT, hostPort: HOST_ETHEREUM_PORT, @@ -99,7 +99,7 @@ describe('reorg test', () => { await waitForResourceByLabel({ resource: 'pods', namespace: NAMESPACE, label: 'app=pxe' }); await sleep(30 * 1000); await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: NAMESPACE, containerPort: CONTAINER_PXE_PORT, hostPort: HOST_PXE_PORT, diff --git a/yarn-project/end-to-end/src/spartan/smoke.test.ts b/yarn-project/end-to-end/src/spartan/smoke.test.ts index 43a67dc9d12..dc47f4f97f8 100644 --- a/yarn-project/end-to-end/src/spartan/smoke.test.ts +++ b/yarn-project/end-to-end/src/spartan/smoke.test.ts @@ -18,7 +18,7 @@ describe('smoke test', () => { let PXE_URL; if (isK8sConfig(config)) { await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: config.NAMESPACE, containerPort: config.CONTAINER_PXE_PORT, hostPort: config.HOST_PXE_PORT, diff --git a/yarn-project/end-to-end/src/spartan/transfer.test.ts b/yarn-project/end-to-end/src/spartan/transfer.test.ts index e82f0518fb0..a1a9d7aea9a 100644 --- a/yarn-project/end-to-end/src/spartan/transfer.test.ts +++ b/yarn-project/end-to-end/src/spartan/transfer.test.ts @@ -23,7 +23,7 @@ describe('token transfer test', () => { let PXE_URL; if (isK8sConfig(config)) { await startPortForward({ - resource: 'svc/spartan-aztec-network-pxe', + resource: `svc/${config.INSTANCE_NAME}-aztec-network-pxe`, namespace: config.NAMESPACE, 
containerPort: config.CONTAINER_PXE_PORT, hostPort: config.HOST_PXE_PORT, diff --git a/yarn-project/ivc-integration/package.json b/yarn-project/ivc-integration/package.json index c9fc4dae62e..54c7f409712 100644 --- a/yarn-project/ivc-integration/package.json +++ b/yarn-project/ivc-integration/package.json @@ -11,7 +11,7 @@ "./package.local.json" ], "scripts": { - "build": "yarn clean && yarn generate && rm -rf dest && webpack && tsc -b", + "build": "yarn clean && yarn generate && tsc -b && rm -rf dest && webpack", "clean": "rm -rf ./dest .tsbuildinfo src/types artifacts", "formatting": "run -T prettier --check ./src && run -T eslint ./src", "formatting:fix": "run -T eslint --fix ./src && run -T prettier -w ./src", diff --git a/yarn-project/ivc-integration/package.local.json b/yarn-project/ivc-integration/package.local.json index ecdfdf5167a..f3ab16fc679 100644 --- a/yarn-project/ivc-integration/package.local.json +++ b/yarn-project/ivc-integration/package.local.json @@ -1,6 +1,6 @@ { "scripts": { - "build": "yarn clean && yarn generate && rm -rf dest && webpack && tsc -b", + "build": "yarn clean && yarn generate && tsc -b && rm -rf dest && webpack", "clean": "rm -rf ./dest .tsbuildinfo src/types artifacts", "test:non-browser": "NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --testPathIgnorePatterns=browser --passWithNoTests ", "test:browser": "./run_browser_tests.sh", diff --git a/yarn-project/ivc-integration/src/avm_integration.test.ts b/yarn-project/ivc-integration/src/avm_integration.test.ts index 4bbb4e19633..63ed8fbaa5b 100644 --- a/yarn-project/ivc-integration/src/avm_integration.test.ts +++ b/yarn-project/ivc-integration/src/avm_integration.test.ts @@ -1,21 +1,4 @@ -import { - type BBSuccess, - BB_RESULT, - generateAvmProof, - generateProof, - getPublicInputs, - verifyProof, -} from '@aztec/bb-prover'; -import { - AvmCircuitInputs, - AvmCircuitPublicInputs, - AztecAddress, - Gas, - GlobalVariables, - type PublicFunction, - 
PublicKeys, - SerializableContractInstance, -} from '@aztec/circuits.js'; +import { type BBSuccess, BB_RESULT, generateAvmProof, generateProof, verifyProof } from '@aztec/bb-prover'; import { AVM_PROOF_LENGTH_IN_FIELDS, AVM_PUBLIC_COLUMN_MAX_SIZE, @@ -23,27 +6,14 @@ import { AVM_VERIFICATION_KEY_LENGTH_IN_FIELDS, PUBLIC_CIRCUIT_PUBLIC_INPUTS_LENGTH, } from '@aztec/circuits.js/constants'; -import { makeContractClassPublic, makeContractInstanceFromClassId } from '@aztec/circuits.js/testing'; -import { Fr, Point } from '@aztec/foundation/fields'; +import { Fr } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; import { BufferReader } from '@aztec/foundation/serialize'; -import { openTmpStore } from '@aztec/kv-store/utils'; import { type FixedLengthArray } from '@aztec/noir-protocol-circuits-types/types'; -import { AvmSimulator, PublicSideEffectTrace, type WorldStateDB } from '@aztec/simulator'; -import { - getAvmTestContractBytecode, - getAvmTestContractFunctionSelector, - initContext, - initExecutionEnvironment, - initPersistableStateManager, - resolveAvmTestContractAssertionMessage, -} from '@aztec/simulator/avm/fixtures'; -import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; -import { MerkleTrees } from '@aztec/world-state'; +import { simulateAvmTestContractGenerateCircuitInputs } from '@aztec/simulator/public/fixtures'; import { jest } from '@jest/globals'; import fs from 'fs/promises'; -import { mock } from 'jest-mock-extended'; import { tmpdir } from 'node:os'; import os from 'os'; import path from 'path'; @@ -153,107 +123,22 @@ describe('AVM Integration', () => { }); }); -// Helper - -const proveAvmTestContract = async ( - functionName: string, - calldata: Fr[] = [], - assertionErrString?: string, -): Promise => { - const worldStateDB = mock(); - const startSideEffectCounter = 0; - const functionSelector = getAvmTestContractFunctionSelector(functionName); - calldata = [functionSelector.toField(), 
...calldata]; - const globals = GlobalVariables.empty(); - - // Top level contract call - const bytecode = getAvmTestContractBytecode('public_dispatch'); - const fnSelector = getAvmTestContractFunctionSelector('public_dispatch'); - const publicFn: PublicFunction = { bytecode, selector: fnSelector }; - const contractClass = makeContractClassPublic(0, publicFn); - const contractInstance = makeContractInstanceFromClassId(contractClass.id); - - const instanceGet = new SerializableContractInstance({ - version: 1, - salt: new Fr(0x123), - deployer: AztecAddress.fromNumber(0x456), - contractClassId: new Fr(0x789), - initializationHash: new Fr(0x101112), - publicKeys: new PublicKeys( - new Point(new Fr(0x131415), new Fr(0x161718), false), - new Point(new Fr(0x192021), new Fr(0x222324), false), - new Point(new Fr(0x252627), new Fr(0x282930), false), - new Point(new Fr(0x313233), new Fr(0x343536), false), - ), - }).withAddress(contractInstance.address); - - worldStateDB.getContractInstance - .mockResolvedValueOnce(contractInstance) - .mockResolvedValueOnce(instanceGet) // test gets deployer - .mockResolvedValueOnce(instanceGet) // test gets class id - .mockResolvedValueOnce(instanceGet) // test gets init hash - .mockResolvedValue(contractInstance); - - worldStateDB.getContractClass.mockResolvedValue(contractClass); - - const storageValue = new Fr(5); - worldStateDB.storageRead.mockResolvedValue(storageValue); +async function proveAvmTestContract(functionName: string, calldata: Fr[] = []): Promise { + const avmCircuitInputs = await simulateAvmTestContractGenerateCircuitInputs(functionName, calldata); - const trace = new PublicSideEffectTrace(startSideEffectCounter); - const telemetry = new NoopTelemetryClient(); - const merkleTrees = await (await MerkleTrees.new(openTmpStore(), telemetry)).fork(); - worldStateDB.getMerkleInterface.mockReturnValue(merkleTrees); - const persistableState = initPersistableStateManager({ worldStateDB, trace, merkleTrees, doMerkleOperations: true 
}); - const environment = initExecutionEnvironment({ - functionSelector, - calldata, - globals, - address: contractInstance.address, - }); - const context = initContext({ env: environment, persistableState }); - - worldStateDB.getBytecode.mockResolvedValue(bytecode); - - const startGas = new Gas(context.machineState.gasLeft.daGas, context.machineState.gasLeft.l2Gas); + const internalLogger = createDebugLogger('aztec:avm-proving-test'); + const logger = (msg: string, _data?: any) => internalLogger.verbose(msg); - // Use a simple contract that emits a side effect // The paths for the barretenberg binary and the write path are hardcoded for now. const bbPath = path.resolve('../../barretenberg/cpp/build/bin/bb'); const bbWorkingDirectory = await fs.mkdtemp(path.join(tmpdir(), 'bb-')); - // First we simulate (though it's not needed in this simple case). - const simulator = new AvmSimulator(context); - const avmResult = await simulator.execute(); - - if (assertionErrString == undefined) { - expect(avmResult.reverted).toBe(false); - } else { - // Explicit revert when an assertion failed. - expect(avmResult.reverted).toBe(true); - expect(avmResult.revertReason).toBeDefined(); - expect(resolveAvmTestContractAssertionMessage(functionName, avmResult.revertReason!, avmResult.output)).toContain( - assertionErrString, - ); - } - - const pxResult = trace.toPublicFunctionCallResult( - environment, - startGas, - /*bytecode=*/ simulator.getBytecode()!, - avmResult.finalize(), - functionName, - ); - - const avmCircuitInputs = new AvmCircuitInputs( - functionName, - /*calldata=*/ context.environment.calldata, - /*publicInputs=*/ getPublicInputs(pxResult), - /*avmHints=*/ pxResult.avmCircuitHints, - AvmCircuitPublicInputs.empty(), - ); // Then we prove. 
- const proofRes = await generateAvmProof(bbPath, bbWorkingDirectory, avmCircuitInputs, logger.info); + const proofRes = await generateAvmProof(bbPath, bbWorkingDirectory, avmCircuitInputs, logger); + if (proofRes.status === BB_RESULT.FAILURE) { + internalLogger.error(`Proof generation failed: ${proofRes.reason}`); + } expect(proofRes.status).toEqual(BB_RESULT.SUCCESS); return proofRes as BBSuccess; -}; +} diff --git a/yarn-project/ivc-integration/src/serve.ts b/yarn-project/ivc-integration/src/serve.ts index 0910e64aae2..b16a125a05c 100644 --- a/yarn-project/ivc-integration/src/serve.ts +++ b/yarn-project/ivc-integration/src/serve.ts @@ -5,14 +5,88 @@ import { generate3FunctionTestingIVCStack, proveAndVerifyBrowser } from './index createDebug.enable('*'); const logger = createDebug('aztec:ivc-test'); +/* eslint-disable no-console */ + +// Function to set up the output element and redirect all console output +function setupConsoleOutput() { + const container = document.createElement('div'); + container.style.marginBottom = '10px'; + document.body.appendChild(container); + + const copyButton = document.createElement('button'); + copyButton.innerText = 'Copy Logs to Clipboard'; + copyButton.style.marginBottom = '10px'; + copyButton.addEventListener('click', () => { + const logContent = logContainer.textContent || ''; // Get text content of log container + navigator.clipboard + .writeText(logContent) + .then(() => { + alert('Logs copied to clipboard!'); + }) + .catch(err => { + console.error('Failed to copy logs:', err); + }); + }); + container.appendChild(copyButton); + + const logContainer = document.createElement('pre'); + logContainer.id = 'logOutput'; + logContainer.style.border = '1px solid #ccc'; + logContainer.style.padding = '10px'; + logContainer.style.maxHeight = '400px'; + logContainer.style.overflowY = 'auto'; + container.appendChild(logContainer); + + // Helper to append messages to logContainer + function addLogMessage(message: string) { + 
logContainer.textContent += message + '\n'; + logContainer.scrollTop = logContainer.scrollHeight; // Auto-scroll to the bottom + } + + // Override console methods to output clean logs + const originalLog = console.log; + const originalDebug = console.debug; + + console.log = (...args: any[]) => { + const message = args + .map(arg => + typeof arg === 'string' + ? arg + .replace(/%c/g, '') + .replace(/color:.*?(;|$)/g, '') + .trim() + : arg, + ) + .join(' '); + originalLog.apply(console, args); // Keep original behavior + addLogMessage(message); + }; + + console.debug = (...args: any[]) => { + const message = args + .map(arg => + typeof arg === 'string' + ? arg + .replace(/%c/g, '') + .replace(/color:.*?(;|$)/g, '') + .trim() + : arg, + ) + .join(' '); + originalDebug.apply(console, args); // Keep original behavior + addLogMessage(message); + }; +} + (window as any).proveAndVerifyBrowser = proveAndVerifyBrowser; document.addEventListener('DOMContentLoaded', function () { + setupConsoleOutput(); // Initialize console output capture + const button = document.createElement('button'); button.innerText = 'Run Test'; button.addEventListener('click', async () => { logger(`generating circuit and witness...`); - []; const [bytecodes, witnessStack] = await generate3FunctionTestingIVCStack(); logger(`done. 
proving and verifying...`); const verified = await proveAndVerifyBrowser(bytecodes, witnessStack); diff --git a/yarn-project/simulator/package.json b/yarn-project/simulator/package.json index 31b561d6d6f..2832153c30a 100644 --- a/yarn-project/simulator/package.json +++ b/yarn-project/simulator/package.json @@ -4,7 +4,7 @@ "type": "module", "exports": { ".": "./dest/index.js", - "./avm/fixtures": "./dest/avm/fixtures/index.js" + "./public/fixtures": "./dest/public/fixtures/index.js" }, "typedocOptions": { "entryPoints": [ diff --git a/yarn-project/simulator/src/avm/journal/journal.ts b/yarn-project/simulator/src/avm/journal/journal.ts index 637ec02d7fe..dd62d155e73 100644 --- a/yarn-project/simulator/src/avm/journal/journal.ts +++ b/yarn-project/simulator/src/avm/journal/journal.ts @@ -69,6 +69,8 @@ export class AvmPersistableStateManager { worldStateDB: WorldStateDB, trace: PublicSideEffectTraceInterface, pendingSiloedNullifiers: Fr[], + doMerkleOperations: boolean = false, + merkleTrees?: MerkleTreeWriteOperations, ) { const parentNullifiers = NullifierManager.newWithPendingSiloedNullifiers(worldStateDB, pendingSiloedNullifiers); return new AvmPersistableStateManager( @@ -76,6 +78,8 @@ export class AvmPersistableStateManager { trace, /*publicStorage=*/ new PublicStorage(worldStateDB), /*nullifiers=*/ parentNullifiers.fork(), + doMerkleOperations, + merkleTrees, ); } diff --git a/yarn-project/simulator/src/index.ts b/yarn-project/simulator/src/index.ts index fc5a4653413..f8095f6baf7 100644 --- a/yarn-project/simulator/src/index.ts +++ b/yarn-project/simulator/src/index.ts @@ -4,5 +4,4 @@ export * from './client/index.js'; export * from './common/index.js'; export * from './public/index.js'; export * from './providers/index.js'; -export * from './mocks/index.js'; export * from './stats/index.js'; diff --git a/yarn-project/simulator/src/mocks/fixtures.ts b/yarn-project/simulator/src/mocks/fixtures.ts deleted file mode 100644 index 9c5bd74e06d..00000000000 --- 
a/yarn-project/simulator/src/mocks/fixtures.ts +++ /dev/null @@ -1,72 +0,0 @@ -import { SimulationError } from '@aztec/circuit-types'; -import { ARGS_LENGTH, Fr, Gas } from '@aztec/circuits.js'; -import { makeAztecAddress, makeSelector } from '@aztec/circuits.js/testing'; -import { FunctionType } from '@aztec/foundation/abi'; -import { padArrayEnd } from '@aztec/foundation/collection'; - -import { type EnqueuedPublicCallExecutionResult } from '../public/execution.js'; - -export class PublicExecutionResultBuilder { - private _returnValues: Fr[] = []; - private _reverted = false; - private _revertReason: SimulationError | undefined = undefined; - - constructor() {} - - static empty(basicRevert = false) { - const builder = new PublicExecutionResultBuilder(); - if (basicRevert) { - builder.withReverted(new SimulationError('Simulation failed', [])); - } - return builder; - } - - static fromPublicExecutionRequest({ - returnValues = [new Fr(1n)], - revertReason = undefined, - }: { - returnValues?: Fr[]; - revertReason?: SimulationError; - }): PublicExecutionResultBuilder { - const builder = new PublicExecutionResultBuilder(); - - builder.withReturnValues(...returnValues); - if (revertReason) { - builder.withReverted(revertReason); - } - - return builder; - } - - withReturnValues(...values: Fr[]): PublicExecutionResultBuilder { - this._returnValues.push(...values); - return this; - } - - withReverted(reason: SimulationError): PublicExecutionResultBuilder { - this._reverted = true; - this._revertReason = reason; - return this; - } - - build(overrides: Partial = {}): EnqueuedPublicCallExecutionResult { - return { - endGasLeft: Gas.empty(), - endSideEffectCounter: Fr.ZERO, - returnValues: padArrayEnd(this._returnValues, Fr.ZERO, 4), // TODO(#5450) Need to use the proper return values here - reverted: this._reverted, - revertReason: this._revertReason, - ...overrides, - }; - } -} - -export const makeFunctionCall = ( - name = 'function', - to = makeAztecAddress(30), - selector 
= makeSelector(5), - type = FunctionType.PUBLIC, - args = new Array(ARGS_LENGTH).fill(Fr.ZERO), - isStatic = false, - returnTypes = [], -) => ({ name, to, selector, type, args, isStatic, returnTypes }); diff --git a/yarn-project/simulator/src/mocks/index.ts b/yarn-project/simulator/src/mocks/index.ts deleted file mode 100644 index dd1a464237b..00000000000 --- a/yarn-project/simulator/src/mocks/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './fixtures.js'; diff --git a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts index 6cc11d08fd6..768243bf473 100644 --- a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts +++ b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts @@ -320,7 +320,7 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI public traceNewNoteHash( contractAddress: AztecAddress, noteHash: Fr, - leafIndex: Fr, + leafIndex: Fr = Fr.zero(), path: Fr[] = emptyNoteHashPath(), ) { if (this.noteHashes.length + this.previousSideEffectArrayLengths.noteHashes >= MAX_NOTE_HASHES_PER_TX) { diff --git a/yarn-project/simulator/src/public/fixtures/index.ts b/yarn-project/simulator/src/public/fixtures/index.ts new file mode 100644 index 00000000000..8c0f53fab32 --- /dev/null +++ b/yarn-project/simulator/src/public/fixtures/index.ts @@ -0,0 +1,158 @@ +import { PublicExecutionRequest, Tx } from '@aztec/circuit-types'; +import { + type AvmCircuitInputs, + CallContext, + DEFAULT_GAS_LIMIT, + Gas, + GasFees, + GasSettings, + GlobalVariables, + Header, + MAX_L2_GAS_PER_ENQUEUED_CALL, + PartialPrivateTailPublicInputsForPublic, + PrivateKernelTailCircuitPublicInputs, + type PublicFunction, + PublicKeys, + RollupValidationRequests, + SerializableContractInstance, + TxConstantData, + TxContext, +} from '@aztec/circuits.js'; +import { makeContractClassPublic, makeContractInstanceFromClassId } from 
'@aztec/circuits.js/testing'; +import { AztecAddress } from '@aztec/foundation/aztec-address'; +import { Fr, Point } from '@aztec/foundation/fields'; +import { openTmpStore } from '@aztec/kv-store/utils'; +import { PublicTxSimulator, type WorldStateDB } from '@aztec/simulator'; +import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; +import { MerkleTrees } from '@aztec/world-state'; + +import { mock } from 'jest-mock-extended'; + +import { getAvmTestContractBytecode, getAvmTestContractFunctionSelector } from '../../avm/fixtures/index.js'; + +const TIMESTAMP = new Fr(99833); + +/** + * If assertionErrString is set, we expect a (non exceptional halting) revert due to a failing assertion and + * we check that the revert reason error contains this string. However, the circuit must correctly prove the + * execution. + */ +export async function simulateAvmTestContractGenerateCircuitInputs( + functionName: string, + calldata: Fr[] = [], + assertionErrString?: string, +): Promise { + const sender = AztecAddress.random(); + const functionSelector = getAvmTestContractFunctionSelector(functionName); + calldata = [functionSelector.toField(), ...calldata]; + + const globalVariables = GlobalVariables.empty(); + globalVariables.gasFees = GasFees.default(); + globalVariables.timestamp = TIMESTAMP; + + const worldStateDB = mock(); + const telemetry = new NoopTelemetryClient(); + const merkleTrees = await (await MerkleTrees.new(openTmpStore(), telemetry)).fork(); + worldStateDB.getMerkleInterface.mockReturnValue(merkleTrees); + + // Top level contract call + const bytecode = getAvmTestContractBytecode('public_dispatch'); + const dispatchSelector = getAvmTestContractFunctionSelector('public_dispatch'); + const publicFn: PublicFunction = { bytecode, selector: dispatchSelector }; + const contractClass = makeContractClassPublic(0, publicFn); + const contractInstance = makeContractInstanceFromClassId(contractClass.id); + + // The values here should match those in 
`avm_simulator.test.ts` + const instanceGet = new SerializableContractInstance({ + version: 1, + salt: new Fr(0x123), + deployer: new AztecAddress(new Fr(0x456)), + contractClassId: new Fr(0x789), + initializationHash: new Fr(0x101112), + publicKeys: new PublicKeys( + new Point(new Fr(0x131415), new Fr(0x161718), false), + new Point(new Fr(0x192021), new Fr(0x222324), false), + new Point(new Fr(0x252627), new Fr(0x282930), false), + new Point(new Fr(0x313233), new Fr(0x343536), false), + ), + }).withAddress(contractInstance.address); + worldStateDB.getContractInstance + .mockResolvedValueOnce(contractInstance) + .mockResolvedValueOnce(instanceGet) // test gets deployer + .mockResolvedValueOnce(instanceGet) // test gets class id + .mockResolvedValueOnce(instanceGet) // test gets init hash + .mockResolvedValue(contractInstance); + worldStateDB.getContractClass.mockResolvedValue(contractClass); + worldStateDB.getBytecode.mockResolvedValue(bytecode); + + const storageValue = new Fr(5); + worldStateDB.storageRead.mockResolvedValue(Promise.resolve(storageValue)); + + const simulator = new PublicTxSimulator( + merkleTrees, + worldStateDB, + new NoopTelemetryClient(), + globalVariables, + /*doMerkleOperations=*/ true, + ); + + const callContext = new CallContext(sender, contractInstance.address, dispatchSelector, /*isStaticCall=*/ false); + const executionRequest = new PublicExecutionRequest(callContext, calldata); + + const tx: Tx = createTxForPublicCall(executionRequest); + + const avmResult = await simulator.simulate(tx); + + if (assertionErrString == undefined) { + expect(avmResult.revertCode.isOK()).toBe(true); + } else { + // Explicit revert when an assertion failed. 
+ expect(avmResult.revertCode.isOK()).toBe(false); + expect(avmResult.revertReason).toBeDefined(); + expect(avmResult.revertReason?.getMessage()).toContain(assertionErrString); + } + + const avmCircuitInputs: AvmCircuitInputs = avmResult.avmProvingRequest.inputs; + return avmCircuitInputs; +} + +/** + * Craft a carrier transaction for a public call for simulation by PublicTxSimulator. + */ +export function createTxForPublicCall( + executionRequest: PublicExecutionRequest, + gasUsedByPrivate: Gas = Gas.empty(), + isTeardown: boolean = false, +): Tx { + const callRequest = executionRequest.toCallRequest(); + // use max limits + const gasLimits = new Gas(DEFAULT_GAS_LIMIT, MAX_L2_GAS_PER_ENQUEUED_CALL); + + const forPublic = PartialPrivateTailPublicInputsForPublic.empty(); + // TODO(#9269): Remove this fake nullifier method as we move away from 1st nullifier as hash. + forPublic.nonRevertibleAccumulatedData.nullifiers[0] = Fr.random(); // fake tx nullifier + if (isTeardown) { + forPublic.publicTeardownCallRequest = callRequest; + } else { + forPublic.revertibleAccumulatedData.publicCallRequests[0] = callRequest; + } + + const teardownGasLimits = isTeardown ? gasLimits : Gas.empty(); + const gasSettings = new GasSettings(gasLimits, teardownGasLimits, GasFees.empty()); + const txContext = new TxContext(Fr.zero(), Fr.zero(), gasSettings); + const constantData = new TxConstantData(Header.empty(), txContext, Fr.zero(), Fr.zero()); + + const txData = new PrivateKernelTailCircuitPublicInputs( + constantData, + RollupValidationRequests.empty(), + /*gasUsed=*/ gasUsedByPrivate, + AztecAddress.zero(), + forPublic, + ); + const tx = isTeardown ? 
Tx.newWithTxData(txData, executionRequest) : Tx.newWithTxData(txData); + if (!isTeardown) { + tx.enqueuedPublicFunctionCalls[0] = executionRequest; + } + + return tx; +} diff --git a/yarn-project/simulator/src/public/public_tx_context.ts b/yarn-project/simulator/src/public/public_tx_context.ts index 4cb1390e2dc..65c287fc769 100644 --- a/yarn-project/simulator/src/public/public_tx_context.ts +++ b/yarn-project/simulator/src/public/public_tx_context.ts @@ -70,6 +70,7 @@ export class PublicTxContext { private readonly nonRevertibleAccumulatedDataFromPrivate: PrivateToPublicAccumulatedData, private readonly revertibleAccumulatedDataFromPrivate: PrivateToPublicAccumulatedData, public trace: PublicEnqueuedCallSideEffectTrace, // FIXME(dbanks12): should be private + private doMerkleOperations: boolean, ) { this.log = createDebugLogger(`aztec:public_tx_context`); this.gasUsed = startGasUsed; @@ -80,6 +81,7 @@ export class PublicTxContext { worldStateDB: WorldStateDB, tx: Tx, globalVariables: GlobalVariables, + doMerkleOperations: boolean, ) { const nonRevertibleAccumulatedDataFromPrivate = tx.data.forPublic!.nonRevertibleAccumulatedData; const revertibleAccumulatedDataFromPrivate = tx.data.forPublic!.revertibleAccumulatedData; @@ -113,6 +115,7 @@ export class PublicTxContext { worldStateDB, trace, nonRevertibleNullifiersFromPrivate, + doMerkleOperations, ); return new PublicTxContext( @@ -131,6 +134,7 @@ export class PublicTxContext { tx.data.forPublic!.nonRevertibleAccumulatedData, tx.data.forPublic!.revertibleAccumulatedData, enqueuedCallTrace, + doMerkleOperations, ); } diff --git a/yarn-project/simulator/src/public/public_tx_simulator.ts b/yarn-project/simulator/src/public/public_tx_simulator.ts index a4d31296500..44801eff13f 100644 --- a/yarn-project/simulator/src/public/public_tx_simulator.ts +++ b/yarn-project/simulator/src/public/public_tx_simulator.ts @@ -58,6 +58,7 @@ export class PublicTxSimulator { client: TelemetryClient, private globalVariables: 
GlobalVariables, private realAvmProvingRequests: boolean = true, + private doMerkleOperations: boolean = false, ) { this.log = createDebugLogger(`aztec:public_tx_simulator`); this.metrics = new ExecutorMetrics(client, 'PublicTxSimulator'); @@ -71,7 +72,13 @@ export class PublicTxSimulator { async simulate(tx: Tx): Promise { this.log.verbose(`Processing tx ${tx.getTxHash()}`); - const context = await PublicTxContext.create(this.db, this.worldStateDB, tx, this.globalVariables); + const context = await PublicTxContext.create( + this.db, + this.worldStateDB, + tx, + this.globalVariables, + this.doMerkleOperations, + ); // add new contracts to the contracts db so that their functions may be found and called // TODO(#4073): This is catching only private deployments, when we add public ones, we'll diff --git a/yarn-project/simulator/src/public/side_effect_trace.ts b/yarn-project/simulator/src/public/side_effect_trace.ts index 2ec074470e7..ac1f4a98f16 100644 --- a/yarn-project/simulator/src/public/side_effect_trace.ts +++ b/yarn-project/simulator/src/public/side_effect_trace.ts @@ -199,7 +199,7 @@ export class PublicSideEffectTrace implements PublicSideEffectTraceInterface { public traceNewNoteHash( _contractAddress: AztecAddress, noteHash: Fr, - leafIndex: Fr, + leafIndex: Fr = Fr.zero(), path: Fr[] = emptyNoteHashPath(), ) { if (this.noteHashes.length >= MAX_NOTE_HASHES_PER_TX) { diff --git a/yarn-project/txe/src/oracle/txe_oracle.ts b/yarn-project/txe/src/oracle/txe_oracle.ts index 65811c2d6db..eec5749bf8e 100644 --- a/yarn-project/txe/src/oracle/txe_oracle.ts +++ b/yarn-project/txe/src/oracle/txe_oracle.ts @@ -8,7 +8,6 @@ import { PublicDataWitness, PublicExecutionRequest, SimulationError, - Tx, type UnencryptedL2Log, } from '@aztec/circuit-types'; import { type CircuitWitnessGenerationStats } from '@aztec/circuit-types/stats'; @@ -16,31 +15,23 @@ import { CallContext, type ContractInstance, type ContractInstanceWithAddress, - DEFAULT_GAS_LIMIT, Gas, GasFees, - 
GasSettings, GlobalVariables, Header, IndexedTaggingSecret, type KeyValidationRequest, type L1_TO_L2_MSG_TREE_HEIGHT, - MAX_L2_GAS_PER_ENQUEUED_CALL, NULLIFIER_SUBTREE_HEIGHT, type NULLIFIER_TREE_HEIGHT, type NullifierLeafPreimage, PRIVATE_CONTEXT_INPUTS_LENGTH, type PUBLIC_DATA_TREE_HEIGHT, PUBLIC_DISPATCH_SELECTOR, - PartialPrivateTailPublicInputsForPublic, PrivateContextInputs, - PrivateKernelTailCircuitPublicInputs, PublicDataTreeLeaf, type PublicDataTreeLeafPreimage, type PublicDataWrite, - RollupValidationRequests, - TxConstantData, - TxContext, computeContractClassId, computeTaggingSecret, deriveKeys, @@ -81,6 +72,7 @@ import { toACVMWitness, witnessMapToFields, } from '@aztec/simulator'; +import { createTxForPublicCall } from '@aztec/simulator/public/fixtures'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; import { MerkleTreeSnapshotOperationsFacade, type MerkleTrees } from '@aztec/world-state'; @@ -663,7 +655,6 @@ export class TXE implements TypedOracle { const globalVariables = GlobalVariables.empty(); globalVariables.chainId = this.chainId; - globalVariables.chainId = this.chainId; globalVariables.version = this.version; globalVariables.blockNumber = new Fr(this.blockNumber); globalVariables.gasFees = GasFees.default(); @@ -676,7 +667,10 @@ export class TXE implements TypedOracle { /*realAvmProvingRequests=*/ false, ); - const tx = this.createTxForPublicCall(executionRequest, isTeardown); + // When setting up a teardown call, we tell it that + // private execution used Gas(1, 1) so it can compute a tx fee. + const gasUsedByPrivate = isTeardown ? new Gas(1, 1) : Gas.empty(); + const tx = createTxForPublicCall(executionRequest, gasUsedByPrivate, isTeardown); const result = await simulator.simulate(tx); return Promise.resolve(result); @@ -886,44 +880,4 @@ export class TXE implements TypedOracle { return preimage.value; } - - /** - * Craft a carrier transaction for a public call. 
- */ - private createTxForPublicCall(executionRequest: PublicExecutionRequest, teardown: boolean): Tx { - const callRequest = executionRequest.toCallRequest(); - // use max limits - const gasLimits = new Gas(DEFAULT_GAS_LIMIT, MAX_L2_GAS_PER_ENQUEUED_CALL); - - const forPublic = PartialPrivateTailPublicInputsForPublic.empty(); - // TODO(#9269): Remove this fake nullifier method as we move away from 1st nullifier as hash. - forPublic.nonRevertibleAccumulatedData.nullifiers[0] = Fr.random(); // fake tx nullifier - if (teardown) { - forPublic.publicTeardownCallRequest = callRequest; - } else { - forPublic.revertibleAccumulatedData.publicCallRequests[0] = callRequest; - } - - // When setting up a teardown call, we tell it that - // private execution "used" Gas(1, 1) so it can compute a tx fee. - const gasUsedByPrivate = teardown ? new Gas(1, 1) : Gas.empty(); - const teardownGasLimits = teardown ? gasLimits : Gas.empty(); - const gasSettings = new GasSettings(gasLimits, teardownGasLimits, GasFees.empty()); - const txContext = new TxContext(Fr.zero(), Fr.zero(), gasSettings); - const constantData = new TxConstantData(Header.empty(), txContext, Fr.zero(), Fr.zero()); - - const txData = new PrivateKernelTailCircuitPublicInputs( - constantData, - RollupValidationRequests.empty(), - /*gasUsed=*/ gasUsedByPrivate, - AztecAddress.zero(), - forPublic, - ); - const tx = teardown ? Tx.newWithTxData(txData, executionRequest) : Tx.newWithTxData(txData); - if (!teardown) { - tx.enqueuedPublicFunctionCalls[0] = executionRequest; - } - - return tx; - } }