diff --git a/avm-transpiler/src/main.rs b/avm-transpiler/src/main.rs
index a6c2c118123..384348fc463 100644
--- a/avm-transpiler/src/main.rs
+++ b/avm-transpiler/src/main.rs
@@ -28,7 +28,7 @@ fn main() {
     // Parse original (pre-transpile) contract.
     let contract_json = fs::read_to_string(Path::new(in_contract_artifact_path))
-        .expect(&format!("Unable to read file: {in_contract_artifact_path}"));
+        .unwrap_or_else(|_| panic!("Unable to read file: {in_contract_artifact_path}"));
     let raw_json_obj: serde_json::Value =
         serde_json::from_str(&contract_json).expect(&json_parse_error);
@@ -44,7 +44,7 @@ fn main() {
         Path::new(out_transpiled_artifact_path),
         Path::new(&(out_transpiled_artifact_path.clone() + ".bak")),
     )
-    .expect(&format!("Unable to backup file: {out_transpiled_artifact_path}"));
+    .unwrap_or_else(|_| panic!("Unable to backup file: {out_transpiled_artifact_path}"));
     }

     // Parse json into contract object

diff --git a/avm-transpiler/src/transpile.rs b/avm-transpiler/src/transpile.rs
index da3a6eef0a9..3c8ae4f4240 100644
--- a/avm-transpiler/src/transpile.rs
+++ b/avm-transpiler/src/transpile.rs
@@ -982,7 +982,7 @@ fn handle_storage_write(
     inputs: &Vec,
 ) {
     assert!(inputs.len() == 2);
-    assert!(destinations.len() == 0);
+    assert!(destinations.is_empty());

     let slot_offset_maybe = inputs[0];
     let slot_offset = match slot_offset_maybe {

diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo
index 74e52bf0cab..f3d419b3ba9 100644
--- a/barretenberg/.gitrepo
+++ b/barretenberg/.gitrepo
@@ -6,7 +6,7 @@
 [subrepo]
 	remote = https://github.com/AztecProtocol/barretenberg
 	branch = master
-	commit = 5b08a47947e88a60842bc6883a17a8e8e707db0e
-	parent = ed7c7da57a37d3727e2362d519c37dec0c36a12d
+	commit = a3bef86e97765dd218fc3a8361afeba83e4c955f
+	parent = 2d3e0b672c11eddf0e4e50f00a42a662bdd67c0c
 	method = merge
 	cmdver = 0.4.6

diff --git a/barretenberg/cpp/Earthfile b/barretenberg/cpp/Earthfile
index bc9cbfa2670..73b8cdeb5cd 100644
--- a/barretenberg/cpp/Earthfile
+++ b/barretenberg/cpp/Earthfile
@@ -233,12 +233,9 @@ test-clang-format:
 test:
     ARG hardware_concurrency=""
     # prefetch
-    BUILD +test-binaries
     BUILD +preset-release-assert-test
     BUILD +test-clang-format
     BUILD ./srs_db/+build # prefetch
-    FROM +source
-    COPY --dir +test-binaries/build build
     FROM +preset-release-assert-test
     COPY --dir ./srs_db/+build/. srs_db
     # limit hardware concurrency, if provided

diff --git a/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp
index 2b6e2caf4a0..e16cdbaee35 100644
--- a/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp
+++ b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp
@@ -72,7 +72,7 @@ int pippenger()
     scalar_multiplication::pippenger_runtime_state state(NUM_POINTS);
     std::chrono::steady_clock::time_point time_start = std::chrono::steady_clock::now();
     g1::element result = scalar_multiplication::pippenger_unsafe(
-        { &scalars[0], /*size*/ NUM_POINTS }, reference_string->get_monomial_points(), NUM_POINTS, state);
+        { &scalars[0], /*size*/ NUM_POINTS }, reference_string->get_monomial_points(), state);
     std::chrono::steady_clock::time_point time_end = std::chrono::steady_clock::now();
     std::chrono::microseconds diff = std::chrono::duration_cast(time_end - time_start);
     std::cout << "run time: " << diff.count() << "us" << std::endl;

diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp
index f0b20c0a03c..c2cd8ec68b3 100644
--- a/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp
+++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/commit.bench.cpp
@@ -129,6 +129,22 @@ template void bench_commit_random(::benchmark::State& state)
         key->commit(polynomial);
     }
 }
+// Commit to a polynomial with dense random nonzero entries but NOT our happiest case of an exact power of 2
+// Note: this used to be a 50% regression when merely subtracting 1 from a power-of-2 size.
+template void bench_commit_random_non_power_of_2(::benchmark::State& state)
+{
+    using Fr = typename Curve::ScalarField;
+    auto key = create_commitment_key(MAX_NUM_POINTS);
+
+    const size_t num_points = 1 << state.range(0);
+    auto polynomial = Polynomial(num_points - 1);
+    for (auto& coeff : polynomial) {
+        coeff = Fr::random_element();
+    }
+    for (auto _ : state) {
+        key->commit(polynomial);
+    }
+}

 BENCHMARK(bench_commit_zero)
     ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
@@ -148,6 +164,9 @@ BENCHMARK(bench_commit_sparse_random_preprocessed)
     ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
     ->Unit(benchmark::kMillisecond);
 BENCHMARK(bench_commit_random)
     ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
     ->Unit(benchmark::kMillisecond);
+BENCHMARK(bench_commit_random_non_power_of_2)
+    ->DenseRange(MIN_LOG_NUM_POINTS, MAX_LOG_NUM_POINTS)
+    ->Unit(benchmark::kMillisecond);

 } // namespace bb
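[Editor's note] A sketch, not part of the patch, of why a 2^k - 1 size was slow before this change: the pre-existing pippenger() (see scalar_multiplication.cpp below) peels off the largest power-of-2 slice and recurses on the remainder, so the number of MSM passes equals the number of set bits in the scalar count; padding to a dyadic size pays for one slightly larger pass instead. The snippet only illustrates that counting argument.

#include <bit>
#include <cstdio>

int main()
{
    const unsigned k = 20;
    const size_t non_dyadic = (size_t{ 1 } << k) - 1;
    // Before the change: one recursive pippenger slice per set bit of the size.
    std::printf("passes before: %d\n", std::popcount(non_dyadic)); // 20
    // After the change: commit() rounds up and runs a single MSM of size 2^k.
    std::printf("passes after: 1 (size %zu)\n", size_t{ 1 } << k);
}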
diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp
index 4509ba3e405..746ebbe3786 100644
--- a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp
+++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.hpp
@@ -9,6 +9,7 @@
 #include "barretenberg/common/op_count.hpp"
 #include "barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp"
+#include "barretenberg/numeric/bitop/get_msb.hpp"
 #include "barretenberg/numeric/bitop/pow.hpp"
 #include "barretenberg/polynomials/polynomial_arithmetic.hpp"
 #include "barretenberg/srs/factories/crs_factory.hpp"
@@ -34,6 +35,17 @@ template class CommitmentKey {
     using Fr = typename Curve::ScalarField;
     using Commitment = typename Curve::AffineElement;
     using G1 = typename Curve::AffineElement;
+    static constexpr size_t EXTRA_SRS_POINTS_FOR_ECCVM_IPA = 1;
+
+    static size_t get_num_needed_srs_points(size_t num_points)
+    {
+        // NOTE 1: Currently we must round up internal space for points as our pippenger algorithm (specifically,
+        // pippenger_unsafe_optimized_for_non_dyadic_polys) will use the next power of 2. This is used to simplify the
+        // recursive halving scheme. We do, however, allow the polynomial to not be fully formed. Pippenger internally
+        // will pad 0s into the runtime state.
+        // NOTE 2: We then add one for ECCVM to provide for IPA verification.
+        return numeric::round_up_power_2(num_points) + EXTRA_SRS_POINTS_FOR_ECCVM_IPA;
+    }

   public:
     scalar_multiplication::pippenger_runtime_state pippenger_runtime_state;
@@ -50,9 +62,9 @@
     *
     */
     CommitmentKey(const size_t num_points)
-        : pippenger_runtime_state(num_points)
+        : pippenger_runtime_state(get_num_needed_srs_points(num_points))
         , crs_factory(srs::get_crs_factory())
-        , srs(crs_factory->get_prover_crs(num_points))
+        , srs(crs_factory->get_prover_crs(get_num_needed_srs_points(num_points)))
     {}

     // Note: This constructor is to be used only by Plonk; For Honk the srs lives in the CommitmentKey
@@ -70,16 +82,17 @@
     Commitment commit(std::span polynomial)
     {
         BB_OP_COUNT_TIME();
-        const size_t degree = polynomial.size();
-        if (degree > srs->get_monomial_size()) {
-            info("Attempting to commit to a polynomial of degree ",
-                 degree,
-                 " with an SRS of size ",
+        // See constructor; we must round up the number of used srs points to a power of 2.
+        const size_t consumed_srs = numeric::round_up_power_2(polynomial.size());
+        if (consumed_srs > srs->get_monomial_size()) {
+            info("Attempting to commit to a polynomial that needs ",
+                 consumed_srs,
+                 " points with an SRS of size ",
                  srs->get_monomial_size());
             ASSERT(false);
         }
-        return scalar_multiplication::pippenger_unsafe(
-            polynomial, srs->get_monomial_points(), degree, pippenger_runtime_state);
+        return scalar_multiplication::pippenger_unsafe_optimized_for_non_dyadic_polys(
+            polynomial, { srs->get_monomial_points(), srs->get_monomial_size() }, pippenger_runtime_state);
     };

     /**
@@ -145,8 +158,7 @@
         }

         // Call the version of pippenger which assumes all points are distinct
-        return scalar_multiplication::pippenger_unsafe(
-            scalars, points.data(), scalars.size(), pippenger_runtime_state);
+        return scalar_multiplication::pippenger_unsafe(scalars, points.data(), pippenger_runtime_state);
     }
 };
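[Editor's note] A self-contained check of the sizing rule above. `round_up_power_2_example` and `get_num_needed_srs_points_example` are hypothetical stand-ins that mirror numeric::round_up_power_2 (defined later in this patch, in get_msb.hpp) and the member function just added.

#include <cstddef>

constexpr size_t round_up_power_2_example(size_t n)
{
    size_t p = 1;
    while (p < n) { p <<= 1; } // smallest power of 2 that is >= n, for n >= 1
    return p;
}

constexpr size_t get_num_needed_srs_points_example(size_t num_points)
{
    constexpr size_t EXTRA_SRS_POINTS_FOR_ECCVM_IPA = 1;
    return round_up_power_2_example(num_points) + EXTRA_SRS_POINTS_FOR_ECCVM_IPA;
}

static_assert(get_num_needed_srs_points_example(4) == 5);   // already a power of 2: 4 + 1
static_assert(get_num_needed_srs_points_example(5) == 9);   // 5 rounds up to 8, plus 1 IPA point
static_assert(get_num_needed_srs_points_example(1024) == 1025);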
diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp
index 10e8bfbe80f..fa0ed6e5f96 100644
--- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp
+++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp
@@ -215,13 +215,13 @@
             // Step 6.a (using letters, because doxygen automaticall converts the sublist counters to letters :( )
             // L_i = < a_vec_lo, G_vec_hi > + inner_prod_L * aux_generator
             L_i = bb::scalar_multiplication::pippenger_without_endomorphism_basis_points(
-                {&a_vec[0], /*size*/ round_size}, &G_vec_local[round_size], round_size, ck->pippenger_runtime_state);
+                {&a_vec[0], /*size*/ round_size}, &G_vec_local[round_size], ck->pippenger_runtime_state);
             L_i += aux_generator * inner_prod_L;

             // Step 6.b
             // R_i = < a_vec_hi, G_vec_lo > + inner_prod_R * aux_generator
             R_i = bb::scalar_multiplication::pippenger_without_endomorphism_basis_points(
-                {&a_vec[round_size], /*size*/ round_size}, &G_vec_local[0], round_size, ck->pippenger_runtime_state);
+                {&a_vec[round_size], /*size*/ round_size}, &G_vec_local[0], ck->pippenger_runtime_state);
             R_i += aux_generator * inner_prod_R;

             // Step 6.c
@@ -345,7 +345,7 @@ template class IPA {
         // Step 5.
         // Compute C₀ = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j
         GroupElement LR_sums = bb::scalar_multiplication::pippenger_without_endomorphism_basis_points(
-            {&msm_scalars[0], /*size*/ pippenger_size}, &msm_elements[0], pippenger_size, vk->pippenger_runtime_state);
+            {&msm_scalars[0], /*size*/ pippenger_size}, &msm_elements[0], vk->pippenger_runtime_state);
         GroupElement C_zero = C_prime + LR_sums;

         // Step 6.
@@ -394,7 +394,7 @@ template class IPA {
         // Step 8.
         // Compute G₀
         Commitment G_zero = bb::scalar_multiplication::pippenger_without_endomorphism_basis_points(
-            {&s_vec[0], /*size*/ poly_length}, &G_vec_local[0], poly_length, vk->pippenger_runtime_state);
+            {&s_vec[0], /*size*/ poly_length}, &G_vec_local[0], vk->pippenger_runtime_state);

         // Step 9.
         // Receive a₀ from the prover

diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/runtime_states.hpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/runtime_states.hpp
index 19f686f2f3b..d6b659bd25f 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/runtime_states.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/runtime_states.hpp
@@ -64,7 +64,7 @@ constexpr size_t get_num_rounds(const size_t num_points)
 }

 template struct affine_product_runtime_state {
-    typename Curve::AffineElement* points;
+    const typename Curve::AffineElement* points;
     typename Curve::AffineElement* point_pairs_1;
     typename Curve::AffineElement* point_pairs_2;
     typename Curve::BaseField* scratch_space;
diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
index 329b8138a1e..3bd52afd563 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
@@ -1,3 +1,4 @@
+#include
 #include
 #include
 #include
@@ -12,6 +13,7 @@
 #include "barretenberg/common/op_count.hpp"
 #include "barretenberg/common/thread.hpp"
 #include "barretenberg/common/throw_or_abort.hpp"
+#include "barretenberg/ecc/curves/bn254/bn254.hpp"
 #include "barretenberg/ecc/groups/wnaf.hpp"
 #include "barretenberg/numeric/bitop/get_msb.hpp"
@@ -101,7 +103,7 @@ namespace bb::scalar_multiplication {
 * to use the curve endomorphism for faster scalar multiplication. See below for more details.
 */
 template
-void generate_pippenger_point_table(typename Curve::AffineElement* points,
+void generate_pippenger_point_table(const typename Curve::AffineElement* points,
                                     typename Curve::AffineElement* table,
                                     size_t num_points)
 {
@@ -127,7 +129,7 @@ void generate_pippenger_point_table(typename Curve::AffineElement* points,
 *points we're multiplying. Once we have the number of rounds, `m`, we need to split our scalar into `m` bit-slices.
 *Each pippenger round will work on one bit-slice.
 *
- * Pippenger's algorithm works by, for each round, iterating over the points we're multplying. For each point, we
+ * Pippenger's algorithm works by, for each round, iterating over the points we're multiplying. For each point, we
 *examing the point's scalar multiplier and extract the bit-slice associated with the current pippenger round (we start
 *with the most significant slice). We then use the bit-slice to index a 'bucket', which we add the point into. For
 *example, if the bit slice is 01101, we add the corresponding point into bucket[13].
@@ -193,7 +195,7 @@ void generate_pippenger_point_table(typename Curve::AffineElement* points,
 * @param input_skew_table Pointer to the output array with all skews
 * @param round_counts The number of points in each round
 * @param scalars The pointer to the region with initial scalars that need to be converted into WNAF
- * @param num_initial_points The number of points before the endomorphism split
+ * @param num_initial_points The number of points before the endomorphism split. A rounded up power of 2.
 **/
 template
 void compute_wnaf_states(uint64_t* point_schedule,
@@ -220,30 +222,46 @@ void compute_wnaf_states(uint64_t* point_schedule,
     }

     parallel_for(num_threads, [&](size_t i) {
-        Fr T0;
         uint64_t* wnaf_table = &point_schedule[(2 * i) * num_initial_points_per_thread];
-        const Fr* thread_scalars = &scalars[i * num_initial_points_per_thread];
         bool* skew_table = &input_skew_table[(2 * i) * num_initial_points_per_thread];
-        uint64_t offset = i * num_points_per_thread;
-
-        for (uint64_t j = 0; j < num_initial_points_per_thread; ++j) {
-            T0 = thread_scalars[j].from_montgomery_form();
-            Fr::split_into_endomorphism_scalars(T0, T0, *(Fr*)&T0.data[2]);
-
-            wnaf::fixed_wnaf_with_counts(&T0.data[0],
-                                         &wnaf_table[(j << 1UL)],
-                                         skew_table[j << 1ULL],
+        // Our offsets for this thread
+        const uint64_t point_offset = i * num_points_per_thread;
+        const size_t scalar_offset = i * num_initial_points_per_thread;
+
+        // How many defined scalars are there?
+        const size_t defined_extent = std::min(scalar_offset + num_initial_points_per_thread, scalars.size());
+        const size_t defined_scalars = defined_extent > scalar_offset ? defined_extent - scalar_offset : 0;
+
+        auto wnaf_first_half = [&](const uint64_t* scalar, size_t j) {
+            wnaf::fixed_wnaf_with_counts(scalar,
+                                         &wnaf_table[j * 2],
+                                         skew_table[j * 2],
                                          &thread_round_counts[i][0],
-                                         ((j << 1ULL) + offset) << 32ULL,
+                                         (j * 2ULL + point_offset) << 32ULL,
                                          num_points,
                                          wnaf_bits);
-            wnaf::fixed_wnaf_with_counts(&T0.data[2],
-                                         &wnaf_table[(j << 1UL) + 1],
-                                         skew_table[(j << 1UL) + 1],
+        };
+        auto wnaf_second_half = [&](const uint64_t* scalar, size_t j) {
+            wnaf::fixed_wnaf_with_counts(scalar,
+                                         &wnaf_table[j * 2 + 1],
+                                         skew_table[j * 2 + 1],
                                          &thread_round_counts[i][0],
-                                         ((j << 1UL) + offset + 1) << 32UL,
+                                         (j * 2ULL + point_offset + 1ULL) << 32ULL,
                                          num_points,
                                          wnaf_bits);
+        };
+        for (size_t j = 0; j < defined_scalars; j++) {
+            Fr T0 = scalars[scalar_offset + j].from_montgomery_form();
+            Fr::split_into_endomorphism_scalars(T0, T0, *(Fr*)&T0.data[2]);
+
+            wnaf_first_half(&T0.data[0], j);
+            wnaf_second_half(&T0.data[2], j);
+        }
+        for (size_t j = defined_scalars; j < num_initial_points_per_thread; j++) {
+            // If we are trying to use a non-power-of-2 number of scalars, pad the remaining slots with zero scalars.
+            static const uint64_t PADDING_ZEROES[] = { 0, 0 };
+            wnaf_first_half(PADDING_ZEROES, j);
+            wnaf_second_half(PADDING_ZEROES, j);
         }
     });

@@ -740,7 +758,7 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, b

 template
 typename Curve::Element evaluate_pippenger_rounds(pippenger_runtime_state& state,
-                                                  typename Curve::AffineElement* points,
+                                                  const typename Curve::AffineElement* points,
                                                   const size_t num_points,
                                                   bool handle_edge_cases)
 {
@@ -828,7 +846,7 @@ typename Curve::Element evaluate_pippenger_rounds(pippenger_runtime_state
             if (i == (num_rounds - 1)) {
                 const size_t num_points_per_thread = num_points / num_threads;
                 bool* skew_table = &state.skew_table[j * num_points_per_thread];
-                AffineElement* point_table = &points[j * num_points_per_thread];
+                const AffineElement* point_table = &points[j * num_points_per_thread];
                 AffineElement addition_temporary;
                 for (size_t k = 0; k < num_points_per_thread; ++k) {
                     if (skew_table[k]) {
@@ -873,7 +891,6 @@ typename Curve::Element pippenger_internal(typename Curve::AffineElement* points
 template
 typename Curve::Element pippenger(std::span scalars,
                                   typename Curve::AffineElement* points,
-                                  const size_t num_initial_points,
                                   pippenger_runtime_state& state,
                                   bool handle_edge_cases)
 {
@@ -886,7 +903,7 @@ typename Curve::Element pippenger(std::span s
     // For 8 threads, this neatly coincides with the threshold where Strauss scalar multiplication outperforms
     // Pippenger
     const size_t threshold = get_num_cpus_pow2() * 8;
-
+    size_t num_initial_points = scalars.size();
     if (num_initial_points == 0) {
         Element out = Group::one;
         out.self_set_infinity();
@@ -911,16 +928,39 @@ typename Curve::Element pippenger(std::span s
     Element result = pippenger_internal(points, scalars, num_slice_points, state, handle_edge_cases);

     if (num_slice_points != num_initial_points) {
-        const uint64_t leftover_points = num_initial_points - num_slice_points;
         return result + pippenger(scalars.subspan(num_slice_points),
                                   points + static_cast(num_slice_points * 2),
-                                  static_cast(leftover_points),
                                   state,
                                   handle_edge_cases);
     }
     return result;
 }

+/* @brief Used for commits.
+The main reason this exists on its own is that it makes one assumption:
+the number of points is equal to or larger than #scalars rounded up to the next power of 2.
+The pippenger above can behave poorly with scalar counts that have many bits set. */
+template
+typename Curve::Element pippenger_unsafe_optimized_for_non_dyadic_polys(
+    std::span scalars,
+    std::span points,
+    pippenger_runtime_state& state)
+{
+    BB_OP_COUNT_TIME();
+
+    // Our windowed non-adjacent form algorithm requires that each thread can work on at least 8 points.
+    const size_t threshold = get_num_cpus_pow2() * 8;
+    // Delegate edge-cases to normal pippenger_unsafe().
+    if (scalars.size() <= threshold) {
+        return pippenger_unsafe(scalars, &points[0], state);
+    }
+    // We need a padding of scalars.
+    ASSERT(numeric::round_up_power_2(scalars.size()) <= points.size());
+    // We do not optimize for the small case at all.
+    return pippenger_internal(&points[0], scalars, numeric::round_up_power_2(scalars.size()), state, false);
+}
+
 /**
 * It's pippenger! But this one has go-faster stripes and a prediliction for questionable life choices.
 * We use affine-addition formula in this method, which paradoxically is ~45% faster than the mixed addition
@@ -939,27 +979,25 @@
 template
 typename Curve::Element pippenger_unsafe(std::span scalars,
                                          typename Curve::AffineElement* points,
-                                         const size_t num_initial_points,
                                          pippenger_runtime_state& state)
 {
-    return pippenger(scalars, points, num_initial_points, state, false);
+    return pippenger(scalars, points, state, false);
 }

 template
 typename Curve::Element pippenger_without_endomorphism_basis_points(
     std::span scalars,
     typename Curve::AffineElement* points,
-    const size_t num_initial_points,
     pippenger_runtime_state& state)
 {
-    std::vector G_mod(num_initial_points * 2);
-    bb::scalar_multiplication::generate_pippenger_point_table(points, &G_mod[0], num_initial_points);
-    return pippenger(scalars, &G_mod[0], num_initial_points, state, false);
+    std::vector G_mod(scalars.size() * 2);
+    bb::scalar_multiplication::generate_pippenger_point_table(points, &G_mod[0], scalars.size());
+    return pippenger(scalars, &G_mod[0], state, false);
 }

 // Explicit instantiation
 // BN254
-template void generate_pippenger_point_table(curve::BN254::AffineElement* points,
+template void generate_pippenger_point_table(const curve::BN254::AffineElement* points,
                                              curve::BN254::AffineElement* table,
                                              size_t num_points);

@@ -967,7 +1005,7 @@ template uint32_t construct_addition_chains(affine_product_runtime
                                             bool empty_bucket_counts = true);

 template void add_affine_points(curve::BN254::AffineElement* points,
-                                const size_t num_points,
+                                size_t num_points,
                                 curve::BN254::BaseField* scratch_space);

 template void add_affine_points_with_edge_cases(curve::BN254::AffineElement* points,
@@ -984,7 +1022,7 @@ template curve::BN254::Element pippenger_internal(curve::BN254::Af
                                                   bool handle_edge_cases);

 template curve::BN254::Element evaluate_pippenger_rounds(pippenger_runtime_state& state,
-                                                         curve::BN254::AffineElement* points,
+                                                         const curve::BN254::AffineElement* points,
                                                          const size_t num_points,
                                                          bool handle_edge_cases = false);

@@ -994,23 +1032,25 @@ template curve::BN254::AffineElement* reduce_buckets(affine_produc

 template curve::BN254::Element pippenger(std::span scalars,
                                          curve::BN254::AffineElement* points,
-                                         const size_t num_points,
                                          pippenger_runtime_state& state,
                                          bool handle_edge_cases = true);

 template curve::BN254::Element pippenger_unsafe(std::span scalars,
                                                 curve::BN254::AffineElement* points,
-                                                const size_t num_initial_points,
                                                 pippenger_runtime_state& state);

+template curve::BN254::Element pippenger_unsafe_optimized_for_non_dyadic_polys(
+    std::span scalars,
+    std::span points,
+    pippenger_runtime_state& state);
+
 template curve::BN254::Element pippenger_without_endomorphism_basis_points(
     std::span scalars,
     curve::BN254::AffineElement* points,
-    const size_t num_initial_points,
     pippenger_runtime_state& state);

 // Grumpkin
-template void generate_pippenger_point_table(curve::Grumpkin::AffineElement* points,
+template void generate_pippenger_point_table(const curve::Grumpkin::AffineElement* points,
                                              curve::Grumpkin::AffineElement* table,
                                              size_t num_points);

@@ -1037,7 +1077,7 @@ template curve::Grumpkin::Element pippenger_internal(

 template curve::Grumpkin::Element evaluate_pippenger_rounds(
     pippenger_runtime_state& state,
-    curve::Grumpkin::AffineElement* points,
+    const curve::Grumpkin::AffineElement* points,
     const size_t num_points,
     bool handle_edge_cases = false);

@@ -1046,20 +1086,21 @@ template curve::Grumpkin::AffineElement* reduce_buckets(

 template curve::Grumpkin::Element pippenger(std::span scalars,
                                             curve::Grumpkin::AffineElement* points,
-                                            const size_t num_points,
                                             pippenger_runtime_state& state,
                                             bool handle_edge_cases = true);

 template curve::Grumpkin::Element pippenger_unsafe(
     std::span scalars,
     curve::Grumpkin::AffineElement* points,
-    const size_t num_initial_points,
+    pippenger_runtime_state& state);
+
+template curve::Grumpkin::Element pippenger_unsafe_optimized_for_non_dyadic_polys(
+    std::span scalars,
+    std::span points,
     pippenger_runtime_state& state);

 template curve::Grumpkin::Element pippenger_without_endomorphism_basis_points(
     std::span scalars,
     curve::Grumpkin::AffineElement* points,
-    const size_t num_initial_points,
     pippenger_runtime_state& state);

 } // namespace bb::scalar_multiplication
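[Editor's note] A standalone sketch of the per-thread padding arithmetic introduced in compute_wnaf_states above (the sizes here are illustrative): each thread owns a fixed slice of the power-of-2 padded scalar range, and slots past scalars.size() fall back to the zero scalar.

#include <algorithm>
#include <cassert>
#include <cstddef>

int main()
{
    const size_t num_initial_points = 8; // scalar slots after rounding up to a power of 2
    const size_t scalars_size = 5;       // scalars actually provided by the caller
    const size_t num_threads = 4;
    const size_t per_thread = num_initial_points / num_threads; // 2 slots per thread

    size_t total_defined = 0;
    for (size_t i = 0; i < num_threads; ++i) {
        const size_t scalar_offset = i * per_thread;
        // Same min/subtract logic as the patch: clamp each slice to the defined scalars.
        const size_t defined_extent = std::min(scalar_offset + per_thread, scalars_size);
        const size_t defined = defined_extent > scalar_offset ? defined_extent - scalar_offset : 0;
        total_defined += defined; // threads see 2, 2, 1, 0 defined scalars; the rest are zero pads
    }
    assert(total_defined == scalars_size);
    return 0;
}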
diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp
index f178023b325..a4604f652ab 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp
@@ -93,7 +93,7 @@ void compute_wnaf_states(uint64_t* point_schedule,
                          size_t num_initial_points);

 template
-void generate_pippenger_point_table(typename Curve::AffineElement* points,
+void generate_pippenger_point_table(const typename Curve::AffineElement* points,
                                     typename Curve::AffineElement* table,
                                     size_t num_points);

@@ -142,7 +142,7 @@ typename Curve::Element pippenger_internal(typename Curve::AffineElement* points

 template
 typename Curve::Element evaluate_pippenger_rounds(pippenger_runtime_state& state,
-                                                  typename Curve::AffineElement* points,
+                                                  const typename Curve::AffineElement* points,
                                                   size_t num_points,
                                                   bool handle_edge_cases = false);

@@ -154,21 +154,26 @@ typename Curve::AffineElement* reduce_buckets(affine_product_runtime_state

 template
 typename Curve::Element pippenger(std::span scalars,
                                   typename Curve::AffineElement* points,
-                                  size_t num_initial_points,
                                   pippenger_runtime_state& state,
                                   bool handle_edge_cases = true);

 template
 typename Curve::Element pippenger_unsafe(std::span scalars,
                                          typename Curve::AffineElement* points,
-                                         size_t num_initial_points,
                                          pippenger_runtime_state& state);

 template
 typename Curve::Element pippenger_without_endomorphism_basis_points(
     std::span scalars,
     typename Curve::AffineElement* points,
-    size_t num_initial_points,
+    pippenger_runtime_state& state);
+
+// NOTE: pippenger_unsafe_optimized_for_non_dyadic_polys requires the SRS to contain at least
+// round_up_power_2(#scalars) points.
+template
+typename Curve::Element pippenger_unsafe_optimized_for_non_dyadic_polys(
+    std::span scalars,
+    std::span points,
     pippenger_runtime_state& state);

 // Explicit instantiation
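[Editor's note] For reviewers, the call-site change that repeats throughout this patch, with hypothetical names: the scalar count now travels inside the span argument, so the separate count parameter is gone and can no longer disagree with the span's size.

// before this patch:
//   auto r = scalar_multiplication::pippenger_unsafe({ &scalars[0], n }, points, n, state);
// after this patch:
//   auto r = scalar_multiplication::pippenger_unsafe({ &scalars[0], n }, points, state);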
diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp
index 6ecd4a7b5f9..418e89d0de7 100644
--- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp
+++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp
@@ -130,7 +130,7 @@ template class ProvingKey_ {
     {
         if (commitment_key == nullptr) {
             ZoneScopedN("init commitment key");
-            this->commitment_key = std::make_shared(circuit_size + 1);
+            this->commitment_key = std::make_shared(circuit_size);
         } else {
             // Don't create another commitment key if we already have one
             this->commitment_key = commitment_key;
         }
@@ -439,7 +439,7 @@ MegaRecursiveFlavor_,
 TranslatorRecursiveFlavor_,
 TranslatorRecursiveFlavor_,
 TranslatorRecursiveFlavor_,
-ECCVMRecursiveFlavor_,
+ECCVMRecursiveFlavor_,
 AvmRecursiveFlavor_>;

 template concept IsECCVMRecursiveFlavor = IsAnyOf>;

diff --git a/barretenberg/cpp/src/barretenberg/numeric/bitop/get_msb.hpp b/barretenberg/cpp/src/barretenberg/numeric/bitop/get_msb.hpp
index fc456a68a8e..3bf24ca9e70 100644
--- a/barretenberg/cpp/src/barretenberg/numeric/bitop/get_msb.hpp
+++ b/barretenberg/cpp/src/barretenberg/numeric/bitop/get_msb.hpp
@@ -43,4 +43,10 @@ template constexpr inline T get_msb(const T in)
     return (sizeof(T) <= 4) ? get_msb32(in) : get_msb64(in);
 }

+template constexpr inline T round_up_power_2(const T in)
+{
+    auto lower_bound = T(1) << get_msb(in);
+    return (lower_bound == in || lower_bound == 1) ? in : lower_bound * 2;
+}
+
 } // namespace bb::numeric
\ No newline at end of file

diff --git a/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp b/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp
index dd8c4d2a6f3..cea9ea412d2 100644
--- a/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp
+++ b/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp
@@ -182,8 +182,8 @@ template bool VerifierBase::verify

     g1::element P[2];

-    P[0] = bb::scalar_multiplication::pippenger(
-        { &scalars[0], num_elements }, &elements[0], num_elements, state);
+    P[0] =
+        bb::scalar_multiplication::pippenger({ &scalars[0], /*size*/ num_elements }, &elements[0], state);
     P[1] = -(g1::element(PI_Z_OMEGA) * separator_challenge + PI_Z);

     if (key->contains_recursive_proof) {

diff --git a/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp b/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp
index 427108062ec..87c1037625e 100644
--- a/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp
@@ -36,7 +36,6 @@ plonk::Verifier generate_verifier(std::shared_ptr circuit_proving_k
         commitments[i] = g1::affine_element(scalar_multiplication::pippenger(
             { poly_coefficients[i].get(), circuit_proving_key->circuit_size },
             circuit_proving_key->reference_string->get_monomial_points(),
-            circuit_proving_key->circuit_size,
             state));
     }
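[Editor's note] A few compile-time checks of round_up_power_2 as just defined in get_msb.hpp; per the diff above, both it and get_msb are constexpr, and the extra (lower_bound == 1) branch guards the smallest inputs (where get_msb(in) == 0).

#include "barretenberg/numeric/bitop/get_msb.hpp"
#include <cstddef>

static_assert(bb::numeric::round_up_power_2(size_t(1)) == 1);
static_assert(bb::numeric::round_up_power_2(size_t(3)) == 4);
static_assert(bb::numeric::round_up_power_2(size_t(8)) == 8);
static_assert(bb::numeric::round_up_power_2(size_t(9)) == 16);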
diff --git a/barretenberg/cpp/src/barretenberg/plonk/work_queue/work_queue.cpp b/barretenberg/cpp/src/barretenberg/plonk/work_queue/work_queue.cpp
index e6308a8343f..2b5eef768b5 100644
--- a/barretenberg/cpp/src/barretenberg/plonk/work_queue/work_queue.cpp
+++ b/barretenberg/cpp/src/barretenberg/plonk/work_queue/work_queue.cpp
@@ -214,7 +214,7 @@ void work_queue::process_queue()
             // Run pippenger multi-scalar multiplication.
             auto runtime_state = bb::scalar_multiplication::pippenger_runtime_state(msm_size);
             bb::g1::affine_element result(bb::scalar_multiplication::pippenger_unsafe(
-                { item.mul_scalars.get(), msm_size }, srs_points, msm_size, runtime_state));
+                { item.mul_scalars.get(), msm_size }, srs_points, runtime_state));

             transcript->add_element(item.tag, result.to_buffer());

diff --git a/barretenberg/cpp/src/barretenberg/polynomials/legacy_polynomials.bench.cpp b/barretenberg/cpp/src/barretenberg/polynomials/legacy_polynomials.bench.cpp
index 716c79d8600..abad771e626 100644
--- a/barretenberg/cpp/src/barretenberg/polynomials/legacy_polynomials.bench.cpp
+++ b/barretenberg/cpp/src/barretenberg/polynomials/legacy_polynomials.bench.cpp
@@ -124,7 +124,7 @@ void pippenger_bench(State& state) noexcept
         state.ResumeTiming();
         // uint64_t before = rdtsc();
         scalar_multiplication::pippenger(
-            { &globals.scalars[0], /*size*/ num_points }, &globals.monomials[0], num_points, run_state);
+            { &globals.scalars[0], /*size*/ num_points }, &globals.monomials[0], run_state);
         // uint64_t after = rdtsc();
         // count += (after - before);
         // ++i;
@@ -169,23 +169,23 @@ void new_plonk_scalar_multiplications_bench(State& state) noexcept
         uint64_t before = rdtsc();

         g1::element a = scalar_multiplication::pippenger(
-            { &globals.scalars[0], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[0], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element b = scalar_multiplication::pippenger(
-            { &globals.scalars[1], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[1], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element c = scalar_multiplication::pippenger(
-            { &globals.scalars[2], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[2], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element d = scalar_multiplication::pippenger(
-            { &globals.scalars[3], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[3], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element e = scalar_multiplication::pippenger(
-            { &globals.scalars[4], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[4], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element f = scalar_multiplication::pippenger(
-            { &globals.scalars[5], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[5], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element g = scalar_multiplication::pippenger(
-            { &globals.scalars[6], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[6], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element h = scalar_multiplication::pippenger(
-            { &globals.scalars[7], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[7], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         g1::element i = scalar_multiplication::pippenger(
-            { &globals.scalars[8], /*size*/ MAX_GATES }, &globals.monomials[0], MAX_GATES, run_state);
+            { &globals.scalars[8], /*size*/ MAX_GATES }, &globals.monomials[0], run_state);
         uint64_t after = rdtsc();
         count += (after - before);
         ++k;
diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/file_crs_factory.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/file_crs_factory.cpp
index 823c3ebdc0c..bb4ddcf5f42 100644
--- a/barretenberg/cpp/src/barretenberg/srs/factories/file_crs_factory.cpp
+++ b/barretenberg/cpp/src/barretenberg/srs/factories/file_crs_factory.cpp
@@ -55,8 +55,7 @@ FileCrsFactory::FileCrsFactory(std::string path, size_t initial_degree)
 template
 std::shared_ptr> FileCrsFactory::get_prover_crs(size_t degree)
 {
-    if (degree != degree_ || !prover_crs_) {
-        ZoneScopedN("get_prover_crs");
+    if (degree_ < degree || !prover_crs_) {
         prover_crs_ = std::make_shared>(degree, path_);
         degree_ = degree;
     }
@@ -66,7 +65,7 @@ std::shared_ptr> FileCrsFactory::get
 template
 std::shared_ptr> FileCrsFactory::get_verifier_crs(size_t degree)
 {
-    if (degree != degree_ || !verifier_crs_) {
+    if (degree_ < degree || !verifier_crs_) {
         verifier_crs_ = std::make_shared>(path_, degree);
         degree_ = degree;
     }

diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/mem_grumpkin_crs_factory.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/mem_grumpkin_crs_factory.cpp
index bbec0dccf0d..8d0741cc920 100644
--- a/barretenberg/cpp/src/barretenberg/srs/factories/mem_grumpkin_crs_factory.cpp
+++ b/barretenberg/cpp/src/barretenberg/srs/factories/mem_grumpkin_crs_factory.cpp
@@ -1,5 +1,6 @@
 #include "mem_grumpkin_crs_factory.hpp"
 #include "../io.hpp"
+#include "barretenberg/common/throw_or_abort.hpp"
 #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp"
 #include "barretenberg/ecc/scalar_multiplication/point_table.hpp"
 #include "barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp"
@@ -42,13 +43,19 @@ MemGrumpkinCrsFactory::MemGrumpkinCrsFactory(std::vector
     (points))
 {}

-std::shared_ptr> MemGrumpkinCrsFactory::get_prover_crs(size_t)
+std::shared_ptr> MemGrumpkinCrsFactory::get_prover_crs(size_t degree)
 {
+    if (prover_crs_->get_monomial_size() < degree) {
+        throw_or_abort("prover trying to get too many points in MemGrumpkinCrsFactory!");
+    }
     return prover_crs_;
 }

-std::shared_ptr> MemGrumpkinCrsFactory::get_verifier_crs(size_t)
+std::shared_ptr> MemGrumpkinCrsFactory::get_verifier_crs(size_t degree)
 {
+    if (prover_crs_->get_monomial_size() < degree) {
+        throw_or_abort("verifier trying to get too many points in MemGrumpkinCrsFactory!");
+    }
     return verifier_crs_;
 }

diff --git a/barretenberg/cpp/src/barretenberg/srs/io.hpp b/barretenberg/cpp/src/barretenberg/srs/io.hpp
index 893f5bdd443..bd0b8a1444c 100644
--- a/barretenberg/cpp/src/barretenberg/srs/io.hpp
+++ b/barretenberg/cpp/src/barretenberg/srs/io.hpp
@@ -87,7 +87,8 @@ template class IO {
         file.read((char*)&manifest, sizeof(Manifest));
         if (!file) {
             ptrdiff_t read = file.gcount();
-            throw_or_abort(format("Only read ", read, " bytes from file but expected ", sizeof(Manifest), "."));
+            throw_or_abort(format(
+                "Only read ", read, " bytes from manifest file ", filename, " but expected ", sizeof(Manifest), "."));
         }
         file.close();
@@ -130,7 +131,8 @@
         file.read(buffer, (int)size);
         if (!file) {
             ptrdiff_t read = file.gcount();
-            throw_or_abort(format("Only read ", read, " bytes from file but expected ", size, "."));
+            throw_or_abort(
+                format("Only read ", read, " bytes from transcript file ", filename, " but expected ", size, "."));
         }
         file.close();
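[Editor's note] Sketch of the FileCrsFactory caching change above, with hypothetical sizes: the guard went from `degree != degree_` to `degree_ < degree`, so a cached CRS is now reused for any request it already covers instead of being reloaded on every size change.

// factory.get_prover_crs(1 << 20); // loads and caches 2^20 points from disk
// factory.get_prover_crs(1 << 10); // before: reloads (size differs); after: serves from cache
// factory.get_prover_crs(1 << 21); // still reloads: the cached CRS is too small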
diff --git a/barretenberg/cpp/src/barretenberg/srs/scalar_multiplication.test.cpp b/barretenberg/cpp/src/barretenberg/srs/scalar_multiplication.test.cpp
index 660339f674e..247911339ae 100644
--- a/barretenberg/cpp/src/barretenberg/srs/scalar_multiplication.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/srs/scalar_multiplication.test.cpp
@@ -681,8 +681,7 @@ TYPED_TEST(ScalarMultiplicationTests, OversizedInputs)
     }

     scalar_multiplication::pippenger_runtime_state state(target_degree);
-    Element first =
-        scalar_multiplication::pippenger({ scalars, /*size*/ target_degree }, monomials, target_degree, state);
+    Element first = scalar_multiplication::pippenger({ scalars, /*size*/ target_degree }, monomials, state);
     first = first.normalize();

     for (size_t i = 0; i < target_degree; ++i) {
@@ -690,8 +689,7 @@ TYPED_TEST(ScalarMultiplicationTests, OversizedInputs)
     }

     scalar_multiplication::pippenger_runtime_state state_2(target_degree);
-    Element second =
-        scalar_multiplication::pippenger({ scalars, /*size*/ target_degree }, monomials, target_degree, state_2);
+    Element second = scalar_multiplication::pippenger({ scalars, /*size*/ target_degree }, monomials, state_2);
     second = second.normalize();

     EXPECT_EQ((first.z == second.z), true);
@@ -734,8 +732,7 @@ TYPED_TEST(ScalarMultiplicationTests, UndersizedInputs)

     scalar_multiplication::pippenger_runtime_state state(num_points);

-    Element result =
-        scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, num_points, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, state);
     result = result.normalize();

     aligned_free(scalars);
@@ -772,8 +769,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerSmall)
     scalar_multiplication::generate_pippenger_point_table(points, points, num_points);
     scalar_multiplication::pippenger_runtime_state state(num_points);

-    Element result =
-        scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, num_points, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, state);
     result = result.normalize();

     aligned_free(scalars);
@@ -812,8 +808,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerEdgeCaseDbl)
     }
     scalar_multiplication::generate_pippenger_point_table(points, points, num_points);
     scalar_multiplication::pippenger_runtime_state state(num_points);
-    Element result =
-        scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, num_points, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, state);
     result = result.normalize();

     aligned_free(scalars);
@@ -871,8 +866,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerShortInputs)
     scalar_multiplication::generate_pippenger_point_table(points.get(), points.get(), num_points);
     scalar_multiplication::pippenger_runtime_state state(num_points);

-    Element result =
-        scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points.get(), num_points, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points.get(), state);
     result = result.normalize();

     aligned_free(scalars);
@@ -908,8 +902,8 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerUnsafe)
     scalar_multiplication::generate_pippenger_point_table(points.get(), points.get(), num_points);
     scalar_multiplication::pippenger_runtime_state state(num_points);

-    Element result = scalar_multiplication::pippenger_unsafe(
-        { scalars, /*size*/ num_points }, points.get(), num_points, state);
+    Element result =
+        scalar_multiplication::pippenger_unsafe({ scalars, /*size*/ num_points }, points.get(), state);
     result = result.normalize();

     aligned_free(scalars);
@@ -966,8 +960,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerUnsafeShortInputs)
     scalar_multiplication::generate_pippenger_point_table(points, points, num_points);
     scalar_multiplication::pippenger_runtime_state state(num_points);

-    Element result =
-        scalar_multiplication::pippenger_unsafe({ scalars, /*size*/ num_points }, points, num_points, state);
+    Element result = scalar_multiplication::pippenger_unsafe({ scalars, /*size*/ num_points }, points, state);
     result = result.normalize();

     aligned_free(scalars);
@@ -1004,8 +997,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerOne)
     scalar_multiplication::generate_pippenger_point_table(points, points, num_points);
     scalar_multiplication::pippenger_runtime_state state(num_points);

-    Element result =
-        scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, num_points, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ num_points }, points, state);
     result = result.normalize();

     aligned_free(scalars);
@@ -1026,7 +1018,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerZeroPoints)
     AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * (2 + 1));

     scalar_multiplication::pippenger_runtime_state state(0);
-    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ 0 }, points, 0, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ 0 }, points, state);

     aligned_free(scalars);
     aligned_free(points);
@@ -1051,7 +1043,7 @@ TYPED_TEST(ScalarMultiplicationTests, PippengerMulByZero)
     scalar_multiplication::generate_pippenger_point_table(points, points, 1);
     scalar_multiplication::pippenger_runtime_state state(1);

-    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ 1 }, points, 1, state);
+    Element result = scalar_multiplication::pippenger({ scalars, /*size*/ 1 }, points, state);

     aligned_free(scalars);
     aligned_free(points);

diff --git a/l1-contracts/src/core/Rollup.sol b/l1-contracts/src/core/Rollup.sol
index 93761eb3ac7..e4bc8ebb589 100644
--- a/l1-contracts/src/core/Rollup.sol
+++ b/l1-contracts/src/core/Rollup.sol
@@ -67,10 +67,6 @@ contract Rollup is Leonidas, IRollup, ITestRollup {

   bytes32 public vkTreeRoot;

-  // @note This should not exists, but we have it now to ensure we will not be killing the devnet with our
-  // timeliness requirements.
-  bool public isDevNet = Constants.IS_DEV_NET == 1;
-
   // @note Assume that all blocks up to this value are automatically proven. Speeds up bootstrapping.
   //       Testing only. This should be removed eventually.
   uint256 private assumeProvenUntilBlockNumber;
@@ -111,12 +107,10 @@ contract Rollup is Leonidas, IRollup, ITestRollup {
   * @notice Prune the pending chain up to the last proven block
   *
   * @dev Will revert if there is nothing to prune or if the chain is not ready to be pruned
+   *
+   * @dev While in devnet, this will be guarded behind an `onlyOwner`
   */
-  function prune() external override(IRollup) {
-    if (isDevNet) {
-      revert Errors.DevNet__NoPruningAllowed();
-    }
-
+  function prune() external override(IRollup) onlyOwner {
     if (pendingBlockCount == provenBlockCount) {
       revert Errors.Rollup__NothingToPrune();
     }
@@ -155,17 +149,6 @@ contract Rollup is Leonidas, IRollup, ITestRollup {
     assumeProvenUntilBlockNumber = blockNumber;
   }

-  /**
-   * @notice Set the devnet mode
-   *
-   * @dev This is only needed for testing, and should be removed
-   *
-   * @param _devNet - Whether or not the contract is in devnet mode
-   */
-  function setDevNet(bool _devNet) external override(ITestRollup) onlyOwner {
-    isDevNet = _devNet;
-  }
-
   /**
   * @notice Set the verifier contract
   *
@@ -412,13 +395,9 @@ contract Rollup is Leonidas, IRollup, ITestRollup {
       revert Errors.Rollup__InvalidArchive(tipArchive, _archive);
     }

-    if (isDevNet) {
-      _devnetSequencerSubmissionChecks(_proposer);
-    } else {
-      address proposer = getProposerAt(_ts);
-      if (proposer != address(0) && proposer != _proposer) {
-        revert Errors.Leonidas__InvalidProposer(proposer, _proposer);
-      }
+    address proposer = getProposerAt(_ts);
+    if (proposer != address(0) && proposer != _proposer) {
+      revert Errors.Leonidas__InvalidProposer(proposer, _proposer);
     }

     return (slot, pendingBlockCount);
@@ -568,8 +547,6 @@ contract Rollup is Leonidas, IRollup, ITestRollup {
   *          This might be relaxed for allow consensus set to better handle short-term bursts of L1 congestion
   *          - The slot MUST be in the current epoch
   *
-   * @dev While in isDevNet, we allow skipping all of the checks as we simply assume only TRUSTED sequencers
-   *
   * @param _slot - The slot of the header to validate
   * @param _signatures - The signatures to validate
   * @param _digest - The digest that signatures sign over
@@ -581,19 +558,6 @@ contract Rollup is Leonidas, IRollup, ITestRollup {
     uint256 _currentTime,
     DataStructures.ExecutionFlags memory _flags
   ) internal view {
-    if (isDevNet) {
-      // @note If we are running in a devnet, we don't want to perform all the consensus
-      //       checks, we instead simply require that either there are NO validators or
-      //       that the proposer is a validator.
-      //
-      //       This means that we relaxes the condition that the block must land in the
-      //       correct slot and epoch to make it more fluid for the devnet launch
-      //       or for testing.
-
-      _devnetSequencerSubmissionChecks(msg.sender);
-      return;
-    }
-
     // Ensure that the slot proposed is NOT in the future
     uint256 currentSlot = getSlotAt(_currentTime);
     if (_slot != currentSlot) {
@@ -687,15 +651,4 @@ contract Rollup is Leonidas, IRollup, ITestRollup {
       revert Errors.Rollup__UnavailableTxs(_header.contentCommitment.txsEffectsHash);
     }
   }
-
-  function _devnetSequencerSubmissionChecks(address _proposer) internal view {
-    if (getValidatorCount() == 0) {
-      return;
-    }
-
-    if (!isValidator(_proposer)) {
-      revert Errors.DevNet__InvalidProposer(getValidatorAt(0), _proposer);
-    }
-    return;
-  }
 }

diff --git a/l1-contracts/src/core/interfaces/IRollup.sol b/l1-contracts/src/core/interfaces/IRollup.sol
index 7e2276342b7..33e5640537b 100644
--- a/l1-contracts/src/core/interfaces/IRollup.sol
+++ b/l1-contracts/src/core/interfaces/IRollup.sol
@@ -9,7 +9,6 @@ import {SignatureLib} from "../sequencer_selection/SignatureLib.sol";
 import {DataStructures} from "../libraries/DataStructures.sol";

 interface ITestRollup {
-  function setDevNet(bool _devNet) external;
   function setVerifier(address _verifier) external;
   function setVkTreeRoot(bytes32 _vkTreeRoot) external;
   function setAssumeProvenUntilBlockNumber(uint256 blockNumber) external;

diff --git a/l1-contracts/src/core/libraries/ConstantsGen.sol b/l1-contracts/src/core/libraries/ConstantsGen.sol
index 2e04e93dfe2..e85d0854c55 100644
--- a/l1-contracts/src/core/libraries/ConstantsGen.sol
+++ b/l1-contracts/src/core/libraries/ConstantsGen.sol
@@ -104,7 +104,6 @@ library Constants {
   uint256 internal constant ETHEREUM_SLOT_DURATION = 12;
   uint256 internal constant AZTEC_SLOT_DURATION = 12;
   uint256 internal constant AZTEC_EPOCH_DURATION = 48;
-  uint256 internal constant IS_DEV_NET = 1;
   uint256 internal constant GENESIS_ARCHIVE_ROOT =
     8142738430000951296386584486068033372964809139261822027365426310856631083550;
   uint256 internal constant FEE_JUICE_INITIAL_MINT = 20000000000;

diff --git a/l1-contracts/test/Rollup.t.sol b/l1-contracts/test/Rollup.t.sol
index 5845f95b05c..c908099f6a0 100644
--- a/l1-contracts/test/Rollup.t.sol
+++ b/l1-contracts/test/Rollup.t.sol
@@ -113,13 +113,6 @@ contract RollupTest is DecoderBase {
   }

   function testRevertPrune() public setUpFor("mixed_block_1") {
-    if (rollup.isDevNet()) {
-      vm.expectRevert(abi.encodeWithSelector(Errors.DevNet__NoPruningAllowed.selector));
-      rollup.prune();
-
-      return;
-    }
-
     vm.expectRevert(abi.encodeWithSelector(Errors.Rollup__NothingToPrune.selector));
     rollup.prune();

@@ -136,10 +129,6 @@ contract RollupTest is DecoderBase {
   }

   function testPrune() public setUpFor("mixed_block_1") {
-    if (rollup.isDevNet()) {
-      return;
-    }
-
     _testBlock("mixed_block_1", false);

     assertEq(inbox.inProgress(), 3, "Invalid in progress");
-pragma solidity >=0.8.18; - -import {DecoderBase} from "../decoders/Base.sol"; - -import {DataStructures} from "../../src/core/libraries/DataStructures.sol"; -import {Constants} from "../../src/core/libraries/ConstantsGen.sol"; -import {SignatureLib} from "../../src/core/sequencer_selection/SignatureLib.sol"; - -import {Registry} from "../../src/core/messagebridge/Registry.sol"; -import {Inbox} from "../../src/core/messagebridge/Inbox.sol"; -import {Outbox} from "../../src/core/messagebridge/Outbox.sol"; -import {Errors} from "../../src/core/libraries/Errors.sol"; -import {Rollup} from "../../src/core/Rollup.sol"; -import {Leonidas} from "../../src/core/sequencer_selection/Leonidas.sol"; -import {AvailabilityOracle} from "../../src/core/availability_oracle/AvailabilityOracle.sol"; -import {NaiveMerkle} from "../merkle/Naive.sol"; -import {MerkleTestUtil} from "../merkle/TestUtil.sol"; -import {TxsDecoderHelper} from "../decoders/helpers/TxsDecoderHelper.sol"; -import {IFeeJuicePortal} from "../../src/core/interfaces/IFeeJuicePortal.sol"; - -/** - * We are using the same blocks as from Rollup.t.sol. - * The tests in this file is testing the sequencer selection - * - * We will skip these test if we are running with IS_DEV_NET = true - */ -contract DevNetTest is DecoderBase { - Registry internal registry; - Inbox internal inbox; - Outbox internal outbox; - Rollup internal rollup; - MerkleTestUtil internal merkleTestUtil; - TxsDecoderHelper internal txsHelper; - - AvailabilityOracle internal availabilityOracle; - - mapping(address validator => uint256 privateKey) internal privateKeys; - - SignatureLib.Signature internal emptySignature; - - /** - * @notice Set up the contracts needed for the tests with time aligned to the provided block name - */ - modifier setup(uint256 _validatorCount) { - string memory _name = "mixed_block_1"; - { - Leonidas leo = new Leonidas(address(1)); - DecoderBase.Full memory full = load(_name); - uint256 slotNumber = full.block.decodedHeader.globalVariables.slotNumber; - uint256 initialTime = - full.block.decodedHeader.globalVariables.timestamp - slotNumber * leo.SLOT_DURATION(); - vm.warp(initialTime); - } - - registry = new Registry(address(this)); - availabilityOracle = new AvailabilityOracle(); - rollup = new Rollup( - registry, - availabilityOracle, - IFeeJuicePortal(address(0)), - bytes32(0), - address(this), - new address[](0) - ); - inbox = Inbox(address(rollup.INBOX())); - outbox = Outbox(address(rollup.OUTBOX())); - - registry.upgrade(address(rollup)); - - merkleTestUtil = new MerkleTestUtil(); - txsHelper = new TxsDecoderHelper(); - - for (uint256 i = 1; i < _validatorCount + 1; i++) { - uint256 privateKey = uint256(keccak256(abi.encode("validator", i))); - address validator = vm.addr(privateKey); - privateKeys[validator] = privateKey; - rollup.addValidator(validator); - } - _; - } - - function testProposerForNonSetupEpoch(uint8 _epochsToJump) public setup(5) { - if (Constants.IS_DEV_NET == 0) { - return; - } - - uint256 pre = rollup.getCurrentEpoch(); - vm.warp( - block.timestamp + uint256(_epochsToJump) * rollup.EPOCH_DURATION() * rollup.SLOT_DURATION() - ); - uint256 post = rollup.getCurrentEpoch(); - assertEq(pre + _epochsToJump, post, "Invalid epoch"); - - address expectedProposer = rollup.getCurrentProposer(); - - // Add a validator which will also setup the epoch - rollup.addValidator(address(0xdead)); - - address actualProposer = rollup.getCurrentProposer(); - assertEq(expectedProposer, actualProposer, "Invalid proposer"); - } - - function 
testNoValidators() public setup(0) { - if (Constants.IS_DEV_NET == 0) { - return; - } - - _testBlock("mixed_block_1", false, false); - } - - function testInvalidProposer() public setup(1) { - if (Constants.IS_DEV_NET == 0) { - return; - } - - _testBlock("mixed_block_1", true, true); - } - - struct StructToAvoidDeepStacks { - uint256 needed; - address proposer; - bool shouldRevert; - } - - function _testBlock(string memory _name, bool _expectRevert, bool _invalidProposer) internal { - _testBlock(_name, _expectRevert, _invalidProposer, 0); - } - - function _testBlock(string memory _name, bool _expectRevert, bool _invalidProposer, uint256 _ts) - internal - { - DecoderBase.Full memory full = load(_name); - bytes memory header = full.block.header; - bytes32 archive = full.block.archive; - bytes memory body = full.block.body; - - StructToAvoidDeepStacks memory ree; - - // We jump to the time of the block. (unless it is in the past) - vm.warp(max(block.timestamp, max(full.block.decodedHeader.globalVariables.timestamp, _ts))); - - if (_ts > 0) { - // Update the timestamp and slot in the header - uint256 slotValue = rollup.getCurrentSlot(); - uint256 timestampMemoryPosition = 0x01b4; - uint256 slotMemoryPosition = 0x0194; - assembly { - mstore(add(header, add(0x20, timestampMemoryPosition)), _ts) - mstore(add(header, add(0x20, slotMemoryPosition)), slotValue) - } - } - - _populateInbox(full.populate.sender, full.populate.recipient, full.populate.l1ToL2Content); - - availabilityOracle.publish(body); - - ree.proposer = rollup.getCurrentProposer(); - ree.shouldRevert = false; - - rollup.setupEpoch(); - - if (_invalidProposer) { - ree.proposer = address(uint160(uint256(keccak256(abi.encode("invalid", ree.proposer))))); - // Why don't we end up here? - vm.expectRevert( - abi.encodeWithSelector( - Errors.DevNet__InvalidProposer.selector, rollup.getValidatorAt(0), ree.proposer - ) - ); - ree.shouldRevert = true; - } - - vm.prank(ree.proposer); - rollup.propose(header, archive, bytes32(0)); - - assertEq(_expectRevert, ree.shouldRevert, "Invalid revert expectation"); - - if (ree.shouldRevert) { - return; - } - - bytes32 l2ToL1MessageTreeRoot; - { - uint32 numTxs = full.block.numTxs; - // NB: The below works with full blocks because we require the largest possible subtrees - // for L2 to L1 messages - usually we make variable height subtrees, the roots of which - // form a balanced tree - - // The below is a little janky - we know that this test deals with full txs with equal numbers - // of msgs or txs with no messages, so the division works - // TODO edit full.messages to include information about msgs per tx? - uint256 subTreeHeight = merkleTestUtil.calculateTreeHeightFromSize( - full.messages.l2ToL1Messages.length == 0 ? 
0 : full.messages.l2ToL1Messages.length / numTxs - ); - uint256 outHashTreeHeight = merkleTestUtil.calculateTreeHeightFromSize(numTxs); - uint256 numMessagesWithPadding = numTxs * Constants.MAX_L2_TO_L1_MSGS_PER_TX; - - uint256 treeHeight = subTreeHeight + outHashTreeHeight; - NaiveMerkle tree = new NaiveMerkle(treeHeight); - for (uint256 i = 0; i < numMessagesWithPadding; i++) { - if (i < full.messages.l2ToL1Messages.length) { - tree.insertLeaf(full.messages.l2ToL1Messages[i]); - } else { - tree.insertLeaf(bytes32(0)); - } - } - - l2ToL1MessageTreeRoot = tree.computeRoot(); - } - - (bytes32 root,) = outbox.getRootData(full.block.decodedHeader.globalVariables.blockNumber); - - if (rollup.provenBlockCount() > full.block.decodedHeader.globalVariables.blockNumber) { - assertEq(l2ToL1MessageTreeRoot, root, "Invalid l2 to l1 message tree root"); - } else { - assertEq(root, bytes32(0), "Invalid outbox root"); - } - - assertEq(rollup.archive(), archive, "Invalid archive"); - } - - function _populateInbox(address _sender, bytes32 _recipient, bytes32[] memory _contents) internal { - for (uint256 i = 0; i < _contents.length; i++) { - vm.prank(_sender); - inbox.sendL2Message( - DataStructures.L2Actor({actor: _recipient, version: 1}), _contents[i], bytes32(0) - ); - } - } -} diff --git a/l1-contracts/test/sparta/Sparta.t.sol b/l1-contracts/test/sparta/Sparta.t.sol index e02935ef76f..a3e4eac2d2d 100644 --- a/l1-contracts/test/sparta/Sparta.t.sol +++ b/l1-contracts/test/sparta/Sparta.t.sol @@ -24,8 +24,6 @@ import {IFeeJuicePortal} from "../../src/core/interfaces/IFeeJuicePortal.sol"; /** * We are using the same blocks as from Rollup.t.sol. * The tests in this file is testing the sequencer selection - * - * We will skip these test if we are running with IS_DEV_NET = true */ contract SpartaTest is DecoderBase { @@ -111,10 +109,6 @@ contract SpartaTest is DecoderBase { } function testProposerForNonSetupEpoch(uint8 _epochsToJump) public setup(4) { - if (Constants.IS_DEV_NET == 1) { - return; - } - uint256 pre = rollup.getCurrentEpoch(); vm.warp( block.timestamp + uint256(_epochsToJump) * rollup.EPOCH_DURATION() * rollup.SLOT_DURATION() @@ -132,10 +126,6 @@ contract SpartaTest is DecoderBase { } function testValidatorSetLargerThanCommittee(bool _insufficientSigs) public setup(100) { - if (Constants.IS_DEV_NET == 1) { - return; - } - assertGt(rollup.getValidators().length, rollup.TARGET_COMMITTEE_SIZE(), "Not enough validators"); uint256 committeSize = rollup.TARGET_COMMITTEE_SIZE() * 2 / 3 + (_insufficientSigs ? 
0 : 1); @@ -149,27 +139,15 @@ contract SpartaTest is DecoderBase { } function testHappyPath() public setup(4) { - if (Constants.IS_DEV_NET == 1) { - return; - } - _testBlock("mixed_block_1", false, 3, false); _testBlock("mixed_block_2", false, 3, false); } function testInvalidProposer() public setup(4) { - if (Constants.IS_DEV_NET == 1) { - return; - } - _testBlock("mixed_block_1", true, 3, true); } function testInsufficientSigs() public setup(4) { - if (Constants.IS_DEV_NET == 1) { - return; - } - _testBlock("mixed_block_1", true, 2, false); } diff --git a/noir-projects/aztec-nr/.gitrepo b/noir-projects/aztec-nr/.gitrepo index 2596030777a..f5e41181613 100644 --- a/noir-projects/aztec-nr/.gitrepo +++ b/noir-projects/aztec-nr/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/aztec-nr branch = master - commit = e9dd03910aafad2061f9d23ffeab0c5578655623 + commit = 7c5decc5c648e0a9abbf7e979d008099697a78a1 method = merge cmdver = 0.4.6 - parent = 8aa4c631ff1e181e31f45dc0aaa63e114aeccca6 + parent = 4e52ed0ce68b32580a2240a94b0904efa9f83f84 diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr b/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr index 95c3c14e1b3..0183f973880 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr @@ -137,7 +137,6 @@ global ETHEREUM_SLOT_DURATION: u32 = 12; // AZTEC_SLOT_DURATION should be a multiple of ETHEREUM_SLOT_DURATION global AZTEC_SLOT_DURATION: u32 = ETHEREUM_SLOT_DURATION * 1; global AZTEC_EPOCH_DURATION: u32 = 48; -global IS_DEV_NET: bool = true; // The following is taken from building a block and looking at the `lastArchive` value in it. // You can run the `integration_l1_publisher.test.ts` and look at the first blocks in the fixtures. 
diff --git a/yarn-project/circuits.js/src/constants.gen.ts b/yarn-project/circuits.js/src/constants.gen.ts
index 1b25acd8207..10c2bd38b55 100644
--- a/yarn-project/circuits.js/src/constants.gen.ts
+++ b/yarn-project/circuits.js/src/constants.gen.ts
@@ -90,7 +90,6 @@ export const BLOB_SIZE_IN_BYTES = 126976;
 export const ETHEREUM_SLOT_DURATION = 12;
 export const AZTEC_SLOT_DURATION = 12;
 export const AZTEC_EPOCH_DURATION = 48;
-export const IS_DEV_NET = 1;
 export const GENESIS_ARCHIVE_ROOT = 8142738430000951296386584486068033372964809139261822027365426310856631083550n;
 export const FEE_JUICE_INITIAL_MINT = 20000000000;
 export const MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS = 20000;
diff --git a/yarn-project/end-to-end/Earthfile b/yarn-project/end-to-end/Earthfile
index 75512dfe5f8..b59dbd47986 100644
--- a/yarn-project/end-to-end/Earthfile
+++ b/yarn-project/end-to-end/Earthfile
@@ -110,6 +110,9 @@ NETWORK_TEST:
 e2e-p2p:
     DO +E2E_TEST --test=./src/e2e_p2p_network.test.ts
 
+e2e-l1-with-wall-time:
+    DO +E2E_TEST --test=./src/e2e_l1_with_wall_time.test.ts
+
 e2e-2-pxes:
     DO +E2E_TEST --test=./src/e2e_2_pxes.test.ts
diff --git a/yarn-project/end-to-end/src/e2e_l1_with_wall_time.test.ts b/yarn-project/end-to-end/src/e2e_l1_with_wall_time.test.ts
new file mode 100644
index 00000000000..70ec2f7f6b4
--- /dev/null
+++ b/yarn-project/end-to-end/src/e2e_l1_with_wall_time.test.ts
@@ -0,0 +1,66 @@
+import { getSchnorrAccount } from '@aztec/accounts/schnorr';
+import { type DebugLogger, Fr, GrumpkinScalar, type PXE, type SentTx, TxStatus } from '@aztec/aztec.js';
+import { EthAddress } from '@aztec/circuits.js';
+import { type PXEService } from '@aztec/pxe';
+
+import { privateKeyToAccount } from 'viem/accounts';
+
+import { getPrivateKeyFromIndex, setup } from './fixtures/utils.js';
+
+describe('e2e_l1_with_wall_time', () => {
+  let logger: DebugLogger;
+  let teardown: () => Promise<void>;
+  let pxe: PXE;
+
+  beforeEach(async () => {
+    const account = privateKeyToAccount(`0x${getPrivateKeyFromIndex(0)!.toString('hex')}`);
+    const initialValidators = [EthAddress.fromString(account.address)];
+
+    ({ teardown, logger, pxe } = await setup(0, { initialValidators, l1BlockTime: 12 }));
+  });
+
+  afterEach(() => teardown());
+
+  it('should produce blocks with a bunch of transactions', async () => {
+    for (let i = 0; i < 4; i++) {
+      const txs = await submitTxsTo(pxe as PXEService, 8);
+      await Promise.all(
+        txs.map(async (tx, j) => {
+          logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`);
+          return tx.wait();
+        }),
+      );
+    }
+  });
+
+  // Submits a set of transactions to the provided Private eXecution Environment (PXE).
+  const submitTxsTo = async (pxe: PXEService, numTxs: number) => {
+    const txs: SentTx[] = [];
+    for (let i = 0; i < numTxs; i++) {
+      const accountManager = getSchnorrAccount(pxe, Fr.random(), GrumpkinScalar.random(), Fr.random());
+      const deployMethod = await accountManager.getDeployMethod();
+      await deployMethod.create({
+        contractAddressSalt: accountManager.salt,
+        skipClassRegistration: true,
+        skipPublicDeployment: true,
+        universalDeploy: true,
+      });
+      await deployMethod.prove({});
+      const tx = deployMethod.send();
+
+      const txHash = await tx.getTxHash();
+
+      logger.info(`Tx sent with hash ${txHash}`);
+      const receipt = await tx.getReceipt();
+      expect(receipt).toEqual(
+        expect.objectContaining({
+          status: TxStatus.PENDING,
+          error: '',
+        }),
+      );
+      logger.info(`Receipt received for ${txHash}`);
+      txs.push(tx);
+    }
+    return txs;
+  };
+});
diff --git a/yarn-project/end-to-end/src/e2e_p2p_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p_network.test.ts
index f297b084b47..2037f9a200d 100644
--- a/yarn-project/end-to-end/src/e2e_p2p_network.test.ts
+++ b/yarn-project/end-to-end/src/e2e_p2p_network.test.ts
@@ -1,12 +1,24 @@
 import { getSchnorrAccount } from '@aztec/accounts/schnorr';
 import { type AztecNodeConfig, type AztecNodeService } from '@aztec/aztec-node';
-import { CompleteAddress, type DebugLogger, Fr, GrumpkinScalar, type SentTx, TxStatus, sleep } from '@aztec/aztec.js';
-import { EthAddress, IS_DEV_NET } from '@aztec/circuits.js';
+import {
+  CompleteAddress,
+  type DebugLogger,
+  type DeployL1Contracts,
+  EthCheatCodes,
+  Fr,
+  GrumpkinScalar,
+  type SentTx,
+  TxStatus,
+  sleep,
+} from '@aztec/aztec.js';
+import { EthAddress } from '@aztec/circuits.js';
+import { RollupAbi } from '@aztec/l1-artifacts';
 import { type BootstrapNode } from '@aztec/p2p';
 import { type PXEService, createPXEService, getPXEServiceConfig as getRpcConfig } from '@aztec/pxe';
 
 import { jest } from '@jest/globals';
 import fs from 'fs';
+import { getContract } from 'viem';
 import { privateKeyToAccount } from 'viem/accounts';
 
 import {
@@ -32,6 +44,7 @@ describe('e2e_p2p_network', () => {
   let teardown: () => Promise<void>;
   let bootstrapNode: BootstrapNode;
   let bootstrapNodeEnr: string;
+  let deployL1ContractsValues: DeployL1Contracts;
 
   beforeEach(async () => {
     // If we want to test with interval mining, we can use the local host and start `anvil --block-time 12`
@@ -47,21 +60,32 @@ describe('e2e_p2p_network', () => {
 
     const initialValidators = [EthAddress.fromString(account.address)];
 
-    // Add 1 extra validator if in devnet or NUM_NODES if not.
-    // Each of these will become a validator and sign attestations.
-    const limit = IS_DEV_NET ? 1 : NUM_NODES;
-    for (let i = 0; i < limit; i++) {
-      const account = privateKeyToAccount(`0x${getPrivateKeyFromIndex(i + 1)!.toString('hex')}`);
-      initialValidators.push(EthAddress.fromString(account.address));
-    }
-
-    ({ teardown, config, logger } = await setup(0, { initialValidators, ...options }));
+    ({ teardown, config, logger, deployL1ContractsValues } = await setup(0, { initialValidators, ...options }));
 
     bootstrapNode = await createBootstrapNode(BOOT_NODE_UDP_PORT);
     bootstrapNodeEnr = bootstrapNode.getENR().encodeTxt();
 
     config.minTxsPerBlock = NUM_TXS_PER_BLOCK;
     config.maxTxsPerBlock = NUM_TXS_PER_BLOCK;
+
+    const rollup = getContract({
+      address: deployL1ContractsValues.l1ContractAddresses.rollupAddress.toString(),
+      abi: RollupAbi,
+      client: deployL1ContractsValues.walletClient,
+    });
+
+    for (let i = 0; i < NUM_NODES; i++) {
+      const account = privateKeyToAccount(`0x${getPrivateKeyFromIndex(i + 1)!.toString('hex')}`);
+      await rollup.write.addValidator([account.address]);
+    }
+
+    // @note Now we jump ahead to the next epoch so that the validator committee is picked.
+    // INTERVAL MINING: If we are using anvil interval mining, this will NOT advance time!
+    // That means the validator set will still be empty, so anyone can propose.
+    const slotsInEpoch = await rollup.read.EPOCH_DURATION();
+    const timestamp = await rollup.read.getTimestampForSlot([slotsInEpoch]);
+    const cheatCodes = new EthCheatCodes(config.l1RpcUrl);
+    await cheatCodes.warp(Number(timestamp));
   });
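Editor's sketch of the warp target computed just above. The real slot-to-timestamp mapping lives in the Rollup contract's `getTimestampForSlot`; the `genesisTime` anchor below is a made-up placeholder:

```ts
// If slot 0 starts at some genesis L1 timestamp, the first slot of epoch 1 is
// EPOCH_DURATION slots later. Warping there activates the sampled committee.
const SLOT_DURATION = 12n; // seconds, matching AZTEC_SLOT_DURATION
const EPOCH_DURATION = 48n; // slots per epoch

const genesisTime = 1_700_000_000n; // placeholder, for illustration only
const timestampForSlot = (slot: bigint): bigint => genesisTime + slot * SLOT_DURATION;

const warpTarget = timestampForSlot(EPOCH_DURATION); // ~ getTimestampForSlot([slotsInEpoch])
```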
 
   afterEach(() => teardown());
 
@@ -88,7 +112,6 @@ describe('e2e_p2p_network', () => {
       bootstrapNodeEnr,
       NUM_NODES,
       BOOT_NODE_UDP_PORT,
-      /*activate validators=*/ !IS_DEV_NET,
     );
 
     // wait a bit for peers to discover each other
@@ -149,7 +172,6 @@ describe('e2e_p2p_network', () => {
         i + 1 + BOOT_NODE_UDP_PORT,
         undefined,
         i,
-        /*validators*/ !IS_DEV_NET,
         `./data-${i}`,
       );
       logger.info(`Node ${i} restarted`);
diff --git a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts
index 57fb87f171f..560677f6bee 100644
--- a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts
+++ b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts
@@ -34,18 +34,10 @@ export async function createNodes(
   bootstrapNodeEnr: string,
   numNodes: number,
   bootNodePort: number,
-  activateValidators: boolean = false,
 ): Promise<AztecNodeService[]> {
   const nodes = [];
   for (let i = 0; i < numNodes; i++) {
-    const node = await createNode(
-      config,
-      peerIdPrivateKeys[i],
-      i + 1 + bootNodePort,
-      bootstrapNodeEnr,
-      i,
-      activateValidators,
-    );
+    const node = await createNode(config, peerIdPrivateKeys[i], i + 1 + bootNodePort, bootstrapNodeEnr, i);
     nodes.push(node);
   }
   return nodes;
@@ -58,7 +50,6 @@ export async function createNode(
   tcpListenPort: number,
   bootstrapNode: string | undefined,
   publisherAddressIndex: number,
-  activateValidators: boolean = false,
   dataDirectory?: string,
 ) {
   // We use different L1 publisher accounts in order to avoid duplicate tx nonces. We start from
@@ -66,11 +57,8 @@
   const publisherPrivKey = getPrivateKeyFromIndex(publisherAddressIndex + 1);
   config.publisherPrivateKey = `0x${publisherPrivKey!.toString('hex')}`;
 
-  if (activateValidators) {
-    const validatorPrivKey = getPrivateKeyFromIndex(1 + publisherAddressIndex);
-    config.validatorPrivateKey = `0x${validatorPrivKey!.toString('hex')}`;
-    config.disableValidator = false;
-  }
+  const validatorPrivKey = getPrivateKeyFromIndex(1 + publisherAddressIndex);
+  config.validatorPrivateKey = `0x${validatorPrivKey!.toString('hex')}`;
 
   const newConfig: AztecNodeConfig = {
     ...config,
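Editor's note on the key wiring that falls out of this refactor: node i now publishes and validates with anvil key index i + 1, which appears to be the same account registered through `rollup.addValidator` in the p2p test above. A tiny sketch of that alignment (my reading, not stated in the diff):

```ts
// Index 0 deploys and is the initial validator in `setup`; every node after
// that re-uses a single account for both publishing and validating.
const keyIndexForNode = (nodeIndex: number): number => nodeIndex + 1;

for (let i = 0; i < 4; i++) {
  console.log(`node ${i}: publisher/validator key index ${keyIndexForNode(i)}`);
}
```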
diff --git a/yarn-project/end-to-end/src/fixtures/utils.ts b/yarn-project/end-to-end/src/fixtures/utils.ts
index 7fb9bf3f5b1..dfbebc93636 100644
--- a/yarn-project/end-to-end/src/fixtures/utils.ts
+++ b/yarn-project/end-to-end/src/fixtures/utils.ts
@@ -300,6 +300,8 @@ type SetupOptions = {
   salt?: number;
   /** An initial set of validators */
   initialValidators?: EthAddress[];
+  /** Anvil block time (interval) */
+  l1BlockTime?: number;
 } & Partial<AztecNodeConfig>;
 
 /** Context for an end-to-end test as returned by the `setup` function */
@@ -337,7 +339,6 @@ export async function setup(
   opts: SetupOptions = {},
   pxeOpts: Partial<PXEServiceConfig> = {},
   enableGas = false,
-  enableValidators = false,
   chain: Chain = foundry,
 ): Promise<EndToEndContext> {
   const config = { ...getConfigEnvVars(), ...opts };
@@ -355,7 +356,7 @@ export async function setup(
     );
   }
 
-    const res = await startAnvil();
+    const res = await startAnvil(opts.l1BlockTime);
     anvil = res.anvil;
     config.l1RpcUrl = res.rpcUrl;
   }
@@ -404,11 +405,8 @@ export async function setup(
   config.l1Contracts = deployL1ContractsValues.l1ContractAddresses;
 
   // Run the test with validators enabled
-  if (enableValidators) {
-    const validatorPrivKey = getPrivateKeyFromIndex(1);
-    config.validatorPrivateKey = `0x${validatorPrivKey!.toString('hex')}`;
-  }
-  config.disableValidator = !enableValidators;
+  const validatorPrivKey = getPrivateKeyFromIndex(1);
+  config.validatorPrivateKey = `0x${validatorPrivKey!.toString('hex')}`;
 
   logger.verbose('Creating and synching an aztec node...');
@@ -508,7 +506,7 @@ export function getL1WalletClient(rpcUrl: string, index: number) {
 * Ensures there's a running Anvil instance and returns the RPC URL.
 * @returns
 */
-export async function startAnvil(): Promise<{ anvil: Anvil; rpcUrl: string }> {
+export async function startAnvil(l1BlockTime?: number): Promise<{ anvil: Anvil; rpcUrl: string }> {
   let rpcUrl: string | undefined = undefined;
 
   // Start anvil.
@@ -517,7 +515,11 @@
     async () => {
       const ethereumHostPort = await getPort();
       rpcUrl = `http://127.0.0.1:${ethereumHostPort}`;
-      const anvil = createAnvil({ anvilBinary: './scripts/anvil_kill_wrapper.sh', port: ethereumHostPort });
+      const anvil = createAnvil({
+        anvilBinary: './scripts/anvil_kill_wrapper.sh',
+        port: ethereumHostPort,
+        blockTime: l1BlockTime,
+      });
       await anvil.start();
       return anvil;
     },
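The new `l1BlockTime` option plumbs straight into @viem/anvil's `blockTime`, i.e. interval mining. A minimal standalone usage sketch (without the kill-wrapper script used above):

```ts
import { createAnvil } from '@viem/anvil';

// With blockTime set, anvil mines on a fixed 12s cadence instead of instantly
// per transaction -- the wall-clock behaviour the new e2e test depends on.
const anvil = createAnvil({ port: 8545, blockTime: 12 });
await anvil.start();
// ... drive test traffic against http://127.0.0.1:8545 ...
await anvil.stop();
```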
diff --git a/yarn-project/end-to-end/src/public-testnet/e2e_public_testnet_transfer.test.ts b/yarn-project/end-to-end/src/public-testnet/e2e_public_testnet_transfer.test.ts
index fb9affae54d..dec793c0c7f 100644
--- a/yarn-project/end-to-end/src/public-testnet/e2e_public_testnet_transfer.test.ts
+++ b/yarn-project/end-to-end/src/public-testnet/e2e_public_testnet_transfer.test.ts
@@ -35,7 +35,6 @@ describe(`deploys and transfers a private only token`, () => {
       { skipProtocolContracts: true, stateLoad: undefined },
       {},
       false,
-      false,
       chain,
     ));
     proverConfig = getProverNodeConfigFromEnv();
diff --git a/yarn-project/sequencer-client/src/publisher/l1-publisher.test.ts b/yarn-project/sequencer-client/src/publisher/l1-publisher.test.ts
index 0ecc86cd335..82f466b1151 100644
--- a/yarn-project/sequencer-client/src/publisher/l1-publisher.test.ts
+++ b/yarn-project/sequencer-client/src/publisher/l1-publisher.test.ts
@@ -214,20 +214,11 @@ describe('L1Publisher', () => {
 
   it('does not retry if simulating a publish and propose tx fails', async () => {
     rollupContractRead.archive.mockResolvedValue(l2Block.header.lastArchive.root.toString() as `0x${string}`);
-    rollupContractSimulate.propose.mockRejectedValueOnce(new Error());
+    rollupContractRead.validateHeader.mockRejectedValueOnce(new Error('Test error'));
 
-    if (L1Publisher.SKIP_SIMULATION) {
-      rollupContractRead.validateHeader.mockRejectedValueOnce(new Error('Test error'));
-    }
-
-    const result = await publisher.processL2Block(l2Block);
+    await expect(publisher.processL2Block(l2Block)).rejects.toThrow();
 
-    expect(result).toEqual(false);
-    if (!L1Publisher.SKIP_SIMULATION) {
-      expect(rollupContractSimulate.propose).toHaveBeenCalledTimes(1);
-    }
     expect(rollupContractRead.validateHeader).toHaveBeenCalledTimes(1);
-
     expect(rollupContractWrite.propose).toHaveBeenCalledTimes(0);
   });
diff --git a/yarn-project/sequencer-client/src/publisher/l1-publisher.ts b/yarn-project/sequencer-client/src/publisher/l1-publisher.ts
index 00caf53414c..dc36dee4f9c 100644
--- a/yarn-project/sequencer-client/src/publisher/l1-publisher.ts
+++ b/yarn-project/sequencer-client/src/publisher/l1-publisher.ts
@@ -1,8 +1,8 @@
 import { type L2Block, type Signature } from '@aztec/circuit-types';
 import { type L1PublishBlockStats, type L1PublishProofStats } from '@aztec/circuit-types/stats';
-import { ETHEREUM_SLOT_DURATION, EthAddress, GENESIS_ARCHIVE_ROOT, type Header, type Proof } from '@aztec/circuits.js';
+import { ETHEREUM_SLOT_DURATION, EthAddress, type Header, type Proof } from '@aztec/circuits.js';
 import { createEthereumChain } from '@aztec/ethereum';
-import { Fr } from '@aztec/foundation/fields';
+import { type Fr } from '@aztec/foundation/fields';
 import { createDebugLogger } from '@aztec/foundation/log';
 import { serializeToBuffer } from '@aztec/foundation/serialize';
 import { InterruptibleSleep } from '@aztec/foundation/sleep';
@@ -183,27 +183,37 @@ export class L1Publisher {
     return [slot, blockNumber];
   }
 
+  /**
+   * @notice Calls `validateHeader` on the rollup to make sure that it is possible to propose.
+   *
+   * @dev Throws if the header cannot be proposed.
+   *
+   * @param header - The header to propose
+   * @param attestationData - The digest the attestations sign over, plus the signatures themselves
+   */
   public async validateBlockForSubmission(
     header: Header,
-    digest: Buffer = new Fr(GENESIS_ARCHIVE_ROOT).toBuffer(),
-    attestations: Signature[] = [],
-  ): Promise<boolean> {
+    attestationData: { digest: Buffer; signatures: Signature[] } = {
+      digest: Buffer.alloc(32),
+      signatures: [],
+    },
+  ): Promise<void> {
     const ts = BigInt((await this.publicClient.getBlock()).timestamp + BigInt(ETHEREUM_SLOT_DURATION));
 
-    const formattedAttestations = attestations.map(attest => attest.toViemSignature());
-    const flags = { ignoreDA: true, ignoreSignatures: attestations.length == 0 };
+    const formattedSignatures = attestationData.signatures.map(attest => attest.toViemSignature());
+    const flags = { ignoreDA: true, ignoreSignatures: formattedSignatures.length == 0 };
 
     const args = [
       `0x${header.toBuffer().toString('hex')}`,
-      formattedAttestations,
-      `0x${digest.toString('hex')}`,
+      formattedSignatures,
+      `0x${attestationData.digest.toString('hex')}`,
       ts,
       flags,
     ] as const;
 
     try {
       await this.rollupContract.read.validateHeader(args, { account: this.account });
-      return true;
     } catch (error: unknown) {
       // Specify the type of error
       if (error instanceof ContractFunctionRevertedError) {
@@ -212,7 +222,7 @@
       } else {
         this.log.debug(`Unexpected error during validation: ${error}`);
       }
-      return false;
+      throw error;
     }
   }
@@ -271,14 +281,6 @@ export class L1Publisher {
       blockHash: block.hash().toString(),
     };
 
-    // @note This will make sure that we are passing the checks for our header ASSUMING that the data is also made available
-    // This means that we can avoid the simulation issues in later checks.
-    // By simulation issue, I mean the fact that the block.timestamp is equal to the last block, not the next, which
-    // make time consistency checks break.
-    if (!(await this.validateBlockForSubmission(block.header, block.archive.root.toBuffer(), attestations))) {
-      return false;
-    }
-
     const processTxArgs = {
       header: block.header.toBuffer(),
       archive: block.archive.root.toBuffer(),
@@ -292,7 +294,18 @@ export class L1Publisher {
 
     let txHash;
     const timer = new Timer();
-    if (await this.checkIfTxsAreAvailable(block)) {
+    const isAvailable = await this.checkIfTxsAreAvailable(block);
+
+    // @note This will make sure that we are passing the checks for our header ASSUMING that the data is also made available.
+    // This means that we can avoid the simulation issues in later checks.
+    // By simulation issue, I mean the fact that the block.timestamp is equal to the last block, not the next, which
+    // makes time-consistency checks break.
+    await this.validateBlockForSubmission(block.header, {
+      digest: block.archive.root.toBuffer(),
+      signatures: attestations ?? [],
+    });
+
+    if (isAvailable) {
       this.log.verbose(`Transaction effects of block ${block.number} already published.`, ctx);
       txHash = await this.sendProposeWithoutBodyTx(processTxArgs);
     } else {
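With this change `validateBlockForSubmission` reports failure by throwing rather than returning `false`. An editor's sketch of adapting the new contract at a call site that still wants a boolean (the structural type here is a stand-in, not the real class):

```ts
type Publisher = { validateBlockForSubmission(header: unknown): Promise<void> };

// Success is an empty resolution; any revert reason surfaces as an exception.
async function canSubmit(publisher: Publisher, header: unknown): Promise<boolean> {
  try {
    await publisher.validateBlockForSubmission(header);
    return true;
  } catch {
    return false; // the publisher has already logged the revert reason
  }
}
```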
diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts
index 24a52633326..d3a8ec1fc82 100644
--- a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts
+++ b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts
@@ -23,7 +23,6 @@ import {
   Fr,
   GasFees,
   GlobalVariables,
-  IS_DEV_NET,
   NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
 } from '@aztec/circuits.js';
 import { Buffer32 } from '@aztec/foundation/buffer';
@@ -73,35 +72,44 @@ describe('sequencer', () => {
   const mockedSig = new Signature(Buffer32.fromField(Fr.random()), Buffer32.fromField(Fr.random()), 27);
   const committee = [EthAddress.random()];
 
-  const getSignatures = () => (IS_DEV_NET ? undefined : [mockedSig]);
+  const getSignatures = () => [mockedSig];
   const getAttestations = () => {
-    if (IS_DEV_NET) {
-      return undefined;
-    }
-
     const attestation = new BlockAttestation(block.header, archive, mockedSig);
     (attestation as any).sender = committee[0];
     return [attestation];
   };
+
   const createBlockProposal = () => {
     return new BlockProposal(block.header, archive, [TxHash.random()], mockedSig);
   };
 
   let block: L2Block;
+  let mockedGlobalVariables: GlobalVariables;
 
   beforeEach(() => {
     lastBlockNumber = 0;
 
     block = L2Block.random(lastBlockNumber + 1);
 
+    mockedGlobalVariables = new GlobalVariables(
+      chainId,
+      version,
+      block.header.globalVariables.blockNumber,
+      block.header.globalVariables.slotNumber,
+      Fr.ZERO,
+      coinbase,
+      feeRecipient,
+      gasFees,
+    );
+
     publisher = mock();
     publisher.getCurrentEpochCommittee.mockResolvedValue(committee);
     publisher.canProposeAtNextEthBlock.mockResolvedValue([
       block.header.globalVariables.slotNumber.toBigInt(),
       block.header.globalVariables.blockNumber.toBigInt(),
     ]);
-    publisher.validateBlockForSubmission.mockResolvedValue(true);
+    publisher.validateBlockForSubmission.mockResolvedValue();
 
     globalVariableBuilder = mock();
     merkleTreeOps = mock();
@@ -147,12 +155,10 @@
       create: () => blockSimulator,
     });
 
-    if (!IS_DEV_NET) {
-      validatorClient = mock({
-        collectAttestations: mockFn().mockResolvedValue(getAttestations()),
-        createBlockProposal: mockFn().mockResolvedValue(createBlockProposal()),
-      });
-    }
+    validatorClient = mock({
+      collectAttestations: mockFn().mockResolvedValue(getAttestations()),
+      createBlockProposal: mockFn().mockResolvedValue(createBlockProposal()),
+    });
 
     sequencer = new TestSubject(
       publisher,
@@ -186,17 +192,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      block.header.globalVariables.blockNumber,
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     await sequencer.initialSync();
@@ -228,22 +223,11 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      block.header.globalVariables.blockNumber,
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValue(mockedGlobalVariables);
 
     // Not your turn!
     publisher.canProposeAtNextEthBlock.mockRejectedValue(new Error());
-    publisher.validateBlockForSubmission.mockResolvedValue(false);
+    publisher.validateBlockForSubmission.mockRejectedValue(new Error());
 
     await sequencer.initialSync();
     await sequencer.work();
@@ -260,7 +244,7 @@
 
     // Now it is!
     publisher.validateBlockForSubmission.mockClear();
-    publisher.validateBlockForSubmission.mockResolvedValue(true);
+    publisher.validateBlockForSubmission.mockResolvedValue();
     await sequencer.work();
 
     expect(blockSimulator.startNewBlock).toHaveBeenCalledWith(
@@ -290,17 +274,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      block.header.globalVariables.blockNumber,
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     // We make a nullifier from tx1 a part of the nullifier tree, so it gets rejected as double spend
@@ -342,17 +315,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      block.header.globalVariables.blockNumber,
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     // We make the chain id on the invalid tx not equal to the configured chain id
@@ -388,17 +350,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      new Fr(lastBlockNumber + 1),
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     // We make txs[1] too big to fit
@@ -435,17 +386,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      new Fr(lastBlockNumber + 1),
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     await sequencer.initialSync();
@@ -472,7 +412,7 @@
       Array(NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP).fill(new Fr(0n)),
     );
     expect(publisher.processL2Block).toHaveBeenCalledTimes(1);
-    expect(publisher.processL2Block).toHaveBeenCalledWith(block, getAttestations());
+    expect(publisher.processL2Block).toHaveBeenCalledWith(block, getSignatures());
     expect(blockSimulator.cancelBlock).toHaveBeenCalledTimes(0);
   });
@@ -494,17 +434,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      block.header.globalVariables.blockNumber,
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     await sequencer.initialSync();
@@ -534,7 +463,7 @@
     expect(publisher.processL2Block).toHaveBeenCalledTimes(1);
-    expect(publisher.processL2Block).toHaveBeenCalledWith(block, getAttestations());
+    expect(publisher.processL2Block).toHaveBeenCalledWith(block, getSignatures());
     expect(blockSimulator.cancelBlock).toHaveBeenCalledTimes(0);
   });
@@ -556,17 +485,6 @@
     blockSimulator.finaliseBlock.mockResolvedValue({ block });
     publisher.processL2Block.mockResolvedValueOnce(true);
 
-    const mockedGlobalVariables = new GlobalVariables(
-      chainId,
-      version,
-      block.header.globalVariables.blockNumber,
-      block.header.globalVariables.slotNumber,
-      Fr.ZERO,
-      coinbase,
-      feeRecipient,
-      gasFees,
-    );
-
     globalVariableBuilder.buildGlobalVariables.mockResolvedValueOnce(mockedGlobalVariables);
 
     await sequencer.initialSync();
@@ -596,7 +514,8 @@
       Array(NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP).fill(new Fr(0n)),
     );
     expect(publisher.processL2Block).toHaveBeenCalledTimes(1);
-    expect(publisher.processL2Block).toHaveBeenCalledWith(block, getAttestations());
+
+    expect(publisher.processL2Block).toHaveBeenCalledWith(block, getSignatures());
     expect(blockSimulator.cancelBlock).toHaveBeenCalledTimes(0);
   });
@@ -632,9 +551,9 @@
     // This could practically be for any reason, e.g., could also be that we have entered a new slot.
     publisher.validateBlockForSubmission
-      .mockResolvedValueOnce(true)
-      .mockResolvedValueOnce(true)
-      .mockResolvedValueOnce(false);
+      .mockResolvedValueOnce()
+      .mockResolvedValueOnce()
+      .mockRejectedValueOnce(new Error());
 
     await sequencer.work();
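The test updates above follow directly from the publisher now returning `Promise<void>`: a mocked validation succeeds by resolving with nothing and fails by rejecting. A small self-contained illustration with plain Jest (names are illustrative, not from the suite):

```ts
import { expect, it, jest } from '@jest/globals';

it('models validation as resolve-or-reject', async () => {
  const validate = jest.fn<() => Promise<void>>();
  validate.mockResolvedValueOnce(); // first call: header is valid
  validate.mockRejectedValueOnce(new Error('not your slot')); // second call: it is not

  await expect(validate()).resolves.toBeUndefined();
  await expect(validate()).rejects.toThrow('not your slot');
});
```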
diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.ts
index 2b8fa1ac1bf..2f90618e58f 100644
--- a/yarn-project/sequencer-client/src/sequencer/sequencer.ts
+++ b/yarn-project/sequencer-client/src/sequencer/sequencer.ts
@@ -17,7 +17,6 @@ import {
   EthAddress,
   GENESIS_ARCHIVE_ROOT,
   Header,
-  IS_DEV_NET,
   StateReference,
 } from '@aztec/circuits.js';
 import { Fr } from '@aztec/foundation/fields';
@@ -183,91 +182,99 @@ export class Sequencer {
   }
 
   /**
-   * Grabs up to maxTxsPerBlock from the p2p client, constructs a block, and pushes it to L1.
+   * @notice Performs most of the sequencer duties:
+   *   - Checks if we are up to date
+   *   - If we are, and we are the sequencer, collects txs and builds a block
+   *   - Collects attestations for the block
+   *   - Submits the block
+   *   - If our block is for some reason not included, reverts the state
    */
   protected async work() {
-    try {
-      // Update state when the previous block has been synced
-      const prevBlockSynced = await this.isBlockSynced();
-      // Do not go forward with new block if the previous one has not been mined and processed
-      if (!prevBlockSynced) {
-        this.log.debug('Previous block has not been mined and processed yet');
-        return;
-      }
+    // Update state when the previous block has been synced
+    const prevBlockSynced = await this.isBlockSynced();
+    // Do not go forward with new block if the previous one has not been mined and processed
+    if (!prevBlockSynced) {
+      this.log.debug('Previous block has not been mined and processed yet');
+      return;
+    }
 
-      if (prevBlockSynced && this.state === SequencerState.PUBLISHING_BLOCK) {
-        this.log.debug(`Block has been synced`);
-        this.state = SequencerState.IDLE;
-      }
+    if (prevBlockSynced && this.state === SequencerState.PUBLISHING_BLOCK) {
+      this.log.debug(`Block has been synced`);
+      this.state = SequencerState.IDLE;
+    }
 
-      const chainTip = await this.l2BlockSource.getBlock(-1);
-      const historicalHeader = chainTip?.header;
+    const chainTip = await this.l2BlockSource.getBlock(-1);
+    const historicalHeader = chainTip?.header;
 
-      const newBlockNumber =
-        (historicalHeader === undefined
-          ? await this.l2BlockSource.getBlockNumber()
-          : Number(historicalHeader.globalVariables.blockNumber.toBigInt())) + 1;
+    const newBlockNumber =
+      (historicalHeader === undefined
+        ? await this.l2BlockSource.getBlockNumber()
+        : Number(historicalHeader.globalVariables.blockNumber.toBigInt())) + 1;
 
-      // If we cannot find a tip archive, assume genesis.
-      const chainTipArchive =
-        chainTip == undefined ? new Fr(GENESIS_ARCHIVE_ROOT).toBuffer() : chainTip?.archive.root.toBuffer();
+    // If we cannot find a tip archive, assume genesis.
+    const chainTipArchive =
+      chainTip == undefined ? new Fr(GENESIS_ARCHIVE_ROOT).toBuffer() : chainTip?.archive.root.toBuffer();
 
-      let slot: bigint;
-      try {
-        slot = await this.mayProposeBlock(chainTipArchive, BigInt(newBlockNumber));
-      } catch (err) {
-        this.log.debug(`Cannot propose for block ${newBlockNumber}`);
-        return;
-      }
+    let slot: bigint;
+    try {
+      slot = await this.mayProposeBlock(chainTipArchive, BigInt(newBlockNumber));
+    } catch (err) {
+      this.log.debug(`Cannot propose for block ${newBlockNumber}`);
+      return;
+    }
 
-      if (!this.shouldProposeBlock(historicalHeader, {})) {
-        return;
-      }
+    if (!this.shouldProposeBlock(historicalHeader, {})) {
+      return;
+    }
 
-      this.state = SequencerState.WAITING_FOR_TXS;
+    this.state = SequencerState.WAITING_FOR_TXS;
 
-      // Get txs to build the new block.
-      const pendingTxs = this.p2pClient.getTxs('pending');
+    // Get txs to build the new block.
+    const pendingTxs = this.p2pClient.getTxs('pending');
 
-      if (!this.shouldProposeBlock(historicalHeader, { pendingTxsCount: pendingTxs.length })) {
-        return;
-      }
-      this.log.debug(`Retrieved ${pendingTxs.length} txs from P2P pool`);
+    if (!this.shouldProposeBlock(historicalHeader, { pendingTxsCount: pendingTxs.length })) {
+      return;
+    }
+    this.log.debug(`Retrieved ${pendingTxs.length} txs from P2P pool`);
 
-      const newGlobalVariables = await this.globalsBuilder.buildGlobalVariables(
-        new Fr(newBlockNumber),
-        this._coinbase,
-        this._feeRecipient,
-        slot,
-      );
+    const newGlobalVariables = await this.globalsBuilder.buildGlobalVariables(
+      new Fr(newBlockNumber),
+      this._coinbase,
+      this._feeRecipient,
+      slot,
+    );
 
-      // If I created a "partial" header here that should make our job much easier.
-      const proposalHeader = new Header(
-        new AppendOnlyTreeSnapshot(Fr.fromBuffer(chainTipArchive), 1),
-        ContentCommitment.empty(),
-        StateReference.empty(),
-        newGlobalVariables,
-        Fr.ZERO,
-      );
+    // If I created a "partial" header here that should make our job much easier.
+    const proposalHeader = new Header(
+      new AppendOnlyTreeSnapshot(Fr.fromBuffer(chainTipArchive), 1),
+      ContentCommitment.empty(),
+      StateReference.empty(),
+      newGlobalVariables,
+      Fr.ZERO,
+    );
 
-      // TODO: It should be responsibility of the P2P layer to validate txs before passing them on here
-      const allValidTxs = await this.takeValidTxs(
-        pendingTxs,
-        this.txValidatorFactory.validatorForNewTxs(newGlobalVariables, this.allowedInSetup),
-      );
+    // TODO: It should be responsibility of the P2P layer to validate txs before passing them on here
+    const allValidTxs = await this.takeValidTxs(
+      pendingTxs,
+      this.txValidatorFactory.validatorForNewTxs(newGlobalVariables, this.allowedInSetup),
+    );
 
-      // TODO: We are taking the size of the tx from private-land, but we should be doing this after running
-      // public functions. Only reason why we do it here now is because the public processor and orchestrator
-      // are set up such that they require knowing the total number of txs in advance. Still, main reason for
-      // exceeding max block size in bytes is contract class registration, which happens in private-land. This
-      // may break if we start emitting lots of log data from public-land.
-      const validTxs = this.takeTxsWithinMaxSize(allValidTxs);
+    // TODO: We are taking the size of the tx from private-land, but we should be doing this after running
+    // public functions. Only reason why we do it here now is because the public processor and orchestrator
+    // are set up such that they require knowing the total number of txs in advance. Still, main reason for
+    // exceeding max block size in bytes is contract class registration, which happens in private-land. This
+    // may break if we start emitting lots of log data from public-land.
+    const validTxs = this.takeTxsWithinMaxSize(allValidTxs);
 
-      // Bail if we don't have enough valid txs
-      if (!this.shouldProposeBlock(historicalHeader, { validTxsCount: validTxs.length })) {
-        return;
-      }
+    // Bail if we don't have enough valid txs
+    if (!this.shouldProposeBlock(historicalHeader, { validTxsCount: validTxs.length })) {
+      return;
+    }
 
+    try {
+      // @note It is very important that the following function FAILS and does not just return early
+      // if it has made any state changes. If not, we won't roll back the state, and you will
+      // be in for a world of pain.
       await this.buildBlockAndPublish(validTxs, proposalHeader, historicalHeader);
     } catch (err) {
       if (BlockProofError.isBlockProofError(err)) {
@@ -319,23 +326,21 @@ export class Sequencer {
       return true;
     }
 
-    if (IS_DEV_NET) {
-      // Compute time elapsed since the previous block
-      const lastBlockTime = historicalHeader?.globalVariables.timestamp.toNumber() || 0;
-      const currentTime = Math.floor(Date.now() / 1000);
-      const elapsedSinceLastBlock = currentTime - lastBlockTime;
+    // Compute time elapsed since the previous block
+    const lastBlockTime = historicalHeader?.globalVariables.timestamp.toNumber() || 0;
+    const currentTime = Math.floor(Date.now() / 1000);
+    const elapsedSinceLastBlock = currentTime - lastBlockTime;
+    this.log.debug(
+      `Last block mined at ${lastBlockTime} current time is ${currentTime} (elapsed ${elapsedSinceLastBlock})`,
+    );
+
+    // If we haven't hit the maxSecondsBetweenBlocks, we need to have at least minTxsPerBlock txs.
+    // Do not go forward with new block if not enough time has passed since last block
+    if (this.minSecondsBetweenBlocks > 0 && elapsedSinceLastBlock < this.minSecondsBetweenBlocks) {
       this.log.debug(
-        `Last block mined at ${lastBlockTime} current time is ${currentTime} (elapsed ${elapsedSinceLastBlock})`,
+        `Not creating block because not enough time ${this.minSecondsBetweenBlocks} has passed since last block`,
       );
-
-      // If we haven't hit the maxSecondsBetweenBlocks, we need to have at least minTxsPerBLock txs.
-      // Do not go forward with new block if not enough time has passed since last block
-      if (this.minSecondsBetweenBlocks > 0 && elapsedSinceLastBlock < this.minSecondsBetweenBlocks) {
-        this.log.debug(
-          `Not creating block because not enough time ${this.minSecondsBetweenBlocks} has passed since last block`,
-        );
-        return false;
-      }
+      return false;
     }
 
     const skipCheck = this.skipMinTxsPerBlockCheck(historicalHeader);
@@ -381,6 +386,16 @@
     return true;
   }
 
+  /**
+   * @notice Build and propose a block to the chain
+   *
+   * @dev MUST throw instead of exiting early to ensure that world-state
+   * is being rolled back if the block is dropped.
+   *
+   * @param validTxs - The valid transactions to construct the block from
+   * @param proposalHeader - The partial header constructed for the proposal
+   * @param historicalHeader - The historical header of the parent
+   */
   @trackSpan('Sequencer.buildBlockAndPublish', (_validTxs, proposalHeader, _historicalHeader) => ({
     [Attributes.BLOCK_NUMBER]: proposalHeader.globalVariables.blockNumber.toNumber(),
   }))
@@ -389,9 +404,7 @@
     proposalHeader: Header,
     historicalHeader: Header | undefined,
   ): Promise<void> {
-    if (!(await this.publisher.validateBlockForSubmission(proposalHeader))) {
-      return;
-    }
+    await this.publisher.validateBlockForSubmission(proposalHeader);
 
     const newGlobalVariables = proposalHeader.globalVariables;
 
@@ -426,15 +439,16 @@
       await this.p2pClient.deleteTxs(Tx.getHashes(failedTxData));
     }
 
+    await this.publisher.validateBlockForSubmission(proposalHeader);
+
     if (
-      !(await this.publisher.validateBlockForSubmission(proposalHeader)) ||
       !this.shouldProposeBlock(historicalHeader, {
         validTxsCount: validTxs.length,
         processedTxsCount: processedTxs.length,
       })
     ) {
       blockBuilder.cancelBlock();
-      return;
+      throw new Error('Should not propose the block');
     }
 
     // All real transactions have been added, set the block as full and complete the proving.
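Editor's condensed sketch of the control-flow rule stated in the doc comments above ("MUST throw instead of exiting early"): one catch site owns the cleanup, so every abort after state changes must be a throw, never a `return`. The function names here are stand-ins for the real Sequencer/BlockBuilder methods, and the rollback hook is an illustrative name:

```ts
function startNewBlock(): void {} // mutates pending world state
function cancelBlock(): void {}
function rollbackPendingState(): void {} // illustrative name, not from the diff
async function revalidate(): Promise<boolean> {
  return false; // imagine the slot changed under us
}

async function buildBlockAndPublish(): Promise<void> {
  startNewBlock();
  if (!(await revalidate())) {
    cancelBlock();
    throw new Error('Should not propose the block'); // a `return` would skip the rollback below
  }
  // ... publish ...
}

async function work(): Promise<void> {
  try {
    await buildBlockAndPublish();
  } catch {
    rollbackPendingState(); // the single place state is unwound
  }
}
```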
@@ -451,9 +465,7 @@ export class Sequencer {
     // Block is ready, now finalise
     const { block } = await blockBuilder.finaliseBlock();
 
-    if (!(await this.publisher.validateBlockForSubmission(block.header))) {
-      return;
-    }
+    await this.publisher.validateBlockForSubmission(block.header);
 
     const workDuration = workTimer.ms();
     this.log.verbose(
@@ -496,24 +508,6 @@ export class Sequencer {
   }
 
   protected async collectAttestations(block: L2Block): Promise<Signature[] | undefined> {
-    // @todo This should collect attestations properly and fix the ordering of them to make sense
-    // the current implementation is a PLACEHOLDER and should be nuked from orbit.
-    // It is assuming that there will only be ONE (1) validator, so only one attestation
-    // is needed.
-    // @note This is quite a sin, but I'm committing war crimes in this code already.
-    //  _ ._ _ , _ ._
-    //  (_ ' ( ` )_ .__)
-    //  ( ( ( ) `) ) _)
-    //  (__ (_ (_ . _) _) ,__)
-    //      `~~`\ ' . /`~~`
-    //           ;   ;
-    //           /   \
-    //  _____________/_ __ \_____________
-
-    if (IS_DEV_NET || !this.validatorClient) {
-      return undefined;
-    }
-
     // TODO(https://github.com/AztecProtocol/aztec-packages/issues/7962): inefficient to have a round trip in here - this should be cached
     const committee = await this.publisher.getCurrentEpochCommittee();
@@ -521,6 +515,12 @@ export class Sequencer {
       return undefined;
     }
 
+    if (!this.validatorClient) {
+      const msg = 'Missing validator client: Cannot collect attestations';
+      this.log.error(msg);
+      throw new Error(msg);
+    }
+
     const numberOfRequiredAttestations = Math.floor((committee.length * 2) / 3) + 1;
 
     // TODO(https://github.com/AztecProtocol/aztec-packages/issues/7974): we do not have transaction[] lists in the block for now
@@ -553,7 +553,7 @@ export class Sequencer {
     if (publishedL2Block) {
       this.lastPublishedBlock = block.number;
     } else {
-      throw new Error(`Failed to publish block`);
+      throw new Error(`Failed to publish block ${block.number}`);
     }
   }
diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts
index 1425f7d464c..7e44d06b9c2 100644
--- a/yarn-project/validator-client/src/validator.ts
+++ b/yarn-project/validator-client/src/validator.ts
@@ -86,9 +86,11 @@ export class ValidatorClient implements Validator {
 
     this.log.info(`Waiting for ${numberOfRequiredAttestations} attestations for slot: ${slot}`);
 
-    let attestations: BlockAttestation[] = [await this.attestToProposal(proposal)];
+    const myAttestation = await this.attestToProposal(proposal);
+
+    let attestations: BlockAttestation[] = [];
     while (attestations.length < numberOfRequiredAttestations) {
-      attestations = await this.p2pClient.getAttestationsForSlot(slot);
+      attestations = [myAttestation, ...(await this.p2pClient.getAttestationsForSlot(slot))];
 
       if (attestations.length < numberOfRequiredAttestations) {
         this.log.verbose(`Waiting ${this.attestationPoolingIntervalMs}ms for more attestations...`);
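Editor's sketch tying the last two hunks together: the committee threshold is a strict two-thirds majority plus one, and the collector now produces its own attestation once and prepends it to whatever the p2p pool returns on each poll. Attestations are plain strings here for brevity:

```ts
const committeeSize = 48; // example committee
const required = Math.floor((committeeSize * 2) / 3) + 1; // 33 of 48

async function collectAttestations(
  own: string,
  poolForSlot: () => Promise<string[]>,
  pollMs: number,
): Promise<string[]> {
  let attestations: string[] = [];
  while (attestations.length < required) {
    attestations = [own, ...(await poolForSlot())];
    if (attestations.length < required) {
      await new Promise(resolve => setTimeout(resolve, pollMs)); // wait, then poll again
    }
  }
  return attestations;
}
```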